diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index 0a5091614037ff..0e0df7af27b3fa 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -1,3 +1,15 @@
+resources:
+ repositories:
+ - repository: openvino_contrib
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/openvino_contrib
+
+ - repository: testdata
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/testdata
+
jobs:
- job: Lin
# About 150% of total time
@@ -13,6 +25,8 @@ jobs:
WORKERS_NUMBER: 8
BUILD_TYPE: Release
REPO_DIR: $(Build.Repository.LocalPath)
+ OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib
+ MODELS_PATH: $(REPO_DIR)/../testdata
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
@@ -48,6 +62,17 @@ jobs:
submodules: recursive
path: openvino
+ - checkout: openvino_contrib
+ clean: true
+ lfs: false
+ submodules: recursive
+ path: openvino_contrib
+
+ - checkout: testdata
+ clean: true
+ lfs: true
+ path: testdata
+
- script: |
sudo apt --assume-yes install libusb-1.0-0-dev
python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/requirements.txt
@@ -65,7 +90,7 @@ jobs:
- task: CMake@1
inputs:
# CMake must get Python 3.x version by default
- cmakeArgs: -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON $(REPO_DIR)
+ cmakeArgs: -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
- script: ninja
@@ -116,28 +141,23 @@ jobs:
continueOnError: false
- script: |
- git clone https://github.com/openvinotoolkit/testdata.git
- workingDirectory: $(WORK_DIR)
- displayName: 'Clone testdata'
-
- - script: |
- export DATA_PATH=$(WORK_DIR)/testdata
- export MODELS_PATH=$(WORK_DIR)/testdata
+ export DATA_PATH=$(MODELS_PATH)
+ export MODELS_PATH=$(MODELS_PATH)
python3 $(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
workingDirectory: $(WORK_DIR)
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
- export DATA_PATH=$(WORK_DIR)/testdata
- export MODELS_PATH=$(WORK_DIR)/testdata
+ export DATA_PATH=$(MODELS_PATH)
+ export MODELS_PATH=$(MODELS_PATH)
$(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
- script: |
- export DATA_PATH=$(WORK_DIR)/testdata
- export MODELS_PATH=$(WORK_DIR)/testdata
+ export DATA_PATH=$(MODELS_PATH)
+ export MODELS_PATH=$(MODELS_PATH)
export LD_LIBRARY_PATH=$(BIN_DIR)/lib
export PYTHONPATH=$(BIN_DIR)/lib/python_api/python3.6
env
diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml
index df8e1d2bac4907..30032ddd25a745 100644
--- a/.ci/azure/mac.yml
+++ b/.ci/azure/mac.yml
@@ -1,3 +1,15 @@
+resources:
+ repositories:
+ - repository: openvino_contrib
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/openvino_contrib
+
+ - repository: testdata
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/testdata
+
jobs:
- job: Mac
# About 200% of total time (performance of Mac hosts is unstable)
@@ -13,6 +25,8 @@ jobs:
WORKERS_NUMBER: 3
BUILD_TYPE: Release
REPO_DIR: $(Build.Repository.LocalPath)
+ OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib
+ MODELS_PATH: $(REPO_DIR)/../testdata
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
@@ -42,6 +56,17 @@ jobs:
submodules: recursive
path: openvino
+ - checkout: openvino_contrib
+ clean: true
+ lfs: false
+ submodules: recursive
+ path: openvino_contrib
+
+ - checkout: testdata
+ clean: true
+ lfs: true
+ path: testdata
+
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
@@ -63,7 +88,7 @@ jobs:
# Disable errors with Ninja
export CXXFLAGS="-Wno-error=unused-command-line-argument"
export CFLAGS="-Wno-error=unused-command-line-argument"
- cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON $(REPO_DIR)
+ cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
@@ -111,21 +136,16 @@ jobs:
continueOnError: false
- script: |
- git clone https://github.com/openvinotoolkit/testdata.git
- workingDirectory: $(WORK_DIR)
- displayName: 'Clone testdata'
-
- - script: |
- export DATA_PATH=$(WORK_DIR)/testdata
- export MODELS_PATH=$(WORK_DIR)/testdata
+ export DATA_PATH=$(MODELS_PATH)
+ export MODELS_PATH=$(MODELS_PATH)
python3 $(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric* -- --gtest_print_time=1
workingDirectory: $(WORK_DIR)
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
- export DATA_PATH=$(WORK_DIR)/testdata
- export MODELS_PATH=$(WORK_DIR)/testdata
+ export DATA_PATH=$(MODELS_PATH)
+ export MODELS_PATH=$(MODELS_PATH)
$(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml
index 30257601a805d8..efd5afba0a0a9d 100644
--- a/.ci/azure/windows.yml
+++ b/.ci/azure/windows.yml
@@ -1,3 +1,15 @@
+resources:
+ repositories:
+ - repository: openvino_contrib
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/openvino_contrib
+
+ - repository: testdata
+ type: github
+ endpoint: openvinotoolkit
+ name: openvinotoolkit/testdata
+
jobs:
- job: Win
# About 150% of total time
@@ -13,6 +25,8 @@ jobs:
WORKERS_NUMBER: 8
BUILD_TYPE: Release
REPO_DIR: $(Build.Repository.LocalPath)
+ OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)\..\openvino_contrib
+ MODELS_PATH: $(REPO_DIR)\..\testdata
WORK_DIR: $(Pipeline.Workspace)\_w
BUILD_DIR: D:\build
BIN_DIR: $(REPO_DIR)\bin\intel64
@@ -45,6 +59,17 @@ jobs:
submodules: recursive
path: openvino
+ - checkout: openvino_contrib
+ clean: true
+ lfs: false
+ submodules: recursive
+ path: openvino_contrib
+
+ - checkout: testdata
+ clean: true
+ lfs: true
+ path: testdata
+
- script: |
certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
powershell -command "Expand-Archive -Force ninja-win.zip"
@@ -65,7 +90,7 @@ jobs:
- script: |
set PATH=$(WORK_DIR)\ninja-win;%PATH%
- call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
+ call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
@@ -141,25 +166,20 @@ jobs:
displayName: 'MklDnnBehaviorTests'
continueOnError: false
- - script: |
- git clone https://github.com/openvinotoolkit/testdata.git
- workingDirectory: $(BUILD_DIR)
- displayName: 'Clone testdata'
-
# Add for gtest-parallel, it hangs now (CVS-33386)
#python $(BUILD_DIR)\gtest-parallel\gtest-parallel $(BIN_DIR)\MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
- script: |
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH%
- set DATA_PATH=$(BUILD_DIR)\testdata
- set MODELS_PATH=$(BUILD_DIR)\testdata
+ set DATA_PATH=$(MODELS_PATH)
+ set MODELS_PATH=$(MODELS_PATH)
$(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-MklDnnFunctionalTests.xml
displayName: 'MklDnnFunctionalTests'
continueOnError: false
- script: |
set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH%
- set DATA_PATH=$(BUILD_DIR)\testdata
- set MODELS_PATH=$(BUILD_DIR)\testdata
+ set DATA_PATH=$(MODELS_PATH)
+ set MODELS_PATH=$(MODELS_PATH)
$(BIN_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile
index 40f62f3cbea821..954b1634ed2a23 100644
--- a/.ci/openvino-onnx/Dockerfile
+++ b/.ci/openvino-onnx/Dockerfile
@@ -75,8 +75,8 @@ RUN make -j $(nproc) install
# Run tests via tox
WORKDIR /openvino/ngraph/python
-ENV NGRAPH_CPP_BUILD_PATH=/openvino/dist
-ENV LD_LIBRARY_PATH=/openvino/dist/lib
+ENV NGRAPH_CPP_BUILD_PATH=/openvino/dist/deployment_tools/ngraph
+ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib
ENV NGRAPH_ONNX_IMPORT_ENABLE=TRUE
ENV PYTHONPATH=/openvino/bin/intel64/Release/lib/python_api/python3.8:${PYTHONPATH}
RUN git clone --recursive https://github.com/pybind/pybind11.git -b v2.5.0 --depth 1
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5c3585a3e9625c..d6bf93044b9ce5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -114,7 +114,7 @@ function(build_ngraph)
ie_cpack_add_component(ngraph)
set(SDL_cmake_included ON)
- # set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/")
+ set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/")
add_subdirectory(ngraph)
set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE)
endfunction()
diff --git a/inference-engine/cmake/clang_format.cmake b/cmake/clang_format/clang_format.cmake
similarity index 92%
rename from inference-engine/cmake/clang_format.cmake
rename to cmake/clang_format/clang_format.cmake
index d2ff778ce5de0b..ae37ae134e3f4f 100644
--- a/inference-engine/cmake/clang_format.cmake
+++ b/cmake/clang_format/clang_format.cmake
@@ -76,10 +76,10 @@ function(add_clang_format_target TARGET_NAME)
-D "CLANG_FORMAT=${CLANG_FORMAT}"
-D "INPUT_FILE=${source_file}"
-D "OUTPUT_FILE=${output_file}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake"
DEPENDS
"${source_file}"
- "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake"
COMMENT
"[clang-format] ${source_file}"
VERBATIM)
@@ -102,10 +102,10 @@ function(add_clang_format_target TARGET_NAME)
-D "CLANG_FORMAT=${CLANG_FORMAT}"
-D "INPUT_FILES=${CLANG_FORMAT_FOR_SOURCES}"
-D "EXCLUDE_PATTERNS=${CLANG_FORMAT_EXCLUDE_PATTERNS}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake"
DEPENDS
"${CLANG_FORMAT_FOR_SOURCES}"
- "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake"
COMMENT
"[clang-format] ${TARGET_NAME}_fix"
VERBATIM)
diff --git a/inference-engine/cmake/clang_format_check.cmake b/cmake/clang_format/clang_format_check.cmake
similarity index 100%
rename from inference-engine/cmake/clang_format_check.cmake
rename to cmake/clang_format/clang_format_check.cmake
diff --git a/inference-engine/cmake/clang_format_fix.cmake b/cmake/clang_format/clang_format_fix.cmake
similarity index 100%
rename from inference-engine/cmake/clang_format_fix.cmake
rename to cmake/clang_format/clang_format_fix.cmake
diff --git a/inference-engine/cmake/cpplint.cmake b/cmake/cpplint/cpplint.cmake
similarity index 80%
rename from inference-engine/cmake/cpplint.cmake
rename to cmake/cpplint/cpplint.cmake
index 6c58d4aa532d4d..23e022d6a514ad 100644
--- a/inference-engine/cmake/cpplint.cmake
+++ b/cmake/cpplint/cpplint.cmake
@@ -68,17 +68,17 @@ function(add_cpplint_target TARGET_NAME)
"${output_file}"
COMMAND
"${CMAKE_COMMAND}"
- -D "CPPLINT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
+ -D "CPPLINT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py"
-D "INPUT_FILE=${source_file}"
-D "OUTPUT_FILE=${output_file}"
-D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}"
-D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}"
-D "CUSTOM_FILTER=${custom_filter}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake"
DEPENDS
"${source_file}"
- "${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
- "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake"
COMMENT
"[cpplint] ${source_file}"
VERBATIM)
@@ -118,10 +118,10 @@ function(add_cpplint_report_target)
"${CMAKE_COMMAND}"
-D "FINAL_OUTPUT_FILE=${cpplint_output_file}"
-D "OUTPUT_FILES=${CPPLINT_ALL_OUTPUT_FILES}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake"
DEPENDS
${CPPLINT_ALL_OUTPUT_FILES}
- "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake"
COMMENT
"[cpplint] Merge all output files"
VERBATIM)
@@ -133,19 +133,19 @@ function(add_cpplint_report_target)
COMMAND
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
- -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
+ -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
-D "INPUT_FILE=${cpplint_output_file}"
-D "OUTPUT_FILE=${cppcheck_output_file}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake"
DEPENDS
"${cpplint_output_file}"
- "${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
- "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake"
COMMENT
"[cpplint] Convert to cppcheck XML format"
VERBATIM)
- set(report_dir "${IE_MAIN_SOURCE_DIR}/report/cpplint")
+ set(report_dir "${OpenVINO_MAIN_SOURCE_DIR}/report/cpplint")
set(html_output_file "${report_dir}/index.html")
add_custom_command(
OUTPUT
@@ -153,16 +153,16 @@ function(add_cpplint_report_target)
COMMAND
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
- -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
+ -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
-D "INPUT_FILE=${cppcheck_output_file}"
-D "REPORT_DIR=${report_dir}"
- -D "SOURCE_DIR=${IE_MAIN_SOURCE_DIR}"
+ -D "SOURCE_DIR=${OpenVINO_MAIN_SOURCE_DIR}"
-D "TITLE=${CMAKE_PROJECT_NAME}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
+ -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake"
DEPENDS
"${cppcheck_output_file}"
- "${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
- "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
+ "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
+ "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake"
COMMENT
"[cpplint] Generate HTML report"
VERBATIM)
diff --git a/inference-engine/scripts/cpplint.py b/cmake/cpplint/cpplint.py
similarity index 100%
rename from inference-engine/scripts/cpplint.py
rename to cmake/cpplint/cpplint.py
diff --git a/inference-engine/cmake/cpplint_html.cmake b/cmake/cpplint/cpplint_html.cmake
similarity index 100%
rename from inference-engine/cmake/cpplint_html.cmake
rename to cmake/cpplint/cpplint_html.cmake
diff --git a/inference-engine/cmake/cpplint_merge.cmake b/cmake/cpplint/cpplint_merge.cmake
similarity index 100%
rename from inference-engine/cmake/cpplint_merge.cmake
rename to cmake/cpplint/cpplint_merge.cmake
diff --git a/inference-engine/cmake/cpplint_run.cmake b/cmake/cpplint/cpplint_run.cmake
similarity index 100%
rename from inference-engine/cmake/cpplint_run.cmake
rename to cmake/cpplint/cpplint_run.cmake
diff --git a/inference-engine/cmake/cpplint_to_cppcheck_xml.cmake b/cmake/cpplint/cpplint_to_cppcheck_xml.cmake
similarity index 100%
rename from inference-engine/cmake/cpplint_to_cppcheck_xml.cmake
rename to cmake/cpplint/cpplint_to_cppcheck_xml.cmake
diff --git a/cmake/developer_package.cmake b/cmake/developer_package.cmake
index cda7afd2940629..b9ea3e3d3b78fd 100644
--- a/cmake/developer_package.cmake
+++ b/cmake/developer_package.cmake
@@ -249,3 +249,8 @@ endfunction()
set_ci_build_number()
include(vs_version/vs_version)
+
+# Code style utils
+
+include(cpplint/cpplint)
+include(clang_format/clang_format)
diff --git a/cmake/features.cmake b/cmake/features.cmake
index 3bc43005abd32d..a99de90445a92f 100644
--- a/cmake/features.cmake
+++ b/cmake/features.cmake
@@ -46,8 +46,7 @@ ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR
ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library through INTEL_VTUNE_DIR variable." OFF)
-# Documentation build
-ie_option (ENABLE_DOCS "build docs using Doxygen" OFF)
+ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF)
ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF)
@@ -55,8 +54,15 @@ ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to
# FIXME: At this moment setting this to OFF will only build nGraph as a static library
ie_option (BUILD_SHARED_LIBS "Build as a shared library" ON)
+ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF)
+
+ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
+
+ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON)
+
ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected IntelSEAPI statistics. \
Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF
ALLOWED_VALUES ON OFF COLLECT)
+set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation check")
diff --git a/cmake/os_flags.cmake b/cmake/os_flags.cmake
index 0ed6e258298496..9803e7439fd271 100644
--- a/cmake/os_flags.cmake
+++ b/cmake/os_flags.cmake
@@ -253,10 +253,12 @@ if(WIN32)
ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,1879,2586,2651,3180,11075,15335)
endif()
- # Debug information flags
-
- set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
- set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
+ # Debug information flags. By default, CMake adds the /Zi option,
+ # but provides no way to specify CMAKE_COMPILE_PDB_NAME at the root level.
+ # To avoid issues with Ninja, we replace the default flag instead of having two of them
+ # and triggering warning D9025 about the flag override.
+ string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
+ string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
else()
# TODO: enable for C sources as well
# ie_add_compiler_flags(-Werror)
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index 501564f6af2c14..5d14fd7e16bc5c 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -6,13 +6,17 @@ if(NOT ENABLE_DOCKER)
add_subdirectory(snippets)
# Detect nGraph
- find_package(ngraph QUIET)
+ find_package(ngraph QUIET
+ PATHS "${CMAKE_BINARY_DIR}/ngraph"
+ NO_DEFAULT_PATH)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
endif()
# Detect InferenceEngine
- find_package(InferenceEngine QUIET)
+ find_package(InferenceEngine QUIET
+ PATHS "${CMAKE_BINARY_DIR}"
+ NO_DEFAULT_PATH)
if(NOT InferenceEngine_FOUND)
set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
endif()
@@ -29,6 +33,9 @@ if(NOT ENABLE_DOCKER)
foreach(target_name IN LISTS all_docs_targets)
if (TARGET ${target_name})
set_target_properties(${target_name} PROPERTIES FOLDER docs)
+ if(WIN32)
+ set_target_properties(${target_name} PROPERTIES COMPILE_PDB_NAME ${target_name})
+ endif()
endif()
endforeach()
endif()
@@ -50,13 +57,16 @@ function(build_docs)
message(FATAL_ERROR "LATEX is required to build the documentation")
endif()
- set(DOCS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}")
+ set(DOCS_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}")
set(DOXYGEN_DIR "${OpenVINO_MAIN_SOURCE_DIR}/docs/doxygen")
set(IE_SOURCE_DIR "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine")
set(PYTHON_API_IN "${IE_SOURCE_DIR}/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx")
- set(PYTHON_API_OUT "${DOCS_BINARY_DIR}/python_api/ie_api.pyx")
+ set(PYTHON_API_OUT "${DOCS_BUILD_DIR}/python_api/ie_api.pyx")
set(C_API "${IE_SOURCE_DIR}/ie_bridges/c/include")
- set(PLUGIN_API_DIR "${DOCS_BINARY_DIR}/IE_PLUGIN_DG")
+ set(PLUGIN_API_DIR "${DOCS_BUILD_DIR}/IE_PLUGIN_DG")
+ set(NGRAPH_DIR "${OpenVINO_MAIN_SOURCE_DIR}/ngraph")
+ set(NGRAPH_PY_DIR "${NGRAPH_DIR}/python/src/ngraph/")
+ set(NGRAPH_CPP_DIR "${NGRAPH_DIR}/core/include/" "${NGRAPH_DIR}/frontend/onnx_import/include")
# Preprocessing scripts
set(DOXY_MD_FILTER "${DOXYGEN_DIR}/doxy_md_filter.py")
@@ -64,10 +74,10 @@ function(build_docs)
file(GLOB_RECURSE doc_source_files
LIST_DIRECTORIES true RELATIVE ${OpenVINO_MAIN_SOURCE_DIR}
- "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md"
- "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png"
- "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif"
- "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg"
+ "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md"
+ "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png"
+ "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif"
+ "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg"
"${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.md"
"${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.png"
"${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.gif"
@@ -75,55 +85,81 @@ function(build_docs)
configure_file(${PYTHON_API_IN} ${PYTHON_API_OUT} @ONLY)
+ set(NGRAPH_CPP_CONFIG_SOURCE "${DOXYGEN_DIR}/ngraph_cpp_api.config")
+ set(NGRAPH_PY_CONFIG_SOURCE "${DOXYGEN_DIR}/ngraph_py_api.config")
set(IE_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_docs.config")
set(C_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_c_api.config")
set(PY_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_py_api.config")
set(PLUGIN_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_plugin_api.config")
- set(IE_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_docs.config")
- set(C_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_c_api.config")
- set(PY_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_py_api.config")
- set(PLUGIN_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_plugin_api.config")
+ set(NGRAPH_CPP_CONFIG_BUILD "${DOCS_BUILD_DIR}/ngraph_cpp_api.config")
+ set(NGRAPH_PY_CONFIG_BUILD "${DOCS_BUILD_DIR}/ngraph_py_api.config")
+ set(IE_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_docs.config")
+ set(C_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_c_api.config")
+ set(PY_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_py_api.config")
+ set(PLUGIN_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_plugin_api.config")
+ set(NGRAPH_CPP_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_cpp_api.xml")
+ set(NGRAPH_PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_py_api.xml")
set(IE_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_docs.xml")
set(C_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_c_api.xml")
set(PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_py_api.xml")
set(PLUGIN_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_plugin_api.xml")
- set(IE_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_docs.xml")
- set(C_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_c_api.xml")
- set(PY_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_py_api.xml")
- set(PLUGIN_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_plugin_api.xml")
+ set(NGRAPH_CPP_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_cpp_api.xml")
+ set(NGRAPH_PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_py_api.xml")
+ set(IE_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_docs.xml")
+ set(C_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_c_api.xml")
+ set(PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_py_api.xml")
+ set(PLUGIN_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_plugin_api.xml")
# Tables of contents
- configure_file(${IE_LAYOUT_SOURCE} ${IE_LAYOUT_BINARY} @ONLY)
- configure_file(${C_LAYOUT_SOURCE} ${C_LAYOUT_BINARY} @ONLY)
- configure_file(${PY_LAYOUT_SOURCE} ${PY_LAYOUT_BINARY} @ONLY)
- configure_file(${PLUGIN_LAYOUT_SOURCE} ${PLUGIN_LAYOUT_BINARY} @ONLY)
+ configure_file(${NGRAPH_CPP_LAYOUT_SOURCE} ${NGRAPH_CPP_LAYOUT_BUILD} @ONLY)
+ configure_file(${NGRAPH_PY_LAYOUT_SOURCE} ${NGRAPH_PY_LAYOUT_BUILD} @ONLY)
+ configure_file(${IE_LAYOUT_SOURCE} ${IE_LAYOUT_BUILD} @ONLY)
+ configure_file(${C_LAYOUT_SOURCE} ${C_LAYOUT_BUILD} @ONLY)
+ configure_file(${PY_LAYOUT_SOURCE} ${PY_LAYOUT_BUILD} @ONLY)
+ configure_file(${PLUGIN_LAYOUT_SOURCE} ${PLUGIN_LAYOUT_BUILD} @ONLY)
# Doxygen config files
- configure_file(${IE_CONFIG_SOURCE} ${IE_CONFIG_BINARY} @ONLY)
- configure_file(${C_CONFIG_SOURCE} ${C_CONFIG_BINARY} @ONLY)
- configure_file(${PY_CONFIG_SOURCE} ${PY_CONFIG_BINARY} @ONLY)
- configure_file(${PLUGIN_CONFIG_SOURCE} ${PLUGIN_CONFIG_BINARY} @ONLY)
+ configure_file(${NGRAPH_CPP_CONFIG_SOURCE} ${NGRAPH_CPP_CONFIG_BUILD} @ONLY)
+ configure_file(${NGRAPH_PY_CONFIG_SOURCE} ${NGRAPH_PY_CONFIG_BUILD} @ONLY)
+ configure_file(${IE_CONFIG_SOURCE} ${IE_CONFIG_BUILD} @ONLY)
+ configure_file(${C_CONFIG_SOURCE} ${C_CONFIG_BUILD} @ONLY)
+ configure_file(${PY_CONFIG_SOURCE} ${PY_CONFIG_BUILD} @ONLY)
+ configure_file(${PLUGIN_CONFIG_SOURCE} ${PLUGIN_CONFIG_BUILD} @ONLY)
# Preprocessing scripts
set(DOXY_MD_FILTER "${DOXYGEN_DIR}/doxy_md_filter.py")
set(PYX_FILTER "${DOXYGEN_DIR}/pyx_filter.py")
+ # nGraph C++ API
+
+ add_custom_target(ngraph_cpp_api
+ COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_CPP_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
+ VERBATIM)
+
+ # nGraph Python API
+
+ add_custom_target(ngraph_py_api
+ COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_PY_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
+ VERBATIM)
+
# C API
add_custom_target(c_api
- COMMAND ${DOXYGEN_EXECUTABLE} ${C_CONFIG_BINARY}
- WORKING_DIRECTORY ${DOCS_BINARY_DIR}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${C_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
COMMENT "Generating C API Reference"
VERBATIM)
# Python API
add_custom_target(py_api
- COMMAND ${DOXYGEN_EXECUTABLE} ${PY_CONFIG_BINARY}
- WORKING_DIRECTORY ${DOCS_BINARY_DIR}
+ COMMAND ${DOXYGEN_EXECUTABLE} ${PY_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
COMMENT "Generating Python API Reference"
VERBATIM)
@@ -132,14 +168,6 @@ function(build_docs)
COMMAND ${Python3_EXECUTABLE} ${PYX_FILTER} ${PYTHON_API_OUT}
COMMENT "Pre-process Python API")
- # Plugin API
-
- add_custom_target(plugin_api
- COMMAND ${DOXYGEN_EXECUTABLE} ${PLUGIN_CONFIG_BINARY}
- WORKING_DIRECTORY ${DOCS_BINARY_DIR}
- COMMENT "Generating Plugin API Reference"
- VERBATIM)
-
# Preprocess docs
add_custom_target(preprocess_docs
@@ -148,33 +176,56 @@ function(build_docs)
foreach(source_file ${doc_source_files})
list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy
- "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BINARY_DIR}/${source_file}")
+ "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BUILD_DIR}/${source_file}")
endforeach()
add_custom_command(TARGET preprocess_docs
PRE_BUILD
${commands}
- COMMAND ${Python3_EXECUTABLE} ${DOXY_MD_FILTER} ${DOCS_BINARY_DIR}
+ COMMAND ${Python3_EXECUTABLE} ${DOXY_MD_FILTER} ${DOCS_BUILD_DIR}
COMMENT "Pre-process markdown and image links")
# IE dev guide and C++ API
add_custom_target(ie_docs
- DEPENDS preprocess_docs
- COMMAND ${DOXYGEN_EXECUTABLE} ${IE_CONFIG_BINARY}
- WORKING_DIRECTORY ${DOCS_BINARY_DIR}
+ DEPENDS ngraph_cpp_api preprocess_docs
+ COMMAND ${DOXYGEN_EXECUTABLE} ${IE_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
+ VERBATIM)
+
+ # Plugin API
+
+ add_custom_target(plugin_api
+ DEPENDS ngraph_cpp_api ie_docs
+ COMMAND ${DOXYGEN_EXECUTABLE} ${PLUGIN_CONFIG_BUILD}
+ WORKING_DIRECTORY ${DOCS_BUILD_DIR}
+ COMMENT "Generating Plugin API Reference"
VERBATIM)
# Umbrella OpenVINO target
add_custom_target(openvino_docs
- DEPENDS c_api py_api ie_docs plugin_api
+ DEPENDS ngraph_cpp_api ngraph_py_api c_api py_api ie_docs plugin_api
COMMENT "Generating OpenVINO documentation"
VERBATIM)
set_target_properties(openvino_docs ie_docs c_api py_api preprocess_docs plugin_api
+ ngraph_py_api ngraph_cpp_api
PROPERTIES FOLDER docs)
+ # Add a link checker target for the generated documentation
+
+ if(EXISTS "${LINKCHECKER_PY}")
+ add_custom_target(docs_check
+ COMMAND ${Python3_EXECUTABLE} "${LINKCHECKER_PY}"
+ "${DOCS_BUILD_DIR}/html/" -f "${DOXYGEN_DIR}/linkchecker_filter.yaml"
+ --no_recursive -l "${DOCS_BUILD_DIR}"
+ COMMENT "Check links in generated documentation"
+ WORKING_DIRECTORY "${DOCS_BUILD_DIR}"
+ VERBATIM)
+ set_target_properties(docs_check PROPERTIES FOLDER docs)
+ endif()
+
find_program(browser NAMES xdg-open)
if(browser)
add_custom_target(ie_docs_open
diff --git a/docs/IE_DG/API_Changes.md b/docs/IE_DG/API_Changes.md
index cd3311b6a22fc7..41681e58d8a3ad 100644
--- a/docs/IE_DG/API_Changes.md
+++ b/docs/IE_DG/API_Changes.md
@@ -8,16 +8,22 @@ The sections below contain detailed list of changes made to the Inference Engine
**State API**
- * InferRequest::QueryState query state value of network on current infer request
- * IVariableState class instead of IMemoryState (rename)
- * IVariableState::GetState instead of IMemoryState::GetLastState (rename)
+ * InferenceEngine::InferRequest::QueryState query state value of network on current infer request
+ * InferenceEngine::IVariableState class instead of IMemoryState (rename)
+ * InferenceEngine::IVariableState::GetState instead of IMemoryState::GetLastState (rename)
+
+ **BatchedBlob** - represents an InferenceEngine::BatchedBlob containing other blobs - one per batch.
+
+ **Transformations API** - added a new header `ie_transformations.hpp`, which contains transformations for the InferenceEngine::CNNNetwork object. Such transformations can be called prior to loading the network for compilation on a particular device:
+
+ * InferenceEngine::LowLatency
### Deprecated API
**State API**
- * ExecutableNetwork::QueryState - use InferRequest::QueryState
- * IVariableState::GetLastState - use IVariableState::GetState
+ * InferenceEngine::ExecutableNetwork::QueryState - use InferenceEngine::InferRequest::QueryState
+ * InferenceEngine::IVariableState::GetLastState - use InferenceEngine::IVariableState::GetState
## 2021.1
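
For illustration, a minimal sketch of how the new Transformations API noted above might be applied before compilation; the model path and device name are placeholders, not part of this change:

```cpp
#include <inference_engine.hpp>
#include <ie_transformations.hpp>

int main() {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");  // placeholder IR path

    // Apply the LowLatency transformation to the CNNNetwork object
    // prior to loading it for compilation on a particular device.
    InferenceEngine::LowLatency(network);

    InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, "CPU");
    return 0;
}
```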
diff --git a/docs/IE_DG/Bfloat16Inference.md b/docs/IE_DG/Bfloat16Inference.md
index 8e2028ea773a9d..e814a8948c44bb 100644
--- a/docs/IE_DG/Bfloat16Inference.md
+++ b/docs/IE_DG/Bfloat16Inference.md
@@ -20,7 +20,7 @@ There are two ways to check if CPU device can support bfloat16 computations for
1. Query the instruction set via system `lscpu | grep avx512_bf16` or `cat /proc/cpuinfo | grep avx512_bf16`.
2. Use [Query API](InferenceEngine_QueryAPI.md) with `METRIC_KEY(OPTIMIZATION_CAPABILITIES)`, which should return `BF16` in the list of CPU optimization options:
-@snippet openvino/docs/snippets/Bfloat16Inference0.cpp part0
+@snippet snippets/Bfloat16Inference0.cpp part0
Current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of the following layers in BF16 computation mode:
* Convolution
@@ -46,11 +46,11 @@ Bfloat16 data usage provides the following benefits that increase performance:
For default optimization on CPU, source model converts from FP32 or FP16 to BF16 and executes internally on platforms with native BF16 support. In that case, `KEY_ENFORCE_BF16` is set to `YES`.
The code below demonstrates how to check if the key is set:
-@snippet openvino/docs/snippets/Bfloat16Inference1.cpp part1
+@snippet snippets/Bfloat16Inference1.cpp part1
To disable BF16 internal transformations, set the `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers AS IS without modifications with precisions that were set on each layer edge.
-@snippet openvino/docs/snippets/Bfloat16Inference2.cpp part2
+@snippet snippets/Bfloat16Inference2.cpp part2
An exception with message `Platform doesn't support BF16 format` is formed in case of setting `KEY_ENFORCE_BF16` to `YES` on CPU without native BF16 support.
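
A condensed sketch of the checks described in this file, assuming the `CONFIG_KEY`/`METRIC_KEY` macros from `ie_plugin_config.hpp`; the model path is a placeholder:

```cpp
#include <algorithm>
#include <string>
#include <vector>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;

    // Ask the CPU plugin for its optimization capabilities; "BF16" means native bfloat16 support.
    auto caps = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES))
                    .as<std::vector<std::string>>();
    const bool has_bf16 = std::find(caps.begin(), caps.end(), "BF16") != caps.end();

    // Keep or disable the BF16 internal transformations via KEY_ENFORCE_BF16.
    auto network = core.ReadNetwork("model.xml");  // placeholder IR path
    auto exec = core.LoadNetwork(network, "CPU",
        {{ CONFIG_KEY(ENFORCE_BF16), has_bf16 ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) }});
    return 0;
}
```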
diff --git a/docs/IE_DG/DynamicBatching.md b/docs/IE_DG/DynamicBatching.md
index 3f4df0ce843300..a05c218b6193e3 100644
--- a/docs/IE_DG/DynamicBatching.md
+++ b/docs/IE_DG/DynamicBatching.md
@@ -18,7 +18,7 @@ The batch size that was set in passed CNNNetwork object will be use
Here is a code example:
-@snippet openvino/docs/snippets/DynamicBatching.cpp part0
+@snippet snippets/DynamicBatching.cpp part0
## Limitations
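
A minimal sketch of the dynamic batching flow referenced by the snippet above; the model path, device, and batch values are examples only:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");   // placeholder IR path
    network.setBatchSize(8);                        // upper bound for the dynamic batch

    // Enable dynamic batching for the device and create a request.
    auto exec = core.LoadNetwork(network, "CPU",
        {{ CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES) }});
    auto request = exec.CreateInferRequest();

    // Process only part of the batch for this particular inference.
    request.SetBatch(4);
    request.Infer();
    return 0;
}
```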
diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
index 383ce0ec9f9cca..42eda8f83c0fa4 100644
--- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
+++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md
@@ -20,7 +20,7 @@ To add your custom nGraph operation, create a new class that extends `ngraph::Op
Based on that, declaration of an operation class can look as follows:
-@snippet op.hpp op:header
+@snippet template_extension/op.hpp op:header
### Class Fields
@@ -33,37 +33,37 @@ The provided implementation has several fields:
nGraph operation contains two constructors: a default constructor, which allows creating an operation without attributes, and a constructor that creates and validates an operation with specified inputs and attributes.
-@snippet op.cpp op:ctor
+@snippet template_extension/op.cpp op:ctor
### `validate_and_infer_types()`
`ngraph::Node::validate_and_infer_types` method validates operation attributes and calculates output shapes using attributes of operation.
-@snippet op.cpp op:validate
+@snippet template_extension/op.cpp op:validate
### `clone_with_new_inputs()`
`ngraph::Node::clone_with_new_inputs` method creates a copy of nGraph operation with new inputs.
-@snippet op.cpp op:copy
+@snippet template_extension/op.cpp op:copy
### `visit_attributes()`
`ngraph::Node::visit_attributes` method allows to visit all operation attributes.
-@snippet op.cpp op:visit_attributes
+@snippet template_extension/op.cpp op:visit_attributes
### `evaluate()`
`ngraph::Node::evaluate` method allows to apply constant folding to an operation.
-@snippet op.cpp op:evaluate
+@snippet template_extension/op.cpp op:evaluate
## Register Custom Operations in Extension Class
To add custom operations to the [Extension](Extension.md) class, create an operation set with custom operations and implement the `InferenceEngine::IExtension::getOpSets` method:
-@snippet extension.cpp extension:getOpSets
+@snippet template_extension/extension.cpp extension:getOpSets
This method returns a map of opsets that exist in the extension library.
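
As a rough, hypothetical illustration of the class shape described above (the operation name and body are made up, not the actual template_extension sources):

```cpp
#include <ngraph/ngraph.hpp>

// Hypothetical operation; the real template_extension code differs.
class MyOperation : public ngraph::op::Op {
public:
    static constexpr ngraph::NodeTypeInfo type_info{"MyOperation", 0};
    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }

    MyOperation() = default;  // default constructor: operation without attributes
    explicit MyOperation(const ngraph::Output<ngraph::Node>& arg) : Op({arg}) {
        constructor_validate_and_infer_types();
    }

    // Validate attributes and compute the output type/shape from the inputs.
    void validate_and_infer_types() override {
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }

    // Create a copy of the operation with new inputs.
    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
        return std::make_shared<MyOperation>(new_args.at(0));
    }

    // Visit operation attributes; this example operation has none.
    bool visit_attributes(ngraph::AttributeVisitor&) override { return true; }
};

constexpr ngraph::NodeTypeInfo MyOperation::type_info;
```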
diff --git a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
index d04e47858d1ea3..205ae64a6e1825 100644
--- a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md
@@ -7,7 +7,7 @@ The primary vehicle for the performance of the CPU codepath in the Inference Eng
All custom kernels for the CPU plugin should be inherited from the InferenceEngine::ILayerExecImpl interface.
Based on that, declaration of a kernel implementation class can look as follows:
-@snippet cpu_kernel.hpp cpu_implementation:header
+@snippet template_extension/cpu_kernel.hpp cpu_implementation:header
### Class Fields
@@ -22,25 +22,25 @@ The provided implementation has several fields:
An implementation constructor checks parameters of nGraph operation, stores needed attributes, and stores an error message in the case of an error.
-@snippet cpu_kernel.cpp cpu_implementation:ctor
+@snippet template_extension/cpu_kernel.cpp cpu_implementation:ctor
### `getSupportedConfigurations`
InferenceEngine::ILayerExecImpl::getSupportedConfigurations method returns all supported configuration formats (input/output tensor layouts) for your implementation. To specify formats of data, use InferenceEngine::TensorDesc. Refer to the [Memory Primitives](../Memory_primitives.md) section for instructions on how to do it.
-@snippet cpu_kernel.cpp cpu_implementation:getSupportedConfigurations
+@snippet template_extension/cpu_kernel.cpp cpu_implementation:getSupportedConfigurations
### `init`
InferenceEngine::ILayerExecImpl::init method gets a runtime-selected configuration from a vector that is populated from the `getSupportedConfigurations` method and checks the parameters:
-@snippet cpu_kernel.cpp cpu_implementation:init
+@snippet template_extension/cpu_kernel.cpp cpu_implementation:init
### `execute`
InferenceEngine::ILayerExecImpl::execute method accepts and processes the actual tenors as input/output blobs:
-@snippet cpu_kernel.cpp cpu_implementation:execute
+@snippet template_extension/cpu_kernel.cpp cpu_implementation:execute
## Register Implementation in `Extension` Class
@@ -52,18 +52,18 @@ To register custom kernel implementation in the [Extension](Extension.md) class,
InferenceEngine::IExtension::getImplTypes returns a vector of implementation types for an operation.
-@snippet extension.cpp extension:getImplTypes
+@snippet template_extension/extension.cpp extension:getImplTypes
### getImplementation
InferenceEngine::IExtension::getImplementation returns the kernel implementation with a specified type for an operation.
-@snippet extension.cpp extension:getImplementation
+@snippet template_extension/extension.cpp extension:getImplementation
## Load Extension with Executable Kernels to Plugin
Use the `AddExtension` method of the general plugin interface to load your primitives:
-@snippet openvino/docs/snippets/CPU_Kernel.cpp part0
+@snippet snippets/CPU_Kernel.cpp part0
diff --git a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
index 47d80ba8ca921a..0999679ae0caa2 100644
--- a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
+++ b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md
@@ -38,12 +38,12 @@ If operator is no longer needed, it can be unregistered by calling `unregister_o
The same principles apply when registering custom ONNX operator based on custom nGraph operations.
This example shows how to register custom ONNX operator based on `Operation` presented in [this tutorial](AddingNGraphOps.md), which is used in [TemplateExtension](Extension.md).
-@snippet extension.cpp extension:ctor
+@snippet template_extension/extension.cpp extension:ctor
Here, the `register_operator` function is called in Extension's constructor, which makes sure that it is called before InferenceEngine::Core::ReadNetwork (since InferenceEngine::Core::AddExtension must be called before a model with custom operator is read).
The example below demonstrates how to unregister operator from Extension's destructor:
-@snippet extension.cpp extension:dtor
+@snippet template_extension/extension.cpp extension:dtor
Note that it is mandatory to unregister custom ONNX operator if it is defined in dynamic shared library.
## Requirements for building with CMake
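
A loose sketch of the register/unregister pattern discussed above; the operator name, domain, and lambda body are illustrative and assume the nGraph ONNX importer headers are available:

```cpp
#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <onnx_import/onnx_utils.hpp>

void register_custom_relu() {
    // Register a custom ONNX operator (name/version/domain are illustrative).
    ngraph::onnx_import::register_operator(
        "CustomRelu", 1, "com.example",
        [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
            ngraph::OutputVector inputs{node.get_ng_inputs()};
            auto relu = std::make_shared<ngraph::opset5::Relu>(inputs.at(0));
            return relu->outputs();
        });
}

void unregister_custom_relu() {
    // Unregister it when the extension is unloaded.
    ngraph::onnx_import::unregister_operator("CustomRelu", 1, "com.example");
}
```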
diff --git a/docs/IE_DG/Extensibility_DG/Extension.md b/docs/IE_DG/Extensibility_DG/Extension.md
index 3bc96f90376ce8..6df3a1424ec0e4 100644
--- a/docs/IE_DG/Extensibility_DG/Extension.md
+++ b/docs/IE_DG/Extensibility_DG/Extension.md
@@ -5,11 +5,11 @@ All extension libraries should be inherited from this interface.
Based on that, declaration of an extension class can look as follows:
-@snippet extension.hpp extension:header
+@snippet template_extension/extension.hpp extension:header
The extension library should contain and export the method InferenceEngine::CreateExtension, which creates an `Extension` class:
-@snippet extension.cpp extension:CreateExtension
+@snippet template_extension/extension.cpp extension:CreateExtension
Also, an `Extension` object should implement the following methods:
@@ -17,7 +17,7 @@ Also, an `Extension` object should implement the following methods:
* InferenceEngine::IExtension::GetVersion returns information about version of the library
-@snippet extension.cpp extension:GetVersion
+@snippet template_extension/extension.cpp extension:GetVersion
Implement the InferenceEngine::IExtension::getOpSets method if the extension contains custom layers.
Read the [guide about custom operations](AddingNGraphOps.md) for more information.
diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
index 7a0d794457c890..a918076e756112 100644
--- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
+++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md
@@ -7,7 +7,7 @@ There are two options of using custom layer configuration file:
* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder
* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom layers to the plugin:
-@snippet openvino/docs/snippets/GPU_Kernel.cpp part0
+@snippet snippets/GPU_Kernel.cpp part0
All Inference Engine samples, except trivial `hello_classification`,
feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom layers for the classification sample, run the command below:
@@ -227,7 +227,7 @@ floating-point, and integer kernel parameters. To get the dump, add the
following line to your code that configures the GPU plugin to output the
custom kernels:
-@snippet openvino/docs/snippets/GPU_Kernel.cpp part1
+@snippet snippets/GPU_Kernel.cpp part1
When the Inference Engine compiles the kernels for the specific network,
it also outputs the resulting code for the custom kernels. In the
diff --git a/docs/IE_DG/GPU_Kernels_Tuning.md b/docs/IE_DG/GPU_Kernels_Tuning.md
index 47ed958bf24063..4bbe315e42c2f3 100644
--- a/docs/IE_DG/GPU_Kernels_Tuning.md
+++ b/docs/IE_DG/GPU_Kernels_Tuning.md
@@ -30,7 +30,7 @@ File with tuned data is the result of this step.
The example below shows how to set and use the key files:
-@snippet openvino/docs/snippets/GPU_Kernels_Tuning.cpp part0
+@snippet snippets/GPU_Kernels_Tuning.cpp part0
---
diff --git a/docs/IE_DG/Glossary.md b/docs/IE_DG/Glossary.md
index 047d4484a6682b..5a05757977a6eb 100644
--- a/docs/IE_DG/Glossary.md
+++ b/docs/IE_DG/Glossary.md
@@ -72,7 +72,7 @@ Glossary of terms used in the Inference Engine
| InferenceEngineProfileInfo | Represents basic inference profiling information per layer |
| Inference Engine | A C++ library with a set of classes that you can use in your application to infer input data (images) and get the result |
| Inference Engine API | The basic default API for all supported devices, which allows you to load a model from Intermediate Representation, set input and output formats and execute the model on various devices |
-| Inference Engine Core | Inference Engine Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, MYRIAD, GNA, etc. |
+| Inference Engine Core | Inference Engine Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, MYRIAD, GNA, etc. |
| Layer catalog or Operations specification | A list of supported layers or operations and its parameters. Sets of supported layers are different for different plugins, please check the documentation on plugins to verify if the Inference Engine supports certain layer on the dedicated hardware |
| Layout | Image data layout refers to the representation of images batch. Layout shows a sequence of 4D or 5D tensor data in memory. A typical NCHW format represents pixel in horizontal direction, rows by vertical dimension, planes by channel and images into batch |
| OutputsDataMap | Structure which contains information about output precisions and layouts |
diff --git a/docs/IE_DG/InferenceEngine_QueryAPI.md b/docs/IE_DG/InferenceEngine_QueryAPI.md
index 9ee5beaa479494..788c2d580324a9 100644
--- a/docs/IE_DG/InferenceEngine_QueryAPI.md
+++ b/docs/IE_DG/InferenceEngine_QueryAPI.md
@@ -23,7 +23,7 @@ The `InferenceEngine::ExecutableNetwork` class is also extended to support the Q
### GetAvailableDevices
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI0.cpp part0
+@snippet snippets/InferenceEngine_QueryAPI0.cpp part0
The function returns list of available devices, for example:
```
@@ -46,7 +46,7 @@ Each device name can then be passed to:
The code below demonstrates how to understand whether `HETERO` device dumps `.dot` files with split graphs during the split stage:
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI1.cpp part1
+@snippet snippets/InferenceEngine_QueryAPI1.cpp part1
For documentation about common configuration keys, refer to `ie_plugin_config.hpp`. Device specific configuration keys can be found in corresponding plugin folders.
@@ -54,7 +54,7 @@ For documentation about common configuration keys, refer to `ie_plugin_config.hp
* To extract device properties such as available device, device name, supported configuration keys, and others, use the `InferenceEngine::Core::GetMetric` method:
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI2.cpp part2
+@snippet snippets/InferenceEngine_QueryAPI2.cpp part2
A returned value looks as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`.
@@ -66,17 +66,17 @@ A returned value looks as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`.
The method is used to get executable network specific metric such as `METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)`:
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI3.cpp part3
+@snippet snippets/InferenceEngine_QueryAPI3.cpp part3
Or the current temperature of `MYRIAD` device:
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI4.cpp part4
+@snippet snippets/InferenceEngine_QueryAPI4.cpp part4
### GetConfig()
The method is used to get information about configuration values the executable network has been created with:
-@snippet openvino/docs/snippets/InferenceEngine_QueryAPI5.cpp part5
+@snippet snippets/InferenceEngine_QueryAPI5.cpp part5
### SetConfig()
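
A compact sketch combining the Query API calls covered in this file (GetAvailableDevices, GetMetric, GetConfig, SetConfig); the device name, keys, and model path are common examples rather than part of this change:

```cpp
#include <string>
#include <vector>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;

    // Enumerate the devices visible to the Inference Engine.
    std::vector<std::string> devices = core.GetAvailableDevices();

    // Query a device metric, e.g. the full device name of the CPU plugin.
    std::string cpu_name = core.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();

    // Set a configuration value for a device, then read it back from the executable network.
    core.SetConfig({{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}, "CPU");
    auto exec = core.LoadNetwork(core.ReadNetwork("model.xml"), "CPU");  // placeholder IR path

    auto nireq = exec.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
    auto perf_count = exec.GetConfig(CONFIG_KEY(PERF_COUNT)).as<std::string>();
    return 0;
}
```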
diff --git a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md
index e9909848f6533a..108c7cd06f3404 100644
--- a/docs/IE_DG/Integrate_with_customer_application_new_API.md
+++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md
@@ -29,20 +29,20 @@ Integration process includes the following steps:
1) **Create Inference Engine Core** to manage available devices and read network objects:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part0
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part0
2) **Read a model IR** created by the Model Optimizer (.xml is supported format):
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part1
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part1
**Or read the model from ONNX format** (.onnx and .prototxt are supported formats). You can find more information about the ONNX format support in the document [ONNX format support in the OpenVINO™](./ONNX_Support.md).
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part2
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part2
3) **Configure input and output**. Request input and output information using `InferenceEngine::CNNNetwork::getInputsInfo()`, and `InferenceEngine::CNNNetwork::getOutputsInfo()`
methods:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part3
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part3
Optionally, set the number format (precision) and memory layout for inputs and outputs. Refer to the
[Supported configurations](supported_plugins/Supported_Devices.md) chapter to choose the relevant configuration.
@@ -67,7 +67,7 @@ methods:
You can use the following code snippet to configure input and output:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part4
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part4
> **NOTE**: NV12 input color format pre-processing differs from other color conversions. In case of NV12,
> Inference Engine expects two separate image planes (Y and UV). You must use a specific
@@ -91,31 +91,31 @@ methods:
4) **Load the model** to the device using `InferenceEngine::Core::LoadNetwork()`:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part5
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part5
It creates an executable network from a network object. The executable network is associated with a single hardware device.
It is possible to create as many networks as needed and to use them simultaneously (up to the limitation of the hardware resources).
The third parameter is a configuration for the plugin. It is a map of pairs: (parameter name, parameter value). See the
[Supported devices](supported_plugins/Supported_Devices.md) page for more details about supported configuration parameters.
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part6
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part6
5) **Create an infer request**:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part7
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part7
6) **Prepare input**. You can use one of the following options to prepare input:
* **Optimal way for a single network.** Get blobs allocated by an infer request using `InferenceEngine::InferRequest::GetBlob()`
and feed an image and the input data to the blobs. In this case, input data must be aligned (resized manually) with a
given blob size and have a correct color format.
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part8
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part8
* **Optimal way for a cascade of networks (output of one network is input for another).** Get output blob from the first
request using `InferenceEngine::InferRequest::GetBlob()` and set it as input for the second request using
`InferenceEngine::InferRequest::SetBlob()`.
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part9
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part9
* **Optimal way to handle ROI (a ROI object located inside of input of one network is input for another).** It is
possible to re-use shared input by several networks. You do not need to allocate separate input blob for a network if
@@ -126,7 +126,7 @@ methods:
ROI without allocation of new memory using `InferenceEngine::make_shared_blob()` with passing of
`InferenceEngine::Blob::Ptr` and `InferenceEngine::ROI` as parameters.
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part10
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part10
Make sure that shared input is kept valid during execution of each network. Otherwise, ROI blob may be corrupted if the
original input blob (that ROI is cropped from) has already been rewritten.
@@ -134,7 +134,7 @@ methods:
* Allocate input blobs of the appropriate types and sizes, feed an image and the input data to the blobs, and call
`InferenceEngine::InferRequest::SetBlob()` to set these blobs for an infer request:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part11
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part11
A blob can be filled before and after `SetBlob()`.
@@ -157,11 +157,11 @@ methods:
7) **Do inference** by calling the `InferenceEngine::InferRequest::StartAsync` and `InferenceEngine::InferRequest::Wait`
methods for asynchronous request:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part12
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part12
or by calling the `InferenceEngine::InferRequest::Infer` method for synchronous request:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part13
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part13
`StartAsync` returns immediately and starts inference without blocking the main thread, while `Infer` blocks the
main thread and returns when inference is completed.
@@ -185,7 +185,7 @@ exception.
Note that casting `Blob` to `TBlob` via `std::dynamic_pointer_cast` is not a recommended way;
it is better to access data via the `buffer()` and `as()` methods as follows:
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part14
+@snippet snippets/Integrate_with_customer_application_new_API.cpp part14
## Build Your Application
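
The integration steps above, condensed into one hedged sketch; the paths, precisions, and device name are placeholders:

```cpp
#include <string>
#include <inference_engine.hpp>

int main() {
    // 1. Create the Core object.
    InferenceEngine::Core core;

    // 2. Read the model (IR shown here; an .onnx path works the same way).
    auto network = core.ReadNetwork("model.xml");  // placeholder path

    // 3. Configure input and output precision.
    auto input_info = network.getInputsInfo().begin()->second;
    std::string input_name = network.getInputsInfo().begin()->first;
    input_info->setPrecision(InferenceEngine::Precision::U8);
    auto output_info = network.getOutputsInfo().begin()->second;
    std::string output_name = network.getOutputsInfo().begin()->first;
    output_info->setPrecision(InferenceEngine::Precision::FP32);

    // 4. Load the model to a device and 5. create an infer request.
    auto exec = core.LoadNetwork(network, "CPU");
    auto request = exec.CreateInferRequest();

    // 6. Fill the input blob allocated by the request.
    InferenceEngine::Blob::Ptr input_blob = request.GetBlob(input_name);
    // ... copy a resized, correctly laid out image into input_blob here ...

    // 7. Run inference asynchronously and wait for the result.
    request.StartAsync();
    request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);

    // 8. Read the output blob.
    InferenceEngine::Blob::Ptr output_blob = request.GetBlob(output_name);
    return 0;
}
```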
diff --git a/docs/IE_DG/Migration_CoreAPI.md b/docs/IE_DG/Migration_CoreAPI.md
index 5edac6052633f8..d49bd425bc87c6 100644
--- a/docs/IE_DG/Migration_CoreAPI.md
+++ b/docs/IE_DG/Migration_CoreAPI.md
@@ -27,44 +27,44 @@ Common migration process includes the following steps:
1. Migrate from the `InferenceEngine::InferencePlugin` initialization:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part0
+@snippet snippets/Migration_CoreAPI.cpp part0
to the `InferenceEngine::Core` class initialization:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part1
+@snippet snippets/Migration_CoreAPI.cpp part1
2. Instead of using `InferenceEngine::CNNNetReader` to read IR:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part2
+@snippet snippets/Migration_CoreAPI.cpp part2
read networks using the Core class:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part3
+@snippet snippets/Migration_CoreAPI.cpp part3
The Core class also allows reading models from the ONNX format (more information is [here](./ONNX_Support.md)):
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part4
+@snippet snippets/Migration_CoreAPI.cpp part4
3. Instead of adding CPU device extensions to the plugin:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part5
+@snippet snippets/Migration_CoreAPI.cpp part5
add extensions to CPU device using the Core class:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part6
+@snippet snippets/Migration_CoreAPI.cpp part6
4. Instead of setting configuration keys to a particular plugin, set (key, value) pairs via `InferenceEngine::Core::SetConfig`
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part7
+@snippet snippets/Migration_CoreAPI.cpp part7
> **NOTE**: If `deviceName` is omitted as the last argument, configuration is set for all Inference Engine devices.
5. Migrate from loading the network to a particular plugin:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part8
+@snippet snippets/Migration_CoreAPI.cpp part8
to `InferenceEngine::Core::LoadNetwork` to a particular device:
-@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part9
+@snippet snippets/Migration_CoreAPI.cpp part9
After you have an instance of `InferenceEngine::ExecutableNetwork`, all other steps are as usual.
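
Put together, the migrated flow from the steps above looks roughly like this; the model path, extension library path, and config key are illustrative:

```cpp
#include <inference_engine.hpp>

int main() {
    // One Core object replaces the per-device InferencePlugin instances.
    InferenceEngine::Core core;

    // ReadNetwork replaces CNNNetReader (IR and ONNX formats are both accepted).
    auto network = core.ReadNetwork("model.xml");  // placeholder path

    // Extensions and configuration are now attached per device through the Core object.
    // core.AddExtension(std::make_shared<InferenceEngine::Extension>("<extension_lib>"), "CPU");
    core.SetConfig({{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}, "CPU");

    // LoadNetwork on the Core replaces loading the network into a particular plugin.
    auto exec = core.LoadNetwork(network, "CPU");
    return 0;
}
```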
diff --git a/docs/IE_DG/OnnxImporterTutorial.md b/docs/IE_DG/OnnxImporterTutorial.md
index 5f63e1754f0f71..f4538633a7e805 100644
--- a/docs/IE_DG/OnnxImporterTutorial.md
+++ b/docs/IE_DG/OnnxImporterTutorial.md
@@ -18,7 +18,7 @@ Two categories of API functions:
To list all supported ONNX ops in a specific version and domain, use the `get_supported_operators`
as shown in the example below:
-@snippet openvino/docs/snippets/OnnxImporterTutorial0.cpp part0
+@snippet snippets/OnnxImporterTutorial0.cpp part0
The above code produces a list of all the supported operators for the `version` and `domain` you specified and outputs a list similar to this:
```cpp
@@ -30,7 +30,7 @@ Xor
To determine whether a specific ONNX operator in a particular version and domain is supported by the importer, use the `is_operator_supported` function as shown in the example below:
-@snippet openvino/docs/snippets/OnnxImporterTutorial1.cpp part1
+@snippet snippets/OnnxImporterTutorial1.cpp part1
## Import ONNX Model
@@ -55,13 +55,13 @@ As it was shown in [Build a Model with nGraph Library](../nGraph_DG/build_functi
The code below shows how to convert the ONNX ResNet50 model to the nGraph function using `import_onnx_model` with the stream as an input:
-@snippet openvino/docs/snippets/OnnxImporterTutorial2.cpp part2
+@snippet snippets/OnnxImporterTutorial2.cpp part2
### Filepath as Input
The code below shows how to convert the ONNX ResNet50 model to the nGraph function using `import_onnx_model` with the filepath as an input:
-@snippet openvino/docs/snippets/OnnxImporterTutorial3.cpp part3
+@snippet snippets/OnnxImporterTutorial3.cpp part3
[onnx_header]: https://github.com/NervanaSystems/ngraph/blob/master/src/ngraph/frontend/onnx_import/onnx.hpp
[onnx_model_zoo]: https://github.com/onnx/models
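
For orientation, a sketch combining the importer calls discussed in this document; the opset version, domain, and model file name are examples, and the header layout is assumed from the nGraph ONNX importer:

```cpp
#include <cstdint>
#include <fstream>
#include <set>
#include <string>
#include <onnx_import/onnx.hpp>

int main() {
    const std::int64_t version = 12;       // example opset version
    const std::string domain = "ai.onnx";  // example domain

    // Query which operators the importer supports.
    std::set<std::string> ops = ngraph::onnx_import::get_supported_operators(version, domain);
    bool relu_ok = ngraph::onnx_import::is_operator_supported("Relu", version, domain);

    // Import a model from a stream ...
    std::ifstream model_stream("resnet50.onnx", std::ios::binary);  // placeholder model file
    auto f1 = ngraph::onnx_import::import_onnx_model(model_stream);

    // ... or directly from a file path.
    auto f2 = ngraph::onnx_import::import_onnx_model("resnet50.onnx");
    return 0;
}
```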
diff --git a/docs/IE_DG/ShapeInference.md b/docs/IE_DG/ShapeInference.md
index f684b4a38cc1b5..a7cdddb784d676 100644
--- a/docs/IE_DG/ShapeInference.md
+++ b/docs/IE_DG/ShapeInference.md
@@ -94,7 +94,7 @@ The algorithm for resizing network is the following:
Here is a code example:
-@snippet openvino/docs/snippets/ShapeInference.cpp part0
+@snippet snippets/ShapeInference.cpp part0
Shape Inference feature is used in [Smart classroom sample](@ref omz_demos_smart_classroom_demo_README).
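As an illustration of the resize flow described above, here is a minimal sketch; the model path, device name, and new batch size are placeholders:
```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");

    // Collect the current input shapes and patch the dimensions you need
    auto input_shapes = network.getInputShapes();  // map: input name -> SizeVector
    for (auto& item : input_shapes) {
        item.second[0] = 8;                        // example: set batch size to 8
    }

    network.reshape(input_shapes);                 // runs shape inference through the whole network
    auto exec_network = core.LoadNetwork(network, "CPU");
    return 0;
}
```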
diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md
index dbcc3244a13a5f..41e1b1dd1b08e8 100644
--- a/docs/IE_DG/inference_engine_intro.md
+++ b/docs/IE_DG/inference_engine_intro.md
@@ -16,17 +16,17 @@ For complete API Reference, see the [Inference Engine API References](./api_refe
Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs.
Modules in the Inference Engine component
----------------------------------------
+-----------------------------------------
### Core Inference Engine Libraries ###
Your application must link to the core Inference Engine libraries:
* Linux* OS:
- - `libinference_engine.so`, which depends on `libinference_engine_transformations.so` and `libngraph.so`
- - `libinference_engine_legacy.so`, which depends on `libtbb.so`
+ - `libinference_engine.so`, which depends on `libinference_engine_transformations.so`, `libtbb.so`, `libtbbmalloc.so` and `libngraph.so`
* Windows* OS:
- - `inference_engine.dll`, which depends on `inference_engine_transformations.dll` and `ngraph.dll`
- - `inference_engine_legacy.dll`, which depends on `tbb.dll`
+ - `inference_engine.dll`, which depends on `inference_engine_transformations.dll`, `tbb.dll`, `tbbmalloc.dll` and `ngraph.dll`
+* macOS*:
+ - `libinference_engine.dylib`, which depends on `libinference_engine_transformations.dylib`, `libtbb.dylib`, `libtbbmalloc.dylib` and `libngraph.dylib`
The required C++ header files are located in the `include` directory.
@@ -49,26 +49,26 @@ Starting from 2020.4 release, Inference Engine introduced a concept of `CNNNetwo
For each supported target device, Inference Engine provides a plugin — a DLL/shared library that contains complete implementation for inference on this particular device. The following plugins are available:
-| Plugin | Device Type |
-| ------------- | ------------- |
-|CPU| Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
-|GPU| Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics
-|MYRIAD| Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X|
-|GNA| Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor
-|HETERO|Automatic splitting of a network inference between several devices (for example if a device doesn't support certain layers|
-|MULTI| Simultaneous inference of the same network on several devices in parallel|
-
-The table below shows the plugin libraries and additional dependencies for Linux and Windows platforms.
-
-| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows |
-|--------|------------------------|-------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|
-| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` |
-| GPU | `libclDNNPlugin.so` | `libinference_engine_lp_transformations.so`, `libOpenCL.so` | `clDNNPlugin.dll` | `OpenCL.dll`, `inference_engine_lp_transformations.dll` |
-| MYRIAD | `libmyriadPlugin.so` | `libusb.so`, `libinference_engine_lp_transformations.so` | `myriadPlugin.dll` | `usb.dll`, `inference_engine_lp_transformations.dll` |
-| HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so`, `libinference_engine_lp_transformations.so`| `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll`, `inference_engine_lp_transformations.dll` |
-| GNA | `libGNAPlugin.so` | `libgna.so`, `libinference_engine_lp_transformations.so` | `GNAPlugin.dll` | `gna.dll`, `inference_engine_lp_transformations.dll` |
-| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins |
-| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins |
+| Plugin | Device Type |
+| ------- | ----------------------------- |
+|CPU | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
+|GPU | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
+|MYRIAD | Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X |
+|GNA | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor |
+|HETERO | Automatic splitting of a network inference between several devices (for example, if a device doesn't support certain layers) |
+|MULTI | Simultaneous inference of the same network on several devices in parallel|
+
+The table below shows the plugin libraries and additional dependencies for Linux, Windows and macOS platforms.
+
+| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | Library name for macOS | Dependency libraries for macOS |
+|--------|-----------------------------|-------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|------------------------------|---------------------------------------------|
+| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | `libMKLDNNPlugin.dylib` | `inference_engine_lp_transformations.dylib` |
+| GPU    | `libclDNNPlugin.so`         | `libinference_engine_lp_transformations.so`, `libOpenCL.so`  | `clDNNPlugin.dll`        | `OpenCL.dll`, `inference_engine_lp_transformations.dll`                                                  | Not supported                | -                                            |
+| MYRIAD | `libmyriadPlugin.so`        | `libusb.so`                                                   | `myriadPlugin.dll`       | `usb.dll`                                                                                                | `libmyriadPlugin.dylib`      | `libusb.dylib`                               |
+| HDDL   | `libHDDLPlugin.so`          | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so`               | `HDDLPlugin.dll`         | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll`   | Not supported                | -                                            |
+| GNA    | `libGNAPlugin.so`           | `libgna.so`                                                   | `GNAPlugin.dll`          | `gna.dll`                                                                                                | Not supported                | -                                            |
+| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins | `libHeteroPlugin.dylib` | Same as for selected plugins |
+| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins | `libMultiDevicePlugin.dylib` | Same as for selected plugins |
> **NOTE**: All plugin libraries also depend on core Inference Engine libraries.
@@ -76,15 +76,16 @@ Make sure those libraries are in your computer's path or in the place you pointe
* Linux: `LD_LIBRARY_PATH`
* Windows: `PATH`
+* macOS: `DYLD_LIBRARY_PATH`
-On Linux, use the script `bin/setupvars.sh` to set the environment variables.
+On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables.
On Windows, run the `bin\setupvars.bat` batch file to set the environment variables.
To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter.
Common Workflow for Using the Inference Engine API
----------------------------
+--------------------------------------------------
The common workflow contains the following steps:
1. **Create Inference Engine Core object** - Create an `InferenceEngine::Core` object to work with different devices; all device plugins are managed internally by the `Core` object. Register extensions with custom nGraph operations (`InferenceEngine::Core::AddExtension`).
diff --git a/docs/IE_DG/protecting_model_guide.md b/docs/IE_DG/protecting_model_guide.md
index 59ac3ba6ca2c03..99b7836b1b25d1 100644
--- a/docs/IE_DG/protecting_model_guide.md
+++ b/docs/IE_DG/protecting_model_guide.md
@@ -33,7 +33,7 @@ a temporary memory block for model decryption, and use
For more information, see the `InferenceEngine::Core` Class
Reference Documentation.
-@snippet openvino/docs/snippets/protecting_model_guide.cpp part0
+@snippet snippets/protecting_model_guide.cpp part0
Hardware-based protection, such as Intel® Software Guard Extensions
(Intel® SGX), can be utilized to protect decryption operation secrets and
@@ -47,7 +47,7 @@ Currently there are no possibility to read external weights from memory for ONNX
The `ReadNetwork(const std::string& model, const Blob::CPtr& weights)` function
should be called with `weights` passed as an empty `Blob`.
-@snippet openvino/docs/snippets/protecting_model_guide.cpp part1
+@snippet snippets/protecting_model_guide.cpp part1
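A minimal sketch of this flow; the `decrypt_file` helper and the `.enc` file names are hypothetical (the stub below just reads the file without real decryption), and for the ONNX case the weights blob is simply left empty as described above:
```cpp
#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>
#include <inference_engine.hpp>

// Hypothetical helper: replace the body with your real decryption routine.
std::vector<uint8_t> decrypt_file(const std::string& path) {
    std::ifstream file(path, std::ios::binary);
    return {std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()};
}

int main() {
    InferenceEngine::Core core;

    // IR model: both the topology and the weights come from decrypted in-memory buffers
    std::vector<uint8_t> xml_data = decrypt_file("model.xml.enc");
    std::vector<uint8_t> bin_data = decrypt_file("model.bin.enc");

    std::string model_xml(xml_data.begin(), xml_data.end());
    auto weights_blob = InferenceEngine::make_shared_blob<uint8_t>(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
                                    {bin_data.size()}, InferenceEngine::Layout::C),
        bin_data.data());

    InferenceEngine::CNNNetwork network = core.ReadNetwork(model_xml, weights_blob);

    // ONNX model: external weights cannot be read from memory, so pass an empty Blob
    std::vector<uint8_t> onnx_data = decrypt_file("model.onnx.enc");
    InferenceEngine::CNNNetwork onnx_network = core.ReadNetwork(
        std::string(onnx_data.begin(), onnx_data.end()), InferenceEngine::Blob::CPtr());
    return 0;
}
```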
[deploy_encrypted_model]: img/deploy_encrypted_model.png
diff --git a/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md b/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md
index 55169fb2aabfcb..227ce101723283 100644
--- a/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md
+++ b/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md
@@ -102,15 +102,15 @@ Refer to the sections below to see pseudo-code of usage examples.
This example uses the OpenCL context obtained from an executable network object.
-@snippet openvino/docs/snippets/GPU_RemoteBlob_API0.cpp part0
+@snippet snippets/GPU_RemoteBlob_API0.cpp part0
### Running GPU Plugin Inference within User-Supplied Shared Context
-@snippet openvino/docs/snippets/GPU_RemoteBlob_API1.cpp part1
+@snippet snippets/GPU_RemoteBlob_API1.cpp part1
### Direct Consuming of the NV12 VAAPI Video Decoder Surface on Linux
-@snippet openvino/docs/snippets/GPU_RemoteBlob_API2.cpp part2
+@snippet snippets/GPU_RemoteBlob_API2.cpp part2
## See Also
diff --git a/docs/IE_DG/supported_plugins/HETERO.md b/docs/IE_DG/supported_plugins/HETERO.md
index e417907b0a7620..9b5f69ce687e95 100644
--- a/docs/IE_DG/supported_plugins/HETERO.md
+++ b/docs/IE_DG/supported_plugins/HETERO.md
@@ -28,17 +28,17 @@ Default fallback policy decides which layer goes to which device automatically a
Another way to annotate a network is to set affinity manually using ngraph::Node::get_rt_info with key `"affinity"`:
-@snippet openvino/docs/snippets/HETERO0.cpp part0
+@snippet snippets/HETERO0.cpp part0
The fallback policy does not work if even one layer has an initialized affinity. The sequence should be to run the automatic affinity assignment first and then adjust the affinities manually.
> **NOTE**: If you set affinity manually, be careful: at the moment Inference Engine plugins do not support constant (`Constant`->`Result`) and empty (`Parameter`->`Result`) networks. Please avoid such subgraphs when you set affinity manually.
-@snippet openvino/docs/snippets/HETERO1.cpp part1
+@snippet snippets/HETERO1.cpp part1
If you rely on the default affinity distribution, you can avoid calling InferenceEngine::Core::QueryNetwork and just call InferenceEngine::Core::LoadNetwork instead:
-@snippet openvino/docs/snippets/HETERO2.cpp part2
+@snippet snippets/HETERO2.cpp part2
> **NOTE**: `InferenceEngine::Core::QueryNetwork` does not depend on affinities set by a user, but queries for layer support based on device capabilities.
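Putting the above together, here is a hedged sketch that queries one device and falls back to another for unsupported operations; the GPU/CPU pair and the model path are assumptions, and the `ngraph::VariantWrapper<std::string>` pattern is used to store the string affinity in the node runtime info:
```cpp
#include <memory>
#include <string>
#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");
    auto function = network.getFunction();

    // Ask the GPU plugin which operations it supports and fall back to CPU for the rest
    auto supported = core.QueryNetwork(network, "GPU").supportedLayersMap;
    for (auto&& node : function->get_ops()) {
        const std::string device =
            supported.count(node->get_friendly_name()) ? "GPU" : "CPU";
        node->get_rt_info()["affinity"] =
            std::make_shared<ngraph::VariantWrapper<std::string>>(device);
    }

    // Affinities are already initialized, so the HETERO plugin respects them as-is
    auto exec_network = core.LoadNetwork(network, "HETERO:GPU,CPU");
    return 0;
}
```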
@@ -74,7 +74,7 @@ Heterogeneous plugin can generate two files:
* `hetero_affinity_.dot` - annotation of affinities per layer. This file is written to the disk only if default fallback policy was executed
* `hetero_subgraphs_.dot` - annotation of affinities per graph. This file is written to the disk during execution of ICNNNetwork::LoadNetwork() for heterogeneous plugin
-@snippet openvino/docs/snippets/HETERO3.cpp part3
+@snippet snippets/HETERO3.cpp part3
You can use GraphViz* utility or converters to `.png` formats. On Ubuntu* operating system, you can use the following utilities:
* `sudo apt-get install xdot`
diff --git a/docs/IE_DG/supported_plugins/MULTI.md b/docs/IE_DG/supported_plugins/MULTI.md
index 32a9555b380f07..a3166c3de8e956 100644
--- a/docs/IE_DG/supported_plugins/MULTI.md
+++ b/docs/IE_DG/supported_plugins/MULTI.md
@@ -32,11 +32,11 @@ You can use name of the configuration directly as a string, or use MultiDeviceCo
Basically, there are three ways to specify the devices to be used by the "MULTI":
-@snippet openvino/docs/snippets/MULTI0.cpp part0
+@snippet snippets/MULTI0.cpp part0
Notice that the priorities of the devices can be changed in real-time for the executable network:
-@snippet openvino/docs/snippets/MULTI1.cpp part1
+@snippet snippets/MULTI1.cpp part1
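As a rough sketch of such a runtime change (the `multi-device/multi_device_config.hpp` include path and the `KEY_MULTI_DEVICE_PRIORITIES` constant are assumptions based on the `MultiDeviceConfigParams` mentioned above; the model path is a placeholder):
```cpp
#include <string>
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>  // assumed header for MultiDeviceConfigParams

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");

    auto exec_network = core.LoadNetwork(network, "MULTI",
        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "GPU,CPU"}});

    // Later, re-prioritize CPU over GPU without reloading the network
    exec_network.SetConfig(
        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
          std::string("CPU,GPU")}});
    return 0;
}
```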
Finally, there is a way to specify the number of requests that the multi-device will internally keep for each device.
Say your original app was running 4 cameras with 4 inference requests; now you would probably want to share these 4 requests between the 2 devices used in the MULTI. The easiest way is to specify a number of requests for each device using parentheses: "MULTI:CPU(2),GPU(2)" and use the same 4 requests in your app. However, such an explicit configuration is not performance portable and hence not recommended. Instead, the better way is to configure the individual devices and query the resulting number of requests at the application level (see [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top)).
@@ -55,7 +55,7 @@ Available devices:
```
A simple programmatic way to enumerate the devices and use them with the multi-device is as follows:
-@snippet openvino/docs/snippets/MULTI2.cpp part2
+@snippet snippets/MULTI2.cpp part2
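A hedged sketch of that enumeration, building the priority list from whatever `GetAvailableDevices` returns (the include path and configuration key are the same assumptions as in the previous sketch; the model path is a placeholder):
```cpp
#include <string>
#include <inference_engine.hpp>
#include <multi-device/multi_device_config.hpp>  // assumed header for MultiDeviceConfigParams

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");

    // Build a comma-separated priority list from everything the Core can see
    std::string all_devices;
    for (auto&& device : core.GetAvailableDevices()) {
        all_devices += device + ",";
    }
    if (!all_devices.empty())
        all_devices.pop_back();  // drop the trailing comma

    auto exec_network = core.LoadNetwork(network, "MULTI",
        {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, all_devices}});
    return 0;
}
```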
Beyond trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available the names are more qualified.
For example this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:
@@ -68,13 +68,13 @@ For example this is how two Intel® Movidius™ Myriad™ X sticks are listed wi
So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480".
Accordingly, the code that loops over all available devices of "MYRIAD" type only is below:
-@snippet openvino/docs/snippets/MULTI3.cpp part3
+@snippet snippets/MULTI3.cpp part3
## Configuring the Individual Devices and Creating the Multi-Device On Top
As discussed in the first section, you shall configure each individual device as usual and then just create the "MULTI" device on top:
-@snippet openvino/docs/snippets/MULTI4.cpp part4
+@snippet snippets/MULTI4.cpp part4
Alternatively, you can combine all the individual device settings into a single config and load that, allowing the multi-device plugin to parse and apply the settings to the right devices. See the code example in the next section.
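A minimal sketch of this pattern; the per-device configuration maps are intentionally left empty as placeholders, and the GPU/CPU pair is just an example:
```cpp
#include <map>
#include <string>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");

    // Placeholder per-device settings; fill in whatever keys your devices need
    std::map<std::string, std::string> cpu_config = {};
    std::map<std::string, std::string> gpu_config = {};
    core.SetConfig(cpu_config, "CPU");
    core.SetConfig(gpu_config, "GPU");

    // The MULTI device created on top simply reuses the per-device configuration set above
    auto exec_network = core.LoadNetwork(network, "MULTI:GPU,CPU");
    return 0;
}
```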
@@ -84,7 +84,7 @@ See section of the [Using the multi-device with OpenVINO samples and benchmarkin
## Querying the Optimal Number of Inference Requests
Notice that until R2 you had to calculate the number of requests in your application for any device, e.g. you had to know that Intel® Vision Accelerator Design with Intel® Movidius™ VPUs required at least 32 inference requests to perform well. Now you can use the new GetMetric API to query the optimal number of requests. Similarly, when using the multi-device you don't need to sum over the included devices yourself; you can query the metric directly:
-@snippet openvino/docs/snippets/MULTI5.cpp part5
+@snippet snippets/MULTI5.cpp part5
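For example, a hedged sketch of querying the metric and sizing the request pool accordingly (the device pair and model path are placeholders):
```cpp
#include <vector>
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");
    auto exec_network = core.LoadNetwork(network, "MULTI:GPU,CPU");

    // Let the multi-device report how many requests it needs to keep all devices busy
    unsigned int nireq = exec_network.GetMetric(
        METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();

    std::vector<InferenceEngine::InferRequest> requests;
    for (unsigned int i = 0; i < nireq; ++i) {
        requests.push_back(exec_network.CreateInferRequest());
    }
    return 0;
}
```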
## Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance
Notice that every OpenVINO sample that supports the "-d" (which stands for "device") command-line option transparently accepts the multi-device.
diff --git a/docs/IE_PLUGIN_DG/Doxyfile b/docs/IE_PLUGIN_DG/Doxyfile
index d72cbe5b9fcb40..3d66d22b4a2000 100644
--- a/docs/IE_PLUGIN_DG/Doxyfile
+++ b/docs/IE_PLUGIN_DG/Doxyfile
@@ -844,11 +844,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
-EXCLUDE_PATTERNS = cnn_network_ngraph_impl.hpp \
- ie_imemory_state_internal.hpp \
- ie_memory_state_internal.hpp \
- ie_memory_state_base.hpp \
- generic_ie.hpp \
+EXCLUDE_PATTERNS = generic_ie.hpp \
function_name.hpp \
macro_overload.hpp
diff --git a/docs/IE_PLUGIN_DG/ExecutableNetwork.md b/docs/IE_PLUGIN_DG/ExecutableNetwork.md
index a52872946c2bf3..2685c518a0ec58 100644
--- a/docs/IE_PLUGIN_DG/ExecutableNetwork.md
+++ b/docs/IE_PLUGIN_DG/ExecutableNetwork.md
@@ -92,7 +92,7 @@ Returns a metric value for a metric with the name `name`. A metric is a static
@snippet src/template_executable_network.cpp executable_network:get_metric
-The IE_SET_METRIC helper macro sets metric value and checks that the actual metric type matches a type of the specified value.
+The `IE_SET_METRIC_RETURN` helper macro sets a metric value and checks that the actual metric type matches the type of the specified value.
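For illustration only, here is a hedged sketch of how a plugin-side `GetMetric()` body might use the macro; the `ie_metric_helpers.hpp` include path, the free-function wrapper, and the metric values are assumptions, not the Template plugin's actual code:
```cpp
#include <string>
#include <inference_engine.hpp>
#include <ie_metric_helpers.hpp>  // assumed plugin API header providing IE_SET_METRIC_RETURN

// Sketch of a GetMetric() body; a real plugin implements this as an ExecutableNetwork method.
InferenceEngine::Parameter GetMetricExample(const std::string& name) {
    if (name == METRIC_KEY(NETWORK_NAME)) {
        // Checks that the value type matches the metric's declared type,
        // then returns it from the enclosing function.
        IE_SET_METRIC_RETURN(NETWORK_NAME, std::string("example_network"));
    } else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
        IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, 1u);
    }
    THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
```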
### `GetConfig()`
diff --git a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md b/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md
index 9ff8088a366745..c00507d6c37453 100644
--- a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md
+++ b/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md
@@ -1,11 +1,11 @@
-# Representation of low-precision models
+# Representation of low-precision models {#lp_representation}
The goal of this document is to describe how optimized models are represented in OpenVINO Intermediate Representation (IR) and provide guidance on interpretation rules for such models at runtime.
Currently, there are two groups of optimization methods that can influence the IR after applying them to the full-precision model:
- **Sparsity**. It is represented by zeros inside the weights, and it is up to the hardware plugin how to interpret these zeros (use the weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model.
- **Quantization**. The rest of this document is dedicated to the representation of quantized models.
## Representation of quantized models
-The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in [this document](../MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md)). This operation is very expressive and allows mapping values from arbitrary input and output ranges. The whole idea behind that is quite simple: we project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then reproject discrete values back to the original range and data type. It can be considered as an emulation of the quantization process which happens at runtime.
+The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in [this document](@ref openvino_docs_ops_quantization_FakeQuantize_1)). This operation is very expressive and allows mapping values from arbitrary input and output ranges. The whole idea behind it is quite simple: we project (discretize) the input values to the low-precision data type using an affine transformation (with clamp and rounding) and then reproject the discrete values back to the original range and data type. It can be considered an emulation of the quantization process which happens at runtime.
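For intuition, here is a scalar sketch of that projection and reprojection, following the clamp/round/rescale semantics described above; broadcasting and per-channel ranges are omitted:
```cpp
#include <algorithm>
#include <cmath>

// Scalar sketch of the FakeQuantize transfer function (element-wise view).
float fake_quantize(float x, float input_low, float input_high,
                    float output_low, float output_high, int levels) {
    if (x <= std::min(input_low, input_high)) return output_low;
    if (x >  std::max(input_low, input_high)) return output_high;
    // Discretize to one of `levels` steps within the input range...
    const float q = std::round((x - input_low) / (input_high - input_low) * (levels - 1));
    // ...then reproject the discrete step back to the output range.
    return q / (levels - 1) * (output_high - output_low) + output_low;
}
```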
In order to be able to execute a particular DL operation in low precision, all its inputs should be quantized, i.e. should have a FakeQuantize between the operation and the data blobs. The figure below shows an example of quantized Convolution which contains two FakeQuantize nodes: one for weights and one for activations (bias is quantized using the same parameters).
![quantized_convolution]
Figure 1. Example of quantized Convolution operation.