From 0b82fefd212b80a0db908e01fe1e5eff94224168 Mon Sep 17 00:00:00 2001
From: WolframRhodium
Date: Fri, 8 Mar 2024 18:22:29 +0800
Subject: [PATCH] upgrade to ov 2024.0

---
 .github/workflows/linux-ov.yml |   9 ++-
 vsov/CMakeLists.txt            |  18 ++---
 vsov/vs_openvino.cpp           | 127 +++++++++++++++------------------
 3 files changed, 71 insertions(+), 83 deletions(-)

diff --git a/.github/workflows/linux-ov.yml b/.github/workflows/linux-ov.yml
index e154789..fb03816 100644
--- a/.github/workflows/linux-ov.yml
+++ b/.github/workflows/linux-ov.yml
@@ -36,7 +36,7 @@ jobs:
       if: steps.cache-protobuf.outputs.cache-hit != 'true'
       with:
         repository: protocolbuffers/protobuf
-        # follows protobuf in https://github.com/openvinotoolkit/openvino/tree/2023.2.0/thirdparty/protobuf
+        # follows protobuf in https://github.com/openvinotoolkit/openvino/tree/2024.0.0/thirdparty/protobuf
         # if you change this, remember to bump the version of the cache key.
         ref: fe271ab76f2ad2b2b28c10443865d2af21e27e0e
         fetch-depth: 1
@@ -69,9 +69,9 @@
       uses: actions/checkout@v4
       with:
         repository: onnx/onnx
-        # follows onnx in https://github.com/openvinotoolkit/openvino/tree/2023.2.0/thirdparty/onnx
+        # follows onnx in https://github.com/openvinotoolkit/openvino/tree/2024.0.0/thirdparty/onnx
         # if you change this, remember to bump the version of the cache key.
-        ref: 1014f41f17ecc778d63e760a994579d96ba471ff
+        ref: b86cc54efce19530fb953e4b21f57e6b3888534c
         fetch-depth: 1
         path: vsov/onnx
@@ -103,7 +103,7 @@
     - name: Setup OpenVINO
       run: |
-        curl -L -o ov.tgz https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz
+        curl -L -o ov.tgz https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.0/linux/l_openvino_toolkit_ubuntu22_2024.0.0.14509.34caeefd078_x86_64.tgz
         tar -xf ov.tgz
         mv l_openvino_* openvino -v
@@ -112,7 +112,6 @@
         -D CMAKE_BUILD_TYPE=Release
         -D CMAKE_CXX_FLAGS="-Wall -ffast-math -march=x86-64-v3"
         -D VAPOURSYNTH_INCLUDE_DIRECTORY="`pwd`/vapoursynth/include"
-        -D InferenceEngine_DIR=openvino/runtime/cmake
         -D OpenVINO_DIR=openvino/runtime/cmake
         -D ENABLE_VISUALIZATION=ON
         -D WIN32_SHARED_OPENVINO=ON
diff --git a/vsov/CMakeLists.txt b/vsov/CMakeLists.txt
index f003806..e6f033c 100644
--- a/vsov/CMakeLists.txt
+++ b/vsov/CMakeLists.txt
@@ -7,7 +7,6 @@ set(ENABLE_VISUALIZATION OFF CACHE BOOL "Enable support for network visualizatio
 set(WIN32_SHARED_OPENVINO OFF CACHE BOOL "Build for win32 with shared openvino library")
 
 find_package(OpenVINO REQUIRED CONFIG)
-find_package(InferenceEngine REQUIRED CONFIG)
 
 add_library(vsov SHARED
     vs_openvino.cpp
@@ -24,20 +23,21 @@ if(WIN32_SHARED_OPENVINO)
     target_compile_definitions(vsov PRIVATE WIN32_SHARED_OPENVINO)
 endif()
 
-target_include_directories(vsov PRIVATE
-    ${VAPOURSYNTH_INCLUDE_DIRECTORY}
-    ${ONNX_INCLUDE_DIRS}
-    ${InferenceEngine_INCLUDE_DIRS}
-)
-
 if(WIN32_SHARED_OPENVINO)
     find_package(protobuf REQUIRED CONFIG)
     find_package(ONNX REQUIRED CONFIG)
-    target_link_libraries(vsov PRIVATE ${InferenceEngine_LIBRARIES} onnx)
+    target_link_libraries(vsov PRIVATE onnx)
 else()
-    target_link_libraries(vsov PRIVATE ${InferenceEngine_LIBRARIES} openvino::onnx)
+    target_link_libraries(vsov PRIVATE openvino::frontend::onnx)
 endif()
 
+target_include_directories(vsov PRIVATE
+    ${VAPOURSYNTH_INCLUDE_DIRECTORY}
+    ${ONNX_INCLUDE_DIRS}
+)
+
+target_link_libraries(vsov PRIVATE openvino::runtime)
+
 set_target_properties(vsov PROPERTIES
     CXX_EXTENSIONS OFF
     CXX_STANDARD 17
diff --git a/vsov/vs_openvino.cpp b/vsov/vs_openvino.cpp
index f186be4..2881728 100644
--- a/vsov/vs_openvino.cpp
+++ b/vsov/vs_openvino.cpp
@@ -20,7 +20,7 @@
 #include 
 #include 
-#include 
+#include 
 #include 
 
 #ifdef ENABLE_VISUALIZATION
@@ -50,16 +50,16 @@ static const VSPlugin * myself = nullptr;
 
 static std::array getShape(
-    const InferenceEngine::ExecutableNetwork & network,
+    const ov::CompiledModel & network,
     bool input
 ) {
-    InferenceEngine::SizeVector dims;
+    ov::Shape dims;
 
     if (input) {
-        dims = network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims();
+        dims = network.input().get_shape();
     } else {
-        dims = network.GetOutputsInfo().cbegin()->second->getTensorDesc().getDims();
+        dims = network.output().get_shape();
     }
 
     std::array ret;
@@ -119,14 +119,13 @@ static std::optional checkIOInfo(
     bool is_output
 ) {
-    if (info->getPrecision() != InferenceEngine::Precision::FP32) {
+    if (info.get_element_type() != ov::element::f32) {
         return "expects network IO with type fp32";
     }
-    const auto & desc = info->getTensorDesc();
-    if (desc.getLayout() != InferenceEngine::Layout::NCHW) {
-        return "expects network IO with layout NCHW";
-    }
-    const auto & dims = desc.getDims();
+    // if (ov::layout::get_layout(info) != ov::Layout("NCHW")) {
+    //     return "expects network IO with layout NCHW";
+    // }
+    const auto & dims = info.get_shape();
     if (dims.size() != 4) {
         return "expects network with 4-D IO";
     }
@@ -148,27 +147,23 @@ static std::optional checkNetwork(
-    const InferenceEngine::CNNNetwork & network
+    const std::shared_ptr & network
 ) {
-    const auto & inputs_info = network.getInputsInfo();
-
-    if (auto num_inputs = std::size(inputs_info); num_inputs != 1) {
+    if (auto num_inputs = std::size(network->inputs()); num_inputs != 1) {
         return "network input count must be 1, got " + std::to_string(num_inputs);
     }
 
-    const auto & input_info = inputs_info.cbegin()->second;
+    const auto & input_info = network->input();
     if (auto err = checkIOInfo(input_info, false); err.has_value()) {
         return err.value();
     }
 
-    const auto & outputs_info = network.getOutputsInfo();
-
-    if (auto num_outputs = std::size(outputs_info); num_outputs != 1) {
+    if (auto num_outputs = std::size(network->outputs()); num_outputs != 1) {
         return "network output count must be 1, got " + std::to_string(num_outputs);
     }
 
-    const auto & output_info = outputs_info.cbegin()->second;
+    const auto & output_info = network->output();
     if (auto err = checkIOInfo(output_info, true); err.has_value()) {
         return err.value();
     }
@@ -179,12 +174,12 @@ static std::optional checkNodesAndNetwork(
-    const InferenceEngine::ExecutableNetwork & network,
+    const ov::CompiledModel & network,
     const std::vector & vis
 ) {
     const auto & network_in_dims = (
-        network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims()
+        network.input().get_tensor().get_shape()
     );
 
     int network_in_channels = static_cast(network_in_dims[1]);
@@ -205,15 +200,16 @@ static std::optional checkNodesAndNetwork(
 }
 
+
 static void setDimensions(
     std::unique_ptr & vi,
-    const InferenceEngine::ExecutableNetwork & network,
+    const ov::CompiledModel & network,
     VSCore * core,
     const VSAPI * vsapi
 ) {
-    auto in_dims = network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims();
-    auto out_dims = network.GetOutputsInfo().cbegin()->second->getTensorDesc().getDims();
+    const auto & in_dims = network.input().get_shape();
+    const auto & out_dims = network.output().get_shape();
 
     vi->height *= out_dims[2] / in_dims[2];
     vi->width *= out_dims[3] / in_dims[3];
@@ -226,13 +222,13 @@ static void setDimensions(
 }
 
-static std::variant> getConfig(
+static std::variant getConfig(
     VSFuncRef * config_func,
     VSCore * core,
     const VSAPI * vsapi
 ) {
-    std::map config;
+    ov::AnyMap config;
 
     if (config_func == nullptr) {
         return config;
@@ -285,9 +281,9 @@ struct OVData {
     int overlap_w, overlap_h;
 
-    InferenceEngine::Core core;
-    InferenceEngine::ExecutableNetwork executable_network;
-    std::unordered_map infer_requests;
+    ov::Core core;
+    ov::CompiledModel executable_network;
+    std::unordered_map infer_requests;
     std::shared_mutex infer_requests_lock;
 
     std::string input_name;
@@ -396,7 +392,7 @@ static const VSFrameRef *VS_CC vsOvGetFrame(
         auto thread_id = std::this_thread::get_id();
 
         bool initialized = true;
-        InferenceEngine::InferRequest * infer_request;
+        ov::InferRequest * infer_request;
 
         d->infer_requests_lock.lock_shared();
         try {
@@ -409,9 +405,9 @@ static const VSFrameRef *VS_CC vsOvGetFrame(
         if (!initialized) {
             std::lock_guard _ { d->infer_requests_lock };
             try {
-                d->infer_requests.emplace(thread_id, d->executable_network.CreateInferRequest());
-            } catch (const InferenceEngine::Exception& e) {
-                return set_error("[IE exception] Create inference request: "s + e.what());
+                d->infer_requests.emplace(thread_id, d->executable_network.create_infer_request());
+            } catch (const ov::Exception & e) {
+                return set_error("[OV exception] Create inference request: "s + e.what());
             } catch (const std::exception& e) {
                 return set_error("[Standard exception] Create inference request: "s + e.what());
             }
@@ -429,11 +425,7 @@ static const VSFrameRef *VS_CC vsOvGetFrame(
                 int x_crop_end = (x == src_width - src_tile_w) ? 0 : d->overlap_w;
 
                 {
-                    InferenceEngine::Blob::Ptr input = infer_request->GetBlob(d->input_name);
-
-                    auto minput = input->as();
-                    auto minputHolder = minput->wmap();
-                    uint8_t * input_buffer = minputHolder.as();
+                    auto input_buffer = (uint8_t *) infer_request->get_input_tensor().data();
 
                     for (const auto & _src_ptr : src_ptrs) {
                         const uint8_t * src_ptr { _src_ptr +
@@ -451,19 +443,15 @@ static const VSFrameRef *VS_CC vsOvGetFrame(
                 }
 
                 try {
-                    infer_request->Infer();
-                } catch (const InferenceEngine::Exception & e) {
-                    return set_error("[IE exception] Create inference request: "s + e.what());
+                    infer_request->infer();
+                } catch (const ov::Exception & e) {
+                    return set_error("[OV exception] Create inference request: "s + e.what());
                 } catch (const std::exception& e) {
                     return set_error("[Standard exception] Create inference request: "s + e.what());
                 }
 
                 {
-                    InferenceEngine::Blob::CPtr output = infer_request->GetBlob(d->output_name);
-
-                    auto moutput = output->as();
-                    auto moutputHolder = moutput->rmap();
-                    const uint8_t * output_buffer = moutputHolder.as();
+                    auto output_buffer = (const uint8_t *) infer_request->get_output_tensor().data();
 
                     for (int plane = 0; plane < dst_planes; ++plane) {
                         uint8_t * dst_ptr = (dst_ptrs[plane] +
@@ -533,11 +521,11 @@ static void VS_CC vsOvCreate(
 ) {
 
     std::unique_ptr d = nullptr;
-
+
     try {
         d = std::make_unique();
-    } catch (const InferenceEngine::Exception& e) {
-        vsapi->setError(out, ("[IE exception] Initialize inference engine: "s + e.what()).c_str());
+    } catch (const ov::Exception& e) {
+        vsapi->setError(out, ("[OV exception] Initialize inference engine: "s + e.what()).c_str());
         return ;
     } catch (const std::exception& e) {
         vsapi->setError(out, ("[Standard exception] Initialize inference engine: "s + e.what()).c_str());
@@ -675,12 +663,11 @@ static void VS_CC vsOvCreate(
     }
 
     {
-        InferenceEngine::CNNNetwork network;
+        std::shared_ptr network;
         try {
-            auto empty = InferenceEngine::Blob::CPtr();
-            network = d->core.ReadNetwork(onnx_data, empty);
-        } catch (const InferenceEngine::Exception& e) {
-            return set_error("[IE exception] ReadNetwork(): "s + e.what());
+            network = d->core.read_model(onnx_data, ov::Tensor());
+        } catch (const ov::Exception& e) {
+            return set_error("[OV exception] ReadNetwork(): "s + e.what());
         } catch (const std::exception& e) {
             return set_error("[Standard exception] ReadNetwork(): "s + e.what());
         }
@@ -689,10 +676,8 @@
             return set_error(err.value());
         }
 
-        auto function = network.getFunction(); // mutable
-
         try {
-            ov::pass::ConstantFolding().run_on_model(function);
+            ov::pass::ConstantFolding().run_on_model(network);
         } catch (const ov::Exception & e) {
             return set_error(e.what());
         }
@@ -701,7 +686,7 @@
         const char * dot_path = vsapi->propGetData(in, "dot_path", 0, &error);
         if (!error) {
             try {
-                ov::pass::VisualizeTree(dot_path, nullptr, true).run_on_model(function);
+                ov::pass::VisualizeTree(dot_path, nullptr, true).run_on_model(network);
             } catch (const ov::Exception & e) {
                 return set_error(e.what());
             }
@@ -714,11 +699,11 @@
         if (std::holds_alternative(config_ret)) {
             return set_error(std::get(config_ret));
         }
-        auto & config = std::get>(config_ret);
+        auto & config = std::get(config_ret);
 
         try {
-            d->executable_network = d->core.LoadNetwork(network, device, config);
-        } catch (const InferenceEngine::Exception & e) {
+            d->executable_network = d->core.compile_model(network, device, config);
+        } catch (const ov::Exception & e) {
             return set_error(e.what());
         }
@@ -728,8 +713,8 @@
 
         setDimensions(d->out_vi, d->executable_network, core, vsapi);
 
-        d->input_name = d->executable_network.GetInputsInfo().cbegin()->first;
-        d->output_name = d->executable_network.GetOutputsInfo().cbegin()->first;
+        d->input_name = d->executable_network.input().get_names().cbegin()->front();
+        d->output_name = d->executable_network.output().get_names().cbegin()->front();
 
         VSCoreInfo core_info;
         vsapi->getCoreInfo2(core, &core_info);
@@ -780,7 +765,11 @@ VS_EXTERNAL_API(void) VapourSynthPluginInit(
     vsapi->propSetData(out, "version", VERSION, -1, paReplace);
 
     std::ostringstream ostream;
-    ostream << IE_VERSION_MAJOR << '.' << IE_VERSION_MINOR << '.' << IE_VERSION_PATCH;
+    ostream << OPENVINO_VERSION_MAJOR << '.' << OPENVINO_VERSION_MINOR << '.' << OPENVINO_VERSION_PATCH;
+    vsapi->propSetData(out, "inference_engine_version_build", ostream.str().c_str(), -1, paReplace);
+
+    ostream.clear();
+    ostream << ov::get_openvino_version();
     vsapi->propSetData(out, "inference_engine_version", ostream.str().c_str(), -1, paReplace);
 
     vsapi->propSetData(
@@ -798,13 +787,13 @@ VS_EXTERNAL_API(void) VapourSynthPluginInit(
 
     auto availableDevices = [](const VSMap *, VSMap * out, void *, VSCore *, const VSAPI *vsapi) {
         try {
-            auto core = InferenceEngine::Core();
-            auto devices = core.GetAvailableDevices();
+            auto core = ov::Core();
+            auto devices = core.get_available_devices();
             for (const auto & device : devices) {
                 vsapi->propSetData(out, "devices", device.c_str(), -1, paAppend);
             }
-        } catch (const InferenceEngine::Exception& e) {
-            vsapi->setError(out, ("[IE exception] Initialize inference engine: "s + e.what()).c_str());
+        } catch (const ov::Exception& e) {
+            vsapi->setError(out, ("[OV exception] Initialize inference engine: "s + e.what()).c_str());
         } catch (const std::exception& e) {
             vsapi->setError(out, ("[Standard exception] Initialize inference engine: "s + e.what()).c_str());
         }