upgrade to ov 2024.0
WolframRhodium committed Mar 8, 2024
1 parent d414e90 commit 0b82fef
Showing 3 changed files with 71 additions and 83 deletions.
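
Note: this commit moves the plugin from the deprecated InferenceEngine (ie_core.hpp) API to the OpenVINO 2.0 ov:: API, in both the build files and the source. For orientation only, a minimal sketch of the call sequence the diff adopts follows; the model path and device name are placeholders, and the plugin's actual tiling, per-thread request caching, and error handling are omitted.

#include <memory>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;  // replaces InferenceEngine::Core

    // read_model() replaces ReadNetwork(); it returns an ov::Model instead of a CNNNetwork.
    std::shared_ptr<ov::Model> model = core.read_model("model.onnx");

    // compile_model() replaces LoadNetwork(); properties travel in an ov::AnyMap
    // instead of a std::map<std::string, std::string>.
    ov::CompiledModel compiled = core.compile_model(model, "CPU", ov::AnyMap{});

    // create_infer_request() replaces CreateInferRequest().
    ov::InferRequest request = compiled.create_infer_request();

    // Tensors are accessed directly; no Blob / MemoryBlob wmap()/rmap() round trip.
    float * input = request.get_input_tensor().data<float>();
    (void) input;  // fill with planar frame data here

    request.infer();  // replaces Infer()

    const float * output = request.get_output_tensor().data<float>();
    (void) output;  // copy back into the destination frame here

    return 0;
}
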
9 changes: 4 additions & 5 deletions .github/workflows/linux-ov.yml
@@ -36,7 +36,7 @@ jobs:
if: steps.cache-protobuf.outputs.cache-hit != 'true'
with:
repository: protocolbuffers/protobuf
# follows protobuf in https://github.com/openvinotoolkit/openvino/tree/2023.2.0/thirdparty/protobuf
# follows protobuf in https://github.com/openvinotoolkit/openvino/tree/2024.0.0/thirdparty/protobuf
# if you change this, remember to bump the version of the cache key.
ref: fe271ab76f2ad2b2b28c10443865d2af21e27e0e
fetch-depth: 1
@@ -69,9 +69,9 @@ jobs:
uses: actions/checkout@v4
with:
repository: onnx/onnx
# follows onnx in https://github.com/openvinotoolkit/openvino/tree/2023.2.0/thirdparty/onnx
# follows onnx in https://github.com/openvinotoolkit/openvino/tree/2024.0.0/thirdparty/onnx
# if you change this, remember to bump the version of the cache key.
ref: 1014f41f17ecc778d63e760a994579d96ba471ff
ref: b86cc54efce19530fb953e4b21f57e6b3888534c
fetch-depth: 1
path: vsov/onnx

@@ -103,7 +103,7 @@ jobs:
- name: Setup OpenVINO
run: |
curl -L -o ov.tgz https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.2/linux/l_openvino_toolkit_ubuntu22_2023.2.0.13089.cfd42bd2cb0_x86_64.tgz
curl -L -o ov.tgz https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.0/linux/l_openvino_toolkit_ubuntu22_2024.0.0.14509.34caeefd078_x86_64.tgz
tar -xf ov.tgz
mv l_openvino_* openvino -v
@@ -112,7 +112,6 @@ jobs:
-D CMAKE_BUILD_TYPE=Release
-D CMAKE_CXX_FLAGS="-Wall -ffast-math -march=x86-64-v3"
-D VAPOURSYNTH_INCLUDE_DIRECTORY="`pwd`/vapoursynth/include"
-D InferenceEngine_DIR=openvino/runtime/cmake
-D OpenVINO_DIR=openvino/runtime/cmake
-D ENABLE_VISUALIZATION=ON
-D WIN32_SHARED_OPENVINO=ON
18 changes: 9 additions & 9 deletions vsov/CMakeLists.txt
@@ -7,7 +7,6 @@ set(ENABLE_VISUALIZATION OFF CACHE BOOL "Enable support for network visualizatio
set(WIN32_SHARED_OPENVINO OFF CACHE BOOL "Build for win32 with shared openvino library")

find_package(OpenVINO REQUIRED CONFIG)
find_package(InferenceEngine REQUIRED CONFIG)

add_library(vsov SHARED
vs_openvino.cpp
@@ -24,20 +23,21 @@ if(WIN32_SHARED_OPENVINO)
target_compile_definitions(vsov PRIVATE WIN32_SHARED_OPENVINO)
endif()

target_include_directories(vsov PRIVATE
${VAPOURSYNTH_INCLUDE_DIRECTORY}
${ONNX_INCLUDE_DIRS}
${InferenceEngine_INCLUDE_DIRS}
)

if(WIN32_SHARED_OPENVINO)
find_package(protobuf REQUIRED CONFIG)
find_package(ONNX REQUIRED CONFIG)
target_link_libraries(vsov PRIVATE ${InferenceEngine_LIBRARIES} onnx)
target_link_libraries(vsov PRIVATE onnx)
else()
target_link_libraries(vsov PRIVATE ${InferenceEngine_LIBRARIES} openvino::onnx)
target_link_libraries(vsov PRIVATE openvino::frontend::onnx)
endif()

target_include_directories(vsov PRIVATE
${VAPOURSYNTH_INCLUDE_DIRECTORY}
${ONNX_INCLUDE_DIRS}
)

target_link_libraries(vsov PRIVATE openvino::runtime)

set_target_properties(vsov PROPERTIES
CXX_EXTENSIONS OFF
CXX_STANDARD 17
127 changes: 58 additions & 69 deletions vsov/vs_openvino.cpp
@@ -20,7 +20,7 @@
#include <onnx/common/version.h>
#include <onnx/onnx_pb.h>

#include <ie_core.hpp>
#include <openvino/openvino.hpp>
#include <openvino/pass/constant_folding.hpp>

#ifdef ENABLE_VISUALIZATION
@@ -50,16 +50,16 @@ static const VSPlugin * myself = nullptr;


static std::array<int, 4> getShape(
const InferenceEngine::ExecutableNetwork & network,
const ov::CompiledModel & network,
bool input
) {

InferenceEngine::SizeVector dims;
ov::Shape dims;

if (input) {
dims = network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims();
dims = network.input().get_shape();
} else {
dims = network.GetOutputsInfo().cbegin()->second->getTensorDesc().getDims();
dims = network.output().get_shape();
}

std::array<int, 4> ret;
@@ -119,14 +119,13 @@ static std::optional<std::string> checkIOInfo(
bool is_output
) {

if (info->getPrecision() != InferenceEngine::Precision::FP32) {
if (info.get_element_type() != ov::element::f32) {
return "expects network IO with type fp32";
}
const auto & desc = info->getTensorDesc();
if (desc.getLayout() != InferenceEngine::Layout::NCHW) {
return "expects network IO with layout NCHW";
}
const auto & dims = desc.getDims();
// if (ov::layout::get_layout(info) != ov::Layout("NCHW")) {
// return "expects network IO with layout NCHW";
// }
const auto & dims = info.get_shape();
if (dims.size() != 4) {
return "expects network with 4-D IO";
}
@@ -148,27 +147,23 @@

[[nodiscard]]
static std::optional<std::string> checkNetwork(
const InferenceEngine::CNNNetwork & network
const std::shared_ptr<ov::Model> & network
) {

const auto & inputs_info = network.getInputsInfo();

if (auto num_inputs = std::size(inputs_info); num_inputs != 1) {
if (auto num_inputs = std::size(network->inputs()); num_inputs != 1) {
return "network input count must be 1, got " + std::to_string(num_inputs);
}

const auto & input_info = inputs_info.cbegin()->second;
const auto & input_info = network->input();
if (auto err = checkIOInfo(input_info, false); err.has_value()) {
return err.value();
}

const auto & outputs_info = network.getOutputsInfo();

if (auto num_outputs = std::size(outputs_info); num_outputs != 1) {
if (auto num_outputs = std::size(network->outputs()); num_outputs != 1) {
return "network output count must be 1, got " + std::to_string(num_outputs);
}

const auto & output_info = outputs_info.cbegin()->second;
const auto & output_info = network->output();
if (auto err = checkIOInfo(output_info, true); err.has_value()) {
return err.value();
}
@@ -179,12 +174,12 @@ static std::optional<std::string> checkNetwork(

[[nodiscard]]
static std::optional<std::string> checkNodesAndNetwork(
const InferenceEngine::ExecutableNetwork & network,
const ov::CompiledModel & network,
const std::vector<const VSVideoInfo *> & vis
) {

const auto & network_in_dims = (
network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims()
network.input().get_tensor().get_shape()
);

int network_in_channels = static_cast<int>(network_in_dims[1]);
@@ -205,15 +200,16 @@
}



static void setDimensions(
std::unique_ptr<VSVideoInfo> & vi,
const InferenceEngine::ExecutableNetwork & network,
const ov::CompiledModel & network,
VSCore * core,
const VSAPI * vsapi
) {

auto in_dims = network.GetInputsInfo().cbegin()->second->getTensorDesc().getDims();
auto out_dims = network.GetOutputsInfo().cbegin()->second->getTensorDesc().getDims();
const auto & in_dims = network.input().get_shape();
const auto & out_dims = network.output().get_shape();

vi->height *= out_dims[2] / in_dims[2];
vi->width *= out_dims[3] / in_dims[3];
@@ -226,13 +222,13 @@
}


static std::variant<std::string, std::map<std::string, std::string>> getConfig(
static std::variant<std::string, ov::AnyMap> getConfig(
VSFuncRef * config_func,
VSCore * core,
const VSAPI * vsapi
) {

std::map<std::string, std::string> config;
ov::AnyMap config;

if (config_func == nullptr) {
return config;
@@ -285,9 +281,9 @@ struct OVData {

int overlap_w, overlap_h;

InferenceEngine::Core core;
InferenceEngine::ExecutableNetwork executable_network;
std::unordered_map<std::thread::id, InferenceEngine::InferRequest> infer_requests;
ov::Core core;
ov::CompiledModel executable_network;
std::unordered_map<std::thread::id, ov::InferRequest> infer_requests;
std::shared_mutex infer_requests_lock;

std::string input_name;
@@ -396,7 +392,7 @@ static const VSFrameRef *VS_CC vsOvGetFrame(

auto thread_id = std::this_thread::get_id();
bool initialized = true;
InferenceEngine::InferRequest * infer_request;
ov::InferRequest * infer_request;

d->infer_requests_lock.lock_shared();
try {
@@ -409,9 +405,9 @@
if (!initialized) {
std::lock_guard _ { d->infer_requests_lock };
try {
d->infer_requests.emplace(thread_id, d->executable_network.CreateInferRequest());
} catch (const InferenceEngine::Exception& e) {
return set_error("[IE exception] Create inference request: "s + e.what());
d->infer_requests.emplace(thread_id, d->executable_network.create_infer_request());
} catch (const ov::Exception & e) {
return set_error("[OV exception] Create inference request: "s + e.what());
} catch (const std::exception& e) {
return set_error("[Standard exception] Create inference request: "s + e.what());
}
@@ -429,11 +425,7 @@
int x_crop_end = (x == src_width - src_tile_w) ? 0 : d->overlap_w;

{
InferenceEngine::Blob::Ptr input = infer_request->GetBlob(d->input_name);

auto minput = input->as<InferenceEngine::MemoryBlob>();
auto minputHolder = minput->wmap();
uint8_t * input_buffer = minputHolder.as<uint8_t *>();
auto input_buffer = (uint8_t *) infer_request->get_input_tensor().data<float>();

for (const auto & _src_ptr : src_ptrs) {
const uint8_t * src_ptr { _src_ptr +
@@ -451,19 +443,15 @@
}

try {
infer_request->Infer();
} catch (const InferenceEngine::Exception & e) {
return set_error("[IE exception] Create inference request: "s + e.what());
infer_request->infer();
} catch (const ov::Exception & e) {
return set_error("[OV exception] Create inference request: "s + e.what());
} catch (const std::exception& e) {
return set_error("[Standard exception] Create inference request: "s + e.what());
}

{
InferenceEngine::Blob::CPtr output = infer_request->GetBlob(d->output_name);

auto moutput = output->as<const InferenceEngine::MemoryBlob>();
auto moutputHolder = moutput->rmap();
const uint8_t * output_buffer = moutputHolder.as<const uint8_t *>();
auto output_buffer = (const uint8_t *) infer_request->get_output_tensor().data<float>();

for (int plane = 0; plane < dst_planes; ++plane) {
uint8_t * dst_ptr = (dst_ptrs[plane] +
@@ -533,11 +521,11 @@
) {

std::unique_ptr<OVData> d = nullptr;

try {
d = std::make_unique<OVData>();
} catch (const InferenceEngine::Exception& e) {
vsapi->setError(out, ("[IE exception] Initialize inference engine: "s + e.what()).c_str());
} catch (const ov::Exception& e) {
vsapi->setError(out, ("[OV exception] Initialize inference engine: "s + e.what()).c_str());
return ;
} catch (const std::exception& e) {
vsapi->setError(out, ("[Standard exception] Initialize inference engine: "s + e.what()).c_str());
@@ -675,12 +663,11 @@
}

{
InferenceEngine::CNNNetwork network;
std::shared_ptr<ov::Model> network;
try {
auto empty = InferenceEngine::Blob::CPtr();
network = d->core.ReadNetwork(onnx_data, empty);
} catch (const InferenceEngine::Exception& e) {
return set_error("[IE exception] ReadNetwork(): "s + e.what());
network = d->core.read_model(onnx_data, ov::Tensor());
} catch (const ov::Exception& e) {
return set_error("[OV exception] ReadNetwork(): "s + e.what());
} catch (const std::exception& e) {
return set_error("[Standard exception] ReadNetwork(): "s + e.what());
}
@@ -689,10 +676,8 @@
return set_error(err.value());
}

auto function = network.getFunction(); // mutable

try {
ov::pass::ConstantFolding().run_on_model(function);
ov::pass::ConstantFolding().run_on_model(network);
} catch (const ov::Exception & e) {
return set_error(e.what());
}
@@ -701,7 +686,7 @@
const char * dot_path = vsapi->propGetData(in, "dot_path", 0, &error);
if (!error) {
try {
ov::pass::VisualizeTree(dot_path, nullptr, true).run_on_model(function);
ov::pass::VisualizeTree(dot_path, nullptr, true).run_on_model(network);
} catch (const ov::Exception & e) {
return set_error(e.what());
}
@@ -714,11 +699,11 @@
if (std::holds_alternative<std::string>(config_ret)) {
return set_error(std::get<std::string>(config_ret));
}
auto & config = std::get<std::map<std::string, std::string>>(config_ret);
auto & config = std::get<ov::AnyMap>(config_ret);

try {
d->executable_network = d->core.LoadNetwork(network, device, config);
} catch (const InferenceEngine::Exception & e) {
d->executable_network = d->core.compile_model(network, device, config);
} catch (const ov::Exception & e) {
return set_error(e.what());
}

@@ -728,8 +713,8 @@

setDimensions(d->out_vi, d->executable_network, core, vsapi);

d->input_name = d->executable_network.GetInputsInfo().cbegin()->first;
d->output_name = d->executable_network.GetOutputsInfo().cbegin()->first;
d->input_name = d->executable_network.input().get_names().cbegin()->front();
d->output_name = d->executable_network.output().get_names().cbegin()->front();

VSCoreInfo core_info;
vsapi->getCoreInfo2(core, &core_info);
@@ -780,7 +765,11 @@ VS_EXTERNAL_API(void) VapourSynthPluginInit(
vsapi->propSetData(out, "version", VERSION, -1, paReplace);

std::ostringstream ostream;
ostream << IE_VERSION_MAJOR << '.' << IE_VERSION_MINOR << '.' << IE_VERSION_PATCH;
ostream << OPENVINO_VERSION_MAJOR << '.' << OPENVINO_VERSION_MINOR << '.' << OPENVINO_VERSION_PATCH;
vsapi->propSetData(out, "inference_engine_version_build", ostream.str().c_str(), -1, paReplace);

ostream.clear();
ostream << ov::get_openvino_version();
vsapi->propSetData(out, "inference_engine_version", ostream.str().c_str(), -1, paReplace);

vsapi->propSetData(
@@ -798,13 +787,13 @@

auto availableDevices = [](const VSMap *, VSMap * out, void *, VSCore *, const VSAPI *vsapi) {
try {
auto core = InferenceEngine::Core();
auto devices = core.GetAvailableDevices();
auto core = ov::Core();
auto devices = core.get_available_devices();
for (const auto & device : devices) {
vsapi->propSetData(out, "devices", device.c_str(), -1, paAppend);
}
} catch (const InferenceEngine::Exception& e) {
vsapi->setError(out, ("[IE exception] Initialize inference engine: "s + e.what()).c_str());
} catch (const ov::Exception& e) {
vsapi->setError(out, ("[OV exception] Initialize inference engine: "s + e.what()).c_str());
} catch (const std::exception& e) {
vsapi->setError(out, ("[Standard exception] Initialize inference engine: "s + e.what()).c_str());
}

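Usage note: the device/config plumbing also changes type in this commit. getConfig() now fills an ov::AnyMap, and the AvailableDevices callback queries ov::Core directly. A small standalone sketch of that API follows; the PERFORMANCE_HINT property is only an illustrative key, not something this plugin sets.

#include <iostream>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;

    // ov::AnyMap maps std::string keys to ov::Any values, so string-valued
    // entries like the ones getConfig() builds keep working unchanged.
    ov::AnyMap config;
    config["PERFORMANCE_HINT"] = "LATENCY";  // illustrative key only

    // Same enumeration the plugin's AvailableDevices callback performs.
    for (const std::string & device : core.get_available_devices()) {
        std::cout << device << '\n';
    }

    // Same version reporting the updated VapourSynthPluginInit uses.
    std::cout << ov::get_openvino_version() << std::endl;
    return 0;
}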