diff --git a/onnxruntime/core/providers/openvino/backends/basic_backend.cc b/onnxruntime/core/providers/openvino/backends/basic_backend.cc
index ede988edea94b..be46733a39190 100644
--- a/onnxruntime/core/providers/openvino/backends/basic_backend.cc
+++ b/onnxruntime/core/providers/openvino/backends/basic_backend.cc
@@ -346,7 +346,7 @@ void BasicBackend::StartAsyncInference(Ort::KernelContext& context, OVInferReque
         input_tensor_shape[tensor_iter] = *i;
         tensor_iter += 1;
       }
-      auto input = graph_input_info.at(input_idx);
+      const auto& input = graph_input_info.at(input_idx);
       OVTensorPtr tensor_ptr;
       // avoid input copies on the CPU device
       if (global_context_.device_type.find("CPU") != std::string::npos) {
@@ -387,7 +387,7 @@ void BasicBackend::StartAsyncInference(Ort::KernelContext& context, OVInferReque
       ort_ov_tensor_map[ort_tensor_key] = ov_tensor_data;

       try {
-        infer_request->SetTensor(input_name, ov_tensor_data.tensor_ptr);
+        infer_request->SetTensor(std::move(input_name), ov_tensor_data.tensor_ptr);
       } catch (const char* msg) {
         ORT_THROW(msg);
       }
@@ -425,14 +425,14 @@ void BasicBackend::StartAsyncInference(Ort::KernelContext& context, OVInferReque
     if ((it == ort_ov_tensor_map.end()) ||
         (it != ort_ov_tensor_map.end() && (it->second.ort_ptr != tensor.GetTensorRawData()))) {
       ov_tensor_data_t ov_tensor_data;
-      auto output = graph_output_info.at(output_idx);
+      const auto& output = graph_output_info.at(output_idx);
       ov_tensor_data.ort_ptr = tensor.GetTensorRawData();
       ov_tensor_data.tensor_ptr = std::make_shared<ov::Tensor>(output.get_element_type(), output.get_shape(),
                                                                const_cast<void*>(tensor.GetTensorRawData()));
       ort_ov_tensor_map[ort_tensor_key] = ov_tensor_data;

       try {
-        infer_request->SetTensor(output_name, ov_tensor_data.tensor_ptr);
+        infer_request->SetTensor(std::move(output_name), ov_tensor_data.tensor_ptr);
       } catch (const char* msg) {
         ORT_THROW(msg);
       }
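
For context, a minimal standalone sketch of the two copy-avoidance idioms the patch applies: binding `const auto&` instead of copying an element out of a container, and moving a `std::string` into a by-value parameter. `PortInfo` and `set_tensor` here are hypothetical stand-ins, not the real types; the actual code uses OpenVINO graph ports and `OVInferRequest::SetTensor`, and the `std::move` only saves a copy if `SetTensor` takes the name by value, which the change suggests.

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the port objects held in
// graph_input_info / graph_output_info in the real code.
struct PortInfo {
  std::string name;
  std::vector<size_t> shape;
};

// Takes the name by value so a caller can move into it, mirroring the
// SetTensor(std::move(input_name), ...) change in the diff.
void set_tensor(std::string name) {
  std::cout << "bound tensor: " << name << '\n';
}

int main() {
  std::vector<PortInfo> graph_input_info{{"input_0", {1, 3, 224, 224}}};

  // Before: `auto input = graph_input_info.at(0);` copies the whole
  // element. Binding a const reference avoids that copy.
  const auto& input = graph_input_info.at(0);

  std::string input_name = input.name;
  // Moving the name into the by-value parameter avoids one more string
  // copy; input_name must not be read again after this call.
  set_tensor(std::move(input_name));
  return 0;
}
```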