Enable sharing for InferenceCalculatorGlAdvanced
Allows the same GpuInferenceRunner instance in InferenceCalculatorGlAdvanced to be shared across multiple calculator instances, whether in the same graph or in different graphs.

PiperOrigin-RevId: 701024167
MediaPipe Team authored and copybara-github committed Nov 28, 2024
1 parent a987a99 commit 39cc1d7
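
The mechanism: the calculator now holds its runner behind the InferenceRunner interface instead of the concrete GPU class. Below is a minimal sketch of that interface, reconstructed from the overrides visible in the diff that follows, not copied from mediapipe/calculators/tensor/inference_runner.h (the real declaration may differ in detail, and the include that supplies InputOutputTensorNames is an assumption):

// Sketch only: signatures inferred from the `override`s in this diff; the
// inference_io_mapper.h include is assumed to declare InputOutputTensorNames.
#include <vector>

#include "absl/status/statusor.h"
#include "mediapipe/calculators/tensor/inference_io_mapper.h"
#include "mediapipe/calculators/tensor/tensor_span.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/tensor.h"

namespace mediapipe {

class InferenceRunner {
 public:
  virtual ~InferenceRunner() = default;

  // Runs inference on the given input tensors and returns the output tensors.
  virtual absl::StatusOr<std::vector<Tensor>> Run(
      CalculatorContext* cc, const TensorSpan& input_tensors) = 0;

  // Exposes the model's tensor names so stream-to-tensor I/O can be remapped.
  virtual const InputOutputTensorNames& GetInputOutputTensorNames() const = 0;
};

}  // namespace mediapipe

With GpuInferenceRunner implementing this interface and the calculator's member retyped to std::unique_ptr<InferenceRunner>, CreateInferenceRunner() becomes the only code that names the concrete class, which is the seam that lets a shared instance be supplied instead.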
Showing 2 changed files with 13 additions and 12 deletions.
1 change: 1 addition & 0 deletions mediapipe/calculators/tensor/BUILD
@@ -633,6 +633,7 @@ cc_library(
         ":inference_io_mapper",
         ":inference_on_disk_cache_helper",
         ":tensor_span",
+        "//mediapipe/calculators/tensor:inference_runner",
         "//mediapipe/framework:calculator_framework",
         "//mediapipe/framework:mediapipe_profiling",
         "//mediapipe/framework/api2:packet",
24 changes: 12 additions & 12 deletions mediapipe/calculators/tensor/inference_calculator_gl_advanced.cc
@@ -25,6 +25,7 @@
 #include "mediapipe/calculators/tensor/inference_calculator.h"
 #include "mediapipe/calculators/tensor/inference_io_mapper.h"
 #include "mediapipe/calculators/tensor/inference_on_disk_cache_helper.h"
+#include "mediapipe/calculators/tensor/inference_runner.h"
 #include "mediapipe/calculators/tensor/tensor_span.h"
 #include "mediapipe/framework/api2/packet.h"
 #include "mediapipe/framework/calculator_framework.h"
@@ -63,17 +64,17 @@ class InferenceCalculatorGlAdvancedImpl
 
  private:
   // Helper class that wraps everything related to GPU inference acceleration.
-  class GpuInferenceRunner {
+  class GpuInferenceRunner : public InferenceRunner {
    public:
     ~GpuInferenceRunner();
 
     absl::Status Init(CalculatorContext* cc,
                       std::shared_ptr<GlContext> gl_context);
 
-    absl::StatusOr<std::vector<Tensor>> Process(
-        CalculatorContext* cc, const TensorSpan& input_tensors);
+    absl::StatusOr<std::vector<Tensor>> Run(
+        CalculatorContext* cc, const TensorSpan& input_tensors) override;
 
-    const InputOutputTensorNames& GetInputOutputTensorNames() const;
+    const InputOutputTensorNames& GetInputOutputTensorNames() const override;
 
    private:
     absl::Status InitTFLiteGPURunner(
@@ -99,7 +100,7 @@ class InferenceCalculatorGlAdvancedImpl
   absl::StatusOr<std::unique_ptr<GpuInferenceRunner>> CreateInferenceRunner(
       CalculatorContext* cc);
 
-  std::unique_ptr<GpuInferenceRunner> gpu_inference_runner_;
+  std::unique_ptr<InferenceRunner> inference_runner_;
   mediapipe::GlCalculatorHelper gpu_helper_;
 };
 
@@ -141,7 +142,7 @@ absl::Status InferenceCalculatorGlAdvancedImpl::GpuInferenceRunner::Init(
 }
 
 absl::StatusOr<std::vector<Tensor>>
-InferenceCalculatorGlAdvancedImpl::GpuInferenceRunner::Process(
+InferenceCalculatorGlAdvancedImpl::GpuInferenceRunner::Run(
     CalculatorContext* cc, const TensorSpan& input_tensors) {
   std::vector<Tensor> output_tensors;
   for (int i = 0; i < input_tensors.size(); ++i) {
@@ -267,26 +268,25 @@ absl::Status InferenceCalculatorGlAdvancedImpl::UpdateContract(
 
 absl::Status InferenceCalculatorGlAdvancedImpl::Open(CalculatorContext* cc) {
   MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
-  gpu_inference_runner_ = std::make_unique<GpuInferenceRunner>();
-  MP_RETURN_IF_ERROR(
-      gpu_inference_runner_->Init(cc, gpu_helper_.GetSharedGlContext()));
+
+  MP_ASSIGN_OR_RETURN(inference_runner_, CreateInferenceRunner(cc));
   return InferenceCalculatorNodeImpl::UpdateIoMapping(
-      cc, gpu_inference_runner_->GetInputOutputTensorNames());
+      cc, inference_runner_->GetInputOutputTensorNames());
 }
 
 absl::StatusOr<std::vector<Tensor>> InferenceCalculatorGlAdvancedImpl::Process(
     CalculatorContext* cc, const TensorSpan& tensor_span) {
   std::vector<Tensor> output_tensors;
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([&]() -> absl::Status {
     MP_ASSIGN_OR_RETURN(output_tensors,
-                        gpu_inference_runner_->Process(cc, tensor_span));
+                        inference_runner_->Run(cc, tensor_span));
     return absl::OkStatus();
   }));
   return output_tensors;
 }
 
 absl::Status InferenceCalculatorGlAdvancedImpl::Close(CalculatorContext* cc) {
-  gpu_inference_runner_.reset();
+  inference_runner_.reset();
 
   return absl::OkStatus();
 }
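The sharing mechanism itself is not part of this diff; the commit only opens the seam by retyping the runner to the base interface. As a purely hypothetical illustration of what that enables, a cache keyed by model could hand the same runner to several calculator instances (RunnerRegistry and its factory parameter are invented for this sketch and do not exist in MediaPipe):

// Hypothetical illustration only; builds on the InferenceRunner sketch above.
// A real version would need locking, since graphs may run on separate threads.
#include <functional>
#include <memory>
#include <string>

#include "absl/container/flat_hash_map.h"

class RunnerRegistry {
 public:
  // Returns the runner cached for `model_path`, building it on first use.
  // `factory` stands in for whatever constructs the concrete runner.
  std::shared_ptr<InferenceRunner> GetOrCreate(
      const std::string& model_path,
      std::function<std::unique_ptr<InferenceRunner>()> factory) {
    auto it = runners_.find(model_path);
    if (it == runners_.end()) {
      it = runners_.emplace(model_path, factory()).first;
    }
    return it->second;
  }

 private:
  absl::flat_hash_map<std::string, std::shared_ptr<InferenceRunner>> runners_;
};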
