diff --git a/mediapipe/tasks/cc/audio/audio_classifier/audio_classifier.h b/mediapipe/tasks/cc/audio/audio_classifier/audio_classifier.h
index 373b315195..04463dad3d 100644
--- a/mediapipe/tasks/cc/audio/audio_classifier/audio_classifier.h
+++ b/mediapipe/tasks/cc/audio/audio_classifier/audio_classifier.h
@@ -165,7 +165,7 @@ class AudioClassifier : tasks::audio::core::BaseAudioTaskApi {
   // into multiple chunks. For this reason, the callback may be called multiple
   // times (once per chunk) for each call to this function.
   absl::Status ClassifyAsync(mediapipe::Matrix audio_block,
-                             double audio_sample_rate, int64 timestamp_ms);
+                             double audio_sample_rate, int64_t timestamp_ms);
 
   // Shuts down the AudioClassifier when all works are done.
   absl::Status Close() { return runner_->Close(); }
diff --git a/mediapipe/tasks/cc/audio/audio_embedder/audio_embedder.h b/mediapipe/tasks/cc/audio/audio_embedder/audio_embedder.h
index 1035fa0aa9..1dd6ce87fb 100644
--- a/mediapipe/tasks/cc/audio/audio_embedder/audio_embedder.h
+++ b/mediapipe/tasks/cc/audio/audio_embedder/audio_embedder.h
@@ -121,7 +121,7 @@ class AudioEmbedder : core::BaseAudioTaskApi {
   // into multiple chunks. For this reason, the callback may be called multiple
   // times (once per chunk) for each call to this function.
   absl::Status EmbedAsync(Matrix audio_block, double audio_sample_rate,
-                          int64 timestamp_ms);
+                          int64_t timestamp_ms);
 
   // Shuts down the AudioEmbedder when all works are done.
   absl::Status Close() { return runner_->Close(); }
diff --git a/mediapipe/tasks/cc/core/external_file_handler.h b/mediapipe/tasks/cc/core/external_file_handler.h
index 3150fde59a..1e23da0e7f 100644
--- a/mediapipe/tasks/cc/core/external_file_handler.h
+++ b/mediapipe/tasks/cc/core/external_file_handler.h
@@ -74,17 +74,17 @@ class ExternalFileHandler {
   void* buffer_{};
 
   // The mapped memory buffer offset, if any.
-  int64 buffer_offset_{};
+  int64_t buffer_offset_{};
 
   // The size in bytes of the mapped memory buffer, if any.
-  int64 buffer_size_{};
+  int64_t buffer_size_{};
 
   // As mmap(2) requires the offset to be a multiple of sysconf(_SC_PAGE_SIZE):
   // The aligned mapped memory buffer offset, if any.
-  int64 buffer_aligned_offset_{};
+  int64_t buffer_aligned_offset_{};
 
   // The aligned mapped memory buffer size in bytes taking into account the
   // offset shift introduced by buffer_aligned_memory_offset_, if any.
-  int64 buffer_aligned_size_{};
+  int64_t buffer_aligned_size_{};
 };
 
 }  // namespace core
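The four int64_t fields above carry the mmap(2) page-alignment bookkeeping that their comments describe. As a minimal sketch of that arithmetic (AlignForMmap is a hypothetical helper, not a MediaPipe API; it assumes POSIX sysconf):

    #include <cstdint>
    #include <unistd.h>

    // Round the requested offset down to a page boundary, as mmap(2) requires,
    // and widen the size by the amount the offset moved.
    void AlignForMmap(int64_t offset, int64_t size, int64_t* aligned_offset,
                      int64_t* aligned_size) {
      const int64_t page = sysconf(_SC_PAGE_SIZE);
      *aligned_offset = (offset / page) * page;
      *aligned_size = size + (offset - *aligned_offset);
    }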
#include "mediapipe/tasks/cc/text/custom_ops/ragged/ragged_tensor_to_tensor_tflite.h" +#include #include #include #include @@ -79,7 +80,9 @@ class RaggedTensorToTensorOpModel : public tflite::SingleOpModel { std::vector GetOutputShape() { return GetTensorShape(output_); } std::vector GetOutputFloat() { return ExtractVector(output_); } - std::vector GetOutputInt() { return ExtractVector(output_); } + std::vector GetOutputInt() { + return ExtractVector(output_); + } void InvokeFloat(const std::vector& shape, const std::vector& values, float default_value, @@ -93,7 +96,7 @@ class RaggedTensorToTensorOpModel : public tflite::SingleOpModel { SingleOpModel::Invoke(); } void InvokeInt(const std::vector& shape, - const std::vector& values, int32 default_value, + const std::vector& values, int32_t default_value, const std::vector>& partition_values) { PopulateTensor(input_shape_, shape); PopulateTensor(input_values_, values); diff --git a/mediapipe/tasks/cc/text/custom_ops/sentencepiece/sentencepiece_tokenizer_tflite.cc b/mediapipe/tasks/cc/text/custom_ops/sentencepiece/sentencepiece_tokenizer_tflite.cc index 481fd52376..ccb1e8a999 100644 --- a/mediapipe/tasks/cc/text/custom_ops/sentencepiece/sentencepiece_tokenizer_tflite.cc +++ b/mediapipe/tasks/cc/text/custom_ops/sentencepiece/sentencepiece_tokenizer_tflite.cc @@ -15,6 +15,8 @@ limitations under the License. #include "mediapipe/tasks/cc/text/custom_ops/sentencepiece/sentencepiece_tokenizer_tflite.h" +#include + #include "flatbuffers/flexbuffers.h" #include "mediapipe/tasks/cc/text/custom_ops/sentencepiece/optimized_encoder.h" #include "tensorflow/lite/c/common.h" @@ -85,8 +87,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { context->tensors[node->inputs->data[kReverseInput]]; const bool reverse = reverse_tensor.data.b[0]; - std::vector encoded; - std::vector splits; + std::vector encoded; + std::vector splits; const int num_strings = tflite::GetStringCount(&input_text); for (int i = 0; i < num_strings; ++i) { const auto strref = tflite::GetString(&input_text, i); diff --git a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h index b752840da6..1ea10ed2ab 100644 --- a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h +++ b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h @@ -82,7 +82,7 @@ struct GestureRecognizerOptions { // The result callback should only be specified when the running mode is set // to RunningMode::LIVE_STREAM. std::function, const Image&, - int64)> + int64_t)> result_callback = nullptr; }; @@ -157,7 +157,7 @@ class GestureRecognizer : tasks::vision::core::BaseVisionTaskApi { // provide the video frame's timestamp (in milliseconds). The input timestamps // must be monotonically increasing. absl::StatusOr RecognizeForVideo( - Image image, int64 timestamp_ms, + Image image, int64_t timestamp_ms, std::optional image_processing_options = std::nullopt); @@ -185,7 +185,7 @@ class GestureRecognizer : tasks::vision::core::BaseVisionTaskApi { // longer be valid when the callback returns. To access the image data // outside of the callback, callers need to make a copy of the image. // - The input timestamp in milliseconds. 
diff --git a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h
index b752840da6..1ea10ed2ab 100644
--- a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h
+++ b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h
@@ -82,7 +82,7 @@ struct GestureRecognizerOptions {
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
   std::function<void(absl::StatusOr<GestureRecognizerResult>, const Image&,
-                     int64)>
+                     int64_t)>
       result_callback = nullptr;
 };
 
@@ -157,7 +157,7 @@ class GestureRecognizer : tasks::vision::core::BaseVisionTaskApi {
   // provide the video frame's timestamp (in milliseconds). The input timestamps
   // must be monotonically increasing.
   absl::StatusOr<GestureRecognizerResult> RecognizeForVideo(
-      Image image, int64 timestamp_ms,
+      Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -185,7 +185,7 @@ class GestureRecognizer : tasks::vision::core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status RecognizeAsync(Image image, int64 timestamp_ms,
+  absl::Status RecognizeAsync(Image image, int64_t timestamp_ms,
                               std::optional<core::ImageProcessingOptions>
                                   image_processing_options = std::nullopt);
diff --git a/mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator.cc b/mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator.cc
index 53a359d22d..b88d54b21e 100644
--- a/mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator.cc
+++ b/mediapipe/tasks/cc/vision/hand_landmarker/calculators/hand_association_calculator.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
+#include <cstdint>
 #include <memory>
 #include <string>
 #include <vector>
@@ -161,7 +162,7 @@ class HandAssociationCalculator : public CalculatorBase {
   // Note: This rect_id_ is local to an instance of this calculator. And it is
   // expected that the hand tracking graph to have only one instance of
   // this association calculator.
-  int64 rect_id_ = 1;
+  int64_t rect_id_ = 1;
 
   inline int GetNextRectId() { return rect_id_++; }
 };
diff --git a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h
index 726780ff2c..e00f86bb56 100644
--- a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h
+++ b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h
@@ -70,7 +70,8 @@ struct HandLandmarkerOptions {
   // The user-defined result callback for processing live stream data.
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
-  std::function<void(absl::StatusOr<HandLandmarkerResult>, const Image&, int64)>
+  std::function<void(absl::StatusOr<HandLandmarkerResult>, const Image&,
+                     int64_t)>
       result_callback = nullptr;
 };
 
@@ -146,7 +147,7 @@ class HandLandmarker : tasks::vision::core::BaseVisionTaskApi {
   // provide the video frame's timestamp (in milliseconds). The input timestamps
   // must be monotonically increasing.
   absl::StatusOr<HandLandmarkerResult> DetectForVideo(
-      Image image, int64 timestamp_ms,
+      Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -174,7 +175,7 @@ class HandLandmarker : tasks::vision::core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status DetectAsync(Image image, int64 timestamp_ms,
+  absl::Status DetectAsync(Image image, int64_t timestamp_ms,
                            std::optional<core::ImageProcessingOptions>
                                image_processing_options = std::nullopt);
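For reference, a hedged usage sketch of the LIVE_STREAM callback whose signature changes above; the option fields and Create() factory follow this header, while the model path is hypothetical (assumes the mediapipe::tasks::vision::hand_landmarker headers and namespaces):

    auto options = std::make_unique<HandLandmarkerOptions>();
    options->base_options.model_asset_path = "hand_landmarker.task";  // hypothetical
    options->running_mode = core::RunningMode::LIVE_STREAM;
    options->result_callback = [](absl::StatusOr<HandLandmarkerResult> result,
                                  const Image& image, int64_t timestamp_ms) {
      // `image` is only valid for the duration of the callback; copy it if it
      // is needed afterwards.
    };
    auto landmarker = HandLandmarker::Create(std::move(options));
    // Each DetectAsync(image, timestamp_ms) call must use a timestamp greater
    // than the previous one.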
diff --git a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc
index 9575e2970b..14c6499e61 100644
--- a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc
+++ b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
+#include <cstdint>
 #include <memory>
 #include <vector>
@@ -55,7 +56,7 @@ using ::mediapipe::api2::builder::Source;
 using ::mediapipe::tasks::components::utils::AllowIf;
 using ::mediapipe::tasks::vision::hand_landmarker::proto::
     HandLandmarksDetectorGraphOptions;
-using LabelItems = mediapipe::proto_ns::Map<int64, ::mediapipe::LabelMapItem>;
+using LabelItems = mediapipe::proto_ns::Map<int64_t, ::mediapipe::LabelMapItem>;
 
 constexpr char kImageTag[] = "IMAGE";
 constexpr char kHandRectTag[] = "HAND_RECT";
diff --git a/mediapipe/tasks/cc/vision/image_classifier/image_classifier.h b/mediapipe/tasks/cc/vision/image_classifier/image_classifier.h
index 96050cbd07..4dad18c069 100644
--- a/mediapipe/tasks/cc/vision/image_classifier/image_classifier.h
+++ b/mediapipe/tasks/cc/vision/image_classifier/image_classifier.h
@@ -61,7 +61,7 @@ struct ImageClassifierOptions {
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
   std::function<void(absl::StatusOr<ImageClassifierResult>, const Image&,
-                     int64)>
+                     int64_t)>
       result_callback = nullptr;
 };
 
@@ -148,7 +148,7 @@ class ImageClassifier : tasks::vision::core::BaseVisionTaskApi {
   // provide the video frame's timestamp (in milliseconds). The input timestamps
   // must be monotonically increasing.
   absl::StatusOr<ImageClassifierResult> ClassifyForVideo(
-      mediapipe::Image image, int64 timestamp_ms,
+      mediapipe::Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -179,7 +179,7 @@ class ImageClassifier : tasks::vision::core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status ClassifyAsync(mediapipe::Image image, int64 timestamp_ms,
+  absl::Status ClassifyAsync(mediapipe::Image image, int64_t timestamp_ms,
                              std::optional<core::ImageProcessingOptions>
                                  image_processing_options = std::nullopt);
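One practical payoff of 64-bit millisecond timestamps: a signed 32-bit value overflows at 2^31 ms, roughly 24.8 days, which a long-lived stream or a large frame index can exceed. A small arithmetic sketch (TimestampMsForFrame is a hypothetical helper, not MediaPipe code):

    #include <cstdint>

    // Frame index -> millisecond timestamp; int64_t keeps this exact far
    // beyond the ~24.8-day limit of int32_t.
    int64_t TimestampMsForFrame(int64_t frame_index, double fps) {
      return static_cast<int64_t>(frame_index * 1000.0 / fps);
    }
    // At 30 fps, frame 100'000'000 maps to 3'333'333'333 ms, which already
    // exceeds the int32_t maximum of 2'147'483'647.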
diff --git a/mediapipe/tasks/cc/vision/image_embedder/image_embedder.h b/mediapipe/tasks/cc/vision/image_embedder/image_embedder.h
index 586b8cdca6..258b498e33 100644
--- a/mediapipe/tasks/cc/vision/image_embedder/image_embedder.h
+++ b/mediapipe/tasks/cc/vision/image_embedder/image_embedder.h
@@ -59,7 +59,8 @@ struct ImageEmbedderOptions {
   // The user-defined result callback for processing live stream data.
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
-  std::function<void(absl::StatusOr<ImageEmbedderResult>, const Image&, int64)>
+  std::function<void(absl::StatusOr<ImageEmbedderResult>, const Image&,
+                     int64_t)>
      result_callback = nullptr;
 };
 
@@ -130,7 +131,7 @@ class ImageEmbedder : core::BaseVisionTaskApi {
   // provide the video frame's timestamp (in milliseconds). The input timestamps
   // must be monotonically increasing.
   absl::StatusOr<ImageEmbedderResult> EmbedForVideo(
-      mediapipe::Image image, int64 timestamp_ms,
+      mediapipe::Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -163,7 +164,7 @@ class ImageEmbedder : core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status EmbedAsync(mediapipe::Image image, int64 timestamp_ms,
+  absl::Status EmbedAsync(mediapipe::Image image, int64_t timestamp_ms,
                           std::optional<core::ImageProcessingOptions>
                               image_processing_options = std::nullopt);
diff --git a/mediapipe/tasks/cc/vision/object_detector/object_detector.h b/mediapipe/tasks/cc/vision/object_detector/object_detector.h
index de2c0dbafd..197cdad765 100644
--- a/mediapipe/tasks/cc/vision/object_detector/object_detector.h
+++ b/mediapipe/tasks/cc/vision/object_detector/object_detector.h
@@ -84,7 +84,8 @@ struct ObjectDetectorOptions {
   // The user-defined result callback for processing live stream data.
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
-  std::function<void(absl::StatusOr<ObjectDetectorResult>, const Image&, int64)>
+  std::function<void(absl::StatusOr<ObjectDetectorResult>, const Image&,
+                     int64_t)>
      result_callback = nullptr;
 };
 
@@ -206,7 +207,7 @@ class ObjectDetector : public tasks::vision::core::BaseVisionTaskApi {
   // image_width) x [0, image_height)`, which are the dimensions of the
   // underlying image data.
   absl::StatusOr<ObjectDetectorResult> DetectForVideo(
-      mediapipe::Image image, int64 timestamp_ms,
+      mediapipe::Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -236,7 +237,7 @@ class ObjectDetector : public tasks::vision::core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status DetectAsync(mediapipe::Image image, int64 timestamp_ms,
+  absl::Status DetectAsync(mediapipe::Image image, int64_t timestamp_ms,
                            std::optional<core::ImageProcessingOptions>
                                image_processing_options = std::nullopt);
diff --git a/mediapipe/tasks/cc/vision/pose_landmarker/pose_landmarker.h b/mediapipe/tasks/cc/vision/pose_landmarker/pose_landmarker.h
index 314356aa01..69042b1772 100644
--- a/mediapipe/tasks/cc/vision/pose_landmarker/pose_landmarker.h
+++ b/mediapipe/tasks/cc/vision/pose_landmarker/pose_landmarker.h
@@ -68,7 +68,8 @@ struct PoseLandmarkerOptions {
   // The user-defined result callback for processing live stream data.
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
-  std::function<void(absl::StatusOr<PoseLandmarkerResult>, const Image&, int64)>
+  std::function<void(absl::StatusOr<PoseLandmarkerResult>, const Image&,
+                     int64_t)>
       result_callback = nullptr;
 
   // Whether to output segmentation masks.
@@ -146,7 +147,7 @@ class PoseLandmarker : tasks::vision::core::BaseVisionTaskApi {
   // provide the video frame's timestamp (in milliseconds). The input timestamps
   // must be monotonically increasing.
   absl::StatusOr<PoseLandmarkerResult> DetectForVideo(
-      Image image, int64 timestamp_ms,
+      Image image, int64_t timestamp_ms,
       std::optional<core::ImageProcessingOptions> image_processing_options =
           std::nullopt);
 
@@ -174,7 +175,7 @@ class PoseLandmarker : tasks::vision::core::BaseVisionTaskApi {
   // longer be valid when the callback returns. To access the image data
   // outside of the callback, callers need to make a copy of the image.
   // - The input timestamp in milliseconds.
-  absl::Status DetectAsync(Image image, int64 timestamp_ms,
+  absl::Status DetectAsync(Image image, int64_t timestamp_ms,
                            std::optional<core::ImageProcessingOptions>
                                image_processing_options = std::nullopt);
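Finally, a hedged sketch of the VIDEO running-mode loop implied by the DetectForVideo contract above; DecodeFrameAt, kNumFrames, and kFps are hypothetical, and the landmarker is assumed to have been created in VIDEO mode:

    // Assumes the mediapipe::tasks::vision::pose_landmarker headers/namespaces.
    absl::Status ProcessVideo(PoseLandmarker& landmarker) {
      for (int64_t i = 0; i < kNumFrames; ++i) {
        Image frame = DecodeFrameAt(i);  // hypothetical frame source
        const int64_t timestamp_ms = static_cast<int64_t>(i * 1000.0 / kFps);
        auto result = landmarker.DetectForVideo(frame, timestamp_ms);
        if (!result.ok()) return result.status();
        // ... consume *result ...
      }
      return absl::OkStatus();
    }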