
Commit

more verbose logging
fmessmer committed Jul 17, 2023
1 parent 1b40977 commit 33a3fcb
Showing 1 changed file with 28 additions and 7 deletions.
35 changes: 28 additions & 7 deletions include/ros1_bridge/action_factory.hpp
@@ -80,6 +80,7 @@ class ActionFactory_1_2 : public ActionFactoryInterface
virtual void shutdown()
{
std::lock_guard<std::mutex> lock(mutex_);
RCLCPP_INFO(ros2_node_->get_logger(), "ActionFactory_1_2::shutdown");
for (auto goal : goals_) {
std::thread([handler = goal.second]() mutable {handler->cancel();}).detach();
}
@@ -90,6 +91,7 @@ class ActionFactory_1_2 : public ActionFactoryInterface
{
// try to find goal and cancel it
std::lock_guard<std::mutex> lock(mutex_);
RCLCPP_INFO(ros2_node_->get_logger(), "ActionFactory_1_2::cancel_cb");
auto it = goals_.find(gh1.getGoalID().id);
if (it != goals_.end()) {
std::thread([handler = it->second]() mutable {handler->cancel();}).detach();
@@ -99,14 +101,15 @@ class ActionFactory_1_2 : public ActionFactoryInterface
void goal_cb(ROS1GoalHandle gh1)
{
const std::string goal_id = gh1.getGoalID().id;
RCLCPP_INFO(ros2_node_->get_logger(), "ActionFactory_1_2::goal_cb");

// create a new handler for the goal
std::shared_ptr<GoalHandler> handler;
handler = std::make_shared<GoalHandler>(gh1, client_, ros2_node_->get_logger());
std::lock_guard<std::mutex> lock(mutex_);
goals_.insert(std::make_pair(goal_id, handler));

RCLCPP_INFO(ros2_node_->get_logger(), "Sending goal");
RCLCPP_INFO(ros2_node_->get_logger(), "ActionFactory_1_2::goal_cb Sending goal");
std::thread(
[handler, goal_id, this]() mutable {
// execute the goal remotely
@@ -125,6 +128,7 @@ class ActionFactory_1_2 : public ActionFactoryInterface
void cancel()
{
std::lock_guard<std::mutex> lock(mutex_);
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::cancel");
canceled_ = true;
if (gh2_) { // cancel goal if possible
auto fut = client_->async_cancel_goal(gh2_);
@@ -133,15 +137,18 @@ class ActionFactory_1_2 : public ActionFactoryInterface

void handle()
{
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle");
auto goal1 = gh1_.getGoal();
ROS2Goal goal2;
translate_goal_1_to_2(*gh1_.getGoal(), goal2);
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle translate_goal_1_to_2");

if (!client_->wait_for_action_server(std::chrono::seconds(1))) {
RCLCPP_INFO(this->logger_, "Action server not available after waiting");
gh1_.setRejected();
return;
}
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle action_server available");

std::shared_future<ROS2ClientGoalHandle> gh2_future;
auto send_goal_ops = ROS2SendGoalOptions();
@@ -170,15 +177,19 @@ class ActionFactory_1_2 : public ActionFactoryInterface
translate_feedback_2_to_1(feedback1, *feedback2);
gh1_.publishFeedback(feedback1);
};
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle send_goal_ops configured");

// send goal to ROS2 server, set-up feedback
gh2_future = client_->async_send_goal(goal2, send_goal_ops);
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle async_send_goal");

auto future_result = client_->async_get_result(gh2_future.get());
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle async_get_result");
auto res2 = future_result.get();

ROS1Result res1;
translate_result_2_to_1(res1, *(res2.result));
RCLCPP_INFO(this->logger_, "ActionFactory_1_2::handle translate_result_2_to_1");

std::lock_guard<std::mutex> lock(mutex_);
if (res2.code == rclcpp_action::ResultCode::SUCCEEDED) {
@@ -293,11 +304,11 @@ class ActionFactory_2_1 : public ActionFactoryInterface
{
std::size_t goal_id = get_goal_id_hash(gh2->get_goal_id());
std::shared_ptr<GoalHandler> handler;
handler = std::make_shared<GoalHandler>(gh2, client_);
handler = std::make_shared<GoalHandler>(gh2, client_, ros2_node_->get_logger());
std::lock_guard<std::mutex> lock(mutex_);
goals_.insert(std::make_pair(goal_id, handler));

RCLCPP_INFO(ros2_node_->get_logger(), "Sending goal");
RCLCPP_INFO(ros2_node_->get_logger(), "ActionFactory_2_1::handle_accepted Sending goal");
std::thread(
[handler, goal_id, this]() mutable {
// execute the goal remotely
@@ -342,7 +353,10 @@ class ActionFactory_2_1 : public ActionFactoryInterface
[this, &result_ready,
&cond_result](ROS1ClientGoalHandle goal_handle) mutable // transition_cb
{
ROS_INFO("Goal [%s]", goal_handle.getCommState().toString().c_str());
RCLCPP_INFO(
this->logger_,
"ActionFactory_2_1::handle Goal [%s]",
goal_handle.getCommState().toString().c_str());
if (goal_handle.getCommState() == actionlib::CommState::RECALLING) {
// cancelled before being processed
auto result2 = std::make_shared<ROS2Result>();
@@ -355,7 +369,10 @@ class ActionFactory_2_1 : public ActionFactoryInterface
auto result2 = std::make_shared<ROS2Result>();
auto result1 = goal_handle.getResult();
translate_result_1_to_2(*result2, *result1);
ROS_INFO("Goal [%s]", goal_handle.getTerminalState().toString().c_str());
RCLCPP_INFO(
this->logger_,
"ActionFactory_2_1::handle Goal [%s]",
goal_handle.getTerminalState().toString().c_str());
if (goal_handle.getTerminalState() == actionlib::TerminalState::SUCCEEDED) {
gh2_->succeed(result2);
} else {
@@ -379,13 +396,17 @@ class ActionFactory_2_1 : public ActionFactoryInterface
cond_result.wait(lck, [&result_ready] {return result_ready.load();});
}

GoalHandler(std::shared_ptr<ROS2ServerGoalHandle> & gh2, std::shared_ptr<ROS1Client> & client)
: gh2_(gh2), client_(client), canceled_(false) {}
GoalHandler(
std::shared_ptr<ROS2ServerGoalHandle> & gh2,
std::shared_ptr<ROS1Client> & client,
rclcpp::Logger logger)
: gh2_(gh2), client_(client), logger_(logger), canceled_(false) {}

private:
std::shared_ptr<ROS1ClientGoalHandle> gh1_;
std::shared_ptr<ROS2ServerGoalHandle> gh2_;
std::shared_ptr<ROS1Client> client_;
rclcpp::Logger logger_;
bool canceled_; // cancel was called
std::mutex mutex_;
};
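A minimal, hypothetical sketch of the logging pattern the diff relies on: a handler class takes an rclcpp::Logger in its constructor and stores it by value, so its methods can call RCLCPP_INFO without holding a reference to the node. The DemoGoalHandler class and the logging_demo node name below are made up for illustration and are not part of ros1_bridge.

```cpp
#include <memory>
#include <string>

#include <rclcpp/rclcpp.hpp>

// Hypothetical handler: the caller obtains a logger from its rclcpp node and
// hands it to the constructor, mirroring how the factories above pass
// ros2_node_->get_logger() into GoalHandler.
class DemoGoalHandler
{
public:
  DemoGoalHandler(const std::string & goal_id, rclcpp::Logger logger)
  : goal_id_(goal_id), logger_(logger) {}

  void handle()
  {
    // Log method entry, as the added RCLCPP_INFO lines do in handle()/cancel().
    RCLCPP_INFO(logger_, "DemoGoalHandler::handle goal [%s]", goal_id_.c_str());
  }

  void cancel()
  {
    RCLCPP_INFO(logger_, "DemoGoalHandler::cancel goal [%s]", goal_id_.c_str());
  }

private:
  std::string goal_id_;
  rclcpp::Logger logger_;  // copied logger, valid independently of the node
};

int main(int argc, char ** argv)
{
  rclcpp::init(argc, argv);
  auto node = rclcpp::Node::make_shared("logging_demo");

  // Pass the node's logger down at construction time.
  DemoGoalHandler handler("demo_goal", node->get_logger());
  handler.handle();
  handler.cancel();

  rclcpp::shutdown();
  return 0;
}
```

Storing the logger by value keeps the handler usable from detached worker threads, since it does not need to dereference the node to log.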
