diff --git a/.isort.cfg b/.isort.cfg index 62c45c2d0..246ab876a 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -1,5 +1,5 @@ [settings] -known_third_party = PIL,actionlib,actionlib_msgs,arm_navigation_msgs,at_work_robot_example_ros,atwork_ros_msgs,brics_actuator,catkin_pkg,diagnostic_msgs,dynamic_reconfigure,geometry_msgs,mas_perception_msgs,mcr_common_converters_ros,mcr_manipulation_measurers_ros,mcr_manipulation_msgs,mcr_manipulation_pose_selector_ros,mcr_perception_msgs,mcr_perception_states,mcr_pose_generation_ros,mcr_states,mercury_planner,mir_actions,mir_controller_msgs,mir_knowledge,mir_knowledge_ros,mir_navigation_msgs,mir_planning_msgs,mir_planning_visualisation,mir_pregrasp_planning,mir_pregrasp_planning_ros,mir_states,mir_states_common,mir_world_generation,move_base_msgs,moveit_commander,moveit_msgs,nav_msgs,numpy,pandas,param_server_utils,roslib,rospkg,rosplan_dispatch_msgs,rosplan_knowledge_msgs,rospy,rostest,rosunit,sensor_msgs,shape_msgs,simple_script_server,smach,smach_ros,std_msgs,std_srvs,tf,visualization_msgs,yaml,zmq +known_third_party = PIL,actionlib,actionlib_msgs,arm_navigation_msgs,at_work_robot_example_ros,atwork_ros_msgs,brics_actuator,catkin_pkg,diagnostic_msgs,dynamic_reconfigure,geometry_msgs,mas_perception_msgs,mcr_common_converters_ros,mcr_manipulation_measurers_ros,mcr_manipulation_msgs,mir_manipulation_msgs,mcr_manipulation_pose_selector_ros,mcr_perception_msgs,mcr_perception_states,mcr_pose_generation_ros,mcr_states,mercury_planner,mir_actions,mir_controller_msgs,mir_knowledge,mir_knowledge_ros,mir_navigation_msgs,mir_planning_msgs,mir_planning_visualisation,mir_pregrasp_planning,mir_pregrasp_planning_ros,mir_states,mir_states_common,mir_world_generation,move_base_msgs,moveit_commander,moveit_msgs,nav_msgs,numpy,pandas,param_server_utils,roslib,rospkg,rosplan_dispatch_msgs,rosplan_knowledge_msgs,rospy,rostest,rosunit,sensor_msgs,shape_msgs,simple_script_server,smach,smach_ros,std_msgs,std_srvs,tf,visualization_msgs,yaml,zmq multi_line_output=3 include_trailing_comma=True force_grid_wrap=0 diff --git a/docs/source/mir_perception/camera.rst b/docs/source/mir_perception/camera.rst index 45d0d9bf2..1d790090f 100644 --- a/docs/source/mir_perception/camera.rst +++ b/docs/source/mir_perception/camera.rst @@ -171,7 +171,7 @@ How to use the RealSense2 camera .. 
code-block:: bash - roslaunch mcr_scene_segmentation scene_segmentation.launch input_pointcloud_topic:=/camera/depth_registered/points + roslaunch mcr_scene_segmentation scene_segmentation.launch input_pointcloud_topic:=/camera/depth/color/points Publish the message 'e-start': diff --git a/docs/source/mir_planning/planning_core.rst b/docs/source/mir_planning/planning_core.rst index 7b20e48e1..2cfd7c111 100644 --- a/docs/source/mir_planning/planning_core.rst +++ b/docs/source/mir_planning/planning_core.rst @@ -29,7 +29,7 @@ To test with mockup action servers without a robot roscore roslaunch mir_planning_core task_planning_components.launch - rosrun mir_task_executor task_executor_mockup + rosrun mir_planner_executor planner_executor_mockup roslaunch mir_task_planning upload_problem.launch roslaunch mir_planning_core task_planning_sm.launch diff --git a/mir_command_tools/mir_teleop/CMakeLists.txt b/mir_command_tools/mir_teleop/CMakeLists.txt index bf630f28b..a8c6767e1 100644 --- a/mir_command_tools/mir_teleop/CMakeLists.txt +++ b/mir_command_tools/mir_teleop/CMakeLists.txt @@ -12,6 +12,7 @@ find_package(catkin REQUIRED sensor_msgs std_srvs mcr_manipulation_msgs + mir_manipulation_msgs roslint ) @@ -33,6 +34,7 @@ catkin_package( sensor_msgs std_srvs mcr_manipulation_msgs + mir_manipulation_msgs ) include_directories( diff --git a/mir_command_tools/mir_teleop/package.xml b/mir_command_tools/mir_teleop/package.xml index 67b9e49a1..d91aad0e0 100644 --- a/mir_command_tools/mir_teleop/package.xml +++ b/mir_command_tools/mir_teleop/package.xml @@ -22,6 +22,7 @@ sensor_msgs std_srvs mcr_manipulation_msgs + mir_manipulation_msgs roslint brics_actuator @@ -34,6 +35,7 @@ sensor_msgs std_srvs mcr_manipulation_msgs + mir_manipulation_msgs roslaunch diff --git a/mir_command_tools/mir_teleop/ros/include/mir_teleop/teleop_joypad.h b/mir_command_tools/mir_teleop/ros/include/mir_teleop/teleop_joypad.h index 2e1d3f656..4cc32f582 100644 --- a/mir_command_tools/mir_teleop/ros/include/mir_teleop/teleop_joypad.h +++ b/mir_command_tools/mir_teleop/ros/include/mir_teleop/teleop_joypad.h @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mir_command_tools/mir_teleop/ros/src/teleop_joypad.cpp b/mir_command_tools/mir_teleop/ros/src/teleop_joypad.cpp index 76bc76afc..cfc8b6a36 100644 --- a/mir_command_tools/mir_teleop/ros/src/teleop_joypad.cpp +++ b/mir_command_tools/mir_teleop/ros/src/teleop_joypad.cpp @@ -53,7 +53,7 @@ TeleOpJoypad::TeleOpJoypad(ros::NodeHandle &nh) pub_arm_cart_vel_ = nh_->advertise( "/arm_1/arm_controller/cartesian_velocity_command", 1); pub_gripper_command_ = - nh_->advertise("gripper_command", 1); + nh_->advertise("gripper_command", 1); srv_base_motors_on_ = nh_->serviceClient("/base/switchOnMotors"); srv_base_motors_off_ = nh_->serviceClient("/base/switchOffMotors"); @@ -203,7 +203,7 @@ bool TeleOpJoypad::getArmParameter() bool TeleOpJoypad::moveGripper(int gripper_command) { - mcr_manipulation_msgs::GripperCommand command_msgs; + mir_manipulation_msgs::GripperCommand command_msgs; command_msgs.command = gripper_command; @@ -337,10 +337,10 @@ void TeleOpJoypad::cbJoypad(const sensor_msgs::Joy::ConstPtr &command) button_gripper_active_ = !button_gripper_active_; if (button_gripper_active_) { ROS_INFO("open gripper"); - this->moveGripper(mcr_manipulation_msgs::GripperCommand::OPEN); + this->moveGripper(mir_manipulation_msgs::GripperCommand::OPEN); } else { ROS_INFO("close gripper"); - 
this->moveGripper(mcr_manipulation_msgs::GripperCommand::CLOSE); + this->moveGripper(mir_manipulation_msgs::GripperCommand::CLOSE); } } diff --git a/mir_common/mir_description/meshes/misc/e_stop_mount.STL b/mir_common/mir_description/meshes/misc/e_stop_mount.STL new file mode 100644 index 000000000..708682a39 Binary files /dev/null and b/mir_common/mir_description/meshes/misc/e_stop_mount.STL differ diff --git a/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount.STL b/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount.STL new file mode 100644 index 000000000..a9cee7b7e Binary files /dev/null and b/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount.STL differ diff --git a/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount_back.STL b/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount_back.STL new file mode 100644 index 000000000..4069eaf93 Binary files /dev/null and b/mir_common/mir_description/meshes/sensor_mounts/intel_camera_mount_back.STL differ diff --git a/mir_common/mir_description/meshes/sensor_mounts/side_camera_mount.STL b/mir_common/mir_description/meshes/sensor_mounts/side_camera_mount.STL new file mode 100644 index 000000000..a267a7daa Binary files /dev/null and b/mir_common/mir_description/meshes/sensor_mounts/side_camera_mount.STL differ diff --git a/mir_common/mir_description/urdf/grippers/dynamixel/v2/dynamixel_gripper.urdf.xacro b/mir_common/mir_description/urdf/grippers/dynamixel/v2/dynamixel_gripper.urdf.xacro index dffd77fa9..2bc5c7c3b 100644 --- a/mir_common/mir_description/urdf/grippers/dynamixel/v2/dynamixel_gripper.urdf.xacro +++ b/mir_common/mir_description/urdf/grippers/dynamixel/v2/dynamixel_gripper.urdf.xacro @@ -32,20 +32,23 @@ - + - + + + + - + diff --git a/mir_controllers/mir_visual_servoing/ros/launch/visual_servoing_pipeline.launch b/mir_controllers/mir_visual_servoing/ros/launch/visual_servoing_pipeline.launch index 97f700c53..ba2ffba39 100644 --- a/mir_controllers/mir_visual_servoing/ros/launch/visual_servoing_pipeline.launch +++ b/mir_controllers/mir_visual_servoing/ros/launch/visual_servoing_pipeline.launch @@ -4,7 +4,7 @@ - + diff --git a/mir_manipulation/mir_gripper_controller/CMakeLists.txt b/mir_manipulation/mir_gripper_controller/CMakeLists.txt index 56725d598..8612a5c25 100644 --- a/mir_manipulation/mir_gripper_controller/CMakeLists.txt +++ b/mir_manipulation/mir_gripper_controller/CMakeLists.txt @@ -4,7 +4,7 @@ project(mir_gripper_controller) find_package(catkin REQUIRED COMPONENTS actionlib - mcr_manipulation_msgs + mir_manipulation_msgs roscpp roslint std_msgs @@ -12,7 +12,7 @@ find_package(catkin REQUIRED catkin_package( CATKIN_DEPENDS - mcr_manipulation_msgs + mir_manipulation_msgs rospy ) diff --git a/mir_manipulation/mir_gripper_controller/README.md b/mir_manipulation/mir_gripper_controller/README.md index 55a60c2b8..91a991f89 100644 --- a/mir_manipulation/mir_gripper_controller/README.md +++ b/mir_manipulation/mir_gripper_controller/README.md @@ -14,7 +14,10 @@ More details on the controller and corresponding code can be found [here](https: 1. Make sure to uplaod the latest [code](https://github.com/b-it-bots/youbot_dynamixel_gripper_controller/tree/master/openrb_150_controller) on to the controller. 5. Run `roscore` command. 5. Run `dynamixel_gripper_controller_ros` in the `ros/scripts` directory. -6. To control the gripper publish to the `/arm_1/gripper_command` topic, e.g. `rostopic pub /arm_1/gripper_command std_msgs/String "command: 0.0"` +6. 
To control the gripper publish to the `/arm_1/gripper_command` topic, e.g.: + ```bash + rostopic pub /arm_1/gripper_command mir_manipulation_msgs/GripperCommand "command: 0.0" + ``` 7. To see the state of the gripper subscribe to the `/arm_1/gripper_feedback`, e.g. `rostopic echo /arm_1/gripper_feedback`. ## Usage @@ -22,3 +25,24 @@ More details on the controller and corresponding code can be found [here](https: * For more specific position control, one can publish a value between `0.0 and 1.0`. * To further open the gripper, one can publish a value between `-1.5 and 0.0`. * One can subscribe the state of the gripper, the format consists of the following fields: `state` (`GRIPPER_CLOSE`, `OBJECT_GRASPED`, `OBJECT_SLIPPED`, `GRIPPER_OPEN`, `GRIPPER_INTER`), `parsing_error` (0 - false, 1 - true), `last_command` (0, 1). + +## Troubleshooting + +### Board at /dev/youbout/gripper not found +* Check if the board is connected to the computer. +* Check if the board is connected to the correct USB port. +* Check the `idProduct` of the Open-RB board is properly set in /etc/udev/rules.d/80-youboot-brsu-2-devices.rules +* To get the `idProduct` of the Open-RB board: + - run `dmesg | grep OpenRB`, you will see something like this: + ```bash + [ 6.449181] usb 1-2.3: Product: OpenRB-150 + ``` + - run `dmesg | grep "usb 1-2.3"`: in the generated output, find the line that looks like: + ```bash + usb 1-2.3: New USB device found, idVendor=2f5d, idProduct=2202, bcdDevice= 1.00 + ``` + - From this, get the `idProduct` and update it in the file `/etc/udev/rules.d/80-youboot-brsu-2-devices.rules` + - Then run `sudo udevadm control --reload` + - Now, unplug and plug the board again. It should be detected now. + - To test, run the `serial_interface.py` file in the `ros/src` directory. It should close and open the gripper. 
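A node-based equivalent of the `rostopic` example above, as a minimal untested sketch: it assumes only the `command` field shown in that example and the value ranges listed in the Usage section.

```python
#!/usr/bin/env python3
# Sketch: send a gripper command from a node instead of `rostopic pub`.
import rospy
from mir_manipulation_msgs.msg import GripperCommand

rospy.init_node("gripper_command_example")
pub = rospy.Publisher("/arm_1/gripper_command", GripperCommand, queue_size=1)
rospy.sleep(1.0)  # give the publisher time to connect to the controller

msg = GripperCommand()
msg.command = 0.5  # values in [0.0, 1.0] set a position; negative values open the gripper further
pub.publish(msg)
```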
+ diff --git a/mir_manipulation/mir_gripper_controller/package.xml b/mir_manipulation/mir_gripper_controller/package.xml index b4ecd1533..5f4963136 100644 --- a/mir_manipulation/mir_gripper_controller/package.xml +++ b/mir_manipulation/mir_gripper_controller/package.xml @@ -20,10 +20,10 @@ roslint sensor_msgs std_msgs - mcr_manipulation_msgs + mir_manipulation_msgs dynamixel_controllers - mcr_manipulation_msgs + mir_manipulation_msgs rospy sensor_msgs brics_actuator diff --git a/mir_manipulation/mir_gripper_controller/ros/scripts/dynamixel_gripper_controller_ros b/mir_manipulation/mir_gripper_controller/ros/scripts/dynamixel_gripper_controller_ros index 519addbe1..d89a3088c 100755 --- a/mir_manipulation/mir_gripper_controller/ros/scripts/dynamixel_gripper_controller_ros +++ b/mir_manipulation/mir_gripper_controller/ros/scripts/dynamixel_gripper_controller_ros @@ -3,7 +3,7 @@ import rospy from std_msgs.msg import String from sensor_msgs.msg import JointState -from mcr_manipulation_msgs.msg import GripperCommand +from mir_manipulation_msgs.msg import GripperCommand import serial_interface import math import json @@ -53,7 +53,7 @@ class GripperController: if command > 1.0: command = 1.0 - elif command < -1.5: + elif command < -1.76: command = 0.0 if command == 1.0: @@ -66,7 +66,7 @@ class GripperController: elif command > 0.0 and command < 1.0: rospy.logdebug('Moving the gripper to a specific position.') json_command['command'] = command - elif command < 0.0 and command > -1.5: + elif command < 0.0 and command > -1.76: rospy.logdebug('Opening the gripper above and beyond.') json_command['command'] = command @@ -92,8 +92,8 @@ class GripperController: self.feedback_publisher.publish(json.dumps(msg)) # convert gripper positions from degrees to radians - gripper_right_motor_position = math.radians(msg['right_gripper_pos']) - gripper_left_motor_position = math.radians(msg['left_gripper_pos']) + gripper_right_motor_position = 0.0 # math.radians(msg['right_gripper_pos']) + gripper_left_motor_position = 0.0 # math.radians(msg['left_gripper_pos']) # Publish joint states joint_state = JointState() diff --git a/mir_manipulation/mir_gripper_controller/ros/scripts/list_ports.py b/mir_manipulation/mir_gripper_controller/ros/scripts/list_ports.py new file mode 100644 index 000000000..0c8fee12d --- /dev/null +++ b/mir_manipulation/mir_gripper_controller/ros/scripts/list_ports.py @@ -0,0 +1,5 @@ +import serial.tools.list_ports +ports = serial.tools.list_ports.comports() + +for port, desc, hwid in sorted(ports): + print("{}: {} [{}]".format(port, desc, hwid)) \ No newline at end of file diff --git a/mir_manipulation/mir_gripper_controller/ros/scripts/serial_interface.py b/mir_manipulation/mir_gripper_controller/ros/scripts/serial_interface.py index 272addfa2..ed189b211 100644 --- a/mir_manipulation/mir_gripper_controller/ros/scripts/serial_interface.py +++ b/mir_manipulation/mir_gripper_controller/ros/scripts/serial_interface.py @@ -58,12 +58,15 @@ def receive(self): msg_str = msg_str.decode().strip() - if len(msg_str) < 5 or not '{' in msg_str: return None if '}{' not in msg_str: - return [json.loads(msg_str)] + try: + return [json.loads(msg_str)] + except Exception as e: + rospy.logerr(e) + return None msgs = msg_str.split("}{") @@ -79,3 +82,18 @@ def receive(self): msgs_json.append(json.loads(msg)) return msgs_json + +if __name__ == '__main__': + serial_interface = SerialInterface(9600, 0.1, '239A') + serial_interface.open_port() + serial_interface.send({'command': 1}) + rospy.sleep(1) + 
serial_interface.send({'command': 0.5}) + rospy.sleep(1) + serial_interface.send({'command': 0.0}) + rospy.sleep(1) + serial_interface.send({'command': 0.2}) + rospy.sleep(1) + serial_interface.send({'command': 0.8}) + rospy.sleep(1) + serial_interface.send({'command': 0.0}) \ No newline at end of file diff --git a/mir_manipulation/mir_moveit_youbot_brsu_2/config/joint_limits.yaml b/mir_manipulation/mir_moveit_youbot_brsu_2/config/joint_limits.yaml index 4fab6e4d4..034f7e113 100644 --- a/mir_manipulation/mir_moveit_youbot_brsu_2/config/joint_limits.yaml +++ b/mir_manipulation/mir_moveit_youbot_brsu_2/config/joint_limits.yaml @@ -37,5 +37,5 @@ joint_limits: max_velocity: 6.178465545 has_acceleration_limits: true max_acceleration: 1.5 -default_velocity_scaling_factor: 0.7 -default_acceleration_scaling_factor: 0.7 +default_velocity_scaling_factor: 1.0 +default_acceleration_scaling_factor: 1.0 diff --git a/mir_manipulation/mir_moveit_youbot_brsu_2/config/youbot.srdf b/mir_manipulation/mir_moveit_youbot_brsu_2/config/youbot.srdf index 0e371f1cd..f8f4b8ced 100644 --- a/mir_manipulation/mir_moveit_youbot_brsu_2/config/youbot.srdf +++ b/mir_manipulation/mir_moveit_youbot_brsu_2/config/youbot.srdf [hunk bodies for @@ -84,9 +84,6 @@, @@ -130,69 +127,76 @@, @@ -305,6 +309,8 @@, @@ -312,6 +318,7 @@, @@ -319,121 +326,166 @@, @@ -443,32 +495,32 @@, @@ -492,32 +544,32 @@ and @@ -553,21 +605,6 @@ omitted: the SRDF XML elements were stripped during extraction, leaving only bare +/- markers] diff --git a/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/orientation_independent_ik.py b/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/orientation_independent_ik.py index 3ed110b3b..4a32211ad 100644 --- a/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/orientation_independent_ik.py +++ b/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/orientation_independent_ik.py @@ -22,6 +22,9 @@ def __init__(self, debug=False): self._base_link_to_arm_base_offset = None self._initialise_base_to_arm_offset() + # create subscriber to pick from shelf topic to limit orientation independent ik to certain pitch ranges + self.is_picking_from_shelf = False + if self.debug: self._pose_array_pub = rospy.Publisher('~pose_samples', PoseArray, queue_size=1) self._pose_out_pub = rospy.Publisher('~pose_out', PoseStamped, queue_size=1) @@ -58,7 +61,13 @@ def get_reachable_pose_and_joint_msg_from_point(self, x, y, z, frame_id): :returns: (geometry_msgs.msg.PoseStamped, brics_actuator.JointPositions) or None """ - pitch_ranges = [(0.0, 0.0), (-30.0, 0.0), (-60.0, -30.0), (-90.0, -60.0), (0.0, 10.0)] + # check is_picking_from_shelf from parameter server + self.is_picking_from_shelf = rospy.get_param("/pick_from_shelf_server/pick_statemachine_says_shelf", False) +
rospy.loginfo(f"[orientation_independent_ik] shelf picking is set to: {self.is_picking_from_shelf}") + if self.is_picking_from_shelf=='True': + pitch_ranges = [(-20.0, -15.0)] # limiting the pitch range to avoid collisions with shelf and table + else: + pitch_ranges = [(0.0, 0.0), (-30.0, 0.0), (-60.0, -30.0), (-90.0, -60.0), (0.0, 10.0)] return self._get_reachable_pose_and_joint_msg_from_point_and_pitch_ranges( x, y, z, frame_id, pitch_ranges) @@ -145,9 +154,9 @@ def _get_reachable_pose_and_joint_msg_from_point_and_pitch_ranges( reachable_pose, joint_angles = self._get_reachable_pose_and_configuration(pose_samples) if reachable_pose is not None: found_solution = True - rospy.logdebug('Found solution') - rospy.logdebug('Pitch range: ' + str(pitch_range)) - rospy.logdebug('Yaw : ' + str(yaw)) + rospy.loginfo('Found solution') + rospy.loginfo('Pitch range: ' + str(pitch_range)) + rospy.loginfo('Yaw : ' + str(yaw)) if self.debug: self._pose_out_pub.publish(reachable_pose) return (reachable_pose, OrientationIndependentIK.get_joint_pos_msg_from_joint_angles(joint_angles)) diff --git a/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/pregrasp_planner_node.py b/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/pregrasp_planner_node.py index 18adcf056..0df739275 100644 --- a/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/pregrasp_planner_node.py +++ b/mir_manipulation/mir_pregrasp_planning/ros/src/mir_pregrasp_planning_ros/pregrasp_planner_node.py @@ -105,6 +105,9 @@ def __init__(self): # joint offset for creating pre-pregrasp pose self.joint_offset = None + # needed for the pregrasp planner to shift to independent ik when dealing with shift + self.is_picking_from_shelf = False + # pose generator self.gripper = rospy.get_param("~gripper_config_matrix", None) assert self.gripper is not None, "Gripper config matrix must be specified." 
@@ -138,6 +141,9 @@ def __init__(self): rospy.Subscriber("~pose_in", geometry_msgs.msg.PoseStamped, self.pose_cb) # publishers + self.original_pose_pub = rospy.Publisher( + "~original_pose", geometry_msgs.msg.PoseStamped, queue_size=1 + ) self.event_out = rospy.Publisher( "~event_out", std_msgs.msg.String, queue_size=1 ) @@ -334,6 +340,7 @@ def orientation_independent_ik_routine(self, transformed_pose, grasp_type): input_pose.header = transformed_pose.header input_pose.pose.position = transformed_pose.pose.position if grasp_type == "side_grasp": + rospy.loginfo("[Pregrasp Planning] Using side grasp") input_pose.pose.position.x += self.side_grasp_offset_x solution = self.orientation_independent_ik.get_reachable_pose_and_joint_msg_from_point( input_pose.pose.position.x, input_pose.pose.position.y, @@ -351,6 +358,7 @@ def orientation_independent_ik_routine(self, transformed_pose, grasp_type): joint_waypoints = mcr_manipulation_msgs.msg.JointSpaceWayPointsList() joint_config = [p.value for p in joint_msg.positions] if grasp_type == "side_grasp" and self.generate_pregrasp_waypoint: + rospy.loginfo("[Pregrasp Planning] using side grasp and generating pregrasp waypoint") pregrasp_input_pose = copy.deepcopy(input_pose) pregrasp_input_pose.pose.position.x -= 0.05 pregrasp_solution = self.orientation_independent_ik.get_reachable_pose_and_joint_msg_from_point( @@ -427,6 +435,7 @@ def running_state(self): grasp_type = "top_grasp" else: grasp_type = "side_grasp" + rospy.loginfo("[Pregrasp Planning] Using grasp type: {0}".format(grasp_type)) if grasp_type == "side_grasp": @@ -437,6 +446,11 @@ def running_state(self): self.grasp_type.publish(grasp_type) pose_samples = self.pose_generator.calculate_poses_list(modified_pose) self.pose_samples_pub.publish(pose_samples) + # self.original_pose_pub.publish(modified_pose) + + # check is_picking_from_shelf from parameter server + self.is_picking_from_shelf = rospy.get_param("/pick_from_shelf_server/pick_statemachine_says_shelf", False) + rospy.loginfo(f"[Pregrasp Planning] shelf picking is set to: {self.is_picking_from_shelf}") # if default ik is true, try default ik routine if self.default_ik_flag and not self.adaptive_ik_flag: @@ -452,7 +466,7 @@ def running_state(self): return "INIT" # if orientation independent IK failed and adaptive IK is set to true, try again with orientation independent IK - if self.orientation_independent_ik_flag and not self.adaptive_ik_flag: + if self.is_picking_from_shelf=='True' or (self.orientation_independent_ik_flag and not self.adaptive_ik_flag): if self.orientation_independent_ik_routine(transformed_pose, grasp_type): self.event_out.publish("e_success") self.reset_component_data() @@ -463,7 +477,7 @@ def running_state(self): return 'INIT' # if adaptive IK is set to true, try both default and orientation independent IK routines - if self.adaptive_ik_flag: + if self.adaptive_ik_flag and not self.is_picking_from_shelf=='True': if self.default_ik_routine(pose_samples, modified_pose, grasp_type): self.event_out.publish("e_success") self.reset_component_data() diff --git a/mir_perception/mir_barrier_tape_detection/CMakeLists.txt b/mir_perception/mir_barrier_tape_detection/CMakeLists.txt index d08882cd3..077835500 100644 --- a/mir_perception/mir_barrier_tape_detection/CMakeLists.txt +++ b/mir_perception/mir_barrier_tape_detection/CMakeLists.txt @@ -18,9 +18,10 @@ find_package(OpenCV REQUIRED) find_package(PCL 1.10 REQUIRED) set (CMAKE_CXX_STANDARD 14) -add_compile_options( - -O3 -) +set (CMAKE_BUILD_TYPE Release) +# 
add_compile_options( +# -O3 +# ) generate_dynamic_reconfigure_options( ros/config/BarrierTapeDetection.cfg diff --git a/mir_perception/mir_barrier_tape_detection/ros/config/barrier_tape_detection.yaml b/mir_perception/mir_barrier_tape_detection/ros/config/barrier_tape_detection.yaml index a6d4cd4ee..86fe5f194 100644 --- a/mir_perception/mir_barrier_tape_detection/ros/config/barrier_tape_detection.yaml +++ b/mir_perception/mir_barrier_tape_detection/ros/config/barrier_tape_detection.yaml @@ -2,7 +2,7 @@ mir_perception/front_camera/barrier_tape_detection: min_area: 10 color_thresh_min_h: 40 color_thresh_min_s: 50 - color_thresh_min_v: 70 + color_thresh_min_v: 60 color_thresh_max_h: 70 color_thresh_max_s: 100 color_thresh_max_v: 100 @@ -10,9 +10,9 @@ mir_perception/front_camera/barrier_tape_detection: mir_perception/back_camera/barrier_tape_detection: min_area: 10 - color_thresh_min_h: 30 + color_thresh_min_h: 40 color_thresh_min_s: 50 - color_thresh_min_v: 28 + color_thresh_min_v: 60 color_thresh_max_h: 70 color_thresh_max_s: 100 color_thresh_max_v: 100 diff --git a/mir_perception/mir_barrier_tape_detection/ros/launch/barrier_tape_detection.launch b/mir_perception/mir_barrier_tape_detection/ros/launch/barrier_tape_detection.launch index 0a362e6aa..6acda8c52 100644 --- a/mir_perception/mir_barrier_tape_detection/ros/launch/barrier_tape_detection.launch +++ b/mir_perception/mir_barrier_tape_detection/ros/launch/barrier_tape_detection.launch @@ -10,17 +10,17 @@ - - - + + + - + - + target_frame: base_laser_front_link # Leave disabled to output scan in pointcloud frame @@ -48,18 +48,18 @@ - - - - + + + + - + - + target_frame: base_laser_rear_link # Leave disabled to output scan in pointcloud frame diff --git a/mir_perception/mir_cavity_detector/ros/launch/cavity_detector.launch b/mir_perception/mir_cavity_detector/ros/launch/cavity_detector.launch index 5bdc5beca..2aa7eb0ab 100644 --- a/mir_perception/mir_cavity_detector/ros/launch/cavity_detector.launch +++ b/mir_perception/mir_cavity_detector/ros/launch/cavity_detector.launch @@ -4,8 +4,8 @@ - - + + diff --git a/mir_perception/mir_cavity_detector/ros/launch/detector_selector_cavity.launch b/mir_perception/mir_cavity_detector/ros/launch/detector_selector_cavity.launch index 125c3295d..0b89711d0 100644 --- a/mir_perception/mir_cavity_detector/ros/launch/detector_selector_cavity.launch +++ b/mir_perception/mir_cavity_detector/ros/launch/detector_selector_cavity.launch @@ -9,8 +9,8 @@ - - + + diff --git a/mir_perception/mir_empty_space_detection/README.md b/mir_perception/mir_empty_space_detection/README.md index 58d012ee5..d7d26e021 100644 --- a/mir_perception/mir_empty_space_detection/README.md +++ b/mir_perception/mir_empty_space_detection/README.md @@ -37,7 +37,7 @@ present in its circle. 
### In - `/mir_perception/empty_space_detector/event_in` -- `/mir_perception/empty_space_detector/input_point_cloud` -> `/arm_cam3d/depth_registered/points` +- `/mir_perception/empty_space_detector/input_point_cloud` -> `/arm_cam3d/depth/color/points` ### Out - `/mir_perception/empty_space_detector/empty_spaces` diff --git a/mir_perception/mir_empty_space_detection/ros/config/params.yaml b/mir_perception/mir_empty_space_detection/ros/config/params.yaml index da149e2cc..cc179d92d 100644 --- a/mir_perception/mir_empty_space_detection/ros/config/params.yaml +++ b/mir_perception/mir_empty_space_detection/ros/config/params.yaml @@ -5,16 +5,16 @@ voxel_filter_limit_min: -0.15 voxel_filter_limit_max: 0.3 enable_passthrough_filter: False passthrough_filter_field_name: x -passthrough_filter_limit_min: 0.45 # all the min distance starts from the baselink +passthrough_filter_limit_min: 0.46 # all the min distance starts from the baselink passthrough_filter_limit_max: 0.7 enable_cropbox_filter: True -cropbox_filter_min_x: 0.45 -cropbox_filter_max_x: 0.8 -cropbox_filter_min_y: -0.4 -cropbox_filter_max_y: 0.4 -cropbox_filter_min_z: 0.0 -cropbox_filter_max_z: 0.6 -normal_radius_search: 0.03 +cropbox_filter_min_x: 0.41 #0.45 ideal_grass = 0.40 ideal_plain=0.38 +cropbox_filter_max_x: 0.56 #0.55 ideal_grass = 0.621 +cropbox_filter_min_y: -0.40 #-0.25 +cropbox_filter_max_y: 0.31 #0.25 +cropbox_filter_min_z: -0.1 #-0.1 +cropbox_filter_max_z: 0.6 #0.6 +normal_radius_search: 0.03 #0.03 use_omp: true num_cores: 8 sac_max_iterations: 1000 @@ -37,10 +37,10 @@ cluster_max_height: 0.09 cluster_max_length: 0.25 cluster_min_distance_to_polygon: 0.04 octree_resolution: 0.01 -object_height_above_workspace: 0.035 +object_height_above_workspace: 0.0 # setting it to 0 because we reduced the gripper height # parameters for finding empty spaces on a plane -empty_space_point_count_percentage_threshold: 0.8 -empty_space_radius: 0.045 -num_of_empty_spaces_required: 4 +empty_space_point_count_percentage_threshold: 0.83 +empty_space_radius: 0.0675 +num_of_empty_spaces_required: 2 trial_duration: 3.0 -enable_debug_pc: False \ No newline at end of file +enable_debug_pc: True diff --git a/mir_perception/mir_empty_space_detection/ros/launch/empty_space_detector.launch b/mir_perception/mir_empty_space_detection/ros/launch/empty_space_detector.launch index 8232ec851..8869a5d56 100644 --- a/mir_perception/mir_empty_space_detection/ros/launch/empty_space_detector.launch +++ b/mir_perception/mir_empty_space_detection/ros/launch/empty_space_detector.launch @@ -1,8 +1,8 @@ - - + + diff --git a/mir_perception/mir_empty_space_detection/ros/src/empty_space_detector.cpp b/mir_perception/mir_empty_space_detection/ros/src/empty_space_detector.cpp index 50390a378..cd5532d36 100644 --- a/mir_perception/mir_empty_space_detection/ros/src/empty_space_detector.cpp +++ b/mir_perception/mir_empty_space_detection/ros/src/empty_space_detector.cpp @@ -8,7 +8,7 @@ EmptySpaceDetector::EmptySpaceDetector() : nh_("~") { nh_.param("output_frame", output_frame_, "base_link"); - nh_.param("enable_debug_pc", enable_debug_pc_pub_, false); + nh_.param("enable_debug_pc", enable_debug_pc_pub_, true); float octree_resolution; nh_.param("octree_resolution", octree_resolution, 0.0025); add_to_octree_ = false; @@ -16,7 +16,7 @@ EmptySpaceDetector::EmptySpaceDetector() : nh_("~") pose_array_pub_ = nh_.advertise("empty_spaces", 1); event_out_pub_ = nh_.advertise("event_out", 1); - pc_sub_ = nh_.subscribe("input_point_cloud", 1, &EmptySpaceDetector::pcCallback, this); + 
event_in_sub_ = nh_.subscribe("event_in", 1, &EmptySpaceDetector::eventInCallback, this); tf_listener_.reset(new tf::TransformListener); @@ -87,10 +87,13 @@ void EmptySpaceDetector::loadParams() void EmptySpaceDetector::eventInCallback(const std_msgs::String::ConstPtr &msg) { std_msgs::String event_out; - if (msg->data == "e_add_cloud") { + if (msg->data == "e_add_cloud") + { + pc_sub_ = nh_.subscribe("input_point_cloud", 1, &EmptySpaceDetector::pcCallback, this); //subscribe to the point cloud add_to_octree_ = true; return; } else if (msg->data == "e_add_cloud_stop") { + pc_sub_.shutdown(); add_to_octree_ = false; event_out.data = "e_add_cloud_stopped"; } else if (msg->data == "e_trigger") { @@ -99,8 +102,9 @@ void EmptySpaceDetector::eventInCallback(const std_msgs::String::ConstPtr &msg) event_out.data = "e_success"; cloud_accumulation_->reset(); //resetting the octree } else { + ROS_ERROR("Failed to find empty spaces"); event_out.data = "e_failure"; - add_to_octree_ = true; + // add_to_octree_ = true; } //event_out.data = (success) ? "e_success" : "e_failure"; } else { @@ -115,7 +119,10 @@ void EmptySpaceDetector::pcCallback(const sensor_msgs::PointCloud2::ConstPtr &ms sensor_msgs::PointCloud2 msg_transformed; if (!mpu::pointcloud::transformPointCloudMsg(tf_listener_, output_frame_, *msg, msg_transformed)) + { + ROS_WARN("Failed to transform point cloud"); return; + } PointCloud::Ptr input_pc(new PointCloud); pcl::fromROSMsg(msg_transformed, *input_pc); @@ -199,6 +206,7 @@ void EmptySpaceDetector::findEmptySpacesOnPlane(const PointCloud::Ptr &plane, } } if (success) { + ROS_INFO_STREAM("Pose: " << samples[0].x << " " << samples[0].y << " " << samples[0].z); ROS_INFO_STREAM("Found solution at attempt: " << attempts); for (PointT p : samples) { geometry_msgs::Pose pose; @@ -219,6 +227,7 @@ bool EmptySpaceDetector::findPlane(PointCloud::Ptr plane) PointCloud::Ptr debug(new PointCloud); cloud_accumulation_->getAccumulatedCloud(*cloud_in); + ROS_INFO("Finding plane-----------"); PointCloud::Ptr hull(new PointCloud); pcl::ModelCoefficients::Ptr model_coefficients(new pcl::ModelCoefficients); double workspace_height; @@ -227,6 +236,7 @@ bool EmptySpaceDetector::findPlane(PointCloud::Ptr plane) if (enable_debug_pc_pub_) { /* publish debug pointcloud */ + sensor_msgs::PointCloud2 output; pcl::toROSMsg(*debug, output); output.header.frame_id = output_frame_; diff --git a/mir_perception/mir_handle_detection/README.md b/mir_perception/mir_handle_detection/README.md index c7be393d0..3d33d52bb 100644 --- a/mir_perception/mir_handle_detection/README.md +++ b/mir_perception/mir_handle_detection/README.md @@ -31,7 +31,7 @@ This is the pose of the handle ### In - `/mir_perception/drawer_handle_perceiver/event_in` -- `/mir_perception/drawer_handle_perceiver/input_point_cloud` -> `/arm_cam3d/depth_registered/points` +- `/mir_perception/drawer_handle_perceiver/input_point_cloud` -> `/arm_cam3d/depth/color/points` ### Out - `/mir_perception/drawer_handle_perceiver/output_pose` diff --git a/mir_perception/mir_handle_detection/ros/launch/drawer_handle_perceiver.launch b/mir_perception/mir_handle_detection/ros/launch/drawer_handle_perceiver.launch index 01c69f2ce..8cc323d2c 100644 --- a/mir_perception/mir_handle_detection/ros/launch/drawer_handle_perceiver.launch +++ b/mir_perception/mir_handle_detection/ros/launch/drawer_handle_perceiver.launch @@ -2,7 +2,7 @@ - + diff --git a/mir_perception/mir_object_recognition/ros/config/SceneSegmentation.cfg 
b/mir_perception/mir_object_recognition/ros/config/SceneSegmentation.cfg index 853c97e41..8c71b5e6a 100755 --- a/mir_perception/mir_object_recognition/ros/config/SceneSegmentation.cfg +++ b/mir_perception/mir_object_recognition/ros/config/SceneSegmentation.cfg @@ -58,7 +58,9 @@ pc_os_cluster.add ("pad_cluster", bool_t, 0, "Pad cluster so that it has the sa pc_os_cluster.add ("padded_cluster_size", int_t, 0, "The size of the padded cluster", 2048, 128, 4096) object_pose = gen.add_group("Object pose") -object_pose.add ("object_height_above_workspace", double_t, 0, "The height of the object above the workspace", 0.038, 0, 2.0) +object_pose.add ("use_fixed_heights", bool_t, 0, "Used fixed heights of objects by assuming platforms are 0, 5, 10 or 15 cm exactly", False) +object_pose.add ("height_of_floor", double_t, 0, "The height of floor (or 0cm platform) based on scene segmentation", -0.08, -1.0, 2.0) +object_pose.add ("object_height_above_workspace", double_t, 0, "The height of the object above the workspace", 0.03, -1.0, 2.0) object_pose.add ("container_height", double_t, 0, "The height of the container pose", 0.0335, 0, 2.0) rgb_bbox_proposal = gen.add_group("RGB bbox proposal") @@ -78,6 +80,7 @@ roi.add ("roi_min_bbox_z", double_t, 0, "Min height of objects", 0.03, 0, 1) object_recognizer = gen.add_group("Object recognizer") object_recognizer.add ("enable_rgb_recognizer", bool_t, 0, "Enable rgb object detection and recognition", True) object_recognizer.add ("enable_pc_recognizer", bool_t, 0, "Enable pointcloud object detection and recognition", True) +object_recognizer.add ("obj_category", str_t, 0, "Object category name (eg. atwork, cavity, container)", "atwork") exit (gen.generate (PACKAGE, "mir_object_recognition", "SceneSegmentation")) diff --git a/mir_perception/mir_object_recognition/ros/config/objects.xml b/mir_perception/mir_object_recognition/ros/config/objects.xml index ab2c15633..2b1e445d1 100644 --- a/mir_perception/mir_object_recognition/ros/config/objects.xml +++ b/mir_perception/mir_object_recognition/ros/config/objects.xml @@ -15,9 +15,13 @@ CONTAINER_BOX_REDboxred BEARING_BOXboxgray BEARINGspheregray + BEARING2spheregray AXIScylindergray + ALLENKEYflatgray + WRENCHflatgray + DRILLflatgray BLUE_CONTAINERboxblue RED_CONTAINERboxred - \ No newline at end of file + diff --git a/mir_perception/mir_object_recognition/ros/config/scene_segmentation_constraints.yaml b/mir_perception/mir_object_recognition/ros/config/scene_segmentation_constraints.yaml index ac0c8bff2..9645df84d 100644 --- a/mir_perception/mir_object_recognition/ros/config/scene_segmentation_constraints.yaml +++ b/mir_perception/mir_object_recognition/ros/config/scene_segmentation_constraints.yaml @@ -13,7 +13,7 @@ multimodal_object_recognition: cropbox_filter_max_x: 0.8 cropbox_filter_min_y: -0.4 cropbox_filter_max_y: 0.4 - cropbox_filter_min_z: 0.0 + cropbox_filter_min_z: -0.1 cropbox_filter_max_z: 0.6 normal_radius_search: 0.03 use_omp: False @@ -41,14 +41,17 @@ multimodal_object_recognition: pad_cluster: True padded_cluster_size: 2048 octree_resolution: 0.0025 - object_height_above_workspace: 0.012 + height_of_floor: -0.083 + use_fixed_heights: False + object_height_above_workspace: -0.002 container_height: 0.07 enable_rgb_recognizer: true enable_pc_recognizer: false + obj_category: 'atwork' # atwork, cavity, container rgb_roi_adjustment: 2 rgb_bbox_min_diag: 21 - rgb_bbox_max_diag: 175 - rgb_cluster_filter_limit_min: 0.02 # for tower camera configuration (youbot 2) + rgb_bbox_max_diag: 200 + 
rgb_cluster_filter_limit_min: 0.01 # for tower camera configuration (youbot 2) rgb_cluster_filter_limit_max: 0.35 rgb_cluster_remove_outliers: True enable_roi: True diff --git a/mir_perception/mir_object_recognition/ros/include/mir_object_recognition/multimodal_object_recognition_node.h b/mir_perception/mir_object_recognition/ros/include/mir_object_recognition/multimodal_object_recognition_node.h index da6c30f17..d7204b5fe 100644 --- a/mir_perception/mir_object_recognition/ros/include/mir_object_recognition/multimodal_object_recognition_node.h +++ b/mir_perception/mir_object_recognition/ros/include/mir_object_recognition/multimodal_object_recognition_node.h @@ -111,7 +111,6 @@ class MultimodalObjectRecognitionROS ros::Publisher pub_rgb_object_pose_array_; // Publisher debug ros::Publisher pub_debug_cloud_plane_; - ros::Publisher pub_filtered_rgb_cloud_plane_; std::string horizontal_object_list[9]; // Synchronize callback for image and pointcloud @@ -155,11 +154,13 @@ class MultimodalObjectRecognitionROS // Enable recognizer bool enable_rgb_recognizer_; bool enable_pc_recognizer_ ; + std::string obj_category_; // Visualization BoundingBoxVisualizer bounding_box_visualizer_pc_; ClusteredPointCloudVisualizer cluster_visualizer_rgb_; ClusteredPointCloudVisualizer cluster_visualizer_pc_; + ClusteredPointCloudVisualizer cluster_visualizer_filtered_rgb_; LabelVisualizer label_visualizer_rgb_; LabelVisualizer label_visualizer_pc_; @@ -168,11 +169,14 @@ class MultimodalObjectRecognitionROS std::string target_frame_id_; std::string pointcloud_source_frame_id_; std::set round_objects_; + std::set flat_objects_; ObjectInfo object_info_; std::string object_info_path_; // Dynamic parameter double object_height_above_workspace_; + double height_of_floor_; + bool use_fixed_heights_; double container_height_; int rgb_roi_adjustment_; int rgb_bbox_min_diag_; @@ -238,7 +242,8 @@ class MultimodalObjectRecognitionROS **/ void publishDebug(mas_perception_msgs::ObjectList &combined_object_list, std::vector &clusters_3d, - std::vector &clusters_2d); + std::vector &clusters_2d, + std::vector &filtered_clusters_2d); /** \brief Load qualitative object info * \param[in] Path to the xml object file diff --git a/mir_perception/mir_object_recognition/ros/launch/multimodal_object_recognition.launch b/mir_perception/mir_object_recognition/ros/launch/multimodal_object_recognition.launch index 6cfd48bc3..6a5c55add 100644 --- a/mir_perception/mir_object_recognition/ros/launch/multimodal_object_recognition.launch +++ b/mir_perception/mir_object_recognition/ros/launch/multimodal_object_recognition.launch @@ -1,9 +1,9 @@ - - - + + + @@ -24,7 +24,7 @@ - + diff --git a/mir_perception/mir_object_recognition/ros/launch/pc_object_recognition.launch b/mir_perception/mir_object_recognition/ros/launch/pc_object_recognition.launch index 79ca89bd3..b05fff1f1 100644 --- a/mir_perception/mir_object_recognition/ros/launch/pc_object_recognition.launch +++ b/mir_perception/mir_object_recognition/ros/launch/pc_object_recognition.launch @@ -2,7 +2,7 @@ - + diff --git a/mir_perception/mir_object_recognition/ros/launch/rgb_object_recognition.launch b/mir_perception/mir_object_recognition/ros/launch/rgb_object_recognition.launch index 63933b622..a16cab2ce 100644 --- a/mir_perception/mir_object_recognition/ros/launch/rgb_object_recognition.launch +++ b/mir_perception/mir_object_recognition/ros/launch/rgb_object_recognition.launch @@ -1,8 +1,8 @@ - - + + @@ -14,6 +14,10 @@ + + - atwork_obb + - cavity + diff --git 
a/mir_perception/mir_object_recognition/ros/scripts/coworker_assembly_detection_node b/mir_perception/mir_object_recognition/ros/scripts/coworker_assembly_detection_node new file mode 100755 index 000000000..2e3944c2a --- /dev/null +++ b/mir_perception/mir_object_recognition/ros/scripts/coworker_assembly_detection_node @@ -0,0 +1,338 @@ +#!/usr/bin/env python3 +import os +import random +import time +import cv2 +import numpy as np +import roslib +import rospy +import torch +import math +import csv +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from cv_bridge import CvBridge, CvBridgeError +from std_msgs.msg import ColorRGBA, String +import message_filters +from mas_perception_msgs.msg import ImageList, Object, ObjectList, TimeStampedPose +from sensor_msgs.msg import Image, RegionOfInterest, PointCloud2, PointField +from sensor_msgs import point_cloud2 +from ultralytics import YOLO +import tf +from sklearn.decomposition import PCA +from geometry_msgs.msg import PoseStamped, Pose +from visualization_msgs.msg import Marker +from scipy.optimize import least_squares +from sklearn.mixture import GaussianMixture +import tf.transformations as tr +from scipy.spatial.transform import Rotation as R +import tf2_sensor_msgs +from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud +import tf2_ros +import tf2_py as tf2 +import ros_numpy +from scipy.optimize import least_squares +import pdb + +class CoworkerAssembly(): + def __init__(self, debug_mode=True): + + self.cvbridge = CvBridge() + self.debug = debug_mode + self.pub_debug = rospy.Publisher("/mir_perception/coworker_assembly/output/pick_object_debug", Image, queue_size=1) + self.pub_pose = rospy.Publisher("mcr_perception/object_selector/output/object_pose", PoseStamped, queue_size=1) + + self.tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(12)) + self.tf_listener = tf.TransformListener() + self.tf2_listener = tf2_ros.TransformListener(self.tf_buffer) + + rospy.Subscriber("/mir_perception/coworker_assembly/event_in", String, self.event_in_cb) + self.event_out = rospy.Publisher("/mir_perception/coworker_assembly/event_out", String, queue_size=1) + + self.net = 'detection' + self.model_name = 'yolov8' + weights = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', 'yolov8', 'robocup_2023_dataset', "coworker_assembly_model.pt") + self.weights = weights + self.confidence_threshold = 0.8 + self.iou_threshold = 0.45 + self.augment = False + + # Initialize + self.device = torch.device('cpu') + self.half = self.device.type != 'cpu' # half precision only supported on CUDA + + # Load model + if self.model_name == 'yolov8': + self.model = YOLO(weights) + + self.event = None + self.run_once = False + self.pose_sent = False + # self.event_in_cb("e_start") + + + def callback(self, img_msg, point_cloud_msg): + try: + self.run(img_msg, point_cloud_msg) + except Exception as e: + rospy.loginfo("[coworker_assembly_detection_node] killing callback") + + def event_in_cb(self, msg): + """ + Starts a planned motion based on the specified arm position. 
+ + # """ + self.event = msg.data + if self.event.startswith("e_start") and not self.run_once: + # if True: + # Subscribe to image topic + self.sub_img = message_filters.Subscriber("/tower_cam3d_front/color/image_raw", Image) + + # subscribe to point cloud topic + self.sub_point_cloud = message_filters.Subscriber("/tower_cam3d_front/depth/color/points", PointCloud2) + + # synchronize image and point cloud + self.ts = message_filters.ApproximateTimeSynchronizer([self.sub_img, self.sub_point_cloud], queue_size=10, slop=0.1) + self.ts.registerCallback(self.callback) + self.run_once = True + + if self.run_once and self.pose_sent: + self.sub_img.sub.unregister() + self.sub_point_cloud.sub.unregister() + rospy.loginfo("[coworker_assembly_detection_node] Unregistered image and pointcloud subscribers") + + def yolo_detect(self, cv_img): + if self.net == 'detection' and self.model_name == 'yolov8': + predictions = self.model.predict(source=cv_img, + conf=self.confidence_threshold, + iou=self.iou_threshold, + device=self.device, + verbose = False, + ) + # convert results to numpy array + predictions_np = predictions[0].boxes.numpy() + class_ids = predictions_np.cls + class_names = predictions[0].names + class_labels = [class_names[i] for i in class_ids] + class_scores = predictions_np.conf + class_bboxes = predictions_np.xyxy # x, y, w, h + + return class_bboxes, class_scores, class_labels + + def drawAxis(self, img, p_, q_, color, scale): + p = list(p_) + q = list(q_) + + ## [visualization1] + angle = math.atan2(p[1] - q[1], p[0] - q[0]) # angle in radians + hypotenuse = math.sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0])) + + # Here we lengthen the arrow by a factor of scale + q[0] = p[0] - scale * hypotenuse * math.cos(angle) + q[1] = p[1] - scale * hypotenuse * math.sin(angle) + cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 1, cv2.LINE_AA) + + # create the arrow hooks + p[0] = q[0] + 9 * math.cos(angle + math.pi / 4) + p[1] = q[1] + 9 * math.sin(angle + math.pi / 4) + cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 1, cv2.LINE_AA) + + p[0] = q[0] + 9 * math.cos(angle - math.pi / 4) + p[1] = q[1] + 9 * math.sin(angle - math.pi / 4) + cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 1, cv2.LINE_AA) + return img + + def get_orientation(self, pts, img): + ## [pca] + # Construct a buffer used by the pca analysis + sz = len(pts) + data_pts = np.empty((sz, 2), dtype=np.float64) + for i in range(data_pts.shape[0]): + data_pts[i,0] = pts[i,0,0] + data_pts[i,1] = pts[i,0,1] + + # Perform PCA analysis + mean = np.empty((0)) + mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean) + + # Store the center of the object + cntr = (int(mean[0,0]), int(mean[0,1])) + ## [pca] + + ## [visualization] + # Draw the principal components + # cv2.circle(img, cntr, 3, (255, 0, 255), 2) + p1 = (cntr[0] + 0.02 * eigenvectors[0,0] * eigenvalues[0,0], cntr[1] + 0.02 * eigenvectors[0,1] * eigenvalues[0,0]) + p2 = (cntr[0] - 0.02 * eigenvectors[1,0] * eigenvalues[1,0], cntr[1] - 0.02 * eigenvectors[1,1] * eigenvalues[1,0]) + img = self.drawAxis(img, cntr, p1, (255, 255, 0), 1) + # img = self.drawAxis(img, cntr, p2, (0, 0, 255), 5) + + angle = math.atan2(eigenvectors[0,1], eigenvectors[0,0]) # orientation in radians + return angle + + def get_pointcloud_position(self, pointcloud, center): + """ + pointcloud: pointcloud data of current frame with PointCloud2 data type + center: x and y coordinates of center of the bounding box of 
detected object + """ + + # Get the pointcloud data + pc = np.array(list(point_cloud2.read_points(pointcloud, skip_nans=False, field_names=("x", "y", "z"))), dtype=np.float32) + pc = pc.reshape((480,640,3)) + + # Get the center point of the bounding box + center_x, center_y = center + # center_point = pc[center_y, center_x] + + # Define the radius of the circle + radius = 7 + + # Calculate the coordinates of the circle points within the radius + min_x = max(0, center_x - radius) + max_x = min(pc.shape[1] - 1, center_x + radius) + min_y = max(0, center_y - radius) + max_y = min(pc.shape[0] - 1, center_y + radius) + + circle_points = [] + for y in range(min_y, max_y + 1): + for x in range(min_x, max_x + 1): + distance = math.sqrt((center_x - x) ** 2 + (center_y - y) ** 2) + if distance <= radius: + point = pc[y, x] + if not np.isnan(point).any(): + circle_points.append(point) + + # Determine the centroid of the non-NaN points + if circle_points: + circle_points_pc = np.array([point for point in circle_points]) + center_point = np.mean(circle_points_pc, axis=0) + + # Get the pose of the center point of the bounding box + pose = PoseStamped() + pose.header.frame_id = pointcloud.header.frame_id + pose.header.stamp = pointcloud.header.stamp + pose.pose.position.x = center_point[0] + pose.pose.position.y = center_point[1] + pose.pose.position.z = center_point[2] + pose.pose.orientation.x = 0.0 + pose.pose.orientation.y = 0.0 + pose.pose.orientation.z = 0.0 + pose.pose.orientation.w = 1.0 + + try: + t = self.tf_listener.getLatestCommonTime("base_link", pose.header.frame_id) + pose.header.stamp = t + pose = self.tf_listener.transformPose("base_link", pose) + # if pose doesnot has nan values then append pose to the list + if not math.isnan(pose.pose.position.x): + center_wrt_BL = pose.pose.position.x, pose.pose.position.y, pose.pose.position.z + return center_wrt_BL + except ( + tf.LookupException, + tf.ConnectivityException, + tf.ExtrapolationException, + ) as e: + rospy.logerr("Tf error: %s" % str(e)) + + return None + + def run(self, image, pointcloud): + """ + image: image data of current frame with Image data type + pointcloud: pointcloud data of current frame with PointCloud2 data type + """ + if image: + try: + cv_img = self.cvbridge.imgmsg_to_cv2(image, "bgr8") + bboxes, probs, labels = self.yolo_detect(cv_img) + + # Capatilize labels + labels = [label.upper() for label in labels] + bbox_img = None + + # Fit a Ellipse to the centers of the bounding boxes + for bbox, prob, label in zip(bboxes, probs, labels): + if label == "SCREW_NUT_ASSEMBLY": + # Calculate center of bounding box + center_x = (int(bbox[0]) + int(bbox[2])) // 2 + center_y = (int(bbox[1]) + int(bbox[3])) // 2 + center = np.array([center_x, center_y]) + + # Convert image to grayscale + bbox_img = cv_img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])] + if bbox_img is None: + continue + gray = cv2.cvtColor(bbox_img, cv2.COLOR_BGR2GRAY) + + # Convert image to binary + _, bw = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) + + # Find all the contours in the thresholded image + contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) + + # get contour with maximum area + max_area = 0 + best_cnt = None + for cnt in contours: + area = cv2.contourArea(cnt) + if area > max_area: + max_area = area + best_cnt = cnt + + yaw_angle = self.get_orientation(best_cnt, bbox_img) + + center_BL = self.get_pointcloud_position(pointcloud, center) + + predicted_pose = PoseStamped() + 
predicted_pose.header.stamp = pointcloud.header.stamp + predicted_pose.header.frame_id = "base_link_static" + predicted_pose.pose.position.x = center_BL[0] + predicted_pose.pose.position.y = center_BL[1] + predicted_pose.pose.position.z = 0.06 + + # convert yaw_list_CL to yaw_list_BL + yaw_BL = np.pi/2 - yaw_angle # from CL to BL + if yaw_BL > np.pi: + yaw_BL -= 2*np.pi + if yaw_BL < 0: + yaw_BL += np.pi + # now yaw_list is b/w 0 to np.pi (180 degrees) + orientation = tr.quaternion_from_euler(0, 0, yaw_BL) + predicted_pose.pose.orientation.x = orientation[0] + predicted_pose.pose.orientation.y = orientation[1] + predicted_pose.pose.orientation.z = orientation[2] + predicted_pose.pose.orientation.w = orientation[3] + + # publish the pose + self.pub_pose.publish(predicted_pose) + self.pose_sent = True + msg_str = f'e_done' + self.event_out.publish(String(data=msg_str)) + # print("pose e_done sent") + + if self.debug: + # computes the bounding box for the contour, and draws it on the frame, + (x,y,w,h) = cv2.boundingRect(best_cnt) + if w > 40 and h > 40: + cv2.rectangle(bbox_img, (x,y), (x+w,y+h), (255, 0, 0), 2) + + self.publish_debug_img(bbox_img) + + except CvBridgeError as e: + rospy.logerr(e) + return + else: + rospy.logwarn("No image received") + + def publish_debug_img(self, debug_img): + debug_img = np.array(debug_img, dtype=np.uint8) + debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") + self.pub_debug.publish(debug_img) + + +if __name__ == '__main__': + rospy.init_node("coworker_assembly_detection") + rospy.loginfo('Started Coworker assembly detection Node.') + object_recognizer = CoworkerAssembly(debug_mode=True) + rospy.loginfo("Coworker assembly detection node is ready to give pose") + rospy.spin() diff --git a/mir_perception/mir_object_recognition/ros/scripts/hand_gesture_node b/mir_perception/mir_object_recognition/ros/scripts/hand_gesture_node new file mode 100755 index 000000000..ea22bc6bd --- /dev/null +++ b/mir_perception/mir_object_recognition/ros/scripts/hand_gesture_node @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +import os +import random +import time +import cv2 +import numpy as np +import roslib +import rospy +import torch +import math +import csv +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from cv_bridge import CvBridge, CvBridgeError +from std_msgs.msg import ColorRGBA, String +import message_filters +from mas_perception_msgs.msg import ImageList, Object, ObjectList, TimeStampedPose +from sensor_msgs.msg import Image, RegionOfInterest, PointCloud2, PointField +from sensor_msgs import point_cloud2 +from ultralytics import YOLO +import tf +from sklearn.decomposition import PCA +from geometry_msgs.msg import PoseStamped, Pose +from visualization_msgs.msg import Marker +from scipy.optimize import least_squares +from sklearn.mixture import GaussianMixture +import tf.transformations as tr +from scipy.spatial.transform import Rotation as R +import tf2_sensor_msgs +from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud +import tf2_ros +import tf2_py as tf2 +import ros_numpy +from scipy.optimize import least_squares +import pdb + +class GestureRecognition(): + def __init__(self, debug_mode=True): + + self.cvbridge = CvBridge() + self.debug = debug_mode + self.pub_debug = rospy.Publisher("/mir_perception/gesture_recognition/output/gesture_recognition_debug", Image, queue_size=1) + + rospy.Subscriber("/mir_perception/gesture_recognition/event_in", String, self.event_in_cb) + self.event_out = 
rospy.Publisher("/mir_perception/gesture_recognition/event_out", String, queue_size=1) + + self.net = 'detection' + self.model_name = 'yolov8' + weights = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', 'yolov8', 'robocup_2023_dataset', "hand_gesture_model.pt") + self.weights = weights + self.confidence_threshold = 0.8 + self.iou_threshold = 0.45 + self.augment = False + + # Initialize + self.device = torch.device('cpu') + self.half = self.device.type != 'cpu' # half precision only supported on CUDA + + # Load model + if self.model_name == 'yolov8': + self.model = YOLO(weights) + + self.event = None + self.gesture_sent = False + self.frame_counter = 0 + # self.event_in_cb("e_start") + + + def callback(self, img_msg): + try: + self.run(img_msg) + except Exception as e: + rospy.loginfo("[hand_gesture_node] killing callback") + + def event_in_cb(self, msg): + """ + Starts a planned motion based on the specified arm position. + + # """ + self.event = msg.data + if self.event.startswith("e_start") and not self.gesture_sent: + # if not self.gesture_sent: + # Subscribe to image topic + self.sub_img = rospy.Subscriber("/tower_cam3d_front/color/image_raw", Image, self.callback) + + if self.gesture_sent: + self.sub_img.sub.unregister() + rospy.loginfo("[gesture_recognition_node] Unregistered image subscribers") + + def yolo_detect(self, cv_img): + if self.net == 'detection' and self.model_name == 'yolov8': + predictions = self.model.predict(source=cv_img, + conf=self.confidence_threshold, + iou=self.iou_threshold, + device=self.device, + verbose = False, + ) + # convert results to numpy array + predictions_np = predictions[0].boxes.numpy() + class_ids = predictions_np.cls + class_names = predictions[0].names + class_labels = [class_names[i] for i in class_ids] + class_scores = predictions_np.conf + class_bboxes = predictions_np.xyxy # x, y, w, h + + return class_bboxes, class_scores, class_labels, predictions + + def run(self, image): + """ + image: image data of current frame with Image data type + pointcloud: pointcloud data of current frame with PointCloud2 data type + """ + if image: + try: + cv_img = self.cvbridge.imgmsg_to_cv2(image, "bgr8") + bboxes, probs, labels, predictions = self.yolo_detect(cv_img) + + # Capatilize labels + labels = [label.upper() for label in labels] + + # Fit a Ellipse to the centers of the bounding boxes + for bbox, prob, label in zip(bboxes, probs, labels): + if label == "VICTORY": + self.frame_counter += 1 + else: + self.frame_counter = 0 + + if self.frame_counter == 5: + self.gesture_sent = True + msg_str = f'e_done' + self.event_out.publish(String(data=msg_str)) + # print("gesture e_done sent") + + if self.debug: + # Draw bounding boxes and labels of detections + debug_img = predictions[0].plot() + self.publish_debug_img(debug_img) + + except CvBridgeError as e: + rospy.logerr(e) + return + else: + rospy.logwarn("No image received") + + def publish_debug_img(self, debug_img): + debug_img = np.array(debug_img, dtype=np.uint8) + debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") + self.pub_debug.publish(debug_img) + + +if __name__ == '__main__': + rospy.init_node("gesture_recognition") + rospy.loginfo('Started Gesture Recognition Node.') + object_recognizer = GestureRecognition(debug_mode=True) + rospy.loginfo("Gesture Recognition node is ready to recognize gesture") + rospy.spin() diff --git a/mir_perception/mir_object_recognition/ros/scripts/pc_object_recognizer_node 
b/mir_perception/mir_object_recognition/ros/scripts/pc_object_recognizer_node index 31822be38..4b4eb7ea8 100755 --- a/mir_perception/mir_object_recognition/ros/scripts/pc_object_recognizer_node +++ b/mir_perception/mir_object_recognition/ros/scripts/pc_object_recognizer_node @@ -158,8 +158,9 @@ if __name__ == '__main__': model_dir = rospy.get_param("~model_dir") dataset = rospy.get_param("~dataset") - object_recognizer = PointcloudObjectRecognizer( - model, model_id, dataset, model_dir) - rospy.loginfo('\033[92m'+"PCL Recognizer is ready using %s , model: %s ", - model, model_id) + # commented as we'are not using it + #object_recognizer = PointcloudObjectRecognizer( + # model, model_id, dataset, model_dir) + #rospy.loginfo('\033[92m'+"PCL Recognizer is ready using %s , model: %s ", + # model, model_id) rospy.spin() diff --git a/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node b/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node index bc0146d74..504ea3873 100755 --- a/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node +++ b/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node @@ -6,9 +6,11 @@ import numpy as np import roslib import rospy import torch +import math from cv_bridge import CvBridge, CvBridgeError -from mas_perception_msgs.msg import ImageList, Object, ObjectList +from mas_perception_msgs.msg import ImageList, Object, ObjectList, BoundingBox from sensor_msgs.msg import Image, RegionOfInterest +from geometry_msgs.msg import Point from rgb_object_recognition.yolov7.models.experimental import attempt_load from rgb_object_recognition.yolov7.utils.datasets import letterbox from rgb_object_recognition.yolov7.utils.general import check_img_size, non_max_suppression, apply_classifier, \ @@ -16,11 +18,14 @@ from rgb_object_recognition.yolov7.utils.general import check_img_size, non_max_ from rgb_object_recognition.yolov7.utils.plots import plot_one_box from rgb_object_recognition.yolov7.utils.torch_utils import select_device, TracedModel, load_classifier, \ time_synchronized +from ultralytics import YOLO class RGBObjectRecognizer(): - def __init__(self, weights, net='detection', model_name='yolov7', - confidence_threshold=0.8, + def __init__(self, weights, + net='detection', + model_name='yolov8', + confidence_threshold=0.65, iou_threshold=0.45, img_size=640, trace=True, @@ -28,9 +33,6 @@ class RGBObjectRecognizer(): augment=False, device='cpu', debug_mode=True): - import sys - sys.path.insert(0, os.path.join(roslib.packages.get_pkg_dir("mir_object_recognition"), - 'common', 'src', 'rgb_object_recognition', 'yolov7')) self.cvbridge = CvBridge() self.debug = debug_mode @@ -45,32 +47,12 @@ class RGBObjectRecognizer(): self.weights = weights self.confidence_threshold = confidence_threshold self.iou_threshold = iou_threshold - self.augment = augment - - # Initialize - self.device = select_device(device) - self.half = self.device.type != 'cpu' # half precision only supported on CUDA + self.device = "cuda" if torch.cuda.is_available() else device # Load model - self.model = attempt_load(weights, map_location=self.device) # load FP32 model - self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names - self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names] - self.stride = int(self.model.stride.max()) # model stride - img_size = check_img_size(img_size, s=self.stride) # check img_size - self.img_size = img_size - - if trace: - self.model = 
TracedModel(self.model, self.device, img_size) - - if self.half: - self.model.half() # to FP16 - - # Second-stage classifier - self.classify = classify - if self.classify: - self.modelc = load_classifier(name='resnet101', n=2) # initialize - self.modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=self.device)['model']).to( - self.device).eval() + if self.model_name == 'yolov8': + self.model_atwork = YOLO(self.weights[0]) + self.model_cavity = YOLO(self.weights[1]) def image_recognition_cb(self, img_msg): if img_msg.images: @@ -82,77 +64,110 @@ class RGBObjectRecognizer(): try: cv_img = self.cvbridge.imgmsg_to_cv2( img_msg.images[0], "bgr8") - # Padded resize - img = letterbox(cv_img, self.img_size, stride=self.stride)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - if self.model_name == 'yolov7': - old_img_w = old_img_h = self.img_size - old_img_b = 1 - - img = torch.from_numpy(img).to(self.device) - img = img.half() if self.half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) - - # Inference - t1 = time_synchronized() - pred = self.model(img, augment=self.augment)[0] - t2 = time_synchronized() - - # Apply NMS - pred = non_max_suppression(pred, self.confidence_threshold, self.iou_threshold, classes=None, - agnostic=True) - t3 = time_synchronized() - - # Apply Classifier - if self.classify: - pred = apply_classifier(pred, self.modelc, img, cv_img) - - # Process detections - bboxes = [] - probs = [] - labels = [] - for i, det in enumerate(pred): # detections per image - gn = torch.tensor(cv_img.shape)[[1, 0, 1, 0]] # normalization gain whwh - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], cv_img.shape).round() - - # Write results - for *xyxy, conf, cls in reversed(det): - if self.debug: - label = f'{self.names[int(cls)]} {conf:.2f}' - plot_one_box(xyxy, cv_img, label=label, color=self.colors[int(cls)], - line_thickness=1) - #xywh = (xyxy2xywh(torch.tensor(xyxy).view(-1, 4)) / gn).view(-1).tolist() # normalized xywh - bboxes.append(xyxy) - probs.append(conf) - labels.append(self.names[int(cls)]) - if self.debug: - # publish bbox and label - self.publish_debug_img(cv_img) - print(labels) - for i in range(len(labels)): - result = Object() - result.name = labels[i].upper() - result.probability = probs[i] - roi = RegionOfInterest() - roi.x_offset = int(bboxes[i][0]) - roi.y_offset = int(bboxes[i][1]) - roi.width = int(bboxes[i][2]) - int(bboxes[i][0]) - roi.height = int(bboxes[i][3]) - int(bboxes[i][1]) - result.roi = roi - objects.append(result) + obj_category = None + + if rospy.has_param("/mir_perception/multimodal_object_recognition/obj_category"): + obj_category = rospy.get_param("/mir_perception/multimodal_object_recognition/obj_category") + if obj_category == "cavity": + self.model = self.model_cavity + else: + self.model = self.model_atwork + # print the model name + rospy.loginfo(f"\033[92m" + f"[RGB Recognizer] is using {obj_category} objects category model" + f"\033[0m") + else: + rospy.loginfo(f"\033[92m" + f"[RGB Recognizer] is using atwork objects category model" + f"\033[0m") + + if self.model_name == 'yolov8': + + predictions = self.model.predict(source=cv_img, + conf=self.confidence_threshold, + iou=self.iou_threshold, + device=self.device, + verbose=False + ) + + if obj_category == "cavity": + # # Uncomment to run segmention model + # preds = 
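# Sketch of how the active detection path below unpacks the ultralytics
# result object, mirroring the calls used later in this callback:
#   res    = predictions[0]            # one input image -> one Results object
#   boxes  = res.boxes.numpy()         # detections moved to CPU / numpy
#   labels = [res.names[int(c)] for c in boxes.cls]
#   scores = boxes.conf
#   xyxy   = boxes.xyxy                # corner coordinates (x1, y1, x2, y2),
#                                      # which is why width and height are
#                                      # computed as differences further down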
predictions[0] + # class_ids = preds.boxes.cls + # class_labels = [self.model.names[int(label)] for label in class_ids.cpu().numpy()] + # class_scores = preds.boxes.conf.cpu().numpy() + # class_masks = preds.masks + + # masks, probs, labels = class_masks, class_scores, class_labels + + # for i in range(len(labels)): + # result = Object() + # result.name = labels[i].upper() + # result.probability = probs[i] + # result.mask = self.mask_to_sensor_msgs_image(masks[i].data[0].cpu().numpy()) + # objects.append(result) + + # convert results to numpy array + predictions_np = predictions[0].boxes.numpy() + class_ids = predictions_np.cls + class_names = predictions[0].names + class_labels = [class_names[i] for i in class_ids] + class_scores = predictions_np.conf + class_bboxes = predictions_np.xyxy # x, y, w, h + + bboxes, probs, labels = class_bboxes, class_scores, class_labels + + for i in range(len(labels)): + result = Object() + result.name = labels[i].upper() + result.probability = probs[i] + roi = RegionOfInterest() + roi.x_offset = int(bboxes[i][0]) # is it center??? + roi.y_offset = int(bboxes[i][1]) + roi.width = int(bboxes[i][2] - bboxes[i][0]) + roi.height = int(bboxes[i][3] - bboxes[i][1]) + result.roi = roi + + objects.append(result) + + else: + predictions_cp = predictions[0].obb.cpu() + class_ids = predictions_cp.cls.numpy() + class_labels = [self.model.names[int(label)] for label in class_ids] + class_scores = predictions_cp.conf.numpy() + class_bboxes = predictions_cp.xyxyxyxy.numpy() + + bboxes, probs, labels = class_bboxes, class_scores, class_labels + + for i in range(len(labels)): + if labels[i].upper() == "CONTAINER_BLUE": + labels[i] = "CONTAINER_BOX_BLUE" + elif labels[i].upper() == "CONTAINER_RED": + labels[i] = "CONTAINER_BOX_RED" + + result = Object() + result.name = labels[i].upper() + result.probability = probs[i] + obbbox = BoundingBox() + for j, vertex in enumerate(bboxes[i]): + point = Point() + point.x = vertex[0] + point.y = vertex[1] + obbbox.vertices.append(point) + length, breadth = self.rectangle_dimensions(obbbox.vertices) + obbbox.dimensions.x = length + obbbox.dimensions.y = breadth + result.bounding_box = obbbox + + objects.append(result) + if self.debug: - rospy.logdebug("Detected Objects: %s", labels) + # Draw bounding boxes and labels of detections + debug_img = predictions[0].plot() + + # publish bbox and label + self.publish_debug_img(debug_img) + else: - bboxes, probs, labels = self.model.classify(cv_img) + rospy.logerr("[RGB Recognition] Model not supported") + return # Publish result_list result_list.objects = objects @@ -165,6 +180,55 @@ class RGBObjectRecognizer(): elif self.net == 'classification': rospy.logwarn("TODO: MobileNet") + def rectangle_dimensions(self, vertices): + """Compute the length and breadth of a rectangle from its vertices.""" + # Assuming vertices are provided in the order: bottom1, bottom2, top2, top1 + + # Extracting coordinates from geometry_msgs.Point + bottom1_x, bottom1_y = vertices[0].x, vertices[0].y + bottom2_x, bottom2_y = vertices[1].x, vertices[1].y + top2_x, top2_y = vertices[2].x, vertices[2].y + + # Compute lengths along x and y axes + length = math.sqrt((top2_x - bottom1_x) ** 2 + (top2_y - bottom1_y) ** 2) + breadth = math.sqrt((bottom2_x - bottom1_x) ** 2 + (bottom2_y - bottom1_y) ** 2) + + return length, breadth + + def mask_to_sensor_msgs_image(self, mask): + """ + Convert a binary mask (numpy array of 0s and 1s) to a ROS sensor_msgs/Image message. 
+ + Parameters: + mask (numpy.ndarray): The binary mask to be converted. + + Returns: + sensor_msgs/Image: The populated ROS Image message with the mask data. + """ + # Create a new Image message + image_msg = Image() + + # Set the header (optional, but good practice) + image_msg.header.stamp = rospy.Time.now() + image_msg.header.frame_id = "mask_frame" + + # Set the image dimensions + image_msg.height = mask.shape[0] # Number of rows + image_msg.width = mask.shape[1] # Number of columns + + # Set the encoding to mono8 since it's a single-channel binary mask + image_msg.encoding = "mono8" + + # Set dummy values for these fields, necessary for the message but not used + image_msg.is_bigendian = 0 + image_msg.step = image_msg.width # Full row length in bytes + + # Convert the mask to uint8 and flatten it to a list for the data field + mask_uint8 = (mask * 255).astype(np.uint8) # Convert 1s to 255s for visibility + image_msg.data = mask_uint8.flatten().tolist() + + return image_msg + def publish_debug_img(self, debug_img): debug_img = np.array(debug_img, dtype=np.uint8) debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") @@ -178,11 +242,23 @@ if __name__ == '__main__': classifier_name = rospy.get_param("~classifier") dataset = rospy.get_param("~dataset") model_dir = rospy.get_param("~model_dir") - weights = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), - 'common', 'models', classifier_name, dataset, "best.pt") - + model_category = rospy.get_param("~model_category") + # breakpoint() + weight_1 = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', classifier_name, dataset, model_category[0]+".pt") + weight_2 = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', classifier_name, dataset, model_category[1]+".pt") + # check if the model exists + if not os.path.isfile(weight_1): + rospy.logerr("[RGB Recognition] Model not found: %s", weight_1) + exit(-1) + if not os.path.isfile(weight_2): + rospy.logerr("[RGB Recognition] Model not found: %s", weight_2) + exit(-1) + weights = [weight_1, weight_2] # 0th index is the default model, eg. 
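# The two weight paths above come from the ~model_category private parameter,
# read as a two-element list. A plausible value, judging from the categories
# used in this node, would be something like ['atwork', 'cavity'] (hypothetical,
# the real value comes from the launch configuration), which resolves to
# .../common/models/<classifier>/<dataset>/atwork.pt and .../cavity.pt, with
# index 0 acting as the default model.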
atwork object_recognizer = RGBObjectRecognizer( - weights=weights, debug_mode=True) - rospy.loginfo("RGB Recognizer is ready using %s : %s , dataset: %s ", - net, classifier_name, dataset) + weights, + debug_mode=True) + rospy.loginfo("\033[92m" + "RGB Recognizer is ready using %s : %s , dataset: %s " + "\033[0m", + net, classifier_name, dataset) rospy.spin() diff --git a/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node_yolov7 b/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node_yolov7 new file mode 100755 index 000000000..1c98638e1 --- /dev/null +++ b/mir_perception/mir_object_recognition/ros/scripts/rgb_object_recognizer_node_yolov7 @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +import os +import random + +import numpy as np +import roslib +import rospy +import torch +from cv_bridge import CvBridge, CvBridgeError +from mas_perception_msgs.msg import ImageList, Object, ObjectList +from sensor_msgs.msg import Image, RegionOfInterest +from rgb_object_recognition.yolov7.models.experimental import attempt_load +from rgb_object_recognition.yolov7.utils.datasets import letterbox +from rgb_object_recognition.yolov7.utils.general import check_img_size, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh +from rgb_object_recognition.yolov7.utils.plots import plot_one_box +from rgb_object_recognition.yolov7.utils.torch_utils import select_device, TracedModel, load_classifier, \ + time_synchronized + + +class RGBObjectRecognizer(): + def __init__(self, weights, net='detection', model_name='yolov7', + confidence_threshold=0.8, + iou_threshold=0.45, + img_size=640, + trace=True, + classify=False, + augment=False, + device='cpu', + debug_mode=True): + import sys + sys.path.insert(0, os.path.join(roslib.packages.get_pkg_dir("mir_object_recognition"), + 'common', 'src', 'rgb_object_recognition', 'yolov7')) + + self.cvbridge = CvBridge() + self.debug = debug_mode + self.pub_debug = rospy.Publisher( + "/mir_perception/multimodal_object_recognition/recognizer/rgb/output/debug_image", Image, queue_size=1) + self.pub_result = rospy.Publisher( + "output/object_list", ObjectList, queue_size=1) + self.sub_img = rospy.Subscriber( + "input/images", ImageList, self.image_recognition_cb) + self.net = net + self.model_name = model_name + self.weights = weights + self.confidence_threshold = confidence_threshold + self.iou_threshold = iou_threshold + self.augment = augment + + # Initialize + self.device = select_device(device) + self.half = self.device.type != 'cpu' # half precision only supported on CUDA + + # Load model + self.model = attempt_load(weights, map_location=self.device) # load FP32 model + self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names + self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names] + self.stride = int(self.model.stride.max()) # model stride + img_size = check_img_size(img_size, s=self.stride) # check img_size + self.img_size = img_size + + if trace: + self.model = TracedModel(self.model, self.device, img_size) + + if self.half: + self.model.half() # to FP16 + + # Second-stage classifier + self.classify = classify + if self.classify: + self.modelc = load_classifier(name='resnet101', n=2) # initialize + self.modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=self.device)['model']).to( + self.device).eval() + + def image_recognition_cb(self, img_msg): + if img_msg.images: + result_list = ObjectList() + objects = [] + rospy.loginfo("[{}] images 
received: {} ".format( + len(img_msg.images), self.model_name)) + if self.net == 'detection': + try: + cv_img = self.cvbridge.imgmsg_to_cv2( + img_msg.images[0], "bgr8") + # Padded resize + img = letterbox(cv_img, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + if self.model_name == 'yolov7': + old_img_w = old_img_h = self.img_size + old_img_b = 1 + + img = torch.from_numpy(img).to(self.device) + img = img.half() if self.half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Inference + t1 = time_synchronized() + pred = self.model(img, augment=self.augment)[0] + t2 = time_synchronized() + + # Apply NMS + pred = non_max_suppression(pred, self.confidence_threshold, self.iou_threshold, classes=None, + agnostic=True) + t3 = time_synchronized() + + # Apply Classifier + if self.classify: + pred = apply_classifier(pred, self.modelc, img, cv_img) + + # Process detections + bboxes = [] + probs = [] + labels = [] + for i, det in enumerate(pred): # detections per image + gn = torch.tensor(cv_img.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], cv_img.shape).round() + + # Write results + for *xyxy, conf, cls in reversed(det): + if self.debug: + label = f'{self.names[int(cls)]} {conf:.2f}' + plot_one_box(xyxy, cv_img, label=label, color=self.colors[int(cls)], + line_thickness=1) + #xywh = (xyxy2xywh(torch.tensor(xyxy).view(-1, 4)) / gn).view(-1).tolist() # normalized xywh + bboxes.append(xyxy) + probs.append(conf) + labels.append(self.names[int(cls)]) + if self.debug: + # publish bbox and label + self.publish_debug_img(cv_img) + print(labels) + for i in range(len(labels)): + result = Object() + result.name = labels[i].upper() + result.probability = probs[i] + roi = RegionOfInterest() + roi.x_offset = int(bboxes[i][0]) + roi.y_offset = int(bboxes[i][1]) + roi.width = int(bboxes[i][2]) - int(bboxes[i][0]) + roi.height = int(bboxes[i][3]) - int(bboxes[i][1]) + result.roi = roi + objects.append(result) + if self.debug: + rospy.logdebug("Detected Objects: %s", labels) + + else: + bboxes, probs, labels = self.model.classify(cv_img) + + # Publish result_list + result_list.objects = objects + self.pub_result.publish(result_list) + + except CvBridgeError as e: + rospy.logerr(e) + return + + elif self.net == 'classification': + rospy.logwarn("TODO: MobileNet") + + def publish_debug_img(self, debug_img): + debug_img = np.array(debug_img, dtype=np.uint8) + debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") + self.pub_debug.publish(debug_img) + + +if __name__ == '__main__': + rospy.init_node("rgb_object_recognizer") + rospy.loginfo('Started object recognition node.') + net = rospy.get_param("~net") + classifier_name = rospy.get_param("~classifier") + dataset = rospy.get_param("~dataset") + model_dir = rospy.get_param("~model_dir") + weights = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', classifier_name, dataset, "best.pt") #"rc23_bins.pt" + + object_recognizer = RGBObjectRecognizer( + weights=weights, debug_mode=True) + rospy.loginfo("RGB Recognizer is ready using %s : %s , dataset: %s ", + net, classifier_name, dataset) + rospy.spin() diff --git a/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_node.cpp 
b/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_node.cpp index 3b4163531..4cc270822 100644 --- a/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_node.cpp +++ b/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_node.cpp @@ -44,13 +44,15 @@ MultimodalObjectRecognitionROS::MultimodalObjectRecognitionROS(ros::NodeHandle n bounding_box_visualizer_pc_("output/bounding_boxes", Color(Color::IVORY)), cluster_visualizer_rgb_("output/tabletop_cluster_rgb"), cluster_visualizer_pc_("output/tabletop_cluster_pc"), + cluster_visualizer_filtered_rgb_("output/tabletop_cluster_filtered_rgb"), label_visualizer_rgb_("output/rgb_labels", Color(Color::SEA_GREEN)), label_visualizer_pc_("output/pc_labels", Color(Color::IVORY)), data_collection_(false), enable_roi_(true), rgb_cluster_remove_outliers_(true), enable_rgb_recognizer_(true), - enable_pc_recognizer_(true) + enable_pc_recognizer_(true), + obj_category_("atwork") { tf_listener_.reset(new tf::TransformListener); scene_segmentation_ros_ = SceneSegmentationROSSPtr(new SceneSegmentationROS()); @@ -94,12 +96,10 @@ MultimodalObjectRecognitionROS::MultimodalObjectRecognitionROS(ros::NodeHandle n ROS_WARN_STREAM("[multimodal_object_recognition] target frame: " <("pointcloud_source_frame_id", pointcloud_source_frame_id_, "fixed_camera_link"); - nh_.param("logdir", logdir_, "/tmp/"); + nh_.param("logdir", logdir_, "/home/robocup/perception_debug_data/"); nh_.param("object_info", object_info_path_, "None"); loadObjectInfo(object_info_path_); - pub_filtered_rgb_cloud_plane_ = - nh_.advertise("filtered_rgb_cloud_plane", 1); } MultimodalObjectRecognitionROS::~MultimodalObjectRecognitionROS() @@ -109,9 +109,9 @@ MultimodalObjectRecognitionROS::~MultimodalObjectRecognitionROS() void MultimodalObjectRecognitionROS::synchronizeCallback(const sensor_msgs::ImageConstPtr &image, const sensor_msgs::PointCloud2ConstPtr &cloud) { + ROS_INFO("[multimodal_object_recognition_ros] Received enough messages"); if (pointcloud_msg_received_count_ < 1) { - ROS_INFO("[multimodal_object_recognition_ros] Received enough messages"); pointcloud_msg_ = cloud; pointcloud_msg_->header.frame_id = pointcloud_source_frame_id_; pointcloud_msg_received_count_ += 1; @@ -256,7 +256,7 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() filename.append("rgb_raw_"); filename.append(std::to_string(ros::Time::now().toSec())); mpu::object::saveCVImage(raw_cv_image, logdir_, filename); - ROS_INFO_STREAM("Image:" << filename << " saved to " << logdir_); + ROS_DEBUG_STREAM("Image:" << filename << " saved to " << logdir_); } else { @@ -266,6 +266,10 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() return; } + // Reset recognition callback flags + received_recognized_cloud_list_flag_ = false; + received_recognized_image_list_flag_ = false; + // Publish 3D object cluster for recognition if (!cloud_object_list.objects.empty() && enable_pc_recognizer_) { @@ -347,20 +351,21 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() mas_perception_msgs::ObjectList rgb_object_list; mas_perception_msgs::BoundingBoxList bounding_boxes; std::vector clusters_2d; + std::vector filtered_clusters_2d; cv_bridge::CvImagePtr cv_image; - if (recognized_image_list_.objects.size() > 0) + try { - try - { - cv_image = cv_bridge::toCvCopy(image_msg_, sensor_msgs::image_encodings::BGR8); - } - catch (cv_bridge::Exception& e) - { - ROS_ERROR("cv_bridge exception: %s", e.what()); - return; - } + cv_image = 
cv_bridge::toCvCopy(image_msg_, sensor_msgs::image_encodings::BGR8); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + // return; + } + if (recognized_image_list_.objects.size() > 0) + { bounding_boxes.bounding_boxes.resize(recognized_image_list_.objects.size()); rgb_object_list.objects.resize(recognized_image_list_.objects.size()); @@ -372,39 +377,156 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() { object.shape.shape = object.shape.SPHERE; } + else if (flat_objects_.count(recognized_image_list_.objects[i].name)) + { + object.shape.shape = "flat"; + } else { object.shape.shape = object.shape.OTHER; } - // Get ROI - sensor_msgs::RegionOfInterest roi_2d = object.roi; - const cv::Rect2d rect2d(roi_2d.x_offset, roi_2d.y_offset, roi_2d.width, roi_2d.height); - if (debug_mode_) + double len_diag = 0.0; + + mas_perception_msgs::BoundingBox bbox; + sensor_msgs::RegionOfInterest roi_2d; + cv::Mat mask; + // if object category is atwork, then get bounding box + if (obj_category_ == "atwork") + { + // Get bounding box + bbox = object.bounding_box; + + if (debug_mode_) + { + // draw oriented bounding box + cv::Point2f vertices[4]; + for (int j = 0; j < 4; j++) + { + vertices[j].x = bbox.vertices[j].x; + vertices[j].y = bbox.vertices[j].y; + } + + try{ + // draw bbox + cv::line(cv_image->image, vertices[0], vertices[1], cv::Scalar(0, 255, 0), 1, 8); + cv::line(cv_image->image, vertices[1], vertices[2], cv::Scalar(0, 255, 0), 1, 8); + cv::line(cv_image->image, vertices[2], vertices[3], cv::Scalar(0, 255, 0), 1, 8); + cv::line(cv_image->image, vertices[3], vertices[0], cv::Scalar(0, 255, 0), 1, 8); + // add label + cv::putText(cv_image->image, object.name, cv::Point(vertices[0].x, vertices[0].y), + cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 1); + } + catch (...) 
{ + ROS_WARN("could not draw bounding boxes."); + } + } + + // Remove large 2d misdetected bbox (misdetection) + geometry_msgs::Vector3 dim = bbox.dimensions; + len_diag = sqrt(powf(dim.x, 2) + powf(dim.y, 2)); + } + else if (obj_category_ == "old") + { + // get the segment mask + sensor_msgs::Image mask_img = object.mask; + + cv_bridge::CvImagePtr cv_mask_img; + try { + cv_mask_img = cv_bridge::toCvCopy(mask_img, sensor_msgs::image_encodings::MONO8); + mask = cv_mask_img->image; + // get the bounding box of the mask + cv::Rect bbox = cv::boundingRect(mask); + + // get the diagonal length of the bounding box + len_diag = sqrt(powf(bbox.width, 2) + powf(bbox.height, 2)); + } + catch (cv_bridge::Exception& e) { + ROS_ERROR("cv_bridge exception: %s", e.what()); + } + + if (debug_mode_) + { + // overlay the mask on the cv_image for visualization with random color + cv::Mat mask_rgb; + cv::cvtColor(mask, mask_rgb, cv::COLOR_GRAY2BGR); + cv::Mat mask_rgb_resized; + cv::resize(mask_rgb, mask_rgb_resized, cv::Size(cv_image->image.cols, cv_image->image.rows)); + cv::addWeighted(cv_image->image, 0.5, mask_rgb_resized, 0.5, 0.0, cv_image->image); + + // add label + // get the centroid of the mask + cv::Moments mu = cv::moments(mask, true); + cv::Point centroid(mu.m10 / mu.m00, mu.m01 / mu.m00); + cv::putText(cv_image->image, object.name, centroid, + cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 1); + } + } + else if (obj_category_ == "cavity") + { + // Get ROI + roi_2d = object.roi; + const cv::Rect2d rect2d(roi_2d.x_offset, roi_2d.y_offset, roi_2d.width, roi_2d.height); + + if (debug_mode_) + { + cv::Point pt1; + cv::Point pt2; + + pt1.x = roi_2d.x_offset; + pt1.y = roi_2d.y_offset; + pt2.x = roi_2d.x_offset + roi_2d.width; + pt2.y = roi_2d.y_offset + roi_2d.height; + + try{ + // draw bbox + cv::rectangle(cv_image->image, pt1, pt2, cv::Scalar(0, 255, 0), 1, 8, 0); + // add label + cv::putText(cv_image->image, object.name, cv::Point(pt1.x, pt2.y), + cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 1); + } + catch (...) 
{ + ROS_WARN("could not draw bounding boxes."); + } + } + + // Remove large 2d misdetected bbox (misdetection) + len_diag = sqrt(powf(roi_2d.width, 2) + powf(roi_2d.height, 2)); + } + + // check if object name has container + bool is_container = false; + ROS_WARN("Object name: %s", object.name.c_str()); + if (object.name == "CONTAINER_BOX_BLUE" || object.name == "CONTAINER_BOX_RED") { - cv::Point pt1; - cv::Point pt2; - - pt1.x = roi_2d.x_offset; - pt1.y = roi_2d.y_offset; - pt2.x = roi_2d.x_offset + roi_2d.width; - pt2.y = roi_2d.y_offset + roi_2d.height; - - // draw bbox - cv::rectangle(cv_image->image, pt1, pt2, cv::Scalar(0, 255, 0), 1, 8, 0); - // add label - cv::putText(cv_image->image, object.name, cv::Point(pt1.x, pt2.y), - cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 1); + ROS_INFO("Found container object %s", object.name.c_str()); + is_container = true; } - // Remove large 2d misdetected bbox (misdetection) - double len_diag = sqrt(powf(roi_2d.width, 2) + powf(roi_2d.height, 2)); - if (len_diag > rgb_bbox_min_diag_ && len_diag < rgb_bbox_max_diag_) + if ((len_diag > rgb_bbox_min_diag_ && len_diag < rgb_bbox_max_diag_) || is_container) { PointCloud::Ptr cloud_roi(new PointCloud); - bool getROISuccess = mpu::pointcloud::getPointCloudROI(roi_2d, cloud_, cloud_roi, + bool getROISuccess = false; + if (obj_category_ == "atwork") + { + getROISuccess = mpu::pointcloud::getPointCloudROI(bbox, cloud_, cloud_roi, + rgb_roi_adjustment_, + rgb_cluster_remove_outliers_); + } + else if (obj_category_ == "old") + { + getROISuccess = mpu::pointcloud::getPointCloudROI(mask, cloud_, cloud_roi, rgb_roi_adjustment_, rgb_cluster_remove_outliers_); + + } + + else if (obj_category_ == "cavity") + { + getROISuccess = mpu::pointcloud::getPointCloudROI(roi_2d, cloud_, cloud_roi, + rgb_roi_adjustment_, + rgb_cluster_remove_outliers_); + } // ToDo: Filter big objects from 2d proposal, if the height is less than 3 mm // pcl::PointXYZRGB min_pt; // pcl::PointXYZRGB max_pt; @@ -431,14 +553,19 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() // Publish filtered point cloud from RGB recognizer // ************************************************ - PointCloud filtered_rgb_pointcloud; - filtered_rgb_pointcloud = mpu::object::estimatePose(cloud_roi, pose, object.shape.shape, + // PointCloud filtered_rgb_pointcloud; + PointCloud::Ptr filtered_rgb_pointcloud(new PointCloud); + *filtered_rgb_pointcloud = mpu::object::estimatePose(cloud_roi, pose, object, object.shape.shape, rgb_cluster_filter_limit_min_, - rgb_cluster_filter_limit_max_); + rgb_cluster_filter_limit_max_, + obj_category_); + // append filtered point cloud to filtered_clusters_2d + filtered_clusters_2d.push_back(filtered_rgb_pointcloud); + PointT min_pt; PointT max_pt; - pcl::getMinMax3D(*cloud_roi, min_pt, max_pt); + pcl::getMinMax3D(*filtered_rgb_pointcloud, min_pt, max_pt); rgb_object_list.objects[i].dimensions.vector.z = max_pt.z - min_pt.z; ROS_INFO("[RGB Object Height] Object %s length: %f", object.name.c_str(), rgb_object_list.objects[i].dimensions.vector.z); @@ -448,15 +575,6 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() ROS_INFO("[RGB Object Height] Object %s length is greater than 9cm: %f", object.name.c_str(), max_pt.z); } - sensor_msgs::PointCloud2 ros_filtered_rgb_pointcloud; - pcl::toROSMsg(filtered_rgb_pointcloud, ros_filtered_rgb_pointcloud); - ros_filtered_rgb_pointcloud.header.frame_id = target_frame_id_; - - pub_filtered_rgb_cloud_plane_.publish(ros_filtered_rgb_pointcloud); - - // To visualize 
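// Short sketch of the height estimate done just above (identifiers as in this
// file): the category-aware estimatePose() returns the passthrough-filtered
// ROI cloud, and the z extent is taken from that cloud:
//   pcl::getMinMax3D(*filtered_rgb_pointcloud, min_pt, max_pt);
//   dimensions.vector.z = max_pt.z - min_pt.z;
// Measuring on the filtered cloud rather than the raw cloud_roi keeps plane
// points out of the estimate; objects taller than 9 cm are only logged, not
// rejected.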
the filtered point cloud, sleep for 3 seconds after every point cloud - // ros::Duration(3.0).sleep(); - //********************************* // Transform pose @@ -472,6 +590,22 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() { rgb_object_list.objects[i].pose = pose; } + + if (obj_category_ == "cavity") + { + rgb_object_list.objects[i].pose.pose.position.x += 0.02; + } + + // print the rpy pose + tf::Quaternion q( + rgb_object_list.objects[i].pose.pose.orientation.x, + rgb_object_list.objects[i].pose.pose.orientation.y, + rgb_object_list.objects[i].pose.pose.orientation.z, + rgb_object_list.objects[i].pose.pose.orientation.w); + tf::Matrix3x3 m(q); + double roll, pitch, yaw; + m.getRPY(roll, pitch, yaw); + ROS_INFO("[RGB] object %s: orient: [%f, %f, %f]", object.name.c_str(), roll, pitch, yaw); rgb_object_list.objects[i].probability = recognized_image_list_.objects[i].probability; rgb_object_list.objects[i].database_id = rgb_object_id_; rgb_object_list.objects[i].name = recognized_image_list_.objects[i].name; @@ -486,6 +620,7 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() } else { + ROS_WARN("[RGB] BBOX too big or too small"); ROS_DEBUG("[RGB] DECOY"); rgb_object_list.objects[i].name = "DECOY"; rgb_object_list.objects[i].database_id = rgb_object_id_; @@ -523,28 +658,46 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() else { ROS_WARN("No objects to publish"); + if (debug_mode_) + { + ros::Time time_now = ros::Time::now(); + // Save raw image + cv_bridge::CvImagePtr raw_cv_image; + if (mpu::object::getCVImage(image_msg_, raw_cv_image)) + { + std::string filename = ""; + filename = ""; + filename.append("rgb_raw_"); + filename.append(std::to_string(time_now.toSec())); + mpu::object::saveCVImage(raw_cv_image, logdir_, filename); + ROS_DEBUG_STREAM("Image:" << filename << " saved to " << logdir_); + } + else + { + ROS_ERROR("Cannot generate cv image..."); + } + } return; } if (debug_mode_) { - ROS_WARN_STREAM("Debug mode: publishing object information"); - publishDebug(combined_object_list, clusters_3d, clusters_2d); + ROS_DEBUG_STREAM("Debug mode: publishing object information"); + publishDebug(combined_object_list, clusters_3d, clusters_2d, filtered_clusters_2d); ros::Time time_now = ros::Time::now(); // Save debug image - if(recognized_image_list_.objects.size() > 0) - { - std::string filename = ""; - filename.append("rgb_debug_"); - filename.append(std::to_string(time_now.toSec())); + + std::string filename = ""; + filename.append("rgb_debug_"); + filename.append(std::to_string(time_now.toSec())); + try { mpu::object::saveCVImage(cv_image, logdir_, filename); - ROS_INFO_STREAM("Image:" << filename << " saved to " << logdir_); + ROS_DEBUG_STREAM("Image:" << filename << " saved to " << logdir_); } - else - { - ROS_WARN_STREAM("No Objects found. Cannot save debug image..."); + catch (...) 
{ + ROS_ERROR("Could not save debug image."); } // Save raw image cv_bridge::CvImagePtr raw_cv_image; @@ -555,7 +708,7 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() filename.append("rgb_raw_"); filename.append(std::to_string(time_now.toSec())); mpu::object::saveCVImage(raw_cv_image, logdir_, filename); - ROS_INFO_STREAM("Image:" << filename << " saved to " << logdir_); + ROS_DEBUG_STREAM("Image:" << filename << " saved to " << logdir_); } else { @@ -570,18 +723,17 @@ void MultimodalObjectRecognitionROS::recognizeCloudAndImage() filename.append("pcd_cluster_"); filename.append(std::to_string(time_now.toSec())); mpu::object::savePcd(cluster, logdir_, filename); - ROS_INFO_STREAM("Point cloud:" << filename << " saved to " << logdir_); + ROS_DEBUG_STREAM("Point cloud:" << filename << " saved to " << logdir_); } } } void MultimodalObjectRecognitionROS::publishDebug(mas_perception_msgs::ObjectList &combined_object_list, std::vector &clusters_3d, - std::vector &clusters_2d) + std::vector &clusters_2d, + std::vector &filtered_clusters_2d) { - ROS_INFO_STREAM("Cloud list: " << recognized_cloud_list_.objects.size()); - ROS_INFO_STREAM("RGB list: " << recognized_image_list_.objects.size()); - ROS_INFO_STREAM("Combined object list: "<< combined_object_list.objects.size()); + ROS_INFO_STREAM("Cloud list: " << recognized_cloud_list_.objects.size() << " RGB list: " << recognized_image_list_.objects.size() << " Combined object list: " << combined_object_list.objects.size()); // Compute normal to generate parallel BBOX to the plane const Eigen::Vector3f normal = scene_segmentation_ros_->getPlaneNormal(); @@ -668,6 +820,10 @@ void MultimodalObjectRecognitionROS::publishDebug(mas_perception_msgs::ObjectLis label_visualizer_rgb_.publish(rgb_labels, rgb_object_pose_array); } } + if (filtered_clusters_2d.size() > 0) + { + cluster_visualizer_filtered_rgb_.publish(filtered_clusters_2d, target_frame_id_); + } } void MultimodalObjectRecognitionROS::publishObjectList(mas_perception_msgs::ObjectList &object_list) @@ -709,29 +865,40 @@ void MultimodalObjectRecognitionROS::adjustObjectPose(mas_perception_msgs::Objec { yaw = 0.0; } + if (object_list.objects[i].name == "M30_H" + or object_list.objects[i].name == "M20_H" + or object_list.objects[i].name == "S40_40_V" + or object_list.objects[i].name == "F20_20_V") + { + ROS_WARN("Setting yaw for M30/M20 to zero"); + yaw = 0.0; + } // Update container pose if (object_list.objects[i].name == "CONTAINER_BOX_RED" || object_list.objects[i].name == "CONTAINER_BOX_BLUE") { - if (object_list.objects[i].database_id > 100) + if (object_list.objects[i].database_id >= 100) { - ROS_DEBUG_STREAM("Updating RGB container pose"); + ROS_INFO_STREAM("Updating RGB container pose for " << object_list.objects[i].name); mm_object_recognition_utils_->adjustContainerPose(object_list.objects[i], container_height_); } } - if (object_list.objects[i].dimensions.vector.z > 0.09) - { - tf::Quaternion q2; - q2.setRPY(0.0, -1.57, 0.0); - object_list.objects[i].pose.pose.orientation.x = q2.x(); - object_list.objects[i].pose.pose.orientation.y = q2.y(); - object_list.objects[i].pose.pose.orientation.z = q2.z(); - object_list.objects[i].pose.pose.orientation.w = q2.w(); - } - else - { + // setting the pitch to -90 for vertical grasp + // if (object_list.objects[i].dimensions.vector.z > 0.09 and + // object_list.objects[i].name != "CONTAINER_BOX_RED" && + // object_list.objects[i].name != "CONTAINER_BOX_BLUE") + // { + // tf::Quaternion q2; + // q2.setRPY(0.0, -1.57, 0.0); + // 
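// Worked example for the fixed-height handling further below (values are
// illustrative): with use_fixed_heights_ set, or when the detected object
// height differs from the estimated workspace height by more than 3 cm, the
// pose z is snapped to the nearest known platform level. For a floor height
// of 0.0 m and a detected z of 0.103 m, is_10cm matches (|0.103 - 0.10| < 0.01)
// and the pose becomes height_of_floor_ + 0.10 + object_height_above_workspace_.
// Poses that match none of the 0/5/10/15 cm levels are left untouched.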
object_list.objects[i].pose.pose.orientation.x = q2.x(); + // object_list.objects[i].pose.pose.orientation.y = q2.y(); + // object_list.objects[i].pose.pose.orientation.z = q2.z(); + // object_list.objects[i].pose.pose.orientation.w = q2.w(); + // } + // else + // { // Make pose flat tf::Quaternion q2 = tf::createQuaternionFromRPY(0.0, change_in_pitch , yaw); object_list.objects[i].pose.pose.orientation.x = q2.x(); @@ -739,10 +906,67 @@ void MultimodalObjectRecognitionROS::adjustObjectPose(mas_perception_msgs::Objec object_list.objects[i].pose.pose.orientation.z = q2.z(); object_list.objects[i].pose.pose.orientation.w = q2.w(); - object_list.objects[i].pose.pose.position.z = scene_segmentation_ros_->getWorkspaceHeight() + + double detected_object_height = object_list.objects[i].pose.pose.position.z; + if (obj_category_ == "cavity") + { + ROS_WARN_STREAM("PP01 workstation; not updating height"); + } + else if (object_list.objects[i].name == "CONTAINER_BOX_RED" || + object_list.objects[i].name == "CONTAINER_BOX_BLUE") + { + ROS_WARN_STREAM("Container; not updating height"); + } + else if (use_fixed_heights_ or (std::fabs(detected_object_height - scene_segmentation_ros_->getWorkspaceHeight()) > 0.03)) + { + if (use_fixed_heights_) + { + ROS_WARN_STREAM("Assuming fixed platform heights of 0, 5, 10 and 15 cm"); + } + else + { + ROS_WARN_STREAM("Difference between object height and workspace height is > 3cm"); + ROS_WARN_STREAM("Object height: " << detected_object_height << " Workspace height: " << scene_segmentation_ros_->getWorkspaceHeight() << "Hight of floor: " << height_of_floor_); + } + // do something + bool is_0cm = std::fabs(detected_object_height - height_of_floor_) < 0.01; + bool is_5cm = std::fabs(detected_object_height - (height_of_floor_ + 0.05)) < 0.01; + bool is_10cm = std::fabs(detected_object_height - (height_of_floor_ + 0.1)) < 0.01; + bool is_15cm = std::fabs(detected_object_height - (height_of_floor_ + 0.15)) < 0.01; + if (is_0cm) + { + ROS_WARN_STREAM("Updating height to 0cm"); + object_list.objects[i].pose.pose.position.z = height_of_floor_ + object_height_above_workspace_; + } + else if (is_5cm) + { + ROS_WARN_STREAM("Updating height to 5cm"); + object_list.objects[i].pose.pose.position.z = height_of_floor_ + 0.05 + object_height_above_workspace_; + } + else if (is_10cm) + { + ROS_WARN_STREAM("Updating height to 10cm"); + object_list.objects[i].pose.pose.position.z = height_of_floor_ + 0.1 + object_height_above_workspace_; + } + else if (is_15cm) + { + ROS_WARN_STREAM("Updating height to 15cm"); + object_list.objects[i].pose.pose.position.z = height_of_floor_ + 0.15 + object_height_above_workspace_; + } + else + { + ROS_WARN_STREAM("Height is not 0, 5, 10 or 15 cm. 
Not updating height"); + } + + } + else + { + object_list.objects[i].pose.pose.position.z = scene_segmentation_ros_->getWorkspaceHeight() + object_height_above_workspace_; - } + } + // } + + /* // Update workspace height if (scene_segmentation_ros_->getWorkspaceHeight() != -1000.0) { @@ -755,12 +979,43 @@ void MultimodalObjectRecognitionROS::adjustObjectPose(mas_perception_msgs::Objec ROS_WARN_STREAM("Updated container height: " << object_list.objects[i].pose.pose.position.z ); } } + */ // Update axis or bolt pose - if (object_list.objects[i].name == "M20_100" || object_list.objects[i].name == "AXIS") + if (object_list.objects[i].name == "M20_100" || object_list.objects[i].name == "AXIS" || object_list.objects[i].name == "SCREWDRIVER") { mm_object_recognition_utils_->adjustAxisBoltPose(object_list.objects[i]); } + + // update pose for Wrench + if (object_list.objects[i].name == "WRENCH") + { + bool is_0cm = std::fabs(detected_object_height - height_of_floor_) < 0.01; + bool is_5cm = std::fabs(detected_object_height - (height_of_floor_ + 0.05)) < 0.01; + bool is_10cm = std::fabs(detected_object_height - (height_of_floor_ + 0.1)) < 0.01; + bool is_15cm = std::fabs(detected_object_height - (height_of_floor_ + 0.15)) < 0.01; + + if (is_0cm) + { + object_list.objects[i].pose.pose.position.z -= 0.005; + } + + if (is_5cm) + { + object_list.objects[i].pose.pose.position.z -= 0.003; + } + + if (is_10cm) + { + object_list.objects[i].pose.pose.position.z -= 0.001; + } + + if (is_15cm) + { + object_list.objects[i].pose.pose.position.z -= 0.002; + } + + } } } @@ -785,6 +1040,10 @@ void MultimodalObjectRecognitionROS::loadObjectInfo(const std::string &filename) { round_objects_.insert(f.name); } + else if (f.shape == "flat") + { + flat_objects_.insert(f.name); + } object_info_.push_back(f); } } @@ -801,12 +1060,14 @@ void MultimodalObjectRecognitionROS::loadObjectInfo(const std::string &filename) void MultimodalObjectRecognitionROS::eventCallback(const std_msgs::String::ConstPtr &msg) { std_msgs::String event_out; + ROS_INFO("Event call back"); + if (msg->data == "e_start") { // Synchronize callback image_sub_ = new message_filters::Subscriber (nh_, "input_image_topic", 1); cloud_sub_ = new message_filters::Subscriber (nh_, "input_cloud_topic", 1); - msg_sync_ = new message_filters::Synchronizer (msgSyncPolicy(10), *image_sub_, *cloud_sub_); + msg_sync_ = new message_filters::Synchronizer (msgSyncPolicy(1), *image_sub_, *cloud_sub_); msg_sync_->registerCallback(boost::bind(&MultimodalObjectRecognitionROS::synchronizeCallback, this, _1, _2)); } else if (msg->data == "e_stop") @@ -859,13 +1120,16 @@ void MultimodalObjectRecognitionROS::configCallback(mir_object_recognition::Scen // Object recognizer param enable_rgb_recognizer_ = config.enable_rgb_recognizer; enable_pc_recognizer_ = config.enable_pc_recognizer; + obj_category_ = config.obj_category; // Cluster param center_cluster_ = config.center_cluster; pad_cluster_ = config.pad_cluster; padded_cluster_size_ = config.padded_cluster_size; // Workspace and object height + use_fixed_heights_ = config.use_fixed_heights; object_height_above_workspace_ = config.object_height_above_workspace; + height_of_floor_ = config.height_of_floor; container_height_ = config.container_height; // RGB proposal params rgb_roi_adjustment_ = config.rgb_roi_adjustment; diff --git a/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_utils.cpp b/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_utils.cpp index 
ca2413312..f126f3b2e 100644 --- a/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_utils.cpp +++ b/mir_perception/mir_object_recognition/ros/src/multimodal_object_recognition_utils.cpp @@ -107,7 +107,8 @@ void MultimodalObjectRecognitionUtils::adjustContainerPose(mas_perception_msgs:: // Change the center of object container_object.pose.pose.position.x = centroid[0]; container_object.pose.pose.position.y = centroid[1]; - container_object.pose.pose.position.z = max_pt.z + container_height; + ROS_INFO_STREAM("Updating height from " << container_object.pose.pose.position.z << " to " << (max_pt.z - 0.02)); + container_object.pose.pose.position.z = max_pt.z - 0.02; } void MultimodalObjectRecognitionUtils::adjustAxisBoltPose(mas_perception_msgs::Object &object) { @@ -131,7 +132,17 @@ void MultimodalObjectRecognitionUtils::adjustAxisBoltPose(mas_perception_msgs::O unsigned int valid_points = pcl::compute3DCentroid(*point_at_z, centroid); if (object.name == "M20_100") { - ROS_INFO_STREAM("Updating M20_100 pose from object id: " << object.database_id); + ROS_INFO_STREAM("Updating object pose from object id: " << object.database_id); + float midpoint_x = (object.pose.pose.position.x + centroid[0])/2; + float midpoint_y = (object.pose.pose.position.y + centroid[1])/2; + float quarter_x = (object.pose.pose.position.x + midpoint_x)/2; + float quarter_y = (object.pose.pose.position.y + midpoint_y)/2; + object.pose.pose.position.x = quarter_x; + object.pose.pose.position.y = quarter_y; + } + else if (object.name == "SCREWDRIVER") + { + ROS_INFO_STREAM("Updating object pose from object id: " << object.database_id); float midpoint_x = (object.pose.pose.position.x + centroid[0])/2; float midpoint_y = (object.pose.pose.position.y + centroid[1])/2; object.pose.pose.position.x = midpoint_x; diff --git a/mir_perception/mir_object_segmentation/ros/launch/scene_segmentation.launch b/mir_perception/mir_object_segmentation/ros/launch/scene_segmentation.launch index 7bf326e97..c766c4195 100644 --- a/mir_perception/mir_object_segmentation/ros/launch/scene_segmentation.launch +++ b/mir_perception/mir_object_segmentation/ros/launch/scene_segmentation.launch @@ -2,7 +2,7 @@ - + diff --git a/mir_perception/mir_perceive_aruco_cube/README.md b/mir_perception/mir_perceive_aruco_cube/README.md index d4193320d..b0d67cfee 100644 --- a/mir_perception/mir_perceive_aruco_cube/README.md +++ b/mir_perception/mir_perceive_aruco_cube/README.md @@ -15,8 +15,8 @@ publishes a pose continously. 
Related package: `mir\_perceive\_mock` Input: - - raw image (`/arm_cam3d/rgb/image_raw`) - - camera info (`/arm_cam3d/rgb/camera_info`) + - raw image (`/arm_cam3d/color/image_raw`) + - camera info (`/arm_cam3d/color/camera_info`) - event in (`/mir_perception/aruco_cube_perceiver/event_in`) Output: - event out (`/mir_perception/aruco_cube_perceiver/event_out`) diff --git a/mir_perception/mir_perceive_aruco_cube/ros/launch/perceive_aruco_cube.launch b/mir_perception/mir_perceive_aruco_cube/ros/launch/perceive_aruco_cube.launch index 8b16a6f87..4851bfbd4 100644 --- a/mir_perception/mir_perceive_aruco_cube/ros/launch/perceive_aruco_cube.launch +++ b/mir_perception/mir_perceive_aruco_cube/ros/launch/perceive_aruco_cube.launch @@ -1,7 +1,7 @@ - + @@ -10,8 +10,8 @@ - - + + diff --git a/mir_perception/mir_perceive_aruco_cube/ros/scripts/perceive_aruco_cube b/mir_perception/mir_perceive_aruco_cube/ros/scripts/perceive_aruco_cube index 0af26262f..97e722b74 100755 --- a/mir_perception/mir_perceive_aruco_cube/ros/scripts/perceive_aruco_cube +++ b/mir_perception/mir_perceive_aruco_cube/ros/scripts/perceive_aruco_cube @@ -18,7 +18,7 @@ class ArucoCubePerceiver(object): def __init__(self): # class variables self._bridge = CvBridge() - self._aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50) + self._aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50) self._listening = False self._publish_object = False self._retry_attempts = 0 diff --git a/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/object_utils_ros.h b/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/object_utils_ros.h index 8f29d39c5..49efd2bc5 100644 --- a/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/object_utils_ros.h +++ b/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/object_utils_ros.h @@ -54,9 +54,11 @@ void estimatePose(const BoundingBox &box, geometry_msgs::PoseStamped &pose); * \param[in] Minimum value of the field z allowed * \param[in] Maximum value of the field z allowed */ -PointCloud estimatePose(const PointCloud::Ptr &xyz_input_cloud, geometry_msgs::PoseStamped &pose, +PointCloud estimatePose(const PointCloud::Ptr &xyz_input_cloud, geometry_msgs::PoseStamped &pose, + mas_perception_msgs::Object &object, std::string shape = "None", float passthrough_lim_min = 0.0060, - float passthrough_lim_max = 0.0); + float passthrough_lim_max = 0.0, + std::string obj_category = "atwork"); /** \brief Transform pose * \param[in] Transform listener diff --git a/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/pointcloud_utils_ros.h b/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/pointcloud_utils_ros.h index be7c4f13c..abc1a82ce 100644 --- a/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/pointcloud_utils_ros.h +++ b/mir_perception/mir_perception_utils/ros/include/mir_perception_utils/pointcloud_utils_ros.h @@ -17,6 +17,9 @@ #include #include +#include + +#include namespace mir_perception_utils { @@ -60,7 +63,27 @@ bool transformPointCloud2(const boost::shared_ptr &tf_lis * \param[in] Adjust the rgb roi proposal (in pixel) * \param[in] Remove 3D ROI outliers */ -bool getPointCloudROI(const sensor_msgs::RegionOfInterest &roi, const PointCloud::Ptr &cloud_id, +bool getPointCloudROI(const sensor_msgs::RegionOfInterest &roi, const PointCloud::Ptr &cloud_in, + PointCloud::Ptr &cloud_roi, float roi_size_adjustment, bool remove_outliers); + +/** \brief Get 3D ROI of point cloud given 
2D BoundingBox + * \param[in] Region of interest (bounding box) of 2D object + * \param[in] Organized pointcloud input + * \param[out] 3D pointcloud cluster (3D ROI) of the given 2D BoundingBox + * \param[in] Adjust the rgb roi proposal (in pixel) + * \param[in] Remove 3D ROI outliers +*/ +bool getPointCloudROI(const mas_perception_msgs::BoundingBox &bbox, const PointCloud::Ptr &cloud_in, + PointCloud::Ptr &cloud_roi, float roi_size_adjustment, bool remove_outliers); + +/** \brief Get 3D ROI of point cloud given Segmentation mask + * \param[in] Mask of the object + * \param[in] Organized pointcloud input + * \param[out] 3D pointcloud cluster (3D ROI) of the given mask + * \param[in] Adjust the rgb roi proposal (in pixel) + * \param[in] Remove 3D ROI outliers +*/ +bool getPointCloudROI(const cv::Mat &mask, const PointCloud::Ptr &cloud_in, PointCloud::Ptr &cloud_roi, float roi_size_adjustment, bool remove_outliers); } }; diff --git a/mir_perception/mir_perception_utils/ros/src/arUco_test_detect.py b/mir_perception/mir_perception_utils/ros/src/arUco_test_detect.py new file mode 100755 index 000000000..0be43c5e0 --- /dev/null +++ b/mir_perception/mir_perception_utils/ros/src/arUco_test_detect.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +import rospy +import std_msgs.msg +from sensor_msgs.msg import Image +import cv2 +import numpy as np +from cv_bridge import CvBridge, CvBridgeError + +class DetectArucoMarker(): + + def __init__(self): + + #subscriber + self.arUco_ids = None + self.arUco_len = None + self.bridge = CvBridge() + print("in Init") + rospy.Subscriber("/tower_cam3d_middle/color/image_raw", Image, self.astra_callbck) + + def astra_callbck(self, Image): + + Image = self.bridge.imgmsg_to_cv2(Image, "bgr8") + # print('Image data', Image.data) + + dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_7X7_1000) + parameters = cv2.aruco.DetectorParameters_create() + + # print("detected") + corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(Image, dictionary, parameters=parameters) + Image = cv2.aruco.drawDetectedMarkers(Image, corners, ids) + + self.arUco_ids = ids + + if ids is not None: #add NoneType or greater than 1 condition for this check + if len(ids)>0: + self.arUco_len = len(ids) + print(f'Detected arUco markers : ',self.arUco_len) + print(f'Detected Ids of markers : ', self.arUco_ids) + + # def + +def main(): + rospy.init_node("detect_arUco_test", anonymous=True) + print(cv2.__version__) + detect_arUco_test = DetectArucoMarker() + # detect_arUco_test.start() + +if __name__ == "__main__": + main() + rospy.spin() \ No newline at end of file diff --git a/mir_perception/mir_perception_utils/ros/src/object_utils_ros.cpp b/mir_perception/mir_perception_utils/ros/src/object_utils_ros.cpp index 18dc28987..75b82f496 100644 --- a/mir_perception/mir_perception_utils/ros/src/object_utils_ros.cpp +++ b/mir_perception/mir_perception_utils/ros/src/object_utils_ros.cpp @@ -52,15 +52,17 @@ void object::estimatePose(const BoundingBox &box, geometry_msgs::PoseStamped &po pose.pose.orientation.w = q.w(); } -PointCloud object::estimatePose(const PointCloud::Ptr &xyz_input_cloud, geometry_msgs::PoseStamped &pose, +PointCloud object::estimatePose(const PointCloud::Ptr &xyz_input_cloud, geometry_msgs::PoseStamped &pose, + mas_perception_msgs::Object &object, std::string shape, float passthrough_lim_min_offset, - float passthrough_lim_max_offset) + float passthrough_lim_max_offset, + std::string obj_category) { // Apply filter to remove points belonging to the plane for non // circular/spherical 
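// Summary of the filtering branch below: spheres and "flat" objects keep the
// full input cloud; for the "cavity" category the upper passthrough limit is
// pulled down to max_pt.z - 0.015 (0.02 for M20_H) and the remaining finite
// points get a fixed z of 0.015 m, an empirical value noted in the code; in
// the default category M20, M30 and F20_20_G skip the passthrough entirely,
// while all other objects are filtered between limit_min and limit_max.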
object // to find its orientation PointCloud filtered_cloud; - if (shape == "sphere") { + if (shape == "sphere" or shape == "flat") { filtered_cloud = *xyz_input_cloud; } else { pcl::PassThrough pass_through; @@ -70,10 +72,38 @@ PointCloud object::estimatePose(const PointCloud::Ptr &xyz_input_cloud, geometry pcl::getMinMax3D(*xyz_input_cloud, min_pt, max_pt); double limit_min = min_pt.z + passthrough_lim_min_offset; double limit_max = max_pt.z + passthrough_lim_max_offset; - - pass_through.setFilterLimits(limit_min, limit_max); - pass_through.setInputCloud(xyz_input_cloud); - pass_through.filter(filtered_cloud); + if (obj_category == "cavity") + { + // print + // std::cout << "[CAVITYYYYYYYYYYYY] min_pt.z: " << min_pt.z << std::endl; + // std::cout << "[CAVITYYYYYYYYYYYY] max_pt.z: " << max_pt.z << std::endl; + limit_max = max_pt.z - 0.015; // TODO: make 0.01 as a dynamic configurable parameter + if (object.name == "M20_H") + { + limit_max = max_pt.z - 0.02; + } + pass_through.setFilterLimits(limit_min, limit_max); + pass_through.setInputCloud(xyz_input_cloud); + pass_through.filter(filtered_cloud); + + // change z value to max(filterpointcloud z) This is only for cavity + for (size_t i = 0; i < filtered_cloud.points.size(); ++i) { + // check if filtered_cloud.points[i].z has not nan or inf value + if (!std::isnan(filtered_cloud.points[i].z) && !std::isinf(filtered_cloud.points[i].z)) { + filtered_cloud.points[i].z = 0.015; // 0.045 is an empirical values, TODO: get 0.045 from target_pose_z_pos from config file + } + } + } + else{ + if (object.name != "M20" && object.name != "M30" && object.name != "F20_20_G") { + pass_through.setFilterLimits(limit_min, limit_max); + pass_through.setInputCloud(xyz_input_cloud); + pass_through.filter(filtered_cloud); + } + else{ + filtered_cloud = *xyz_input_cloud; + } + } } Eigen::Vector4f centroid; diff --git a/mir_perception/mir_perception_utils/ros/src/pointcloud_utils_ros.cpp b/mir_perception/mir_perception_utils/ros/src/pointcloud_utils_ros.cpp index 05fd61e43..3b86d8137 100644 --- a/mir_perception/mir_perception_utils/ros/src/pointcloud_utils_ros.cpp +++ b/mir_perception/mir_perception_utils/ros/src/pointcloud_utils_ros.cpp @@ -99,6 +99,93 @@ bool pointcloud::transformPointCloud2(const boost::shared_ptrheight <= 1 || cloud_in->width <= 1) { + ROS_ERROR("Pointcloud input height is %d and width is %d",cloud_in->height, cloud_in->width ); + return (false); + } + + // get the points inside the mask + for (int i = 0; i < cloud_in->width; i++) { + for (int j = 0; j < cloud_in->height; j++) { + if (mask.at(j, i) == 255) { + PointT pcl_point = cloud_in->at(i, j); + if ((!std::isnan(pcl_point.x)) && (!std::isnan(pcl_point.y)) && (!std::isnan(pcl_point.z)) && + (!std::isnan(pcl_point.r)) && (!std::isnan(pcl_point.g)) && (!std::isnan(pcl_point.b))) { + cloud_roi->points.push_back(pcl_point); + } + } + } + } + + cloud_roi->header = cloud_in->header; + if (remove_outliers) { + if (cloud_roi->points.size() > 0) { + pcl::StatisticalOutlierRemoval sor; + sor.setInputCloud(cloud_roi); + sor.setMeanK(50); + sor.setStddevMulThresh(3.0); + sor.filter(*cloud_roi); + } + } + + return (true); +} + +bool pointcloud::getPointCloudROI(const mas_perception_msgs::BoundingBox &bbox, + const PointCloud::Ptr &cloud_in, PointCloud::Ptr &cloud_roi, + float roi_size_adjustment, bool remove_outliers) +{ + if (cloud_in->height <= 1 || cloud_in->width <= 1) { + ROS_ERROR("Pointcloud input height is %d and width is %d",cloud_in->height, cloud_in->width ); + return (false); + } + + // get 
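// Sketch of the oriented-bbox ROI extraction implemented below (identifiers
// as in this file): the four OBB vertices are rasterized into a binary mask
// and the organized cloud is indexed through it,
//   cv::Mat mask = cv::Mat::zeros(cloud_in->height, cloud_in->width, CV_8UC1);
//   cv::fillConvexPoly(mask, polygon.data(), polygon.size(), cv::Scalar(255));
//   PointT p = cloud_in->at(col, row);   // organized access: column, then row
// Only finite points under the polygon are copied into cloud_roi; an optional
// StatisticalOutlierRemoval pass (MeanK 50, stddev 3.0) then prunes outliers.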
vertices of the oriented bounding box + std::vector vertices = bbox.vertices; + + // create cv polygon + std::vector polygon; + for (const auto &vertex : vertices) { + polygon.push_back(cv::Point(vertex.x, vertex.y)); + } + + // create a mask + cv::Mat mask = cv::Mat::zeros(cloud_in->height, cloud_in->width, CV_8UC1); + + // fill the mask + cv::fillConvexPoly(mask, polygon.data(), polygon.size(), cv::Scalar(255)); + + // get the points inside the mask + for (int i = 0; i < cloud_in->width; i++) { + for (int j = 0; j < cloud_in->height; j++) { + if (mask.at(j, i) == 255) { + PointT pcl_point = cloud_in->at(i, j); + if ((!std::isnan(pcl_point.x)) && (!std::isnan(pcl_point.y)) && (!std::isnan(pcl_point.z)) && + (!std::isnan(pcl_point.r)) && (!std::isnan(pcl_point.g)) && (!std::isnan(pcl_point.b))) { + cloud_roi->points.push_back(pcl_point); + } + } + } + } + + cloud_roi->header = cloud_in->header; + if (remove_outliers) { + if (cloud_roi->points.size() > 0) { + pcl::StatisticalOutlierRemoval sor; + sor.setInputCloud(cloud_roi); + sor.setMeanK(50); + sor.setStddevMulThresh(3.0); + sor.filter(*cloud_roi); + } + } + + return (true); +} + bool pointcloud::getPointCloudROI(const sensor_msgs::RegionOfInterest &roi, const PointCloud::Ptr &cloud_in, PointCloud::Ptr &cloud_roi, float roi_size_adjustment, bool remove_outliers) diff --git a/mir_perception/mir_ppt_detection/CMakeLists.txt b/mir_perception/mir_ppt_detection/CMakeLists.txt index c9f203a6b..9a73b5aab 100644 --- a/mir_perception/mir_ppt_detection/CMakeLists.txt +++ b/mir_perception/mir_ppt_detection/CMakeLists.txt @@ -8,6 +8,7 @@ find_package(catkin REQUIRED COMPONENTS pcl_conversions roscpp rospy + dynamic_reconfigure std_msgs sensor_msgs mas_perception_msgs @@ -24,6 +25,10 @@ generate_messages( std_msgs ) +generate_dynamic_reconfigure_options( + config/PPTDetection.cfg +) + catkin_package() include_directories( @@ -41,4 +46,4 @@ target_link_libraries(ppt_detector ${catkin_LIBRARIES} yaml-cpp ) -add_dependencies(ppt_detector mir_ppt_detection_generate_messages_cpp) +add_dependencies(ppt_detector mir_ppt_detection_generate_messages_cpp ${PROJECT_NAME}_gencfg) diff --git a/mir_perception/mir_ppt_detection/config/PPTDetection.cfg b/mir_perception/mir_ppt_detection/config/PPTDetection.cfg new file mode 100644 index 000000000..386af5de5 --- /dev/null +++ b/mir_perception/mir_ppt_detection/config/PPTDetection.cfg @@ -0,0 +1,16 @@ +#! 
/usr/bin/env python + +PACKAGE='mir_ppt_detection' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() +pc_object_segmentation = gen.add_group("Pointcloud object segmentation") +# def add (self, name, paramtype, level, description, default = None, min = None, max = None, edit_method = ""): + +pc_object_segmentation.add("enable_debug_pc", bool_t, 0, "Enable debug pointcloud publisher", False) + +pc_os_ppt_detection = pc_object_segmentation.add_group("PPT detection") +pc_os_ppt_detection.add("planar_projection_thresh", double_t, 0, "Threshold for planer projection", 0.04 , 0.0, 1.0) +pc_os_ppt_detection.add("target_pose_z_pos", double_t, 0, "Z axis value for target pose", 0.035 , 0.0, 1.0) +exit (gen.generate (PACKAGE, "mir_ppt_detection", "PPTDetection")) \ No newline at end of file diff --git a/mir_perception/mir_ppt_detection/config/object_shape_learned_params.yaml b/mir_perception/mir_ppt_detection/config/object_shape_learned_params.yaml old mode 100755 new mode 100644 diff --git a/mir_perception/mir_ppt_detection/config/params.yaml b/mir_perception/mir_ppt_detection/config/params.yaml new file mode 100644 index 000000000..e295bc55d --- /dev/null +++ b/mir_perception/mir_ppt_detection/config/params.yaml @@ -0,0 +1,4 @@ +# reasonable parameters to find planes of @Work workspaces +# parameters for PPT detection +planar_projection_thresh: 0.04 +target_pose_z_pos: 0.035 \ No newline at end of file diff --git a/mir_perception/mir_ppt_detection/include/mir_ppt_detection/ppt_detector.h b/mir_perception/mir_ppt_detection/include/mir_ppt_detection/ppt_detector.h index e5c805ad8..ab0f194cc 100644 --- a/mir_perception/mir_ppt_detection/include/mir_ppt_detection/ppt_detector.h +++ b/mir_perception/mir_ppt_detection/include/mir_ppt_detection/ppt_detector.h @@ -38,8 +38,10 @@ #include #include #include - +#include #include +#include + typedef pcl::PointXYZRGB PointT; typedef pcl::PointXYZRGBA PointRGBA; @@ -64,6 +66,10 @@ class PPTDetector PointCloudRGBA::Ptr& cavity_cloud); protected: + dynamic_reconfigure::Server server_; + /** \brief Dynamic reconfigure callback + * */ + void configCallback(mir_ppt_detection::PPTDetectionConfig &config, uint32_t level); PointRGBA get_point_rgba(const pcl::PointXYZRGB& pt_rgb); PointCloudRGBA::Ptr get_point_cloud_rgba(const pcl::PointCloud::Ptr cloud_rgb); @@ -118,11 +124,12 @@ class PPTDetector int downsample_scale = 3; float planar_projection_thresh = 0.015; - float cam_cx = 320.0; - float cam_cy = 245.1; - float cam_fx = 615.8; - float cam_fy = 615.6; + float cam_cx = 333.55; + float cam_cy = 248.31; + float cam_fx = 611.99; + float cam_fy = 611.72; float min_cavity_area = 1e-4; + float target_pose_z_pos_; ros::Publisher cloud_pub0, cloud_pub1, cloud_pub2; ros::Publisher cavity_pub; @@ -137,7 +144,6 @@ class PPTDetector tf::TransformListener listener_; std::string target_frame_, source_frame_; - }; #endif diff --git a/mir_perception/mir_ppt_detection/launch/ppt_detector.launch b/mir_perception/mir_ppt_detection/launch/ppt_detector.launch index 6f619fdce..a44b8cc77 100644 --- a/mir_perception/mir_ppt_detection/launch/ppt_detector.launch +++ b/mir_perception/mir_ppt_detection/launch/ppt_detector.launch @@ -1,9 +1,10 @@ - + + @@ -14,14 +15,18 @@ - + + + - + + + diff --git a/mir_perception/mir_ppt_detection/scripts/covariance_data/ppt_train_process_data b/mir_perception/mir_ppt_detection/scripts/covariance_data/ppt_train_process_data old mode 100755 new mode 100644 diff --git 
a/mir_perception/mir_ppt_detection/scripts/covariance_data/ppt_train_record_data b/mir_perception/mir_ppt_detection/scripts/covariance_data/ppt_train_record_data old mode 100755 new mode 100644 diff --git a/mir_perception/mir_ppt_detection/src/ppt_detector.cpp b/mir_perception/mir_ppt_detection/src/ppt_detector.cpp index 5e72be951..b83b043ee 100644 --- a/mir_perception/mir_ppt_detection/src/ppt_detector.cpp +++ b/mir_perception/mir_ppt_detection/src/ppt_detector.cpp @@ -5,8 +5,14 @@ PPTDetector::PPTDetector(): { // Create a ROS subscriber for the input point cloud // pc_sub_ = nh_.subscribe ("points", 1, &PPTDetector::cloud_cb, this); + float planar_projection_thresh; + float target_pose_z_pos_; + nh_.param("planar_projection_thresh", planar_projection_thresh, 0.03); + nh_.param("target_pose_z_pos", target_pose_z_pos_, 0.035); event_in_sub_ = nh_.subscribe("event_in", 1, &PPTDetector::eventInCallback, this); - + dynamic_reconfigure::Server::CallbackType f = + boost::bind(&PPTDetector::configCallback, this, _1, _2); + server_.setCallback(f); // Create a ROS publisher for the output point cloud cloud_pub0 = nh_.advertise ("cloud_non_planar", 1); cloud_pub1 = nh_.advertise ("cloud_planar", 1); @@ -26,10 +32,18 @@ PPTDetector::PPTDetector(): } nh_.param("target_frame", target_frame_, "base_link"); - nh_.param("source_frame", source_frame_, "arm_cam3d_camera_color_optical_frame"); + nh_.param("source_frame", source_frame_, "tower_cam3d_front_camera_color_optical_frame"); nh_.param("debug_pub", debug_pub_, true); } +void PPTDetector::configCallback(mir_ppt_detection::PPTDetectionConfig &config, uint32_t level) +{ + planar_projection_thresh = config.planar_projection_thresh; + target_pose_z_pos_ = config.target_pose_z_pos; +// ROS_INFO_STREAM("planar_projection_thresh " << planar_projection_thresh); +} + +\ bool PPTDetector::readObjectShapeParams() { std::string object_shape_learned_params_file; @@ -129,6 +143,32 @@ void PPTDetector::downsample_organized_cloud(T cloud_in, PointCloud::Ptr cloud_d for( size_t j = 0, jj = 0; j < cloud_downsampled->width; jj += scale, j++){ cloud_downsampled->at(j, i) = cloud_in->at(jj, ii); } + // if we find NaN's fill them with valid values by extrapolating from previous points (along width) + for (size_t j = 0; j < cloud_downsampled->width; j++) { + if (std::isnan(cloud_downsampled->at(j, i).x) and j > 2) + { + float diff_x = cloud_downsampled->at(j-1, i).x - cloud_downsampled->at(j-2, i).x; + float diff_y = cloud_downsampled->at(j-1, i).y - cloud_downsampled->at(j-2, i).y; + float current_x = cloud_downsampled->at(j-1, i).x + diff_x; + float current_y = cloud_downsampled->at(j-1, i).y + diff_y; + cloud_downsampled->at(j, i).x = current_x; + cloud_downsampled->at(j, i).y = current_y; + cloud_downsampled->at(j, i).z = cloud_downsampled->at(j - 1, i).z; + } + } + // if we find NaN's fill them with valid values by extrapolating from the previous rows + for (size_t j = 0; j < cloud_downsampled->width; j++) { + if (std::isnan(cloud_downsampled->at(j, i).x) and i > 2) + { + float diff_x = cloud_downsampled->at(j, i-1).x - cloud_downsampled->at(j, i-2).x; + float diff_y = cloud_downsampled->at(j, i-1).y - cloud_downsampled->at(j, i-2).y; + float current_x = cloud_downsampled->at(j, i-1).x + diff_x; + float current_y = cloud_downsampled->at(j, i-1).y + diff_y; + cloud_downsampled->at(j, i).x = current_x; + cloud_downsampled->at(j, i).y = current_y; + cloud_downsampled->at(j, i).z = cloud_downsampled->at(j, i-1).z; + } + } } } @@ -191,10 +231,12 @@ void 
PPTDetector::project_points_to_plane(PointCloud::Ptr cloud_in, if (fabs(pt_in_dist - pt_proj_dist) < planar_projection_thresh) { planar_indices->indices.push_back(col + row*cloud_in->width); pt_projected.a = 0; - } else if (std::isnan(pt_in_dist) || pt_in_dist > pt_proj_dist){ + } else if (std::isnan(pt_in_dist)){ + // Do nothing + } else if (pt_in_dist > pt_proj_dist){ non_planar_indices->indices.push_back(col + row*cloud_in->width); pt_projected.a = 1; - } + } cloud_projected->points.push_back(pt_projected); } } @@ -495,7 +537,7 @@ void PPTDetector::publish_cavity_msg(const mir_ppt_detection::Cavities& cavities { ROS_ERROR("%s", ex.what()); } - pose_in_target_frame.pose.position.z = 0.035; //TODO: do not hardcode this; use workspace height + object_height_above_workspace + pose_in_target_frame.pose.position.z = target_pose_z_pos_; //TODO: do not hardcode this; use workspace height + object_height_above_workspace mas_perception_msgs::Cavity cavity; diff --git a/mir_perception/mir_rtt_detection/CMakeLists.txt b/mir_perception/mir_rtt_detection/CMakeLists.txt new file mode 100644 index 000000000..c1db8bdc2 --- /dev/null +++ b/mir_perception/mir_rtt_detection/CMakeLists.txt @@ -0,0 +1,61 @@ +cmake_minimum_required(VERSION 2.8.3) +project(mir_rtt_detection) + +set(CMAKE_CXX_STANDARD 14) +add_compile_options( + -O3 +) + +find_package(catkin REQUIRED COMPONENTS + cv_bridge + dynamic_reconfigure + mas_perception_msgs + pcl_ros + roscpp + rospy + roslint + tf + visualization_msgs + geometry_msgs + mir_object_segmentation + mir_perception_utils +) +catkin_python_setup() +find_package(PCL 1.10 REQUIRED) +find_package(VTK REQUIRED) +find_package(OpenCV REQUIRED) + +generate_dynamic_reconfigure_options( + ros/config/SceneSegmentation.cfg +) + +catkin_package( + + LIBRARIES + mir_object_segmentation + ${PROJECT_NAME} + CATKIN_DEPENDS + mas_perception_msgs + visualization_msgs +) + +include_directories( + ${catkin_INCLUDE_DIRS} + ${PCL_INCLUDE_DIRS} + ${VTK_INCLUDE_DIRS} +) + +add_definitions(-fpermissive) + +### LIBRARIES #################################################### + +### EXECUTABLES ############################################### + +### INSTALLS + +install(DIRECTORY ros/launch/ + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ros/launch +) +install(DIRECTORY ros/scripts/ + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ros/scripts +) diff --git a/mir_perception/mir_rtt_detection/README.md b/mir_perception/mir_rtt_detection/README.md new file mode 100644 index 000000000..9c00bc80b --- /dev/null +++ b/mir_perception/mir_rtt_detection/README.md @@ -0,0 +1,49 @@ +# mir_rtt_detection + + +- In the Advanced Transportation Task 2 and final, the robot is required to pick up an object from a rotating table. +- In our current approach, object detection is performed using YOLOv8 on a sequence of 15 frames from the camera. +- Centroids and orientations for the target object are transformed to the robot's base frame for all images, and stored along with their timestamps. +- A circular model is fit to the sequence of centroids and used to estimate the direction and rotation speed of the rotating table. +- To measure the accuracy of the estimated radius and rotation speed, we used four objects (S40_40_B, M20, Housing, and AllenKey) at eight radii (32 to 46 cm in 2 cm increments), yielding 32 readings. 
+- We found a Mean Absolute Error (MAE) of 5 mm for the estimated radius and 6 × 10⁻³ rad/s for the rotational speed, compared against a ground truth obtained by manually timing 10 rotations. +- The point on the circle closest to the robot is chosen as the grasp position, and the time at which the object will reach that point is predicted from the object's last known position and the table's rotation speed (a minimal sketch of the fitting step follows). + +
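A minimal, self-contained sketch of the fitting and speed-estimation idea described above (not the node's code: it assumes NumPy/SciPy, uses toy data, and the function names are illustrative only; the actual implementation lives in `ros/scripts/rtt_detector_node`):

```
import numpy as np
from scipy.optimize import least_squares

def fit_circle(xs, ys):
    # geometric residual: signed distance of each centroid from the candidate circle
    def residuals(p):
        xc, yc, r = p
        return np.hypot(xs - xc, ys - yc) - r
    r0 = np.mean(np.hypot(xs - xs.mean(), ys - ys.mean()))
    return least_squares(residuals, [xs.mean(), ys.mean(), r0]).x

def angular_speed(xs, ys, ts, xc, yc):
    # unwrap the angle of each centroid around the fitted centre and take the
    # median frame-to-frame rate as the table's rotation speed (rad/s)
    ang = np.unwrap(np.arctan2(ys - yc, xs - xc))
    return np.median(np.abs(np.diff(ang) / np.diff(ts)))

# toy data: a centroid turning at 0.3 rad/s on a 0.4 m circle centred at (1.0, 0.0)
t = np.linspace(0.0, 10.0, 31)
x = 1.0 + 0.4 * np.cos(0.3 * t)
y = 0.4 * np.sin(0.3 * t)
xc, yc, r = fit_circle(x, y)
print("centre=(%.3f, %.3f)  r=%.3f m  omega=%.3f rad/s"
      % (xc, yc, r, angular_speed(x, y, t, xc, yc)))
```

With noiseless toy data this recovers the 0.4 m radius and 0.3 rad/s speed; on real detections the node additionally determines the direction of rotation and predicts the timestamp at which the object reaches the grasp point.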
+ RTT circular model + RTT grasp position +
+ + +## Usage + +- Launch the bringup + ``` + roslaunch mir_bringup robot.launch + ``` + +- Launch planning bringup + ``` + roslaunch mir_planning_bringup robot.launch + ``` + +- Run the client from + ``` + rosrun mir_rtt_pick_object rtt_pick_object_client_test.py [obj_name] [workstation_name] + Example - rosrun mir_rtt_pick_object rtt_pick_object_client_test.py S40_40_B TT01 + ``` + +## Topics + +### In +- `/mir_perception/rtt/event_in` +- `/tower_cam3d_front/color/image_raw` +- `/tower_cam3d_front/depth/color/points` + +### Out +- `/mir_perception/rtt/event_out` +- `/mir_perception/mir_rtt_detector/predicted_object_pose` +- `/mir_perception/rtt/time_stamped_pose` +- `/mir_perception/multimodal_object_recognition/recognizer/rgb/output/debug_image` + diff --git a/mir_perception/mir_rtt_detection/docs/rtt.jpg b/mir_perception/mir_rtt_detection/docs/rtt.jpg new file mode 100644 index 000000000..55c637e69 Binary files /dev/null and b/mir_perception/mir_rtt_detection/docs/rtt.jpg differ diff --git a/mir_perception/mir_rtt_detection/docs/rtt_pick.jpg b/mir_perception/mir_rtt_detection/docs/rtt_pick.jpg new file mode 100644 index 000000000..59f1f48ec Binary files /dev/null and b/mir_perception/mir_rtt_detection/docs/rtt_pick.jpg differ diff --git a/mir_perception/mir_rtt_detection/package.xml b/mir_perception/mir_rtt_detection/package.xml new file mode 100644 index 000000000..dcdc13a86 --- /dev/null +++ b/mir_perception/mir_rtt_detection/package.xml @@ -0,0 +1,48 @@ + + + mir_rtt_detection + 0.0.0 + The rtt_detection package + + Ravisankar Selvaraju + GPLv3 + Ravisankar Selvaraju + + catkin + cv_bridge + geometry_msgs + roscpp + rospy + sensor_msgs + tf + dynamic_reconfigure + libpcl-all-dev + mas_perception_msgs + pcl_ros + visualization_msgs + roslint + mir_object_segmentation + mir_perception_utils + mir_rgb_object_recognition_models + mir_pointcloud_object_recognition_models + + cv_bridge + geometry_msgs + roscpp + rospy + sensor_msgs + tf + mir_object_segmentation + + cv_bridge + geometry_msgs + roscpp + rospy + sensor_msgs + tf + mas_perception_msgs + visualization_msgs + + roslaunch + + diff --git a/mir_perception/mir_rtt_detection/ros/launch/rtt_detector.launch b/mir_perception/mir_rtt_detection/ros/launch/rtt_detector.launch new file mode 100644 index 000000000..373beae97 --- /dev/null +++ b/mir_perception/mir_rtt_detection/ros/launch/rtt_detector.launch @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mir_perception/mir_rtt_detection/ros/scripts/rtt_detector_node b/mir_perception/mir_rtt_detection/ros/scripts/rtt_detector_node new file mode 100755 index 000000000..84b8b69b2 --- /dev/null +++ b/mir_perception/mir_rtt_detection/ros/scripts/rtt_detector_node @@ -0,0 +1,572 @@ +#!/usr/bin/env python3 +import os +import random +import time +import cv2 +import numpy as np +import roslib +import rospy +import torch +import math +import csv +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from cv_bridge import CvBridge, CvBridgeError +from std_msgs.msg import ColorRGBA, String +import message_filters +from mas_perception_msgs.msg import ImageList, Object, ObjectList, TimeStampedPose +from sensor_msgs.msg import Image, RegionOfInterest, PointCloud2, PointField +from sensor_msgs import point_cloud2 +from ultralytics import YOLO +import tf +from sklearn.decomposition import PCA +from geometry_msgs.msg import PoseStamped, Pose +from visualization_msgs.msg import Marker +from scipy.optimize import least_squares +from sklearn.mixture 
import GaussianMixture +import tf.transformations as tr +from scipy.spatial.transform import Rotation as R +import tf2_sensor_msgs +from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud +import tf2_ros +import tf2_py as tf2 +import ros_numpy +from scipy.optimize import least_squares +import pdb + +class RTTObjectTracker(): + def __init__(self, weights, net='detection', model_name='yolov8', + confidence_threshold=0.8, + iou_threshold=0.45, + img_size=640, + trace=True, + classify=False, + augment=False, + device='cpu', + debug_mode=True): + + self.cvbridge = CvBridge() + self.debug = debug_mode + self.pub_debug = rospy.Publisher("/mir_perception/multimodal_object_recognition/recognizer/rgb/output/debug_image", Image, queue_size=1) + self.pub_pose = rospy.Publisher("/mir_perception/mir_rtt_detector/predicted_object_pose", PoseStamped, queue_size=1) + self.pub_cropped_pc = rospy.Publisher("rtt/output/cropped_pc", PointCloud2, queue_size=1) + + self.net = net + self.model_name = model_name + self.weights = weights + self.confidence_threshold = confidence_threshold + self.iou_threshold = iou_threshold + self.augment = augment + self.table_rotating_clockwise = False + + # Dictionary to store object bounding box centers + self.object_centers = {} + # Object pose at pickup location + self.object_pose = {} + # Number of latest bounding box centers to keep + self.max_centers = 50 + # Dictionary to store object colors + self.object_colors = {} + # Dictionary to store object rotational velocities + self.object_rotational_speed = 0 + # Dictionary to store object ellipse parameters + self.object_ellipse_params_CL = {} + self.object_ellipse_params_BL = {} + self.settled_ellipse = {} + # Number of rotations + self.num_rotations = {} + self.future_timestamps = {} + self.center_point_position_list_wrt_base_link = {} + self.object_pick_position = {} + + # Initialize + self.device = torch.device(device) + self.half = self.device.type != 'cpu' # half precision only supported on CUDA + + # Load model + if self.model_name == 'yolov8': + self.model = YOLO(weights) + self.img_size = img_size + + self.tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(12)) + self.tf_listener = tf.TransformListener() + self.tf2_listener = tf2_ros.TransformListener(self.tf_buffer) + + rospy.Subscriber("/mir_perception/rtt/event_in", String, self.event_in_cb) + self.event_out = rospy.Publisher("/mir_perception/rtt/event_out", String, queue_size=1) + self.time_stamped_pose = rospy.Publisher("/mir_perception/rtt/time_stamped_pose", TimeStampedPose, queue_size=5) + + self.event = None + self.object_name = None + self.time_estimation_complete = False + self.orientation_estimation_complete = False + self.pose_estimation_complete = False + self.rotational_speed_calculation_complete = False + + + def callback(self, img_msg, point_cloud_msg): + try: + self.run(img_msg, point_cloud_msg) + except Exception as e: + rospy.loginfo("[RTT detector node]killing point cloud callback") + + def event_in_cb(self, msg): + """ + Starts a planned motion based on the specified arm position. 
+ + """ + self.event = msg.data + if self.event.startswith("e_start"): + self.object_name = self.event[8:] + + self.object_name = self.object_name.split("-")[0] + + # Subscribe to image topic + self.sub_img = message_filters.Subscriber("/tower_cam3d_front/color/image_raw", Image)#, self.image_recognition_cb) + + # subscribe to point cloud topic + self.sub_point_cloud = message_filters.Subscriber("/tower_cam3d_front/depth/color/points", PointCloud2)#, self.point_cloud_cb) + + # synchronize image and point cloud + self.ts = message_filters.ApproximateTimeSynchronizer([self.sub_img, self.sub_point_cloud], queue_size=10, slop=0.1) + self.ts.registerCallback(self.callback) + + if self.event.startswith("e_stop"): + # Dictionary to store object bounding box centers + self.object_centers = {} + # Object pose at pickup location + self.object_pose = {} + # Dictionary to store object rotational velocities + self.object_rotational_speed = 0 + self.future_timestamps = {} + self.center_point_position_list_wrt_base_link = {} + self.object_pick_position = {} + self.time_estimation_complete = False + self.orientation_estimation_complete = False + self.pose_estimation_complete = False + self.rotational_speed_calculation_complete = False + self.object_name = None + self.sub_img.sub.unregister() + self.sub_point_cloud.sub.unregister() + rospy.loginfo("[RTT detector node] Unregistered image and pointcloud subscribers") + + def yolo_detect(self, cv_img): + if self.net == 'detection' and self.model_name == 'yolov8': + predictions = self.model.predict(source=cv_img, + conf=self.confidence_threshold, + iou=self.iou_threshold, + device=self.device, + verbose=False, + ) + # convert results to numpy array + predictions_cp = predictions[0].obb.cpu() + class_ids = predictions_cp.cls.numpy() + class_labels = [self.model.names[int(label)] for label in class_ids] + class_scores = predictions_cp.conf.numpy() + class_bboxes = predictions_cp.xyxyxyxy.numpy() + + return class_bboxes, class_scores, class_labels, predictions + + def get_orientation_obb(self, bbox): + # Calculate the length of each side of the bounding box + side_lengths = [np.linalg.norm(np.array(bbox[i]) - np.array(bbox[(i + 1) % 4])) for i in range(4)] + + # Find the index of the longest side + longest_side_index = np.argmax(side_lengths) + + # Get the coordinates of the longest side + p1 = np.array(bbox[longest_side_index]) + p2 = np.array(bbox[(longest_side_index + 1) % 4]) + + # Calculate the vector representing the longest side + side_vector = p2 - p1 + + # Calculate the angle between the longest side and the horizontal axis + angle_rad = np.arctan2(side_vector[1], side_vector[0]) + + # Ensure the angle is between 0 and 2*pi radians + angle_rad %= (2 * np.pi) + + return angle_rad + + + def circle_residuals(self, params, x, y): + """ + Residual function for circle fitting. + params: [a, b, r] where (a, b) is the center coordinates and r is the radius. + x, y: Arrays of x and y coordinates of data points. + """ + a, b, r = params + return (x - a)**2 + (y - b)**2 - r**2 + + def fit_circle(self, x, y): + """ + Fit a circle to the given data points (x, y). + Returns the center coordinates (a, b) and the radius r. 
+ """ + # Initial guess for the circle parameters + initial_guess = [0.0, 0.0, 1.0] + + # Use least squares optimization to fit the circle + result = least_squares(self.circle_residuals, initial_guess, args=(x, y)) + + # Extract the optimized parameters + center_x, center_y, radius = result.x + + return center_x, center_y, radius + + def get_pointcloud_position(self, pointcloud, center): + """ + pointcloud: pointcloud data of current frame with PointCloud2 data type + center: x and y coordinates of center of the bounding box of detected object + """ + + # Get the pointcloud data + pc = np.array(list(point_cloud2.read_points(pointcloud, skip_nans=False, field_names=("x", "y", "z"))), dtype=np.float32) + pc = pc.reshape((480,640,3)) + + # Get the center point of the bounding box + center_x, center_y = center + # center_point = pc[center_y, center_x] + + # Define the radius of the circle + radius = 7 + + # Calculate the coordinates of the circle points within the radius + min_x = max(0, center_x - radius) + max_x = min(pc.shape[1] - 1, center_x + radius) + min_y = max(0, center_y - radius) + max_y = min(pc.shape[0] - 1, center_y + radius) + + circle_points = [] + for y in range(min_y, max_y + 1): + for x in range(min_x, max_x + 1): + distance = math.sqrt((center_x - x) ** 2 + (center_y - y) ** 2) + if distance <= radius: + point = pc[y, x] + if not np.isnan(point).any(): + circle_points.append(point) + + # Determine the centroid of the non-NaN points + if circle_points: + circle_points_pc = np.array([point for point in circle_points]) + center_point = np.mean(circle_points_pc, axis=0) + + # Get the pose of the center point of the bounding box + pose = PoseStamped() + pose.header.frame_id = pointcloud.header.frame_id + pose.header.stamp = pointcloud.header.stamp + pose.pose.position.x = center_point[0] + pose.pose.position.y = center_point[1] + pose.pose.position.z = center_point[2] + pose.pose.orientation.x = 0.0 + pose.pose.orientation.y = 0.0 + pose.pose.orientation.z = 0.0 + pose.pose.orientation.w = 1.0 + + try: + t = self.tf_listener.getLatestCommonTime("base_link", pose.header.frame_id) + pose.header.stamp = t + pose = self.tf_listener.transformPose("base_link", pose) + # if pose doesnot has nan values then append pose to the list + if not math.isnan(pose.pose.position.x): + center_wrt_BL = pose.pose.position.x, pose.pose.position.y + return center_wrt_BL + except ( + tf.LookupException, + tf.ConnectivityException, + tf.ExtrapolationException, + ) as e: + rospy.logerr("Tf error: %s" % str(e)) + + return None + + def predict_yaw(self, xc, yc, center_list, yaw_list_CL): + """ + xc, yc: center point of the circle + center_list: list of center points of the detected object in each frame + yaw_list: list of yaw angles of the detected object in each frame + """ + + # convert yaw_list_CL to yaw_list_BL + yaw_list_BL = yaw_list_CL + for i in range(len(yaw_list_CL)): + yaw_list_BL[i] = np.pi/2 - yaw_list_CL[i] # from CL to BL + if yaw_list_BL[i] > np.pi: + yaw_list_BL[i] -= 2*np.pi + if yaw_list_BL[i] < 0: + yaw_list_BL[i] += np.pi + # now yaw_list is b/w 0 to np.pi (180 degrees) + + # predict at the point of pickup (at Base Link angle of 180 degrees) + for i in range(len(center_list)): + xo, yo = center_list[i] + angle = math.atan2(yo - yc, xo - xc) + yaw_list_BL[i] -= angle + if yaw_list_BL[i] >= np.pi: + yaw_list_BL[i] -= np.pi + elif yaw_list_BL[i] < 0: + yaw_list_BL[i] += np.pi + + # get median of yaw_list + predicted_yaw_BL = np.median(np.array(yaw_list_BL)) + return predicted_yaw_BL + + 
def calculate_rotational_speed(self, center_list, frame_time_list, xc, yc, label): + """ + center_list: list of center points of the detected object in each frame + frame_time_list: list of time stamps of each frame + xc, yc: center point of the circle + """ + # self.num_rotations[label] = -1 + # Calculate the angle between each center point and the ellipse center + angle_list = [] + for center in center_list: + xo, yo = center + angle = math.atan2(yo - yc, xo - xc) + angle_list.append(angle) + + # Calculate the time differences between two consecutive frames + time_diff_list = [] + for i in range(1, len(frame_time_list)): + time_diff = frame_time_list[i] - frame_time_list[i - 1] + time_diff_list.append(time_diff) + + # Calculate the rotational speeds based on the angle differences and time differences + rotational_speed_list = [] + count_clockwise = 0 + count_anticlockwise = 0 + + for i in range(len(angle_list) - 1): + # detection of rotation direction + if 0 <= angle_list[i] <= math.pi and 0 <= angle_list[i+1] <= math.pi and (count_anticlockwise + count_clockwise < 5): + if angle_list[i] < angle_list[i+1]: + count_anticlockwise += 1 + else: + count_clockwise += 1 + if count_clockwise > count_anticlockwise: + self.table_rotating_clockwise = True + else: + self.table_rotating_clockwise = False + + # if object is in the quadrant adjacent to pickup point, calculate the angle difference and rotational speed, and get the average value + if abs(angle_list[i]) >= math.pi/2 and abs(angle_list[i+1]) >= math.pi/2: + # check if product of two consecutive angles is negative + if angle_list[i] * angle_list[i+1] < 0: + if angle_list[i] > 0: + angle_diff = abs(angle_list[i] - (angle_list[i+1] + 2*math.pi)) + else: + angle_diff = abs(angle_list[i+1] - (angle_list[i] + 2*math.pi)) + else: + angle_diff = abs(angle_list[i + 1] - angle_list[i]) + time_diff = frame_time_list[i+1] - frame_time_list[i] + if time_diff < 3 and time_diff > 0.05: + rotational_speed = abs(angle_diff) / time_diff + rotational_speed_list.append(rotational_speed) + + # return the average value for rotational speed + if len(rotational_speed_list) == 0: + return 0 + else: + rotational_speed_array = np.array(rotational_speed_list) + avg_rotational_speed = np.mean(rotational_speed_array) + + return avg_rotational_speed + + def predict_future_timestamps(self, avg_rotational_speed, center_list, frame_time_list, xc, yc, closest_x, closest_y): + """ + center_list: list of center points of the detected object in each frame + frame_time_list: list of time stamps of each frame + xc, yc: center point of the ellipse + avg_rotational_speed: rotational spee+6+d of particular object + closest_x, closest_y: closest point on the ellipse to the end-effector + """ + + last_n_locations = center_list[-5:] # Select the last 5 center points + last_n_frame_times = frame_time_list[-5:] # Select the corresponding frame time stamps + closest_angle = math.pi + time_for_one_rotation = 2 * math.pi / avg_rotational_speed + future_time_stamps = [] + + for i in range(len(last_n_locations)): + latest_location_x = last_n_locations[i][0] + latest_location_y = last_n_locations[i][1] + latest_frame_time = last_n_frame_times[i] + + last_angle = math.atan2(latest_location_y - yc, latest_location_x - xc) + + # Calculate the difference between angles, considering the range of last angle + if self.table_rotating_clockwise: + angle_difference = abs(2*math.pi - abs(closest_angle - last_angle)) + else: + angle_difference = abs(closest_angle - last_angle) + + time_diff = 
angle_difference / avg_rotational_speed + next_time_stamp = latest_frame_time + time_diff + future_time_stamps.append(next_time_stamp) + + # Calculate median of the future time stamps + median_future_time_stamp = np.median(np.array(future_time_stamps)) + if median_future_time_stamp > rospy.Time.now().to_sec() + 3.0: + return median_future_time_stamp + else: + median_future_time_stamp += time_for_one_rotation + return median_future_time_stamp + + def run(self, image, pointcloud): + """ + image: image data of current frame with Image data type + pointcloud: pointcloud data of current frame with PointCloud2 data type + """ + + start = time.time() + if image: + try: + cv_img = self.cvbridge.imgmsg_to_cv2(image, "bgr8") + bboxes, probs, labels, predictions = self.yolo_detect(cv_img) + # Capatilize labels + labels = [label.upper() for label in labels] + + image_header_frame_time = image.header.stamp + frame_time = image_header_frame_time.secs + \ + (image_header_frame_time.nsecs / 1000000000) + + bbox_img = None + + # Fit a Circle to the centers of the bounding boxes + for bbox, prob, label in zip(bboxes, probs, labels): + if label == self.object_name: + # Calculate center of oriented bounding box + center_x = int((bbox[0][0] + bbox[1][0] + bbox[2][0] + bbox[3][0]) // 4) + center_y = int((bbox[0][1] + bbox[1][1] + bbox[2][1] + bbox[3][1]) // 4) + center = np.array([center_x, center_y]) + + # yaw_angle = self.get_orientation(best_cnt, bbox_img) + yaw_angle = self.get_orientation_obb(bbox) + + # Update object centers dictionary + if label not in self.center_point_position_list_wrt_base_link and center_y > 30: + self.object_centers[label] = [] + self.object_ellipse_params_CL[label] = [] + self.object_ellipse_params_BL[label] = [] + self.num_rotations[label] = -1 + self.future_timestamps[label] = None + self.settled_ellipse[label] = False + self.object_pose[label] = None + self.center_point_position_list_wrt_base_link[label] = [] + self.object_pick_position[label] = None + if label in self.center_point_position_list_wrt_base_link and center_y > 20: + self.object_centers[label].append((center, frame_time)) + center_BL = self.get_pointcloud_position(pointcloud, center) + + if center_BL[0] is not None: + self.center_point_position_list_wrt_base_link[label].append((center_BL, yaw_angle, frame_time)) + # Keep only the last `max_centers` centers + if len(self.object_centers[label]) > self.max_centers: + self.object_centers[label] = self.object_centers[label][-self.max_centers:] + self.center_point_position_list_wrt_base_link[label] = self.center_point_position_list_wrt_base_link[label][-self.max_centers:] + + # # Create a CSV file for base link frame points + # with open('/home/robocup/ros/noetic/robocup/object_data_wrt_base_link.csv', mode='w') as file: + # writer = csv.writer(file) + # writer.writerow(['Object Name', 'X', 'Y', 'yaw', 'Frame Time']) + + # # Write the object data to the CSV file + # for label, centers_data in self.center_point_position_list_wrt_base_link.items(): + # for center, yaw, frame_time in centers_data: + # # print(f"Label: {label}, Center: {center}, Frame Time: {frame_time}") + # writer.writerow([label, center[0], center[1], yaw, frame_time]) + + if label in self.center_point_position_list_wrt_base_link: + # TODO: Put number of object instances as a reconfigurable parameter.(best found value = 15) + if self.center_point_position_list_wrt_base_link[label] and len(self.center_point_position_list_wrt_base_link[label]) > 15: + for label, centers_data in 
self.center_point_position_list_wrt_base_link.items(): + centers = np.array([center for center, _, _ in centers_data]) + yaw_list = [yaw for _, yaw, _ in centers_data] + frame_time_list = [frame_time for _, _, frame_time in centers_data] + xc, yc, radius = self.fit_circle(centers[:,0], centers[:,1]) + + # getting the point on the trajectory at 90 degrees in image plane [x-axis: left to right, y-axis: top to bottom] + closest_x = xc - radius + 0.014 + closest_y = yc + # TODO: Plot closest_x and closest_y on debug image. + + if np.isnan(closest_x): + continue + if not math.isnan(closest_x): + # the pose of the object is calculated by cropping pointcloud by bounding box and applying PCA on it + self.object_pose[label] = PoseStamped() + self.object_pose[label].header.stamp = pointcloud.header.stamp + self.object_pose[label].header.frame_id = "base_link" + self.object_pose[label].pose.position.x = closest_x + self.object_pose[label].pose.position.y = closest_y + self.object_pose[label].pose.position.z = -0.007 + # TODO: Object position height (0.010 for C069 lab) is added as fixed value. Write a method to get RTT table height. + + if label == "M30" or label == "M20" or label == "BEARING2": + self.object_pose[label].pose.orientation.x = 0.0 + self.object_pose[label].pose.orientation.y = 0.0 + self.object_pose[label].pose.orientation.z = 0.0 + self.object_pose[label].pose.orientation.w = 1.0 + else: + predicted_yaw = self.predict_yaw(xc, yc, centers, yaw_list) + orientation = tr.quaternion_from_euler(0, 0, predicted_yaw) + self.object_pose[label].pose.orientation.x = orientation[0] + self.object_pose[label].pose.orientation.y = orientation[1] + self.object_pose[label].pose.orientation.z = orientation[2] + self.object_pose[label].pose.orientation.w = orientation[3] + self.orientation_estimation_complete = True + + # TODO: Plot predicted yaw on debug image. 
+ if not self.rotational_speed_calculation_complete: + # get rotational speed + self.object_rotational_speed = self.calculate_rotational_speed(centers, frame_time_list, xc, yc, label) + self.rotational_speed_calculation_complete = True + if self.rotational_speed_calculation_complete and self.object_rotational_speed != 0: + # predict future timestamps at (closest_x, closest_y) + self.future_timestamps[label] = self.predict_future_timestamps(self.object_rotational_speed, centers, frame_time_list, xc, yc, closest_x, closest_y) + self.time_estimation_complete = True + if self.time_estimation_complete and self.orientation_estimation_complete and self.future_timestamps[self.object_name] is not None: + self.time_stamped_pose_msg = TimeStampedPose() + self.time_stamped_pose_msg.timestamps = self.future_timestamps[self.object_name] + self.time_stamped_pose_msg.pose = self.object_pose[self.object_name] + self.pub_pose.publish(self.object_pose[self.object_name]) + self.time_stamped_pose.publish(self.time_stamped_pose_msg) + # TODO: Add object pick time countdown in debug image [ha ha] + else: + self.time_estimation_complete = False + else: + continue + if self.debug: + # Draw bounding boxes and labels of detections + debug_img = predictions[0].plot() + self.publish_debug_img(debug_img) + + end = time.time() + # rospy.loginfo("Time taken for detection: %s",str(end - start)) + + except CvBridgeError as e: + rospy.logerr(e) + return + else: + rospy.logwarn("No image received") + + def publish_debug_img(self, debug_img): + debug_img = np.array(debug_img, dtype=np.uint8) + debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") + self.pub_debug.publish(debug_img) + + +if __name__ == '__main__': + rospy.init_node("rtt") + rospy.loginfo('Started RTT Node.') + net = rospy.get_param("/mir_perception/multimodal_object_recognition/recognizer/rgb/rtt_detector/net") + classifier_name = rospy.get_param("/mir_perception/multimodal_object_recognition/recognizer/rgb/rtt_detector/classifier") + dataset = rospy.get_param("/mir_perception/multimodal_object_recognition/recognizer/rgb/rtt_detector/dataset") + model_dir = rospy.get_param("/mir_perception/multimodal_object_recognition/recognizer/rgb/rtt_detector/model_dir") + weights = os.path.join(roslib.packages.get_pkg_dir("mir_rgb_object_recognition_models"), + 'common', 'models', classifier_name, dataset, "atwork_obb.pt") + object_recognizer = RTTObjectTracker( + weights=weights, debug_mode=True) + rospy.loginfo("RTT Recognizer is ready using %s : %s , dataset: %s ", + net, classifier_name, dataset) + rospy.spin() diff --git a/mir_perception/mir_rtt_detection/setup.py b/mir_perception/mir_rtt_detection/setup.py new file mode 100644 index 000000000..f383a822d --- /dev/null +++ b/mir_perception/mir_rtt_detection/setup.py @@ -0,0 +1,11 @@ +# ! 
DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD + +from distutils.core import setup +from catkin_pkg.python_setup import generate_distutils_setup + +d = generate_distutils_setup( + packages=['rtt_node'], + package_dir={'rtt_node': 'ros/scripts/rtt_node'}, + ) + +setup(**d) diff --git a/mir_planning/mir_actions/mir_actions/ros/config/youbot_dynamic_params.yaml b/mir_planning/mir_actions/mir_actions/ros/config/youbot_dynamic_params.yaml index 68fc99480..b67b8ca7b 100644 --- a/mir_planning/mir_actions/mir_actions/ros/config/youbot_dynamic_params.yaml +++ b/mir_planning/mir_actions/mir_actions/ros/config/youbot_dynamic_params.yaml @@ -32,15 +32,15 @@ dbc_pick_object: p_gain_y: 4.0 p_gain_yaw: 4.0 max_velocity_x: 0.05 - max_velocity_y: 0.2 + max_velocity_y: 0.1 max_velocity_z: 0.0 max_velocity_roll: 0.0 max_velocity_pitch: 0.0 max_velocity_yaw: 0.4 use_collision_avoidance: True - front_laser_threshold: 0.04 - right_laser_threshold: 0.04 - rear_laser_threshold: 0.02 + front_laser_threshold: 0.0525 + right_laser_threshold: 0.1 + rear_laser_threshold: 0.1 left_laser_threshold: 0.04 loop_rate: 25.0 pregrasp_planner_single_shot: @@ -80,6 +80,22 @@ pregrasp_planner_single_shot: joint_5_offset_side_grasp: 0.0 rotation_range_min: 270.0 rotation_range_max: 90.0 +pregrasp_planner_pick: + /pregrasp_planner_node: + min_azimuth: 0.0 + max_azimuth: 0.0 + min_zenith: -5.0 + max_zenith: 5.0 + min_roll: -30.0 + max_roll: 30.0 +pregrasp_planner_no_sampling: + /pregrasp_planner_node: + min_azimuth: 0.0 + max_azimuth: 0.0 + min_zenith: 0.0 + max_zenith: 0.0 + min_roll: 0.0 + max_roll: 0.0 pregrasp_planner_zealous: /pregrasp_planner_node: min_azimuth: 0.0 @@ -100,3 +116,15 @@ pregrasp_planner_zealous: min_height: 0.0 target_frame: base_link ignore_orientation: True +multimodal_object_recognition_atwork: + /mir_perception/multimodal_object_recognition: + obj_category: atwork + rgb_cluster_filter_limit_max: 0.35 +multimodal_object_recognition_cavity: + /mir_perception/multimodal_object_recognition: + obj_category: cavity + rgb_cluster_filter_limit_max: -0.015 +multimodal_object_recognition_container: + /mir_perception/multimodal_object_recognition: + obj_category: container + rgb_cluster_filter_limit_max: 0.35 diff --git a/mir_planning/mir_actions/mir_actions/ros/launch/run_action_servers.launch b/mir_planning/mir_actions/mir_actions/ros/launch/run_action_servers.launch index d641cba46..640ccf8f0 100644 --- a/mir_planning/mir_actions/mir_actions/ros/launch/run_action_servers.launch +++ b/mir_planning/mir_actions/mir_actions/ros/launch/run_action_servers.launch @@ -53,4 +53,7 @@ + + +
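The `multimodal_object_recognition_*` entries added to `youbot_dynamic_params.yaml` above are named parameter groups: each lists dynamic-reconfigure values (here `obj_category` and `rgb_cluster_filter_limit_max`) for the `/mir_perception/multimodal_object_recognition` namespace, so that action states such as `perceive_location` can switch the perception pipeline between the atwork, cavity and container settings. As a hedged sketch of what applying one such group amounts to (assuming the node exposes these fields via dynamic_reconfigure; in normal operation the action servers select the group for you):

```
#!/usr/bin/env python
# Sketch only: push the "multimodal_object_recognition_cavity" values by hand.
import rospy
from dynamic_reconfigure.client import Client

if __name__ == "__main__":
    rospy.init_node("apply_cavity_params_example")
    client = Client("/mir_perception/multimodal_object_recognition", timeout=10)
    client.update_configuration({
        "obj_category": "cavity",
        "rgb_cluster_filter_limit_max": -0.015,
    })
```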
diff --git a/mir_planning/mir_actions/mir_actions/ros/src/mir_actions/utils.py b/mir_planning/mir_actions/mir_actions/ros/src/mir_actions/utils.py index 7332a079d..feba2d188 100644 --- a/mir_planning/mir_actions/mir_actions/ros/src/mir_actions/utils.py +++ b/mir_planning/mir_actions/mir_actions/ros/src/mir_actions/utils.py @@ -114,8 +114,8 @@ def get_distance_between_poses(pose1, pose2): return type: float """ - print("pose1: {}".format(pose1.pose.position)) - print("pose2: {}".format(pose2.pose.position)) + # print("pose1: {}".format(pose1.pose.position)) + # print("pose2: {}".format(pose2.pose.position)) return math.sqrt( math.pow(pose1.pose.position.x - pose2.pose.position.x, 2) + diff --git a/mir_planning/mir_actions/mir_calibrate_pick/ros/scripts/calibrate_pick_server.py b/mir_planning/mir_actions/mir_calibrate_pick/ros/scripts/calibrate_pick_server.py index 35e420186..409145264 100755 --- a/mir_planning/mir_actions/mir_calibrate_pick/ros/scripts/calibrate_pick_server.py +++ b/mir_planning/mir_actions/mir_calibrate_pick/ros/scripts/calibrate_pick_server.py @@ -48,7 +48,8 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER", gms.control_gripper("open"), - transitions={"succeeded": "MOVE_ARM"}, + transitions={"succeeded": "MOVE_ARM", + "timeout": "MOVE_ARM"} ) smach.StateMachine.add('MOVE_ARM', gms.move_arm('look_at_workspace_from_near'), transitions={'succeeded': 'PUBLISH_REFERENCE_FRAME', diff --git a/mir_planning/mir_actions/mir_coworker_assembly/CMakeLists.txt b/mir_planning/mir_actions/mir_coworker_assembly/CMakeLists.txt new file mode 100644 index 000000000..c4356ddc3 --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 2.8.3) +project(mir_coworker_assembly) + +find_package(catkin REQUIRED + COMPONENTS +) + +catkin_package( + CATKIN_DEPENDS +) diff --git a/mir_planning/mir_actions/mir_coworker_assembly/README.md b/mir_planning/mir_actions/mir_coworker_assembly/README.md new file mode 100644 index 000000000..0a7abba6a --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/README.md @@ -0,0 +1,10 @@ +# mir_coworker_assembly + +This package is for coworker assembly technical challenge in Robocup 2023. The state machine will pick 3 objects, palce 3 objects, wait for coworker hand gesture, then detects and picks the assembled object. Finally it places the object. + +Our team won this technical challenge in Robocup@work 2023. + + +## Goal parameter description + +Requires no goal parameters from client. diff --git a/mir_planning/mir_actions/mir_coworker_assembly/package.xml b/mir_planning/mir_actions/mir_coworker_assembly/package.xml new file mode 100644 index 000000000..0eed6d9fc --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/package.xml @@ -0,0 +1,20 @@ + + + mir_coworker_assembly + 1.0.0 + + This package is for coworker assembly technical challenge in Robocup 2023. + The state machine will pick 3 objects, palce 3 objects, wait for coworker hand gesture. + Then detects and picks the assembled object. Finally it places the object. 
+ This server uses coworker_assembly_detection_node and hand_gesture_node from mir_object_recognition + + + + GPLv3 + + Santosh + Santosh + + catkin + + diff --git a/mir_planning/mir_actions/mir_coworker_assembly/ros/launch/coworker_assembly.launch b/mir_planning/mir_actions/mir_coworker_assembly/ros/launch/coworker_assembly.launch new file mode 100644 index 000000000..fb414fecf --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/ros/launch/coworker_assembly.launch @@ -0,0 +1,7 @@ + + + + + + + diff --git a/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_client b/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_client new file mode 100755 index 000000000..2c4032ff8 --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_client @@ -0,0 +1,40 @@ +#! /usr/bin/env python + +import sys + +import rospy +from actionlib import SimpleActionClient +from actionlib_msgs.msg import GoalStatus +from diagnostic_msgs.msg import KeyValue +from mir_planning_msgs.msg import GenericExecuteAction, GenericExecuteGoal + +if __name__ == "__main__": + rospy.init_node("coworker_assembly_client") + + client = SimpleActionClient("coworker_assembly_server", GenericExecuteAction) + client.wait_for_server() + + goal = GenericExecuteGoal() + + rospy.loginfo("Sending following goal to pick assembled object server") + rospy.loginfo(goal) + + client.send_goal(goal) + + timeout = 800.0 + finished_within_time = client.wait_for_result(rospy.Duration.from_sec(int(timeout))) + if not finished_within_time: + client.cancel_goal() + + state = client.get_state() + result = client.get_result() + if state == GoalStatus.SUCCEEDED: + rospy.loginfo("Action SUCCESS") + rospy.loginfo(client.get_result()) + sys.exit(0) + elif state == GoalStatus.ABORTED: + rospy.logerr("Action FAILED") + else: + rospy.logwarn("State: " + str(state)) + rospy.loginfo(client.get_result()) + sys.exit(1) diff --git a/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_server b/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_server new file mode 100755 index 000000000..a6ad925bc --- /dev/null +++ b/mir_planning/mir_actions/mir_coworker_assembly/ros/scripts/coworker_assembly_server @@ -0,0 +1,352 @@ +#!/usr/bin/python +import mcr_states.common.basic_states as gbs +import mir_states.common.action_states as gas +import mir_states.common.manipulation_states as gms +import rospy +import smach +from actionlib import SimpleActionClient +from geometry_msgs.msg import PoseStamped +from mir_actions.utils import Utils +from mir_planning_msgs.msg import ( + GenericExecuteAction, + GenericExecuteFeedback, + GenericExecuteResult, +) +from smach_ros import ActionServerWrapper, IntrospectionServer +from std_msgs.msg import String + +# =============================================================================== + + +class SelectObject(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["success", "failed"], + input_keys=["perceived_objects", "to_be_picked_objects", "platforms"], + output_keys=["feedback", "result", "to_be_picked_objects", "pick_anything_object", "pick_anything_stage_platform"], + ) + + def execute(self, userdata): + # Add empty result msg (because if none of the state do it, action server gives error) + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="SelectObject", text="selecting object to pick next" + ) + + 
to_be_picked_objects = userdata.to_be_picked_objects + perceived_objects = userdata.perceived_objects + platforms = userdata.platforms + + if len(to_be_picked_objects) != 0: + for obj in to_be_picked_objects: + if obj in perceived_objects: + userdata.pick_anything_object = obj + userdata.pick_anything_stage_platform = platforms[-1] + return "success" + + return "failed" + +class UpdatePickedObjectsAndPlatforms(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["success", "failed", "done"], + input_keys=["next_pick_object", "picked_objects", "to_be_picked_objects", "platforms", "pick_anything_object", "pick_anything_stage_platform"], + output_keys=["feedback", "result", "picked_objects", "to_be_picked_objects", "platforms"] + ) + + def execute(self, userdata): + # Add empty result msg (because if none of the state do it, action server gives error) + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="SelectObject", text="selecting object to pick next" + ) + + platforms = userdata.platforms + picked_objects = userdata.picked_objects + to_be_picked_objects = userdata.to_be_picked_objects + pick_anything_object = userdata.pick_anything_object + + if len(to_be_picked_objects) == 0: + return "done" + + # remove picked object from list + for obj in to_be_picked_objects: + if obj == pick_anything_object: + to_be_picked_objects.remove(obj) + userdata.to_be_picked_objects = to_be_picked_objects + picked_objects.append(obj) + userdata.picked_objects = picked_objects + platforms.remove(userdata.pick_anything_stage_platform) + userdata.platforms = platforms + if len(to_be_picked_objects) == 0: + return "done" + return "success" + + return "failed" + +# =============================================================================== + +def transition_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback + +def start_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback +# =============================================================================== + + +def main(): + # Open the container + rospy.init_node("coworker_assembly_server") + # Construct state machine + sm = smach.StateMachine( + outcomes=["OVERALL_SUCCESS", "OVERALL_FAILED"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + sm.userdata.perception_result = None + sm.userdata.pick_anything_stage_platform = None + sm.userdata.pick_anything_object = None + + sm.userdata.picked_objects = [] + sm.userdata.to_be_picked_objects = ["M20_100", "M20", "SPACER"] + sm.userdata.platforms = ["PLATFORM_LEFT", "PLATFORM_MIDDLE", "PLATFORM_RIGHT"] + + with sm: + smach.StateMachine.add( + "MOVE_TO_PICK_LOCATION", + gas.move_base("WS09", timeout=60.0), + transitions={ + "success": "PERCEIVE_LOCATION", + "failed": "MOVE_TO_PICK_LOCATION", + }, + ) + + # start perception + smach.StateMachine.add( + "PERCEIVE_LOCATION", + gas.perceive_location(obj_category="multimodal_object_recognition_atwork", timeout=60.0), + transitions={ + "success": "SELECT_OBJECT", + "failed": "MOVE_TO_PICK_LOCATION", + }, + ) + + # select object to pick + smach.StateMachine.add( + "SELECT_OBJECT", + SelectObject(), + transitions={ + "success": "PICK_OBJECT", + "failed": "MOVE_TO_PICK_LOCATION", + }, + ) + + # pick object + smach.StateMachine.add( + 
"PICK_OBJECT", + gas.pick_object(), + transitions={ + "success": "STAGE_OBJECT", + "failed": "MOVE_TO_PICK_LOCATION", + }, + ) + + smach.StateMachine.add( + "STAGE_OBJECT", + gas.stage_object(timeout=60.0), + transitions={ + "success": "UPDATE_PICK_AND_PLATFORM_DATA", + "failed": "OVERALL_FAILED", + }, + ) + + # update data + smach.StateMachine.add( + "UPDATE_PICK_AND_PLATFORM_DATA", + UpdatePickedObjectsAndPlatforms(), + transitions={ + "success": "SELECT_OBJECT", + "failed": "OVERALL_FAILED", + "done": "MOVE_TO_UNSTAGE_LOCATION" + }, + ) + + smach.StateMachine.add( + "MOVE_TO_UNSTAGE_LOCATION", + gas.move_base("WS08", timeout=60.0), + transitions={ + "success": "UNSTAGE_OBJECT_LEFT", + "failed": "MOVE_TO_UNSTAGE_LOCATION", + }, + ) + + smach.StateMachine.add( + "UNSTAGE_OBJECT_LEFT", + gas.unstage_object("PLATFORM_LEFT", timeout=60.0), + transitions={ + "success": "PLACE_OBJECT_LEFT", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "PLACE_OBJECT_LEFT", + gas.place_object("WS08"), + transitions={ + "success": "UNSTAGE_OBJECT_MIDDLE", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "UNSTAGE_OBJECT_MIDDLE", + gas.unstage_object("PLATFORM_MIDDLE", timeout=60.0), + transitions={ + "success": "PLACE_OBJECT_MIDDLE", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "PLACE_OBJECT_MIDDLE", + gas.place_object("WS08"), + transitions={ + "success": "UNSTAGE_OBJECT_RIGHT", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "UNSTAGE_OBJECT_RIGHT", + gas.unstage_object("PLATFORM_RIGHT", timeout=60.0), + transitions={ + "success": "PLACE_OBJECT_RIGHT", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "PLACE_OBJECT_RIGHT", + gas.place_object("WS08"), + transitions={ + "success": "CHECK_HUMAN_SIGNAL", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "CHECK_HUMAN_SIGNAL", + gbs.send_and_wait_events_combined( + event_in_list=[("/mir_perception/gesture_recognition/event_in", "e_start")], + event_out_list=[("/mir_perception/gesture_recognition/event_out", "e_done", True)], + timeout_duration=40, + ), + transitions={ + "success": "PUBLISH_REFERENCE_FRAME", + "timeout": "PUBLISH_REFERENCE_FRAME", + "failure": "PUBLISH_REFERENCE_FRAME", + }, + ) + + # publish a static frame which will be used as reference for perceived objs + smach.StateMachine.add( + "PUBLISH_REFERENCE_FRAME", + gbs.send_event([("/static_transform_publisher_node/event_in", "e_start")]), + transitions={"success": "START_OBJECT_RECOGNITION"}, + ) + + smach.StateMachine.add( + "START_OBJECT_RECOGNITION", + gbs.send_and_wait_events_combined( + event_in_list=[("/mir_perception/coworker_assembly/event_in", "e_start")], + event_out_list=[("/mir_perception/coworker_assembly/event_out", "e_done", True)], + timeout_duration=60, + ), + transitions={ + "success": "PICK_ASSEMBLED_OBJECT", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + # pick object + smach.StateMachine.add( + "PICK_ASSEMBLED_OBJECT", + gas.tc_pick_object(), + transitions={ + "success": "STAGE_ASSEMBLED_OBJECT", + "failed": "PICK_ASSEMBLED_OBJECT", + }, + ) + + smach.StateMachine.add( + "STAGE_ASSEMBLED_OBJECT", + gas.stage_object(timeout=60.0), + transitions={ + "success": "MOVE_TO_FINAL_LOCATION", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "MOVE_TO_FINAL_LOCATION", + gas.move_base("WS09", timeout=60.0), + transitions={ + "success": "UNSTAGE_ASSEMBLED_OBJECT", + "failed": "MOVE_TO_UNSTAGE_LOCATION", + }, + ) + + 
smach.StateMachine.add( + "UNSTAGE_ASSEMBLED_OBJECT", + gas.unstage_object("PLATFORM_LEFT", timeout=60.0), + transitions={ + "success": "PLACE_ASSEMBLED_OBJECT", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "PLACE_ASSEMBLED_OBJECT", + gas.place_object("WS09"), + transitions={ + "success": "OVERALL_SUCCESS", + "failed": "OVERALL_FAILED", + }, + ) + + + sm.register_transition_cb(transition_cb) + sm.register_start_cb(start_cb) + + # Construct action server wrapper + asw = ActionServerWrapper( + server_name="coworker_assembly_server", + action_spec=GenericExecuteAction, + wrapped_container=sm, + succeeded_outcomes=["OVERALL_SUCCESS"], + aborted_outcomes=["OVERALL_FAILED"], + preempted_outcomes=["PREEMPTED"], + goal_key="goal", + feedback_key="feedback", + result_key="result", + ) + # Run the server in a background thread + asw.run_server() + rospy.spin() + + +if __name__ == "__main__": + main() diff --git a/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_client_test.py b/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_client_test.py index ee0342b08..23b82ef28 100755 --- a/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_client_test.py +++ b/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_client_test.py @@ -19,7 +19,7 @@ peg = str(sys.argv[1]).upper() platform = str(sys.argv[2]).upper() goal.parameters.append(KeyValue(key="peg", value=peg)) - goal.parameters.append(KeyValue(key="platform", value=platform)) + goal.parameters.append(KeyValue(key="platform", value=platform)) # stage platform eg: PLATFORM_LEFT goal.parameters.append(KeyValue(key="hole", value="PP01")) rospy.loginfo("Sending following goal to place object server") rospy.loginfo(goal) diff --git a/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_server.py b/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_server.py index bc654d84d..23b00c359 100755 --- a/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_server.py +++ b/mir_planning/mir_actions/mir_insert_cavity/ros/scripts/insert_cavity_server.py @@ -9,40 +9,52 @@ import smach import tf.transformations from actionlib import SimpleActionClient -from geometry_msgs.msg import PoseStamped +from geometry_msgs.msg import PoseStamped, TwistStamped from mas_perception_msgs.msg import Cavity from mir_actions.utils import Utils from mir_planning_msgs.msg import ( GenericExecuteAction, GenericExecuteFeedback, GenericExecuteResult, + GenericExecuteGoal, ) from moveit_msgs.msg import MoveItErrorCodes from smach_ros import ActionServerWrapper, IntrospectionServer from std_msgs.msg import Float64, String - -# =============================================================================== - -# For wiggle arm -arm_command = moveit_commander.MoveGroupCommander("arm_1") -arm_command.set_goal_position_tolerance(0.01) -arm_command.set_goal_orientation_tolerance(0.01) -arm_command.set_goal_joint_tolerance(0.005) - -# =============================================================================== +from diagnostic_msgs.msg import KeyValue +from sensor_msgs.msg import JointState -class SelectObject(smach.State): - def __init__(self, topic_name): +class SelectCavity(smach.State): + def __init__(self, topic_name, vertical=False): smach.State.__init__( self, - outcomes=["succeeded"], + outcomes=["succeeded", "failed"], input_keys=["goal"], output_keys=["feedback", "result"], ) + self.vertical = vertical self.publisher = rospy.Publisher(topic_name, 
String, queue_size=10) rospy.sleep(0.1) # time for the publisher to register in ros network + def obj_to_cavity(self, obj_name: str, vertical: bool = False): + matchings = { + "M20": ["M20_H", "M20_V"], + "M30": ["M30_H", "M30_V"], + "M20_100": ["M20_100_H", "M20_H"], + "F20_20_B": ["F20_20_H", "F20_20_V"], + "F20_20_G": ["F20_20_H", "F20_20_V"], + "S40_40_B": ["S40_40_H", "S40_40_V"], + "S40_40_G": ["S40_40_H", "S40_40_V"], + } + if obj_name is not None and obj_name != "": + obj_name = obj_name.split("-")[0] + for key in matchings.keys(): + if obj_name == key: + return matchings[key][vertical] + return None + + def execute(self, userdata): # Add empty result msg (because if none of the state do it, action server gives error) userdata.result = GenericExecuteResult() @@ -51,41 +63,103 @@ def execute(self, userdata): ) obj = Utils.get_value_of(userdata.goal.parameters, "peg") - self.publisher.publish(String(data=obj)) + + if obj is None: + rospy.logerr("No object name provided") + return "failed" + + # get the cavity name + cavity_name = self.obj_to_cavity(obj, self.vertical) + + if cavity_name is None: + rospy.logerr("No cavity name found for object: " + obj) + return "failed" + + self.publisher.publish(String(data=cavity_name)) rospy.sleep(0.2) # let the topic survive for some time return "succeeded" - # =============================================================================== +class MoveArmUp(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + # velocity publisher + self.arm_velocity_pub = rospy.Publisher("/arm_1/arm_controller/cartesian_velocity_command", TwistStamped, queue_size=1) + + def execute(self, userdata): + + # send the velocity in +z direction wrt base_link to move the arm up + vel_msg = TwistStamped() + vel_msg.header.frame_id = "base_link" + # set velocity to 5cm/s + vel_msg.twist.linear.z = 0.04 + self.arm_velocity_pub.publish(vel_msg) + rospy.sleep(0.55) + # stop the arm + vel_msg.twist.linear.z = 0 + self.arm_velocity_pub.publish(vel_msg) + return "succeeded" + class ppt_wiggle_arm(smach.State): """ Wiggle arm after placement on the table """ - def __init__(self, wiggle_offset=0.0, joint=0): - smach.State.__init__(self, outcomes=["succeeded", "failed"]) - self.wiggle_offset = wiggle_offset - self.joint_number = joint + def __init__(self, wiggle_yaw=1.57): + smach.State.__init__(self, + input_keys=["goal"], + outcomes=["succeeded", "failed"]) + self.blocking = True # create publisher for sending gripper position self.gripper_topic = "/gripper_controller/command" - self.cavity_pose_topic = "/mcr_perception/object_selector/output/object_pose" + # self.cavity_pose_topic = "/mcr_perception/object_selector/output/object_pose" + self.cavity_pose_topic = "/mir_perception/multimodel_object_recognition/output/rgb_object_pose_array" + self.arm_velocity_pub = rospy.Publisher("/arm_1/arm_controller/cartesian_velocity_command", TwistStamped, queue_size=10) + + self.joint_states_sub = rospy.Subscriber("/joint_states", JointState, self.joint_states_cb) self.gripper = rospy.Publisher(self.gripper_topic, Float64, queue_size=10) - # Determining which object to wiggle - # rospy.Subscriber("~object_name", std_msgs.msg.String, self.object_name_cb) # Determineing object orientation rospy.Subscriber(self.cavity_pose_topic, PoseStamped, self.cavity_pose_cb) + # MoveIt! 
commander movegroup + self.arm_command = moveit_commander.MoveGroupCommander("arm_1") + self.arm_command.set_goal_position_tolerance(0.01) + self.arm_command.set_goal_orientation_tolerance(0.01) + self.arm_command.set_goal_joint_tolerance(0.005) + + self.object_name = None + self.type_of_adjustment = 'rotational' + + self.linear_wiggle_object_list = ["M20", "M30"] + self.rotational_wiggle_object_list = ["M20_100", "F20_20_B","F20_20_G", "S40_40_B", "S40_40_G"] + + + self.joint_offset_tolerance = 0.02 # radians + self.horizontal_wrist_angle = 2.98 # radians + self.vertical_wrist_angle = 4.53 # radians + self.yaw = 0 + self.wiggle_yaw = wiggle_yaw # wiggle angle in radians (default 1.57) on either direction self.joint_values_static = [] # giving some time to the publisher to register in ros network rospy.sleep(0.1) + + def joint_states_cb(self, msg): + if "arm_joint_1" in msg.name: # get the joint values of the arm only + self.current_joint_positions = msg.position + def object_name_cb(self, msg): """ Obtains an event for the component. @@ -106,82 +180,318 @@ def execute_arm(self, joint_number, wiggle_offset): self.joint_values_static[joint_number] + wiggle_offset ) try: - arm_command.set_joint_value_target(joint_values) + self.arm_command.set_joint_value_target(joint_values) except Exception as e: rospy.logerr("unable to set target position: %s" % (str(e))) - return "failed" - error_code = arm_command.go(wait=self.blocking) + return False + error_code = self.arm_command.go(wait=True) if error_code == MoveItErrorCodes.SUCCESS: - return "succeeded" + return True else: rospy.logerr("Arm movement failed with error code: %d", error_code) - return "failed" + return False - def execute(self, userdata): - # open the arm slightly - message = Float64() - message.data = -0.1 - self.gripper.publish(message) + def stop_arm(self): + message = TwistStamped() + message.header.stamp = rospy.Time.now() + self.arm_velocity_pub.publish(message) rospy.sleep(0.1) + return True + + def rotational_wiggle(self, direction, message, wiggle_velocity=5.0, timeout=15.0, wiggle_yaw=1.57): + + """ + WORKING DONT TOUCH !!! 
+ """ + + while not rospy.is_shutdown(): + + if direction == "CW": + rospy.loginfo("Wiggling CW") + rospy.loginfo("Wiggle yaw: %f", wiggle_yaw) + message.twist.angular.z = wiggle_velocity + elif direction == "CCW": + wiggle_yaw = 2 * wiggle_yaw + rospy.loginfo("Wiggling CCW") + rospy.loginfo("Wiggle yaw: %f", wiggle_yaw) + message.twist.angular.z = -wiggle_velocity + + if self.current_joint_positions is None or len(self.current_joint_positions) < 1: + rospy.logwarn("No joint positions received") + continue # skip the rest of the loop + + # Getting Yaw angle from joint_states + initial_yaw = self.current_joint_positions[-1] # in radians take the last joint value(EFF) + initial_time = rospy.Time.now().to_sec() + + while not rospy.is_shutdown(): + current_time = rospy.Time.now().to_sec() + self.arm_velocity_pub.publish(message) + rospy.sleep(0.1) + current_yaw = self.current_joint_positions[-1] + # check for timeout also and break after 5 seconds + # limits: 5.58 and 0.16 + if (current_yaw >= 5.4) or (current_yaw <= 0.28): + rospy.logwarn("Reached the joint limit") + self.stop_arm() + return True + if abs(current_yaw - initial_yaw) >= wiggle_yaw: + rospy.loginfo("Reached the desired yaw angle") + self.stop_arm() + return True + if current_time - initial_time >= timeout: + rospy.logwarn("Reached the timeout") + self.stop_arm() + return False + + def linear_wiggle_cartesian_mode(self, movement_type, travel_direction, message, travel_distance= 0.1, travel_velocity=0.03, timeout=5.0): + """ + Because of joint limit linear velocity movement is not possible do properlly + + # how to use + self.linear_wiggle_cartesian_mode("horizontal", "backward", message, travel_distance= 0.05, travel_velocity=0.03) + """ + + rospy.logwarn("moving obj %s %s" % (movement_type, travel_direction)) + + message.header.stamp = rospy.Time.now() + message.header.frame_id = "arm_link_5" + while not rospy.is_shutdown(): + if travel_direction == "forward": # change the direction of travel + travel_velocity = travel_velocity + elif travel_direction == "backward": + travel_velocity = -travel_velocity + else: + rospy.logerr("Invalid direction") + travel_velocity = 0.0 + + travel_time = travel_distance / abs(travel_velocity) + initial_time = rospy.Time.now().to_sec() + while not rospy.is_shutdown(): + if movement_type == "horizontal": + message.twist.linear.x = 0.0 + message.twist.linear.y = travel_velocity + elif movement_type == "vertical": + message.twist.linear.x = travel_velocity + message.twist.linear.y = 0.0 + else: + rospy.logerr("Invalid movement type") + message.twist.linear.x = 0.0 + message.twist.linear.y = 0.0 + + self.arm_velocity_pub.publish(message) + current_time = rospy.Time.now().to_sec() + rospy.sleep(0.05) + if abs(current_time - initial_time) >= travel_time: # no feedback from the arm just wait for the travel time + rospy.loginfo("Travel time reached") + self.stop_arm() + positive_flag = True + return True + + # get current pose of the arm and add offset and move to that position, + + def linear_wiggle_joint_mode(self, travel_direction, travel_distance= 0.08): # travel distance in radians + + # for vertical motion move arm link 4 and for horizontal motion move arm link 0 + + rospy.logwarn("Adjusting the object with linear wiggle joint mode in %s direction", travel_direction) + joint_number_to_change = None - wiggle_offset = None - self.joint_values_static = arm_command.get_current_joint_values() - ################# - if np.allclose(self.yaw, 0): - # wiggle right left first - joint_number_to_change = 0 - 
wiggle_offset = -0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go left - joint_number_to_change = 0 - wiggle_offset = 0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go center again - joint_number_to_change = 0 - wiggle_offset = 0.0 - self.execute_arm(joint_number_to_change, wiggle_offset) - # wiggle forward - joint_number_to_change = 3 - wiggle_offset = -0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go left - joint_number_to_change = 3 - wiggle_offset = 0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go center again - joint_number_to_change = 3 - wiggle_offset = 0.0 - self.execute_arm(joint_number_to_change, wiggle_offset) + wiggle_offset = travel_distance + + if travel_distance > 1.5: # Since it is joint angles should be careful with the travel distance + rospy.logerr("Travel distance is too high") + return "failed" + + self.joint_values_static = self.arm_command.get_current_joint_values() + + if travel_direction == "horizontal": + # check if the joint values for joint 4 is around 2.98 +- 0.1 if not set it to 2.98 + if self.joint_values_static[4] < self.horizontal_wrist_angle - self.joint_offset_tolerance or self.joint_values_static[4] > self.horizontal_wrist_angle + self.joint_offset_tolerance: + self.joint_values_static[4] = self.horizontal_wrist_angle # joint value for arm link 4 when wrist is horizontal + + elif travel_direction == "vertical": + # check if the joint values for joint 4 is around 4.01 +- 0.1 if not set it to 4.01 + if self.joint_values_static[4] < self.vertical_wrist_angle - self.joint_offset_tolerance or self.joint_values_static[4] > self.vertical_wrist_angle + self.joint_offset_tolerance: + self.joint_values_static[4] = self.vertical_wrist_angle # joint value for arm link 4 when wrist is vertical + else: + rospy.logerr("[cavity server] Invalid travel direction") + return "failed" + + + try: + self.arm_command.set_joint_value_target(self.joint_values_static) + except Exception as e: + rospy.logerr("unable to set target position: %s" % (str(e))) + return "failed" + error_code = self.arm_command.go(wait=True) + if not error_code == MoveItErrorCodes.SUCCESS: + return "failed" + + success = self.execute_arm(joint_number_to_change , wiggle_offset= wiggle_offset) + success &= self.execute_arm(joint_number_to_change , wiggle_offset= -wiggle_offset) + success &= self.execute_arm(joint_number_to_change , wiggle_offset= 0.0) + + if success: + return "succeeded" else: - # wiggle forward - joint_number_to_change = 3 - wiggle_offset = -0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go left - joint_number_to_change = 3 - wiggle_offset = 0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go center again - joint_number_to_change = 3 - wiggle_offset = 0.0 - self.execute_arm(joint_number_to_change, wiggle_offset) - # wiggle right left first - joint_number_to_change = 0 - wiggle_offset = -0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go left - joint_number_to_change = 0 - wiggle_offset = 0.12 - self.execute_arm(joint_number_to_change, wiggle_offset) - # go center again - joint_number_to_change = 0 - wiggle_offset = 0.0 - self.execute_arm(joint_number_to_change, wiggle_offset) + return "failed" + + def rotate_wrist_joint(self, direction): + + """ + input: + direction: "horizontal" or "vertical" + Sets the wrist joint to horizontal or vertical position + """ + # for vertical motion move arm link 4 and for horizontal motion move arm link 0 + + 
self.joint_values_static = self.arm_command.get_current_joint_values() + + if direction == "horizontal": + # check if the joint values for joint 4 is around 2.98 +- 0.1 if not set it to 2.98 + if self.joint_values_static[4] < self.horizontal_wrist_angle - self.joint_offset_tolerance or self.joint_values_static[4] > self.horizontal_wrist_angle + self.joint_offset_tolerance: + self.joint_values_static[4] = self.horizontal_wrist_angle # joint value for arm link 4 when wrist is horizontal + + elif direction == "vertical": + # check if the joint values for joint 4 is around 4.01 +- 0.1 if not set it to 4.01 + if self.joint_values_static[4] < self.vertical_wrist_angle - self.joint_offset_tolerance or self.joint_values_static[4] > self.vertical_wrist_angle + self.joint_offset_tolerance: + self.joint_values_static[4] = self.vertical_wrist_angle # joint value for arm link 4 when wrist is vertical + else: + rospy.logerr("[cavity server] Invalid travel direction") + return "failed" + + try: + self.arm_command.set_joint_value_target(self.joint_values_static) + except Exception as e: + rospy.logerr("unable to set target position: %s" % (str(e))) + return "failed" + error_code = self.arm_command.go(wait=True) + if not error_code == MoveItErrorCodes.SUCCESS: + return "failed" + + def wiggle_arm_working(self, turn = 1, velocity = 0.08): + + message = TwistStamped() + message.header.frame_id = "arm_link_5" + message.twist.linear.x = 0.0 + message.twist.linear.y = 0.0 + message.twist.angular.z = 0.0 + initial_time = rospy.Time.now().to_sec() + travel_time = 1.0 + + while(abs(rospy.Time.now().to_sec() - initial_time) < travel_time): + message.twist.linear.x = 0.0 + message.twist.linear.y = velocity + self.arm_velocity_pub.publish(message) + rospy.sleep(0.05) + self.stop_arm() + + initial_time = rospy.Time.now().to_sec() + travel_time = 1.0 + while(abs(rospy.Time.now().to_sec() - initial_time) < travel_time): + message.twist.linear.x = 0.0 + message.twist.linear.y = -velocity + self.arm_velocity_pub.publish(message) + rospy.sleep(0.05) + self.stop_arm() + + initial_time = rospy.Time.now().to_sec() + travel_time = 1.0 + while(abs(rospy.Time.now().to_sec() - initial_time) < travel_time): + message.twist.linear.x = 0.0 + message.twist.linear.y = -velocity + self.arm_velocity_pub.publish(message) + rospy.sleep(0.05) + self.stop_arm() + + initial_time = rospy.Time.now().to_sec() + travel_time = 1.0 + while(abs(rospy.Time.now().to_sec() - initial_time) < travel_time): + message.twist.linear.x = 0.0 + message.twist.linear.y = velocity + self.arm_velocity_pub.publish(message) + rospy.sleep(0.05) + self.stop_arm() + + return True + + def execute(self, userdata): + + message = TwistStamped() + message.header.frame_id = "base_link" + + self.object_name = Utils.get_value_of(userdata.goal.parameters, "peg") + adjustment_list = ["rotational", "linear"] + + if self.object_name in self.linear_wiggle_object_list: + self.type_of_adjustment = "linear" + elif self.object_name in self.rotational_wiggle_object_list: + self.type_of_adjustment = "rotational" + elif self.object_name is None: + rospy.logwarn("No object name received") + self.object_name = "unknown" + + if self.type_of_adjustment == "rotational" or self.object_name == "unknown": + + rospy.loginfo("Adjusting %s with a %s movement", self.object_name, self.type_of_adjustment) + + self.rotational_wiggle("CW", message, wiggle_velocity=7.0) + rospy.loginfo("Rotational wiggle CW done") + + self.rotational_wiggle("CCW", message, wiggle_velocity=7.0) + rospy.loginfo("Rotational 
wiggle CCW done") + + + if self.type_of_adjustment == "linear" or self.object_name == "unknown": + + + rospy.loginfo("Adjusting %s with a %s movement", self.object_name, self.type_of_adjustment) + + rospy.loginfo("Starting linear wiggle in cartesian mode") + + # TODO: This is a quick fix only one direction will work, need a general solution + self.wiggle_arm_working(velocity = 0.1) + + rospy.loginfo("Horizontal wiggle done") return "succeeded" - ################# +# =============================================================================== + +class Unstage_to_place(smach.State): + + def __init__(self): + + smach.State.__init__(self, outcomes=["success","failed"],input_keys=["goal","heavy_objects", "platform","object"]) + self.platform = "PLATFORM_MIDDLE" + self.obj = "M20" + + self.unstage_client = SimpleActionClient('unstage_object_server', GenericExecuteAction) + self.unstage_client.wait_for_server() + + def execute(self,userdata): + + self.platform = Utils.get_value_of(userdata.goal.parameters, "platform") + self.obj = Utils.get_value_of(userdata.goal.parameters, "peg") + + if self.obj is None: + rospy.logwarn('Missing parameter "object". Using default.') + self.obj = "light" + else: + self.obj = "light" + + # Assigning the goal + + goal = GenericExecuteGoal() + goal.parameters.append(KeyValue(key="platform", value=self.platform)) + goal.parameters.append(KeyValue(key="object", value=self.obj)) + + self.unstage_client.send_goal(goal) + self.unstage_client.wait_for_result(rospy.Duration.from_sec(25.0)) + + return "success" # =============================================================================== def transition_cb(*args, **kwargs): @@ -211,141 +521,127 @@ def main(): output_keys=["feedback", "result"], ) with sm: - # publish object as string to mcr_perception_selectors -> cavity, - # this component then publishes pose in base_link reference frame - smach.StateMachine.add( - "SELECT_OBJECT", - SelectObject("/mcr_perception/cavity_pose_selector/object_name"), - transitions={"succeeded": "CHECK_IF_OBJECT_IS_AVAILABLE"}, - ) - + # set pregrasp planner params smach.StateMachine.add( - "CHECK_IF_OBJECT_IS_AVAILABLE", - gbs.send_and_wait_events_combined( - event_in_list=[ - ("/mcr_perception/cavity_pose_selector/event_in", "e_trigger",) - ], - event_out_list=[ - ( - "/mcr_perception/cavity_pose_selector/event_out", - "e_success", - True, - ) - ], - timeout_duration=10, - ), + "SET_PREGRASP_PARAMS", + gbs.set_named_config("pregrasp_planner_no_sampling"), transitions={ - "success": "SET_DBC_PARAMS", + "success": "PERCEIVE_LOCATION", "timeout": "OVERALL_FAILED", "failure": "OVERALL_FAILED", }, ) + # start perception smach.StateMachine.add( - "SET_DBC_PARAMS", - gbs.set_named_config("dbc_pick_object"), + "PERCEIVE_LOCATION", + gas.perceive_location(obj_category="multimodal_object_recognition_cavity"), transitions={ - "success": "MOVE_ROBOT_AND_TRY_PLACING", - "timeout": "OVERALL_FAILED", - "failure": "OVERALL_FAILED", + "success": "SELECT_CAVITY", + "failed": "OVERALL_FAILED", }, ) smach.StateMachine.add( - "MOVE_ROBOT_AND_TRY_PLACING", - gbs.send_and_wait_events_combined( - event_in_list=[("/wbc/event_in", "e_try")], - event_out_list=[("/wbc/event_out", "e_success", True)], - timeout_duration=50, - ), + "SELECT_CAVITY", + SelectCavity("/mcr_perception/object_selector/input/object_name", vertical=False), transitions={ - "success": "MOVE_ARM_TO_MIDDLE_POSE", - "timeout": "MOVE_ARM_TO_MIDDLE_POSE", - "failure": "MOVE_ARM_TO_MIDDLE_POSE", + "succeeded": "GENERATE_OBJECT_POSE", + "failed": 
"GET_VERTICAL_CAVITY" }, ) smach.StateMachine.add( - "MOVE_ARM_TO_MIDDLE_POSE", - gms.move_arm("ppt_cavity_middle"), + "GET_VERTICAL_CAVITY", + SelectCavity("/mcr_perception/object_selector/input/object_name", vertical=True), transitions={ - "succeeded": "PERCEIVE_CAVITY", - "failed": "MOVE_ARM_TO_MIDDLE_POSE", + "succeeded": "GENERATE_OBJECT_POSE", + "failed": "OVERALL_FAILED" }, ) - # perceive cavity again after moving in front of the desired cavity - smach.StateMachine.add( - "PERCEIVE_CAVITY", - gps.find_cavities(retries=1), - transitions={"cavities_found": "SELECT_OBJECT_AGAIN", "no_cavities_found": "OVERALL_FAILED",}, - ) - - smach.StateMachine.add( - "SELECT_OBJECT_AGAIN", - SelectObject("/mcr_perception/cavity_pose_selector/object_name"), - transitions={"succeeded": "CHECK_IF_OBJECT_IS_AVAILABLE_AGAIN"}, - ) + # generates a pose of object smach.StateMachine.add( - "CHECK_IF_OBJECT_IS_AVAILABLE_AGAIN", + "GENERATE_OBJECT_POSE", gbs.send_and_wait_events_combined( - event_in_list=[ - ("/mcr_perception/cavity_pose_selector/event_in", "e_trigger",) - ], - event_out_list=[ - ( - "/mcr_perception/cavity_pose_selector/event_out", - "e_success", - True, - ) - ], + event_in_list=[("/mcr_perception/object_selector/event_in", "e_trigger")], + event_out_list=[("/mcr_perception/object_selector/event_out", "e_selected", True)], timeout_duration=10, ), transitions={ - "success": "UNSTAGE_OBJECT", + "success": "UNSTAGE_FOR_PLACING", "timeout": "OVERALL_FAILED", "failure": "OVERALL_FAILED", }, ) + # unstage object to place smach.StateMachine.add( - "UNSTAGE_OBJECT", - gas.unstage_object(), - transitions={"success": "TRY_INSERTING", "failed": "OVERALL_FAILED",}, + "UNSTAGE_FOR_PLACING", + Unstage_to_place(), + transitions={ + "success":"SET_DBC_PARAMS", + "failed":"OVERALL_FAILED", + }, ) - # execute robot motion smach.StateMachine.add( - "TRY_INSERTING", + "SET_DBC_PARAMS", + gbs.set_named_config("dbc_pick_object"), + transitions={ + "success": "MOVE_ROBOT_AND_TRY_INSERTING", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "MOVE_ROBOT_AND_TRY_INSERTING", gbs.send_and_wait_events_combined( - event_in_list=[("/wbc/event_in", "e_start_arm_only")], + event_in_list=[("/wbc/event_in", "e_start")], event_out_list=[("/wbc/event_out", "e_success", True)], - timeout_duration=20, + timeout_duration=50, ), transitions={ "success": "OPEN_GRIPPER", - "timeout": "STAGE_OBJECT", - "failure": "STAGE_OBJECT", + "timeout": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + "failure": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", }, ) smach.StateMachine.add( - "STAGE_OBJECT", - gas.stage_object(), - transitions={"success": "OVERALL_FAILED", "failed": "OVERALL_FAILED",}, + "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + gbs.send_event( + [ + ("/waypoint_trajectory_generation/event_in", "e_stop"), + ("/wbc/event_in", "e_stop"), + ] + ), + transitions={"success": "OVERALL_FAILED"}, ) - # close gripper + # open gripper smach.StateMachine.add( "OPEN_GRIPPER", - gms.control_gripper("open"), - transitions={"succeeded": "WIGGLE_ARM"}, + gms.control_gripper(0.2), + transitions={ + "succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP" + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_UP", + MoveArmUp(), + transitions={ + "succeeded": "WIGGLE_ARM", + }, ) # wiggling the arm for precision placement smach.StateMachine.add( "WIGGLE_ARM", - ppt_wiggle_arm(wiggle_offset=-0.12, joint=0), + ppt_wiggle_arm(wiggle_yaw=1.57), transitions={ "succeeded": "MOVE_ARM_TO_HOLD", "failed": "MOVE_ARM_TO_HOLD", 
@@ -355,10 +651,10 @@ def main(): # move arm to HOLD position smach.StateMachine.add( "MOVE_ARM_TO_HOLD", - gms.move_arm("ppt_cavity_middle"), + gms.move_arm("pre_place"), transitions={ "succeeded": "OVERALL_SUCCESS", - "failed": "MOVE_ARM_TO_HOLD", + "failed": "OVERALL_FAILED", }, ) @@ -366,7 +662,7 @@ def main(): sm.register_start_cb(start_cb) # smach viewer - if rospy.get_param("~viewer_enabled", False): + if rospy.get_param("~viewer_enabled", True): sis = IntrospectionServer( "insert_cavity_smach_viewer", sm, "/INSERT_CAVITY_SMACH_VIEWER" ) @@ -391,3 +687,13 @@ def main(): if __name__ == "__main__": main() + + + + + + + + + + diff --git a/mir_planning/mir_actions/mir_insert_object/ros/scripts/insert_object_server.py b/mir_planning/mir_actions/mir_insert_object/ros/scripts/insert_object_server.py index ae5556f05..10d3ca960 100755 --- a/mir_planning/mir_actions/mir_insert_object/ros/scripts/insert_object_server.py +++ b/mir_planning/mir_actions/mir_insert_object/ros/scripts/insert_object_server.py @@ -37,6 +37,7 @@ def execute(self, userdata): ) obj = Utils.get_value_of(userdata.goal.parameters, "hole") + peg = Utils.get_value_of(userdata.goal.parameters, "peg") # peg is the object to be inserted self.publisher.publish(String(data=obj)) rospy.sleep(0.2) # let the topic survive for some time return "succeeded" @@ -72,6 +73,26 @@ def main(): output_keys=["feedback", "result"], ) with sm: + smach.StateMachine.add( + "SET_PREGRASP_PARAMS", + gbs.set_named_config("pregrasp_planner_no_sampling"), + transitions={ + "success": "PERCEIVE_LOCATION", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + # start perception + smach.StateMachine.add( + "PERCEIVE_LOCATION", + gas.perceive_location(obj_category="multimodal_object_recognition_atwork"), + transitions={ + "success": "SELECT_OBJECT", + "failed": "OVERALL_FAILED", + }, + ) + smach.StateMachine.add( "SELECT_OBJECT", SelectObject("/mcr_perception/object_selector/input/object_name"), @@ -136,7 +157,16 @@ def main(): gbs.send_event( [("/mcr_perception/object_selector/event_in", "e_re_trigger")] ), - transitions={"success": "MOVE_ROBOT_AND_PLACE"}, + transitions={"success": "MOVE_ARM_TO_DEFAULT_PLACE_BEFORE_INSERT"}, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_DEFAULT_PLACE_BEFORE_INSERT", + gms.move_arm("look_at_turntable"), + transitions={ + "succeeded": "MOVE_ROBOT_AND_PLACE", + "failed": "MOVE_ROBOT_AND_PLACE", + }, ) # execute robot motion @@ -187,8 +217,9 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER", - gms.control_gripper("open_narrow"), - transitions={"succeeded": "MOVE_ARM_TO_HOLD"}, + gms.control_gripper(0.25), + transitions={"succeeded": "MOVE_ARM_TO_HOLD", + "timeout": "MOVE_ARM_TO_HOLD"}, ) # move arm to HOLD position diff --git a/mir_planning/mir_actions/mir_move_base_safe/ros/scripts/move_base_safe_server.py b/mir_planning/mir_actions/mir_move_base_safe/ros/scripts/move_base_safe_server.py index 97cfe7d8a..25e0e10d0 100755 --- a/mir_planning/mir_actions/mir_move_base_safe/ros/scripts/move_base_safe_server.py +++ b/mir_planning/mir_actions/mir_move_base_safe/ros/scripts/move_base_safe_server.py @@ -258,7 +258,7 @@ def main(): smach.StateMachine.add( "MOVE_ARM_TO_DETECT_BARRIER_TAPE", - gms.move_arm("barrier_tape", blocking=False), + gms.move_arm("platform_middle_pre", use_moveit=True), transitions={"succeeded": "SETUP_MOVE_BASE", "failed": "MOVE_ARM_TO_DETECT_BARRIER_TAPE"}, ) diff --git a/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_client_test.py 
b/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_client_test.py index 5b1aba9e8..4ddaa5987 100755 --- a/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_client_test.py +++ b/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_client_test.py @@ -20,7 +20,7 @@ else: location = "PP01" goal.parameters.append(KeyValue(key="location", value=location)) - goal.parameters.append(KeyValue(key="perception_mode", value="three_view")) + goal.parameters.append(KeyValue(key="perception_mode", value="single_view")) rospy.loginfo("Sending following goal to perceive cavity server") rospy.loginfo(goal) diff --git a/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_server.py b/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_server.py index 1669d443d..dde1a0d77 100755 --- a/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_server.py +++ b/mir_planning/mir_actions/mir_perceive_cavity/ros/scripts/perceive_cavity_server.py @@ -63,7 +63,7 @@ def execute(self, userdata): # 2. single_view mode - only perceiving in one direction perception_mode = Utils.get_value_of(userdata.goal.parameters, "perception_mode") if perception_mode is not None and perception_mode == "single_view": - userdata.arm_pose_list = ["ppt_cavity_middle"] + userdata.arm_pose_list = ["pre_place"] else: userdata.arm_pose_list = [ "ppt_cavity_middle", diff --git a/mir_planning/mir_actions/mir_perceive_location/ros/scripts/perceive_location_server.py b/mir_planning/mir_actions/mir_perceive_location/ros/scripts/perceive_location_server.py index d7062b208..5835175ea 100755 --- a/mir_planning/mir_actions/mir_perceive_location/ros/scripts/perceive_location_server.py +++ b/mir_planning/mir_actions/mir_perceive_location/ros/scripts/perceive_location_server.py @@ -44,6 +44,7 @@ def get_robot_pose(self): return None def execute(self, userdata): + print("[percieve] userdata goal: ", str(userdata.goal.parameters)) target_location = Utils.get_value_of(userdata.goal.parameters, 'location') if target_location is not None: target_pose = Utils.get_pose_from_param_server(target_location) @@ -199,6 +200,31 @@ def execute(self, userdata): # =============================================================================== +class SetPerceptionParams(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["success", "failure"], + input_keys=["goal"], + ) + self.set_named_config = gbs.set_named_config("multimodal_object_recognition_atwork") + + def execute(self, userdata): + obj_category = Utils.get_value_of(userdata.goal.parameters, "obj_category") + + rospy.loginfo("=============[PERCEIVE_LOCATION] obj_category: %s", obj_category) + + if obj_category: + rospy.loginfo("=============[PERCEIVE_LOCATION] [IFFFF] obj_category: %s", obj_category) + self.set_named_config.execute(userdata, obj_category) + return "success" + else: + rospy.loginfo("=============[PERCEIVE_LOCATION] [ELSEEEE] obj_category: %s", obj_category) + self.set_named_config.execute(userdata) + return "success" + +# =============================================================================== + def transition_cb(*args, **kwargs): userdata = args[0] sm_state = args[1][0] @@ -228,7 +254,7 @@ def main(): ) # Open the container sm.userdata.arm_pose_list = [ - "look_at_workspace_from_near", + "platform_middle_pre", ] sm.userdata.arm_pose_index = 0 @@ -244,11 +270,22 @@ def main(): sm.userdata.base_pose_index = 0 with sm: - # approach to platform + + # 
set atwork model + smach.StateMachine.add( + "SET_PERCEPTION_PARAMS", + SetPerceptionParams(), + transitions={ + "success": "SETUP", + "failure": "OVERALL_FAILED", + }, + ) + smach.StateMachine.add( "SETUP", Setup(), transitions={"succeeded": "CHECK_IF_BASE_IS_AT_LOCATION"}, ) + smach.StateMachine.add( "CHECK_IF_BASE_IS_AT_LOCATION", CheckIfBaseCentered(), @@ -356,7 +393,7 @@ def main(): # move arm to appropriate position smach.StateMachine.add( "MOVE_ARM_TO_PERCEIVE_POSE", - gms.move_arm_and_gripper("open"), + gms.move_arm_and_gripper("open", use_moveit=False), transitions={ "succeeded": "WAIT_FOR_ARM_TO_STABILIZE", "failed": "MOVE_ARM_TO_PERCEIVE_POSE", diff --git a/mir_planning/mir_actions/mir_pick_from_shelf/ros/launch/pick_from_shelf.launch b/mir_planning/mir_actions/mir_pick_from_shelf/ros/launch/pick_from_shelf.launch index 42b673244..0214946b9 100644 --- a/mir_planning/mir_actions/mir_pick_from_shelf/ros/launch/pick_from_shelf.launch +++ b/mir_planning/mir_actions/mir_pick_from_shelf/ros/launch/pick_from_shelf.launch @@ -11,11 +11,11 @@ - + - + @@ -23,6 +23,9 @@ + + + @@ -33,6 +36,7 @@ +
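The SetPerceptionParams state introduced above reads an optional "obj_category" goal parameter and applies the matching named perception configuration, falling back to the default atwork model when the parameter is absent. A minimal client-side sketch of how such a goal could be assembled, modelled on the client test scripts elsewhere in this PR; the server name, workstation, and category value here are assumptions for illustration, not taken from the source:

#!/usr/bin/env python
# Hedged sketch: send a perceive-location goal carrying an obj_category parameter.
import rospy
from actionlib import SimpleActionClient
from diagnostic_msgs.msg import KeyValue
from mir_planning_msgs.msg import GenericExecuteAction, GenericExecuteGoal

if __name__ == "__main__":
    rospy.init_node("perceive_location_client_example")

    # server name assumed; adjust to the actual action server namespace
    client = SimpleActionClient("perceive_location_server", GenericExecuteAction)
    client.wait_for_server()

    goal = GenericExecuteGoal()
    goal.parameters.append(KeyValue(key="location", value="WS01"))
    # selects the named config that SetPerceptionParams applies before perceiving
    goal.parameters.append(
        KeyValue(key="obj_category", value="multimodal_object_recognition_cavity")
    )

    client.send_goal(goal)
    client.wait_for_result(rospy.Duration.from_sec(30.0))
    rospy.loginfo(client.get_result())

Routing the category through a goal parameter keeps a single perceive-location server usable for both regular object perception and cavity perception, which is how the insert-cavity server above invokes it via gas.perceive_location(obj_category=...).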
diff --git a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_client b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_client index f14fe2ed3..87648a106 100755 --- a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_client +++ b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_client @@ -18,12 +18,12 @@ if __name__ == "__main__": goal = GenericExecuteGoal() obj = str(sys.argv[1]).upper() goal.parameters.append(KeyValue(key="object", value=obj)) - rospy.loginfo("Sending following goal to place object server") + rospy.loginfo("Sending following goal to pick from shelf object server") rospy.loginfo(goal) client.send_goal(goal) - timeout = 15.0 + timeout = 50.0 finished_within_time = client.wait_for_result( rospy.Duration.from_sec(int(timeout)) ) diff --git a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_server b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_server index 8f45fa808..1c8ce3421 100755 --- a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_server +++ b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_server @@ -3,12 +3,13 @@ from __future__ import print_function import sys +import copy import mcr_states.common.basic_states as gbs import mir_states.common.manipulation_states as gms import rospy import smach -from geometry_msgs.msg import PoseStamped +from geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped from mir_actions.utils import Utils from mir_planning_msgs.msg import ( GenericExecuteAction, @@ -17,12 +18,12 @@ from mir_planning_msgs.msg import ( ) from smach_ros import ActionServerWrapper, IntrospectionServer from std_msgs.msg import String - +from tf.transformations import quaternion_from_euler, euler_from_quaternion +import tf from pick_from_shelf_utils import PickFromShelfUtils - +import math + # =============================================================================== - - class SelectObject(smach.State): def __init__(self, topic_name): smach.State.__init__( @@ -46,12 +47,10 @@ class SelectObject(smach.State): rospy.sleep(0.2) # let the topic to survive for some time return "succeeded" - # =============================================================================== - class SendPoseToDBC(smach.State): - def __init__(self, retract=False): + def __init__(self, retract=False, adjust_pose=False): smach.State.__init__(self, outcomes=["succeeded"]) self._dbc_pose_pub = rospy.Publisher( "/mcr_navigation/direct_base_controller/input_pose", @@ -63,27 +62,90 @@ class SendPoseToDBC(smach.State): PoseStamped, self._obj_pose_cb, ) + self._obj_pose_pub = rospy.Publisher( + "mcr_perception/object_selector/output/object_pose", + PoseStamped, + queue_size=10, + ) self._obj_pose = None self.retract = retract + self.adjust_pose = adjust_pose + self.listener = tf.TransformListener() self.utils = PickFromShelfUtils() rospy.sleep(0.1) # time for the publisher to register in ros network def _obj_pose_cb(self, msg): - self._obj_pose = msg + self._obj_pose = copy.deepcopy(msg) + self._new_obj_pose = copy.deepcopy(msg) + + def angle_diff(self, angle1, angle2): + return abs(math.atan2(math.sin(angle1-angle2), math.cos(angle1-angle2))) def execute(self, userdata): if self.retract: - dbc_pose = self.utils.get_retracted_dbc_pose() - else: + # get tf of base_link + tf_msg = self.listener.lookupTransform("/base_link_static", "/base_link", rospy.Time(0)) + # get the pose of the object in 
base_link frame + pose_msg = PoseStamped() + pose_msg.header.frame_id = "base_link_static" + pose_msg.header.stamp = rospy.Time.now() + # amount to move backward after pick object + pose_msg.pose.position.x = tf_msg[0][0] - 0.175 + pose_msg.pose.position.y = tf_msg[0][1] + pose_msg.pose.position.z = tf_msg[0][2] + pose_msg.pose.orientation.x = tf_msg[1][0] + pose_msg.pose.orientation.y = tf_msg[1][1] + pose_msg.pose.orientation.z = tf_msg[1][2] + pose_msg.pose.orientation.w = tf_msg[1][3] + dbc_pose = pose_msg + #dbc_pose = self.utils.get_retracted_dbc_pose() + elif self.adjust_pose: + self._obj_pose.pose.position.y -= 0.1 + self._obj_pose.pose.position.x += 0.05 dbc_pose = self.utils.calc_pose_for_dbc(self._obj_pose) + + self._new_obj_pose.pose.position.x -= 0.1 + # modify the orientation of the object pose + pose_rpy = euler_from_quaternion( + [ + self._new_obj_pose.pose.orientation.x, + self._new_obj_pose.pose.orientation.y, + self._new_obj_pose.pose.orientation.z, + self._new_obj_pose.pose.orientation.w, + ] + ) + # if angle diff is less than 45 + if self.angle_diff(pose_rpy[2], math.pi/2) < 0.785: + # set the orientation to 90 + self._new_obj_pose.pose.orientation = Quaternion(*quaternion_from_euler(0, 0, 1.5708)) + elif self.angle_diff(pose_rpy[2], -math.pi/2) < 0.785: + # set the orientation to -90 + self._new_obj_pose.pose.orientation = Quaternion(*quaternion_from_euler(0, 0, -1.5708)) + self._obj_pose_pub.publish(self._new_obj_pose) + else: + # print(f'obj pose: {self._obj_pose}') + # get tf of base_link + tf_msg = self.listener.lookupTransform("/base_link_static", "/base_link", rospy.Time(0)) + # get the pose of the object in base_link frame + pose_msg = PoseStamped() + pose_msg.header.frame_id = "base_link_static" + pose_msg.header.stamp = rospy.Time.now() + # amount to move forward to pick object + pose_msg.pose.position.x = tf_msg[0][0] + 0.127 + pose_msg.pose.position.y = tf_msg[0][1] + pose_msg.pose.position.z = tf_msg[0][2] + pose_msg.pose.orientation.x = tf_msg[1][0] + pose_msg.pose.orientation.y = tf_msg[1][1] + pose_msg.pose.orientation.z = tf_msg[1][2] + pose_msg.pose.orientation.w = tf_msg[1][3] + dbc_pose = pose_msg + # dbc_pose = self.utils.calc_pose_for_dbc(pose_msg) self._dbc_pose_pub.publish(dbc_pose) self._obj_pose = None return "succeeded" - # =============================================================================== - class SendPoseToMoveIt(smach.State): def __init__(self, modification_name="intermediate"): smach.State.__init__(self, outcomes=["succeeded"]) @@ -101,7 +163,7 @@ class SendPoseToMoveIt(smach.State): rospy.sleep(0.1) # time for the publisher to register in ros network def _obj_pose_cb(self, msg): - self._obj_pose = msg + self._obj_pose = copy.deepcopy(msg) def execute(self, userdata): modified_obj_pose = self.utils.get_arm_pose( @@ -113,9 +175,34 @@ class SendPoseToMoveIt(smach.State): self._obj_pose = None return "succeeded" - # =============================================================================== +class MoveArmUp(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + # velocity publisher + self.arm_velocity_pub = rospy.Publisher("/arm_1/arm_controller/cartesian_velocity_command", TwistStamped, queue_size=10) + + def execute(self, userdata): + + # send the velocity in +z direction wrt base_link to move the arm up + vel_msg = TwistStamped() + vel_msg.header.frame_id = "base_link" + # set velocity to 5cm/s + vel_msg.twist.linear.z 
= 0.05 + self.arm_velocity_pub.publish(vel_msg) + rospy.sleep(0.5) + # stop the arm + vel_msg.twist.linear.z = 0 + self.arm_velocity_pub.publish(vel_msg) + return "succeeded" + +# =============================================================================== def main(): # Open the container @@ -165,7 +252,7 @@ def main(): smach.StateMachine.add( "SEND_DBC_POSE", - SendPoseToDBC(retract=False), + SendPoseToDBC(retract=False, adjust_pose=True), transitions={"succeeded": "MOVE_BASE_USING_DBC"}, ) @@ -189,102 +276,98 @@ def main(): timeout_duration=10, ), transitions={ - "success": "MOVE_ARM_TO_PRE_GRASP", + "success": "OPEN_GRIPPER", "timeout": "OVERALL_FAILED", "failure": "OVERALL_FAILED", }, ) + smach.StateMachine.add( + "OPEN_GRIPPER", + gms.control_gripper(-1.4), #range -1.5 to 1, -1.5 is wide open, 0 is open, 1 is closed + transitions={"succeeded": "MOVE_ROBOT_AND_PICK", + "timeout": "MOVE_ROBOT_AND_PICK"}, + ) + smach.StateMachine.add( "MOVE_ARM_TO_PRE_GRASP", gms.move_arm("shelf_pre_grasp_lower"), transitions={ - "succeeded": "OPEN_GRIPPER", + "succeeded": "MOVE_ROBOT_AND_PICK", "failed": "MOVE_ARM_TO_PRE_GRASP", }, ) smach.StateMachine.add( - "OPEN_GRIPPER", - gms.control_gripper("open_rtt"), - transitions={"succeeded": "SEND_MOVEIT_POSE_INTERMEDIATE"}, - ) - - smach.StateMachine.add( - "SEND_MOVEIT_POSE_INTERMEDIATE", - SendPoseToMoveIt(modification_name="intermediate"), - transitions={"succeeded": "MOVE_ARM_USING_MOVEIT_INTERMEDIATE"}, - ) - - # Move arm using mir_moveit_client to an intermediate pose before obj - smach.StateMachine.add( - "MOVE_ARM_USING_MOVEIT_INTERMEDIATE", + "MOVE_ROBOT_AND_PICK", gbs.send_and_wait_events_combined( - event_in_list=[("arm_moveit_client/event_in", "e_start")], - event_out_list=[("arm_moveit_client/event_out", "e_success", True)], - timeout_duration=10, + event_in_list=[("/wbc/event_in", "e_start")], + event_out_list=[("/wbc/event_out", "e_success", True)], + timeout_duration=50, ), transitions={ - "success": "SEND_MOVEIT_POSE", - "timeout": "MOVE_ARM_TO_PRE_GRASP_FAILED", - "failure": "MOVE_ARM_TO_PRE_GRASP_FAILED", + "success": "MOVE_FRONT_TO_PICK", + "timeout": "MOVE_ARM_TO_PRE_PLACE", + "failure": "MOVE_ARM_TO_PRE_PLACE", }, ) + # move front to pick smach.StateMachine.add( - "SEND_MOVEIT_POSE", - SendPoseToMoveIt(modification_name="object"), - transitions={"succeeded": "MOVE_ARM_USING_MOVEIT"}, + "MOVE_FRONT_TO_PICK", + SendPoseToDBC(retract=False), + transitions={"succeeded": "MOVE_BASE_FRONT_USING_DBC"}, ) - # Move arm using mir_moveit_client to obj pose + # Move base using direct base controller smach.StateMachine.add( - "MOVE_ARM_USING_MOVEIT", + "MOVE_BASE_FRONT_USING_DBC", gbs.send_and_wait_events_combined( - event_in_list=[("arm_moveit_client/event_in", "e_start")], - event_out_list=[("arm_moveit_client/event_out", "e_success", True)], + event_in_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_in", + "e_start", + ) + ], + event_out_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_out", + "e_success", + True, + ) + ], timeout_duration=10, ), transitions={ "success": "CLOSE_GRIPPER", - "timeout": "MOVE_ARM_TO_PRE_GRASP_FAILED", - "failure": "MOVE_ARM_TO_PRE_GRASP_FAILED", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", }, ) smach.StateMachine.add( "CLOSE_GRIPPER", gms.control_gripper("close"), - transitions={"succeeded": "SEND_MOVEIT_POSE_INTERMEDIATE_POST"}, + transitions={"succeeded": "VERIFY_OBJECT_GRASPED", + "timeout": "VERIFY_OBJECT_GRASPED"}, ) smach.StateMachine.add( - 
"SEND_MOVEIT_POSE_INTERMEDIATE_POST", - SendPoseToMoveIt(modification_name="intermediate_post"), - transitions={"succeeded": "MOVE_ARM_USING_MOVEIT_INTERMEDIATE_POST"}, - ) - - # Move arm using mir_moveit_client to an intermediate pose before obj - smach.StateMachine.add( - "MOVE_ARM_USING_MOVEIT_INTERMEDIATE_POST", - gbs.send_and_wait_events_combined( - event_in_list=[("arm_moveit_client/event_in", "e_start")], - event_out_list=[("arm_moveit_client/event_out", "e_success", True)], - timeout_duration=10, - ), + "VERIFY_OBJECT_GRASPED", + gms.verify_object_grasped(3), transitions={ - "success": "MOVE_ARM_TO_PRE_GRASP_RETRACT", - "timeout": "MOVE_ARM_TO_PRE_GRASP_FAILED", - "failure": "MOVE_ARM_TO_PRE_GRASP_FAILED", + "succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP", + "failed": "SEND_DBC_POSE_RETRACT", }, ) + # move arm up smach.StateMachine.add( - "MOVE_ARM_TO_PRE_GRASP_RETRACT", - gms.move_arm("shelf_pre_grasp_lower"), + "MOVE_ARM_UP", + MoveArmUp(), transitions={ - "succeeded": "SEND_DBC_POSE_RETRACT", - "failed": "MOVE_ARM_TO_PRE_GRASP_RETRACT", + "succeeded": "SEND_DBC_POSE_RETRACT" }, ) @@ -314,34 +397,25 @@ def main(): timeout_duration=10, ), transitions={ - "success": "MOVE_ARM_TO_INTERMEDIATE", - "timeout": "MOVE_ARM_TO_INTERMEDIATE", - "failure": "MOVE_ARM_TO_INTERMEDIATE", - }, - ) - - smach.StateMachine.add( - "MOVE_ARM_TO_PRE_GRASP_FAILED", - gms.move_arm("shelf_pre_grasp_lower"), - transitions={ - "succeeded": "OVERALL_FAILED", - "failed": "MOVE_ARM_TO_PRE_GRASP_FAILED", + "success": "MOVE_ARM_TO_PRE_PLACE", + "timeout": "MOVE_ARM_TO_PRE_PLACE", + "failure": "MOVE_ARM_TO_PRE_PLACE", }, ) smach.StateMachine.add( - "MOVE_ARM_TO_INTERMEDIATE", - gms.move_arm("stage_intermediate"), + "MOVE_ARM_TO_PRE_PLACE", # next action will be staging the object not placing it the pose name is pre-place commonly used everywhere + gms.move_arm("pre_place"), transitions={ "succeeded": "OVERALL_SUCCESS", - "failed": "MOVE_ARM_TO_INTERMEDIATE", + "failed": "MOVE_ARM_TO_PRE_PLACE", }, ) # smach viewer - if rospy.get_param("~viewer_enabled", False): + if rospy.get_param("~viewer_enabled", True): sis = IntrospectionServer( - "pick_from_shelf_viewer", sm, "/PICK_OBJECT_SMACH_VIEWER" + "pick_from_shelf_viewer", sm, "/PICK_FROM_SHELF_SMACH_VIEWER" ) sis.start() diff --git a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_utils.py b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_utils.py index 438c68632..49ee22b90 100644 --- a/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_utils.py +++ b/mir_planning/mir_actions/mir_pick_from_shelf/ros/scripts/pick_from_shelf_utils.py @@ -95,7 +95,7 @@ def get_retracted_dbc_pose(self): dbc_target_pose = PoseStamped() dbc_target_pose.header.stamp = rospy.Time.now() dbc_target_pose.header.frame_id = self.frame_id - dbc_target_pose.pose.position.x = self.retract_base_pose_x + dbc_target_pose.pose.position.x = self.retract_base_pose_x - 0.05 dbc_target_pose.pose.position.y = 0.0 dbc_target_pose.pose.orientation.w = 1.0 return dbc_target_pose diff --git a/mir_planning/mir_actions/mir_pick_object/ros/launch/pick_object_server.launch b/mir_planning/mir_actions/mir_pick_object/ros/launch/pick_object_server.launch index 668847e6f..e6feea8de 100644 --- a/mir_planning/mir_actions/mir_pick_object/ros/launch/pick_object_server.launch +++ b/mir_planning/mir_actions/mir_pick_object/ros/launch/pick_object_server.launch @@ -6,6 +6,8 @@ + + @@ -14,6 +16,9 @@ $(arg rollable_objects) + + $(arg 
drag_pick_objects) + diff --git a/mir_planning/mir_actions/mir_pick_object/ros/launch/tc_pick_object_server.launch b/mir_planning/mir_actions/mir_pick_object/ros/launch/tc_pick_object_server.launch new file mode 100644 index 000000000..70c40df7a --- /dev/null +++ b/mir_planning/mir_actions/mir_pick_object/ros/launch/tc_pick_object_server.launch @@ -0,0 +1,8 @@ + + + + + + + diff --git a/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_client_test.py b/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_client_test.py index 615ba9cfd..1db24e3cc 100755 --- a/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_client_test.py +++ b/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_client_test.py @@ -20,12 +20,12 @@ location = str(sys.argv[2]).upper() goal.parameters.append(KeyValue(key="object", value=obj)) goal.parameters.append(KeyValue(key="location", value=location)) - rospy.loginfo("Sending following goal to place object server") + rospy.loginfo("Sending following goal to pick object server") rospy.loginfo(goal) client.send_goal(goal) - timeout = 15.0 + timeout = 25.0 finished_within_time = client.wait_for_result( rospy.Duration.from_sec(int(timeout)) ) diff --git a/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_server.py b/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_server.py index 7a0b5c62f..76cb3224f 100755 --- a/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_server.py +++ b/mir_planning/mir_actions/mir_pick_object/ros/scripts/pick_object_server.py @@ -11,9 +11,16 @@ GenericExecuteAction, GenericExecuteFeedback, GenericExecuteResult, + GenericExecuteGoal + ) from smach_ros import ActionServerWrapper, IntrospectionServer from std_msgs.msg import String +from geometry_msgs.msg import TwistStamped + +from actionlib import SimpleActionClient +from actionlib_msgs.msg import GoalStatus +from diagnostic_msgs.msg import KeyValue # =============================================================================== @@ -62,6 +69,27 @@ def execute(self, userdata): return "large" return "small" +# =============================================================================== + +class ShouldDragPick(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["yes", "no"], + input_keys=["goal", "drag_pick_objects"], + output_keys=[], + ) + + def execute(self, userdata): + obj = Utils.get_value_of(userdata.goal.parameters, "object") + if obj is None: + rospy.logwarn('Missing parameter "object". 
Using default.') + return "no" + for object_to_drag in userdata.drag_pick_objects: + if object_to_drag.upper() in obj.upper(): + # return "yes" + return "no" # until tested + return "no" # =============================================================================== @@ -82,6 +110,32 @@ def execute(self, userdata): # =============================================================================== +class MoveArmUp(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + # velocity publisher + self.arm_velocity_pub = rospy.Publisher("/arm_1/arm_controller/cartesian_velocity_command", TwistStamped, queue_size=1) + + def execute(self, userdata): + + # send the velocity in +z direction wrt base_link to move the arm up + vel_msg = TwistStamped() + vel_msg.header.frame_id = "base_link" + # set velocity to 5cm/s + vel_msg.twist.linear.z = 0.04 + self.arm_velocity_pub.publish(vel_msg) + rospy.sleep(1.5) + # stop the arm + vel_msg.twist.linear.z = 0 + self.arm_velocity_pub.publish(vel_msg) + return "succeeded" + +# =============================================================================== def transition_cb(*args, **kwargs): userdata = args[0] sm_state = args[1][0] @@ -111,9 +165,20 @@ def main(): # read large object list sm.userdata.large_objects = rospy.get_param("~large_objects", ["S40_40_B", "S40_40_G", "M30", "BEARING_BOX", "MOTOR"]) - sm.userdata.reperceive = rospy.get_param("~reperceive", True) + sm.userdata.drag_pick_objects = rospy.get_param("~drag_pick_objects", ["ALLEN_KEY","WRENCH"]) + sm.userdata.reperceive = rospy.get_param("~reperceive", True) with sm: + smach.StateMachine.add( + "SET_PREGRASP_PARAMS", + gbs.set_named_config("pregrasp_planner_no_sampling"), + transitions={ + "success": "SELECT_OBJECT", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + smach.StateMachine.add( "SELECT_OBJECT", SelectObject("/mcr_perception/object_selector/input/object_name"), @@ -177,13 +242,14 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER_FOR_REPERCEIVE", gms.control_gripper("open"), - transitions={"succeeded": "MOVE_ARM"}, + transitions={"succeeded": "MOVE_ARM", + "timeout": "MOVE_ARM"}, ) # move arm to appropriate position smach.StateMachine.add( "MOVE_ARM", - gms.move_arm("look_at_workspace_from_near"), + gms.move_arm("pre_place", use_moveit=False), transitions={ "succeeded": "START_OBJECT_LIST_MERGER", "failed": "MOVE_ARM", @@ -198,21 +264,12 @@ def main(): timeout_duration=5, ), transitions={ - "success": "WAIT_FOR_ARM_TO_STABILIZE", + "success": "START_OBJECT_RECOGNITION", "timeout": "TRY_PICKING", "failure": "TRY_PICKING", }, ) - smach.StateMachine.add( - "WAIT_FOR_ARM_TO_STABILIZE", - mir_gbs.wait_for(0.5), - transitions={ - "succeeded": "START_OBJECT_RECOGNITION", - }, - ) - - # New perception pipeline state machine smach.StateMachine.add( "START_OBJECT_RECOGNITION", @@ -318,13 +375,15 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER_WIDE_LOCAL", gms.control_gripper("open"), - transitions={"succeeded": "TRY_PICKING"}, + transitions={"succeeded": "TRY_PICKING", + "timeout": "TRY_PICKING"}, ) smach.StateMachine.add( "OPEN_GRIPPER_NARROW_LOCAL", gms.control_gripper("open_narrow"), - transitions={"succeeded": "TRY_PICKING"}, + transitions={"succeeded": "TRY_PICKING", + "timeout": "TRY_PICKING"}, ) @@ -356,13 +415,15 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER_WIDE", gms.control_gripper("open"), - transitions={"succeeded": "MOVE_ROBOT_AND_PICK"}, + 
transitions={"succeeded": "MOVE_ROBOT_AND_PICK", + "timeout": "MOVE_ROBOT_AND_PICK"}, ) smach.StateMachine.add( "OPEN_GRIPPER_NARROW", gms.control_gripper("open_narrow"), - transitions={"succeeded": "MOVE_ROBOT_AND_PICK"}, + transitions={"succeeded": "MOVE_ROBOT_AND_PICK", + "timeout": "MOVE_ROBOT_AND_PICK"}, ) @@ -396,25 +457,75 @@ def main(): smach.StateMachine.add( "CLOSE_GRIPPER", gms.control_gripper("close"), - transitions={"succeeded": "MOVE_ARM_TO_STAGE_INTERMEDIATE"}, + transitions={"succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP"}, ) - # move arm to stage_intemediate position + smach.StateMachine.add( - "MOVE_ARM_TO_STAGE_INTERMEDIATE", - gms.move_arm("stage_intermediate"), + "CHECK_IF_OBJECT_SHOULD_BE_DRAGGED", + ShouldDragPick(), + transitions={"yes":"DRAG_PICK", + "no":"MOVE_ARM_TO_PRE_PLACE"} + ) + + smach.StateMachine.add( + "DRAG_PICK", + gbs.send_and_wait_events_combined( + event_in_list=[("/wbc/event_in", "e_start_drag")], + event_out_list=[("/wbc/event_out", "e_success", True)], + timeout_duration=50, + ), transitions={ - "succeeded": "VERIFY_OBJECT_GRASPED", - "failed": "MOVE_ARM_TO_STAGE_INTERMEDIATE", + "success": "MOVE_ARM_TO_PRE_PLACE", + "timeout": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + "failure": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + }, + ) + + # move up 5 cm and then verify + + smach.StateMachine.add( + "MOVE_ARM_UP", + MoveArmUp(), + transitions={ + "succeeded": "VERIFY_OBJECT_GRASPED" }, ) smach.StateMachine.add( "VERIFY_OBJECT_GRASPED", - gms.verify_object_grasped(5), + gms.verify_object_grasped(3), + transitions={ + "succeeded": "MOVE_ARM_TO_PRE_PLACE", + "timeout": "OPEN_GRASP_FAILURE", + "failed": "OPEN_GRASP_FAILURE", + }, + ) + + smach.StateMachine.add( + "OPEN_GRASP_FAILURE", + gms.control_gripper("open"), + transitions={"succeeded": "MOVE_TO_PRE_PLACE_AND_FAIL", + "timeout": "MOVE_TO_PRE_PLACE_AND_FAIL"}, + ) + + smach.StateMachine.add( + "MOVE_TO_PRE_PLACE_AND_FAIL", + gms.move_arm("pre_place", use_moveit=True), + transitions={ + "succeeded": "OVERALL_FAILED", + "failed": "MOVE_TO_PRE_PLACE_AND_FAIL", + }, + ) + + + smach.StateMachine.add( + "MOVE_ARM_TO_PRE_PLACE", + gms.move_arm("pre_place", use_moveit=True), transitions={ "succeeded": "OVERALL_SUCCESS", - "failed": "OVERALL_FAILED", + "failed": "MOVE_ARM_TO_PRE_PLACE", }, ) @@ -423,7 +534,7 @@ def main(): # smach viewer - if rospy.get_param("~viewer_enabled", False): + if rospy.get_param("~viewer_enabled", True): sis = IntrospectionServer( "pick_object_smach_viewer", sm, "/PICK_OBJECT_SMACH_VIEWER" ) diff --git a/mir_planning/mir_actions/mir_pick_object/ros/scripts/tc_pick_object_server.py b/mir_planning/mir_actions/mir_pick_object/ros/scripts/tc_pick_object_server.py new file mode 100755 index 000000000..1c05c9ea2 --- /dev/null +++ b/mir_planning/mir_actions/mir_pick_object/ros/scripts/tc_pick_object_server.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +import sys + +import mcr_states.common.basic_states as gbs +import mir_states.common.manipulation_states as gms +import mir_states.common.basic_states as mir_gbs +import rospy +import smach +from mir_actions.utils import Utils +from mir_planning_msgs.msg import ( + GenericExecuteAction, + GenericExecuteFeedback, + GenericExecuteResult, + GenericExecuteGoal + +) +from smach_ros import ActionServerWrapper, IntrospectionServer +from std_msgs.msg import String +from geometry_msgs.msg import TwistStamped + +from actionlib import SimpleActionClient +from actionlib_msgs.msg import GoalStatus +from diagnostic_msgs.msg import KeyValue + +# 
=============================================================================== + +class MoveArmUp(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + # velocity publisher + self.arm_velocity_pub = rospy.Publisher("/arm_1/arm_controller/cartesian_velocity_command", TwistStamped, queue_size=1) + + def execute(self, userdata): + + # send the velocity in +z direction wrt base_link to move the arm up + vel_msg = TwistStamped() + vel_msg.header.frame_id = "base_link" + # set velocity to 5cm/s + vel_msg.twist.linear.z = 0.04 + self.arm_velocity_pub.publish(vel_msg) + rospy.sleep(1.5) + # stop the arm + vel_msg.twist.linear.z = 0 + self.arm_velocity_pub.publish(vel_msg) + return "succeeded" + +# =============================================================================== +def transition_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback + +def start_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback +# =============================================================================== + +def main(): + # Open the container + rospy.init_node("tc_pick_object_wbc_server") + # Construct state machine + sm = smach.StateMachine( + outcomes=["OVERALL_SUCCESS", "OVERALL_FAILED"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + + with sm: + smach.StateMachine.add( + "SET_PREGRASP_PARAMS", + gbs.set_named_config("pregrasp_planner_no_sampling"), + transitions={ + "success": "SET_DBC_PARAMS", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "SET_DBC_PARAMS", + gbs.set_named_config("dbc_pick_object"), + transitions={ + "success": "MOVE_ROBOT_AND_PICK", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + # whole body control command. 
It moves direct base controller and + # calls pre-grasp planner, and (optionally) moves arm to object pose + smach.StateMachine.add( + "MOVE_ROBOT_AND_PICK", + gbs.send_and_wait_events_combined( + event_in_list=[("/wbc/event_in", "e_start")], + event_out_list=[("/wbc/event_out", "e_success", True)], + timeout_duration=50, + ), + transitions={ + "success": "CLOSE_GRIPPER", + "timeout": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + "failure": "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + }, + ) + + smach.StateMachine.add( + "STOP_MOVE_ROBOT_TO_OBJECT_WITH_FAILURE", + gbs.send_event( + [ + ("/waypoint_trajectory_generation/event_in", "e_stop"), + ("/wbc/event_in", "e_stop"), + ] + ), + transitions={"success": "OVERALL_FAILED"}, + ) + + smach.StateMachine.add( + "CLOSE_GRIPPER", + gms.control_gripper("close"), + transitions={"succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP"}, + ) + + # move up 5 cm and then verify + smach.StateMachine.add( + "MOVE_ARM_UP", + MoveArmUp(), + transitions={ + "succeeded": "VERIFY_OBJECT_GRASPED" + }, + ) + + smach.StateMachine.add( + "VERIFY_OBJECT_GRASPED", + gms.verify_object_grasped(3), + transitions={ + "succeeded": "MOVE_ARM_TO_PRE_PLACE", + "timeout": "OPEN_GRASP_FAILURE", + "failed": "OPEN_GRASP_FAILURE", + }, + ) + + smach.StateMachine.add( + "OPEN_GRASP_FAILURE", + gms.control_gripper("open"), + transitions={"succeeded": "MOVE_TO_PRE_PLACE_AND_FAIL", + "timeout": "MOVE_TO_PRE_PLACE_AND_FAIL"}, + ) + + smach.StateMachine.add( + "MOVE_TO_PRE_PLACE_AND_FAIL", + gms.move_arm("pre_place", use_moveit=True), + transitions={ + "succeeded": "OVERALL_FAILED", + "failed": "MOVE_TO_PRE_PLACE_AND_FAIL", + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_PRE_PLACE", + gms.move_arm("pre_place", use_moveit=True), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "failed": "MOVE_ARM_TO_PRE_PLACE", + }, + ) + + sm.register_transition_cb(transition_cb) + sm.register_start_cb(start_cb) + + # smach viewer + if rospy.get_param("~viewer_enabled", True): + sis = IntrospectionServer( + "tc_pick_object_smach_viewer", sm, "/TC_PICK_OBJECT_SMACH_VIEWER" + ) + sis.start() + + # Construct action server wrapper + asw = ActionServerWrapper( + server_name="tc_wbc_pick_object_server", + action_spec=GenericExecuteAction, + wrapped_container=sm, + succeeded_outcomes=["OVERALL_SUCCESS"], + aborted_outcomes=["OVERALL_FAILED"], + preempted_outcomes=["PREEMPTED"], + goal_key="goal", + feedback_key="feedback", + result_key="result", + ) + # Run the server in a background thread + asw.run_server() + rospy.spin() + + +if __name__ == "__main__": + main() diff --git a/mir_planning/mir_actions/mir_place_object/ros/launch/place_object_server.launch b/mir_planning/mir_actions/mir_place_object/ros/launch/place_object_server.launch index e6eb13828..57e9e3564 100644 --- a/mir_planning/mir_actions/mir_place_object/ros/launch/place_object_server.launch +++ b/mir_planning/mir_actions/mir_place_object/ros/launch/place_object_server.launch @@ -2,12 +2,14 @@ - - - - $(arg max_allowed_tries) - + + + + $(arg max_allowed_tries) + + + + diff --git a/mir_planning/mir_actions/mir_place_object/ros/launch/shelf_place_pose.launch b/mir_planning/mir_actions/mir_place_object/ros/launch/shelf_place_pose.launch new file mode 100644 index 000000000..524167bae --- /dev/null +++ b/mir_planning/mir_actions/mir_place_object/ros/launch/shelf_place_pose.launch @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/mir_planning/mir_actions/mir_place_object/ros/scripts/.place_object_server.py.swp 
b/mir_planning/mir_actions/mir_place_object/ros/scripts/.place_object_server.py.swp new file mode 100644 index 000000000..46937ee54 Binary files /dev/null and b/mir_planning/mir_actions/mir_place_object/ros/scripts/.place_object_server.py.swp differ diff --git a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_client_test.py b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_client_test.py index f6fd889bc..c83ff1e0d 100755 --- a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_client_test.py +++ b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_client_test.py @@ -16,25 +16,26 @@ print("Server found") if len(sys.argv) > 1: - platform = str(sys.argv[1]).upper() + obj = str(sys.argv[1]).upper() if len(sys.argv) > 2: - obj = str(sys.argv[2]).upper() + location = str(sys.argv[2]).upper() else: obj = "M20" else: - platform = "PLATFORM_MIDDLE" obj = "M20" + location = "WS01" goal = GenericExecuteGoal() - goal.parameters.append(KeyValue(key="platform", value=platform)) + goal.parameters.append(KeyValue(key="platform", value="PLATFORM_MIDDLE")) goal.parameters.append(KeyValue(key="object", value=obj)) + goal.parameters.append(KeyValue(key="location", value=location)) rospy.loginfo("Sending following goal to place object server") rospy.loginfo(goal) client.send_goal(goal) - timeout = 15.0 + timeout = 300.0 finished_within_time = client.wait_for_result(rospy.Duration.from_sec(int(timeout))) if not finished_within_time: client.cancel_goal() diff --git a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server.py b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server.py index 116e0b9e1..7132d6cc3 100755 --- a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server.py +++ b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server.py @@ -14,6 +14,7 @@ from selectors import PollSelector import mcr_states.common.basic_states as gbs import mir_states.common.manipulation_states as gms # move the arm, and gripper +import mir_states.common.action_states as gas import rospy import smach from mir_actions.utils import Utils @@ -30,7 +31,99 @@ from diagnostic_msgs.msg import KeyValue from actionlib import SimpleActionClient from actionlib_msgs.msg import GoalStatus +from brics_actuator.msg import JointPositions, JointValue +from sensor_msgs.msg import JointState +import numpy as np +import tf +class MoveArmUp(smach.State): + + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded", "timeout"], + ) + self.joint_states_sub = rospy.Subscriber("/joint_states", JointState, self.joint_states_cb) + self.pub_arm_position = rospy.Publisher("/arm_1/arm_controller/position_command", JointPositions, queue_size=1) + self.current_joint_positions = None + self.is_arm_moving = False + self.zero_vel_counter = 0 + self.joint_1_position = 1.8787 + + def joint_states_cb(self, msg): + if "arm_joint_1" in msg.name: # get the joint values of the arm only + self.current_joint_positions = msg.position + + self.joint_state = msg + # monitor the velocities + self.joint_velocities = msg.velocity + # if all velocities are 0.0, the arm is not moving + if "arm_joint_1" in msg.name and all([v == 0.0 for v in self.joint_velocities]): + self.zero_vel_counter += 1 + + def execute(self, userdata): + self.current_joint_positions = None + while not rospy.is_shutdown(): + rospy.sleep(0.1) + if self.current_joint_positions is not None: + break + joint_values = 
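# --- Editorial sketch (not part of the diff) -------------------------------
# In place_object_client_test.py above, the single-argument branch overwrites
# the requested object with "M20" and never assigns `location`, so building the
# goal would raise a NameError. A defaulting scheme that keeps the user's
# object and falls back to WS01 (assumed default) could look like this:
import sys

obj = sys.argv[1].upper() if len(sys.argv) > 1 else "M20"
location = sys.argv[2].upper() if len(sys.argv) > 2 else "WS01"
# ----------------------------------------------------------------------------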
self.current_joint_positions[:] + joint_values = list(joint_values) + joint_values[1] = self.joint_1_position + + names = self.joint_state.name + + joint_positions = JointPositions() + joint_positions.positions = [ + JointValue( + rospy.Time.now(), + joint_name, + "rad", + joint_value + ) + for joint_name, joint_value in zip(names, joint_values) + ] + self.pub_arm_position.publish(joint_positions) + rospy.sleep(1) + return "succeeded" + +# =============================================================================== + +class DefineShelfPlacePose(smach.State): + def __init__(self): + smach.State.__init__(self, + outcomes=['succeeded', 'failed'], + input_keys=["goal"], + output_keys=['move_arm_to']) + self.pose_list_sh01 = ["shelf_place_1", "shelf_place_2"] + self.pose_list_sh02 = ["shelf_place_3", "shelf_place_4"] + + def execute(self, userdata): + location = Utils.get_value_of(userdata.goal.parameters, "location") + try: + if location == "SH01": + if len(self.pose_list_sh01) > 0: + rospy.logwarn("Getting shelf place pose from list") + userdata.move_arm_to = self.pose_list_sh01.pop() + else: + rospy.logfatal("No more shelf place pose in list, so using default pose") + userdata.move_arm_to = "shelf_place_final" + elif location == "SH02": + if len(self.pose_list_sh02) > 0: + rospy.logwarn("Getting shelf place pose from list") + userdata.move_arm_to = self.pose_list_sh02.pop() + else: + rospy.logfatal("No more shelf place pose in list, so using default pose") + userdata.move_arm_to = "shelf_place_final" + # if len(self.pose_list) > 0: + # rospy.logwarn("Getting shelf place pose from list") + # userdata.move_arm_to = self.pose_list.pop() + # else: + # rospy.logfatal("No more shelf place pose in list, so using default pose") + # userdata.move_arm_to = "shelf_place_final" + return 'succeeded' + except: + return 'failed' # =============================================================================== class CheckIfLocationIsShelf(smach.State): @@ -51,38 +144,129 @@ def execute(self, userdata): return "not_shelf" -# =============================================================================== - -# new class for threshold calculation to go to default place -class Threshold_calculation(smach.State): +class CheckModePlacing(smach.State): def __init__(self): smach.State.__init__( self, - outcomes=["reached", "continue"], - input_keys=["max_allowed_tries", "threshold_counter"], - output_keys=["feedback", "result", "threshold_counter"], + input_keys=["empty_place"], + outcomes=["pose_selector", "empty_pose"], ) - def execute(self, userdata): - max_tries = userdata.max_allowed_tries + empty_place = userdata.empty_place + empty_place = True # remove later + print("******************") + print(empty_place) + if empty_place: + print("++++++++++++++= EMPTY_PLACE") + return "empty_pose" + else: + print("++++++++++++++= POSE SELECTOR") + return "pose_selector" - result = None - if userdata.threshold_counter >= max_tries: - result = "reached" - userdata.threshold_counter = 0 - else: - userdata.threshold_counter += 1 - result = "continue" +class GetPoseToPlaceOject(smach.State): # inherit from the State base class + def __init__(self, topic_name_pub, topic_name_sub, event_sub, timeout_duration): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["goal", "feedback"], + output_keys=["feedback", "result", "move_arm_to"], + ) + self.timeout = rospy.Duration.from_sec(timeout_duration) + # create publisher + self.platform_name_pub = rospy.Publisher(topic_name_pub, String, 
queue_size=10) + rospy.Subscriber(topic_name_sub, String, self.pose_cb) + rospy.Subscriber(event_sub, String, self.event_cb) + rospy.sleep(0.1) # time for publisher to register + self.place_pose = None + self.status = None + + def pose_cb(self, msg): + self.place_pose = msg.data + + def event_cb(self, msg): + self.status = msg.data + + def execute(self, userdata): + # Add empty result msg (because if none of the state do it, action server gives error) userdata.result = GenericExecuteResult() userdata.feedback = GenericExecuteFeedback( - current_state="GO_DEFAULT_THRESHOLD", text="No of time tried the IK reachability: " + str(userdata.threshold_counter), + current_state="GetPoseToPlaceOject", text="Getting pose to place obj", ) - return result - + + location = Utils.get_value_of(userdata.goal.parameters, "location") + if location is None: + rospy.logwarn('"location" not provided. Using default.') + return "failed" + + self.place_pose = None + self.status = None + self.platform_name_pub.publish(String(data=location)) + + # wait for messages to arrive + start_time = rospy.Time.now() + rate = rospy.Rate(10) # 10hz + while not (rospy.is_shutdown()): + if rospy.Time.now() - start_time > self.timeout: + break + if self.place_pose is not None and self.status is not None: + break + rate.sleep() + + if ( + self.place_pose is not None + and self.status is not None + and self.status == "e_success" + ): + userdata.move_arm_to = self.place_pose + return "succeeded" + else: + return "failed" + +# =============================================================================== + + +class DefalutSafePose(smach.State): + def __init__(self): + smach.State.__init__(self, outcomes=["succeeded", "failed"], + input_keys=["goal","move_arm_to"], + output_keys=["move_arm_to"]) + + def execute(self, userdata): + + rospy.logwarn("Checking pre-defined safe pose") + location = Utils.get_value_of(userdata.goal.parameters, "location") + current_platform_height = rospy.get_param("/"+location) + userdata.move_arm_to = str(str(current_platform_height)+'cm/pose4') + print("from place server ========") + print(userdata.move_arm_to) + rospy.sleep(0.1) + return "succeeded" + + +class CheckRetries(smach.State): + def __init__(self, state=False): + smach.State.__init__( + self, + outcomes=["retry", "no_retry"], + input_keys=["current_try", "max_allowed_tries"], + output_keys=["current_try"], + ) + + def execute(self, userdata): + + print("No of rety ===>", userdata.current_try) + + if userdata.current_try < userdata.max_allowed_tries: + userdata.current_try += 1 + return "retry" + else: + userdata.current_try = 0 + return "no_retry" + # ============================================================================== # new class for empty space detection @@ -115,34 +299,91 @@ def execute(self, userdata): # Getting data from empty space detector and giving return 'success' # =============================================================================== +# class DefalutSafePose(smach.State): +# def __init__(self): +# smach.State.__init__(self, outcomes=["succeeded", "failed"], +# input_keys=["goal"],) + +# self.empty_pose_pub = rospy.Publisher( +# "/mcr_perception/object_selector/output/object_pose", +# PoseStamped, +# queue_size=10, +# ) + +# def map_location_to_base_link(self, location): + +# if location is not None: +# current_platform_height = rospy.get_param("/"+location) +# current_platform_height = current_platform_height /100 +# else: +# current_platform_height = 0.10 + +# map_location_to_platform_height = { +# 0.15: 0.065, +# 
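# --- Editorial sketch (not part of the diff) -------------------------------
# DefalutSafePose above turns a per-workspace height parameter into a named arm
# configuration. For example, with a (hypothetical) parameter
#     /WS05: 10
# the state would set move_arm_to to "10cm/pose4". A compact equivalent:
import rospy

def default_place_pose(location):
    height_cm = rospy.get_param("/" + location)   # e.g. 10
    return "{}cm/pose4".format(height_cm)          # -> "10cm/pose4"
# ----------------------------------------------------------------------------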
0.10: 0.025, +# 0.05: -0.030, +# 0.0: -0.080 +# } -# new class for publishing the empty pose +# return map_location_to_platform_height[current_platform_height] + + +# def execute(self, userdata): + +# location = Utils.get_value_of(userdata.goal.parameters, "location") +# rospy.logwarn("Checking pre-defined safe pose") + +# height_from_base = self.map_location_to_base_link(location) + +# safe_pose = PoseStamped() +# safe_pose.header.frame_id = "base_link" +# safe_pose.pose.position.x = 0.610 +# safe_pose.pose.position.y = 0.095 +# safe_pose.pose.position.z = height_from_base + rospy.get_param("/mir_perception/empty_space_detector/object_height_above_workspace") # adding the height of the object above the workspace Should be change for vertical object + +# self.empty_pose_pub.publish(safe_pose) +# rospy.sleep(0.1) +# safe_pose = None +# return "succeeded" class PublishObjectPose(smach.State): def __init__(self): smach.State.__init__(self, outcomes=["success", "failed"], - input_keys=["empty_locations"]) + input_keys=["goal","empty_locations", "counter_reset_flag"], + output_keys=["counter_reset_flag"]) self.empty_pose_pub = rospy.Publisher( "/mcr_perception/object_selector/output/object_pose", PoseStamped, queue_size=10) + self.floor_height = rospy.get_param("height_of_floor", -0.083) + + def map_location_to_base_link(self, location): + + if location is not None: + current_platform_height = rospy.get_param("/"+location) + current_platform_height = current_platform_height /100 + else: + current_platform_height = 0.10 + + platform_height = self.floor_height + current_platform_height + + return platform_height + def execute(self, userdata): empty_locations = userdata.empty_locations + # reset the max threshold counter + userdata.counter_reset_flag = True + + location = Utils.get_value_of(userdata.goal.parameters, "location") + if len(empty_locations.poses) > 0: base_link_pose = PoseStamped() base_link_pose.header = empty_locations.header - - base_link_pose.pose.position.x = 0.0 - base_link_pose.pose.position.y = 0.0 - base_link_pose.pose.position.z = 0.0 - base_link_pose.pose.orientation.x = 0.0 - base_link_pose.pose.orientation.y = 0.0 - base_link_pose.pose.orientation.z = 0.0 empty_locations_temp = PoseStamped() empty_locations_temp.header = empty_locations.header @@ -153,29 +394,70 @@ def execute(self, userdata): empty_locations_temp.pose = empty_locations.poses[i] distance_list.append(Utils.get_distance_between_poses(base_link_pose, empty_locations_temp)) - index = distance_list.index(min(distance_list)) + index = distance_list.index(max(distance_list)) nearest_pose = PoseStamped() nearest_pose.header = empty_locations.header nearest_pose.pose = empty_locations.poses[index] - - # TODO: This z value should be tested - - nearest_pose.pose.position.z += rospy.get_param("/mir_perception/empty_space_detector/object_height_above_workspace") # adding the height of the object above the workspace Should be change for vertical object - - rospy.loginfo(nearest_pose.pose.position.z) - rospy.loginfo("Publishing single pose to pregrasp planner") - - rospy.loginfo(type(nearest_pose)) - self.empty_pose_pub.publish(nearest_pose) - - rospy.sleep(0.3) - nearest_pose = None + print("----------------------------------") + print("nearest pose", nearest_pose.pose.position.z) + + height_from_base = self.map_location_to_base_link(location) + if height_from_base > nearest_pose.pose.position.z: + nearest_pose.pose.position.z = height_from_base + nearest_pose.pose.position.z += 
rospy.get_param("/mir_perception/empty_space_detector/object_height_above_workspace") # adding the height of the object above the workspace Should be change for vertical object + nearest_pose.pose.position.z += -0.01 + + q_with_yaw = tf.transformations.quaternion_from_euler(0,0,np.pi/3) + nearest_pose.pose.orientation.x = q_with_yaw[0] + nearest_pose.pose.orientation.y = q_with_yaw[1] + nearest_pose.pose.orientation.z = q_with_yaw[2] + nearest_pose.pose.orientation.w = q_with_yaw[3] + # print("place_pose_quaternion", nearest_pose.pose.orientation) + print("Publishing single pose to pregrasp planner") + print("----------------------------------") + + self.empty_pose_pub.publish(nearest_pose) + + rospy.sleep(0.1) + nearest_pose = None + return "success" + +class MoveDBC(smach.State): + def __init__(self, forward=True): + smach.State.__init__(self, outcomes=["succeeded"]) + self._dbc_pose_pub = rospy.Publisher( + "/mcr_navigation/direct_base_controller/input_pose", + PoseStamped, + queue_size=1, + ) + self.forward = forward + self.listener = tf.TransformListener() - return "success" + def execute(self, userdata): + # get tf of base_link + tf_msg = self.listener.lookupTransform("/base_link_static", "/base_link", rospy.Time(0)) + # get the pose of the object in base_link frame + pose_msg = PoseStamped() + pose_msg.header.frame_id = "base_link_static" + pose_msg.header.stamp = rospy.Time.now() + # amount to move backward after pick object + if self.forward: + pose_msg.pose.position.x = tf_msg[0][0] + 0.075 + else: + pose_msg.pose.position.x = tf_msg[0][0] - 0.075 + + pose_msg.pose.position.y = tf_msg[0][1] + pose_msg.pose.position.z = tf_msg[0][2] + pose_msg.pose.orientation.x = tf_msg[1][0] + pose_msg.pose.orientation.y = tf_msg[1][1] + pose_msg.pose.orientation.z = tf_msg[1][2] + pose_msg.pose.orientation.w = tf_msg[1][3] + dbc_pose = pose_msg + self._dbc_pose_pub.publish(dbc_pose) + return "succeeded" -#================================================================================= def transition_cb(*args, **kwargs): userdata = args[0] @@ -202,120 +484,239 @@ def main(): input_keys=["goal", "feedback", "result"], output_keys=["feedback", "result"],) + sm.userdata.counter_reset_flag = False sm.userdata.threshold_counter = 0 sm.userdata.empty_locations = None - sm.userdata.heavy_objects = rospy.get_param("~heavy_objects", ["m20_100"]) sm.userdata.max_allowed_tries = rospy.get_param("~max_allowed_IK_tries", 3) + sm.userdata.empty_place = rospy.get_param("~is_empty_pose_placing", False) + sm.userdata.current_try = 0 + sm.userdata.move_arm_to = None with sm: + smach.StateMachine.add( + "MOVE_ROBOT_TO_CENTER", + gas.move_base(None), + transitions={"success": "MOVE_ARM_TO_PRE_PLACE", + "failed" : "OVERALL_FAILED"}, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_PRE_PLACE", + gms.move_arm("pre_place", use_moveit=False), + transitions={ + "succeeded": "CHECK_IF_SHELF_INITIAL", + "failed": "MOVE_ARM_TO_PRE_PLACE", + }, + ) + # add states to the container smach.StateMachine.add( "CHECK_IF_SHELF_INITIAL", CheckIfLocationIsShelf(), transitions={ "shelf": "MOVE_ARM_TO_SHELF_INTERMEDIATE", - "not_shelf": "CHECK_MAX_TRY_THRESHOLD", + "not_shelf": "CHECK_MODE_OF_PLACING", }, ) + smach.StateMachine.add( + "CHECK_MODE_OF_PLACING", + CheckModePlacing(), + transitions={ + "pose_selector": "START_PLACE_POSE_SELECTOR", + "empty_pose": "CHECK_MAX_TRY_THRESHOLD", + } + ) + smach.StateMachine.add( "MOVE_ARM_TO_SHELF_INTERMEDIATE", gms.move_arm("shelf_intermediate"), transitions={ - "succeeded": 
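# --- Editorial sketch (not part of the diff) -------------------------------
# MoveDBC above realises a small relative base motion: it reads the current
# base_link pose in the static reference frame published earlier
# (PUBLISH_REFERENCE_FRAME), shifts it +/-7.5 cm along that frame's x axis and
# hands the result to the direct base controller, which the state machine then
# triggers with "e_start". Frame and topic names are taken from the code above;
# the helper itself is illustrative.
import rospy
import tf
from geometry_msgs.msg import PoseStamped

def relative_base_target(listener, dx, frame="base_link_static"):
    """Return a PoseStamped shifted dx metres along the x axis of `frame`."""
    listener.waitForTransform(frame, "base_link", rospy.Time(0), rospy.Duration(2.0))
    (trans, rot) = listener.lookupTransform(frame, "base_link", rospy.Time(0))
    target = PoseStamped()
    target.header.frame_id = frame
    target.header.stamp = rospy.Time.now()
    target.pose.position.x = trans[0] + dx
    target.pose.position.y = trans[1]
    target.pose.position.z = trans[2]
    (target.pose.orientation.x, target.pose.orientation.y,
     target.pose.orientation.z, target.pose.orientation.w) = rot
    return target

# usage, mirroring MoveDBC(forward=True):
# pub = rospy.Publisher("/mcr_navigation/direct_base_controller/input_pose",
#                       PoseStamped, queue_size=1)
# pub.publish(relative_base_target(tf.TransformListener(), 0.075))
# ----------------------------------------------------------------------------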
"MOVE_ARM_TO_SHELF_INTERMEDIATE_2", - "failed": "MOVE_ARM_TO_SHELF_INTERMEDIATE", + "succeeded": "PUBLISH_REFERENCE_FRAME", + "failed": "OVERALL_FAILED", }, ) + # publish a static frame which will be used as reference for perceived objs smach.StateMachine.add( - "MOVE_ARM_TO_SHELF_INTERMEDIATE_2", - gms.move_arm("shelf_intermediate_2"), + "PUBLISH_REFERENCE_FRAME", + gbs.send_event([("/static_transform_publisher_node/event_in", "e_start")]), + transitions={"success": "SET_DBC_PARAMS"}, + ) + + smach.StateMachine.add( + "SET_DBC_PARAMS", + gbs.set_named_config("dbc_pick_object"), transitions={ - "succeeded": "MOVE_ARM_TO_PRE_GRASP_LOWER", - "failed": "MOVE_ARM_TO_SHELF_INTERMEDIATE_2", + "success": "MOVE_FORWARD", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", }, ) smach.StateMachine.add( - "MOVE_ARM_TO_PRE_GRASP_LOWER", - gms.move_arm("shelf_pre_grasp_lower"), + "MOVE_FORWARD", + MoveDBC(forward=True), + transitions={"succeeded": "MOVE_BASE_USING_DBC"}, + ) + + # Move base using direct base controller + smach.StateMachine.add( + "MOVE_BASE_USING_DBC", + gbs.send_and_wait_events_combined( + event_in_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_in", + "e_start", + ) + ], + event_out_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_out", + "e_success", + True, + ) + ], + timeout_duration=10, + ), + transitions={ + "success": "SET_SHELF_PLACE_POSE", + "timeout": "SET_SHELF_PLACE_POSE", + "failure": "SET_SHELF_PLACE_POSE", + }, + ) + + smach.StateMachine.add( + "SET_SHELF_PLACE_POSE", + DefineShelfPlacePose(), transitions={ "succeeded": "MOVE_ARM_TO_SHELF_PLACE_FINAL", - "failed": "MOVE_ARM_TO_PRE_GRASP_LOWER", + "failed": "SET_SHELF_PLACE_POSE", }, ) smach.StateMachine.add( "MOVE_ARM_TO_SHELF_PLACE_FINAL", - gms.move_arm("shelf_place_final"), + gms.move_arm(), transitions={ "succeeded": "OPEN_GRIPPER_SHELF", - "failed": "MOVE_ARM_TO_SHELF_PLACE_FINAL", + "failed": "OVERALL_FAILED", }, ) smach.StateMachine.add( "OPEN_GRIPPER_SHELF", gms.control_gripper("open"), - transitions={"succeeded": "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT"}, + transitions={"succeeded": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + "timeout": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT"} ) smach.StateMachine.add( - "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT", - gms.move_arm("shelf_place_final"), + "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + gms.move_arm("shelf_intermediate"), transitions={ - "succeeded": "MOVE_ARM_TO_PRE_GRASP_LOWER_RETRACT", - "failed": "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT", + "succeeded": "MOVE_BACKWARD", + "failed": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", }, ) smach.StateMachine.add( - "MOVE_ARM_TO_PRE_GRASP_LOWER_RETRACT", - gms.move_arm("shelf_pre_grasp_lower"), - transitions={ - "succeeded": "MOVE_ARM_TO_SHELF_INTERMEDIATE_2_RETRACT", - "failed": "MOVE_ARM_TO_PRE_GRASP_LOWER_RETRACT", - }, + "MOVE_BACKWARD", + MoveDBC(forward=False), + transitions={"succeeded": "MOVE_BASE_USING_DBC_BACK"}, ) + # Move base using direct base controller smach.StateMachine.add( - "MOVE_ARM_TO_SHELF_INTERMEDIATE_2_RETRACT", - gms.move_arm("shelf_intermediate_2"), + "MOVE_BASE_USING_DBC_BACK", + gbs.send_and_wait_events_combined( + event_in_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_in", + "e_start", + ) + ], + event_out_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_out", + "e_success", + True, + ) + ], + timeout_duration=10, + ), transitions={ - "succeeded": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", - "failed": 
"MOVE_ARM_TO_SHELF_INTERMEDIATE_2_RETRACT", + "success": "MOVE_ARM_TO_NEUTRAL", + "timeout": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + "failure": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", }, ) + + +# below states are for default pose placing + smach.StateMachine.add( - "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", - gms.move_arm("shelf_intermediate"), + "START_PLACE_POSE_SELECTOR", + gbs.send_event( + [("/mcr_perception/place_pose_selector/event_in", "e_start")] + ), + transitions={"success": "GET_POSE_TO_PLACE_OBJECT"}, + ) + + smach.StateMachine.add( + "GET_POSE_TO_PLACE_OBJECT", + GetPoseToPlaceOject( + "/mcr_perception/place_pose_selector/platform_name", + "/mcr_perception/place_pose_selector/place_pose", + "/mcr_perception/place_pose_selector/event_out", + 10.0, + ), transitions={ - "succeeded": "MOVE_ARM_TO_NEUTRAL", - "failed": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + "succeeded": "MOVE_ARM_TO_PLACE_OBJECT", + "failed": "MOVE_ARM_TO_DEFAULT_PLACE", }, ) -# till above the state machine is for shelf smach.StateMachine.add( - "CHECK_MAX_TRY_THRESHOLD", - Threshold_calculation(), + "MOVE_ARM_TO_DEFAULT_PLACE", + DefalutSafePose(), transitions={ - "continue": "EMPTY_SPACE_CLOUD_ADD", - "reached": "GO_DEFAULT_THRESHOLD", + "succeeded": "MOVE_ARM_TO_PLACE_OBJECT", + "failed": "MOVE_ARM_TO_DEFAULT_PLACE", + }, + ) + smach.StateMachine.add( + "MOVE_ARM_TO_PLACE_OBJECT", + gms.move_arm(), + transitions={"succeeded": "STOP_PLACE_POSE_SELECTOR", + "failed": "STOP_PLACE_POSE_SELECTOR", }, ) + smach.StateMachine.add( + "STOP_PLACE_POSE_SELECTOR", + gbs.send_event( + [("/mcr_perception/place_pose_selector/event_in", "e_stop")] + ), + # transitions={"success": "OPEN_GRIPPER"}, + transitions={"success": "RELEASE_GRIPPER"}, + ) + + +# below states for empty space placing-- + smach.StateMachine.add( - "GO_DEFAULT_THRESHOLD", - gms.move_arm("place_default"), + "CHECK_MAX_TRY_THRESHOLD", + CheckRetries(), transitions={ - "succeeded": "OPEN_GRIPPER", - "failed": "GO_DEFAULT_THRESHOLD", - } + "retry": "EMPTY_SPACE_CLOUD_ADD", + "no_retry": "START_PLACE_POSE_SELECTOR", + }, ) + smach.StateMachine.add( "EMPTY_SPACE_CLOUD_ADD", gbs.send_and_wait_events_combined( @@ -323,7 +724,7 @@ def main(): ("/mir_perception/empty_space_detector/event_in","e_add_cloud"), ], event_out_list = [("/mir_perception/empty_space_detector/event_out","e_added_cloud", True)], - timeout_duration=50, + timeout_duration=10, ), transitions={ "success": "EMPTY_SPACE_TRIGGER", @@ -338,7 +739,7 @@ def main(): gbs.send_and_wait_events_combined( event_in_list = [("/mir_perception/empty_space_detector/event_in","e_trigger")], event_out_list = [("/mir_perception/empty_space_detector/event_out","e_success",True)], - timeout_duration = 50, + timeout_duration = 10, ), transitions={ "success": "EMPTY_POSE_RECEIVE", @@ -404,34 +805,52 @@ def main(): timeout_duration=20, ), transitions={ - "success": "OPEN_GRIPPER", + # "success": "OPEN_GRIPPER", + "success": "RELEASE_GRIPPER", "timeout": "OVERALL_FAILED", "failure": "OVERALL_FAILED", }, ) - smach.StateMachine.add( - "OPEN_GRIPPER", - gms.control_gripper("open"), + "RELEASE_GRIPPER", + gms.control_gripper('release'), transitions={ - "succeeded": "MOVE_ARM_TO_NEUTRAL" - }, + "succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP"} ) + smach.StateMachine.add( + "MOVE_ARM_UP", + MoveArmUp(), + transitions={ + "succeeded": "MOVE_ARM_TO_NEUTRAL", + "timeout": "MOVE_ARM_TO_NEUTRAL"} + ) + smach.StateMachine.add( "MOVE_ARM_TO_NEUTRAL", - gms.move_arm("barrier_tape"), + gms.move_arm("pre_place", 
use_moveit=False), transitions={ - "succeeded": "OVERALL_SUCCESS", + "succeeded": "OPEN_GRIPPER", "failed": "MOVE_ARM_TO_NEUTRAL", }, ) + smach.StateMachine.add( + "OPEN_GRIPPER", + gms.control_gripper('open'), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "timeout": "OVERALL_SUCCESS"} + ) + + sm.register_transition_cb(transition_cb) sm.register_start_cb(start_cb) sm.userdata.threshold_counter = 0 + sm.userdata.current_try = 0 # smach viewer if rospy.get_param("~viewer_enabled", True): diff --git a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server_old.py b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server_old.py new file mode 100755 index 000000000..ad8104448 --- /dev/null +++ b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_object_server_old.py @@ -0,0 +1,850 @@ +#!/usr/bin/env python + + +# threshold checking for TF calculation WIP + + +""" +This file is for eye to hand configuration of the robot +unstage will happen outside the place object server + +""" + + +from selectors import PollSelector +import mcr_states.common.basic_states as gbs +import mir_states.common.manipulation_states as gms # move the arm, and gripper +import mir_states.common.action_states as gas +import rospy +import smach +from mir_actions.utils import Utils +from mir_planning_msgs.msg import ( + GenericExecuteAction, + GenericExecuteFeedback, + GenericExecuteResult, + GenericExecuteGoal +) +from smach_ros import ActionServerWrapper, IntrospectionServer +from std_msgs.msg import String +from geometry_msgs.msg import PoseArray +from geometry_msgs.msg import PoseStamped +from diagnostic_msgs.msg import KeyValue +from actionlib import SimpleActionClient +from actionlib_msgs.msg import GoalStatus +from brics_actuator.msg import JointPositions, JointValue +from sensor_msgs.msg import JointState +import numpy as np +import tf + +class MoveArmUp(smach.State): + + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded", "timeout"], + ) + self.joint_states_sub = rospy.Subscriber("/joint_states", JointState, self.joint_states_cb) + self.pub_arm_position = rospy.Publisher("/arm_1/arm_controller/position_command", JointPositions, queue_size=1) + self.current_joint_positions = None + self.is_arm_moving = False + self.zero_vel_counter = 0 + self.joint_1_position = 1.8787 + + def joint_states_cb(self, msg): + if "arm_joint_1" in msg.name: # get the joint values of the arm only + self.current_joint_positions = msg.position + + self.joint_state = msg + # monitor the velocities + self.joint_velocities = msg.velocity + # if all velocities are 0.0, the arm is not moving + if "arm_joint_1" in msg.name and all([v == 0.0 for v in self.joint_velocities]): + self.zero_vel_counter += 1 + + def execute(self, userdata): + self.current_joint_positions = None + while not rospy.is_shutdown(): + rospy.sleep(0.1) + if self.current_joint_positions is not None: + break + joint_values = self.current_joint_positions[:] + joint_values = list(joint_values) + joint_values[1] = self.joint_1_position + + names = self.joint_state.name + + joint_positions = JointPositions() + joint_positions.positions = [ + JointValue( + rospy.Time.now(), + joint_name, + "rad", + joint_value + ) + for joint_name, joint_value in zip(names, joint_values) + ] + self.pub_arm_position.publish(joint_positions) + rospy.sleep(1) + return "succeeded" + +# =============================================================================== + +class DefineShelfPlacePose(smach.State): + def __init__(self): + 
smach.State.__init__(self, + outcomes=['succeeded', 'failed'], + output_keys=['move_arm_to']) + self.pose_list = ["shelf_place_1", "shelf_place_2"] + + def execute(self, userdata): + try: + if len(self.pose_list) > 0: + rospy.logwarn("Getting shelf place pose from list") + userdata.move_arm_to = self.pose_list.pop() + else: + rospy.logfatal("No more shelf place pose in list, so using default pose") + userdata.move_arm_to = "shelf_place_final" + return 'succeeded' + except: + return 'failed' + +# =============================================================================== +class CheckIfLocationIsShelf(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["shelf", "not_shelf"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + + def execute(self, userdata): + location = Utils.get_value_of(userdata.goal.parameters, "location") + print("[Place Object Server] Location received : ", location) + if (location == "SH01") or (location == "SH02"): + return "shelf" + else: + return "not_shelf" + + +class CheckModePlacing(smach.State): + + def __init__(self): + smach.State.__init__( + self, + input_keys=["empty_place"], + outcomes=["pose_selector", "empty_pose"], + ) + def execute(self, userdata): + + empty_place = userdata.empty_place + empty_place = True # remove later + print("******************") + print(empty_place) + if empty_place: + print("++++++++++++++= EMPTY_PLACE") + return "empty_pose" + else: + print("++++++++++++++= POSE SELECTOR") + return "pose_selector" + + +class GetPoseToPlaceOject(smach.State): # inherit from the State base class + def __init__(self, topic_name_pub, topic_name_sub, event_sub, timeout_duration): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["goal", "feedback"], + output_keys=["feedback", "result", "move_arm_to"], + ) + + self.timeout = rospy.Duration.from_sec(timeout_duration) + # create publisher + self.platform_name_pub = rospy.Publisher(topic_name_pub, String, queue_size=10) + rospy.Subscriber(topic_name_sub, String, self.pose_cb) + rospy.Subscriber(event_sub, String, self.event_cb) + rospy.sleep(0.1) # time for publisher to register + self.place_pose = None + self.status = None + + def pose_cb(self, msg): + self.place_pose = msg.data + + def event_cb(self, msg): + self.status = msg.data + + def execute(self, userdata): + # Add empty result msg (because if none of the state do it, action server gives error) + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="GetPoseToPlaceOject", text="Getting pose to place obj", + ) + + location = Utils.get_value_of(userdata.goal.parameters, "location") + if location is None: + rospy.logwarn('"location" not provided. 
Using default.') + return "failed" + + self.place_pose = None + self.status = None + self.platform_name_pub.publish(String(data=location)) + + # wait for messages to arrive + start_time = rospy.Time.now() + rate = rospy.Rate(10) # 10hz + while not (rospy.is_shutdown()): + if rospy.Time.now() - start_time > self.timeout: + break + if self.place_pose is not None and self.status is not None: + break + rate.sleep() + + if ( + self.place_pose is not None + and self.status is not None + and self.status == "e_success" + ): + userdata.move_arm_to = self.place_pose + return "succeeded" + else: + return "failed" + +# =============================================================================== + + +class DefalutSafePose(smach.State): + def __init__(self): + smach.State.__init__(self, outcomes=["succeeded", "failed"], + input_keys=["goal","move_arm_to"], + output_keys=["move_arm_to"]) + + def execute(self, userdata): + + rospy.logwarn("Checking pre-defined safe pose") + location = Utils.get_value_of(userdata.goal.parameters, "location") + current_platform_height = rospy.get_param("/"+location) + userdata.move_arm_to = str(str(current_platform_height)+'cm/pose4') + print("from place server ========") + print(userdata.move_arm_to) + rospy.sleep(0.1) + return "succeeded" + + +class CheckRetries(smach.State): + def __init__(self, state=False): + smach.State.__init__( + self, + outcomes=["retry", "no_retry"], + input_keys=["current_try", "max_allowed_tries"], + output_keys=["current_try"], + ) + + def execute(self, userdata): + + print("No of rety ===>", userdata.current_try) + + if userdata.current_try < userdata.max_allowed_tries: + userdata.current_try += 1 + return "retry" + else: + userdata.current_try = 0 + return "no_retry" + +# ============================================================================== + +# new class for empty space detection + +class GetEmptyPositionOnTable(smach.State): + def __init__(self,empty_spaces): + + smach.State.__init__(self,outcomes=["success", "failure"], + input_keys=["empty_locations",], + output_keys=["feedback","result","empty_locations"] + ) + + self.empty_locations = None + self.timeout = rospy.Duration.from_sec(15.0) + rospy.Subscriber(empty_spaces, PoseArray, self.empty_space_cb) # subscribing to empty_space locations published by empty_space_detector node + rospy.sleep(0.1) + + def empty_space_cb(self, msg): # Callback for get empty_space_location from empty_space detector node + + self.empty_locations = msg + + def execute(self, userdata): # Getting data from empty space detector and giving all the poses it to next states + + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="EMPTY_POSE_RECEIVE", text="Receiving empty space location",) + rospy.loginfo("<<--- getting empty position --->>") + userdata.empty_locations = self.empty_locations # The empty locations have all the empty poses + + return 'success' + +# =============================================================================== +# class DefalutSafePose(smach.State): +# def __init__(self): +# smach.State.__init__(self, outcomes=["succeeded", "failed"], +# input_keys=["goal"],) + +# self.empty_pose_pub = rospy.Publisher( +# "/mcr_perception/object_selector/output/object_pose", +# PoseStamped, +# queue_size=10, +# ) + +# def map_location_to_base_link(self, location): + +# if location is not None: +# current_platform_height = rospy.get_param("/"+location) +# current_platform_height = current_platform_height /100 +# else: +# 
current_platform_height = 0.10 + +# map_location_to_platform_height = { +# 0.15: 0.065, +# 0.10: 0.025, +# 0.05: -0.030, +# 0.0: -0.080 +# } + +# return map_location_to_platform_height[current_platform_height] + + +# def execute(self, userdata): + +# location = Utils.get_value_of(userdata.goal.parameters, "location") +# rospy.logwarn("Checking pre-defined safe pose") + +# height_from_base = self.map_location_to_base_link(location) + +# safe_pose = PoseStamped() +# safe_pose.header.frame_id = "base_link" +# safe_pose.pose.position.x = 0.610 +# safe_pose.pose.position.y = 0.095 +# safe_pose.pose.position.z = height_from_base + rospy.get_param("/mir_perception/empty_space_detector/object_height_above_workspace") # adding the height of the object above the workspace Should be change for vertical object + +# self.empty_pose_pub.publish(safe_pose) +# rospy.sleep(0.1) +# safe_pose = None +# return "succeeded" + +class PublishObjectPose(smach.State): + def __init__(self): + smach.State.__init__(self, outcomes=["success", "failed"], + input_keys=["goal","empty_locations", "counter_reset_flag"], + output_keys=["counter_reset_flag"]) + + self.empty_pose_pub = rospy.Publisher( + "/mcr_perception/object_selector/output/object_pose", + PoseStamped, + queue_size=10) + + self.floor_height = rospy.get_param("height_of_floor", -0.083) + + def map_location_to_base_link(self, location): + + if location is not None: + current_platform_height = rospy.get_param("/"+location) + current_platform_height = current_platform_height /100 + else: + current_platform_height = 0.10 + + platform_height = self.floor_height + current_platform_height + + return platform_height + + def execute(self, userdata): + + empty_locations = userdata.empty_locations + # reset the max threshold counter + userdata.counter_reset_flag = True + + location = Utils.get_value_of(userdata.goal.parameters, "location") + + + if len(empty_locations.poses) > 0: + + base_link_pose = PoseStamped() + base_link_pose.header = empty_locations.header + + empty_locations_temp = PoseStamped() + empty_locations_temp.header = empty_locations.header + + + distance_list = [] + for i in range(len(empty_locations.poses)): + empty_locations_temp.pose = empty_locations.poses[i] + distance_list.append(Utils.get_distance_between_poses(base_link_pose, empty_locations_temp)) + + index = distance_list.index(max(distance_list)) + + nearest_pose = PoseStamped() + nearest_pose.header = empty_locations.header + nearest_pose.pose = empty_locations.poses[index] + + print("----------------------------------") + print("nearest pose", nearest_pose.pose.position.z) + + height_from_base = self.map_location_to_base_link(location) + if height_from_base > nearest_pose.pose.position.z: + nearest_pose.pose.position.z = height_from_base + nearest_pose.pose.position.z += rospy.get_param("/mir_perception/empty_space_detector/object_height_above_workspace") # adding the height of the object above the workspace Should be change for vertical object + nearest_pose.pose.position.z += -0.01 + + q_with_yaw = tf.transformations.quaternion_from_euler(0,0,np.pi/3) + nearest_pose.pose.orientation.x = q_with_yaw[0] + nearest_pose.pose.orientation.y = q_with_yaw[1] + nearest_pose.pose.orientation.z = q_with_yaw[2] + nearest_pose.pose.orientation.w = q_with_yaw[3] + # print("place_pose_quaternion", nearest_pose.pose.orientation) + print("Publishing single pose to pregrasp planner") + print("----------------------------------") + + self.empty_pose_pub.publish(nearest_pose) + + rospy.sleep(0.1) + 
nearest_pose = None + return "success" + +class MoveDBC(smach.State): + def __init__(self, forward=True): + smach.State.__init__(self, outcomes=["succeeded"]) + self._dbc_pose_pub = rospy.Publisher( + "/mcr_navigation/direct_base_controller/input_pose", + PoseStamped, + queue_size=1, + ) + self.forward = forward + self.listener = tf.TransformListener() + + def execute(self, userdata): + # get tf of base_link + tf_msg = self.listener.lookupTransform("/base_link_static", "/base_link", rospy.Time(0)) + # get the pose of the object in base_link frame + pose_msg = PoseStamped() + pose_msg.header.frame_id = "base_link_static" + pose_msg.header.stamp = rospy.Time.now() + # amount to move backward after pick object + if self.forward: + pose_msg.pose.position.x = tf_msg[0][0] + 0.075 + else: + pose_msg.pose.position.x = tf_msg[0][0] - 0.075 + + pose_msg.pose.position.y = tf_msg[0][1] + pose_msg.pose.position.z = tf_msg[0][2] + pose_msg.pose.orientation.x = tf_msg[1][0] + pose_msg.pose.orientation.y = tf_msg[1][1] + pose_msg.pose.orientation.z = tf_msg[1][2] + pose_msg.pose.orientation.w = tf_msg[1][3] + dbc_pose = pose_msg + self._dbc_pose_pub.publish(dbc_pose) + return "succeeded" + + +def transition_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback + +def start_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback + + +def main(): + rospy.init_node("place_object_server") + # Construct state machine + sm = smach.StateMachine( + outcomes=["OVERALL_SUCCESS", "OVERALL_FAILED"], + input_keys=["goal", "feedback", "result"], + output_keys=["feedback", "result"],) + + sm.userdata.counter_reset_flag = False + sm.userdata.threshold_counter = 0 + sm.userdata.empty_locations = None + sm.userdata.max_allowed_tries = rospy.get_param("~max_allowed_IK_tries", 3) + sm.userdata.empty_place = rospy.get_param("~is_empty_pose_placing", False) + sm.userdata.current_try = 0 + sm.userdata.move_arm_to = None + + with sm: + smach.StateMachine.add( + "MOVE_ROBOT_TO_CENTER", + gas.move_base(None), + transitions={"success": "MOVE_ARM_TO_PRE_PLACE", + "failed" : "OVERALL_FAILED"}, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_PRE_PLACE", + gms.move_arm("pre_place", use_moveit=False), + transitions={ + "succeeded": "CHECK_IF_SHELF_INITIAL", + "failed": "MOVE_ARM_TO_PRE_PLACE", + }, + ) + + # add states to the container + smach.StateMachine.add( + "CHECK_IF_SHELF_INITIAL", + CheckIfLocationIsShelf(), + transitions={ + "shelf": "MOVE_ARM_TO_SHELF_INTERMEDIATE", + "not_shelf": "CHECK_MODE_OF_PLACING", + }, + ) + + smach.StateMachine.add( + "CHECK_MODE_OF_PLACING", + CheckModePlacing(), + transitions={ + "pose_selector": "START_PLACE_POSE_SELECTOR", + "empty_pose": "CHECK_MAX_TRY_THRESHOLD", + } + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_SHELF_INTERMEDIATE", + gms.move_arm("shelf_intermediate"), + transitions={ + "succeeded": "PUBLISH_REFERENCE_FRAME", + "failed": "OVERALL_FAILED", + }, + ) + + # publish a static frame which will be used as reference for perceived objs + smach.StateMachine.add( + "PUBLISH_REFERENCE_FRAME", + gbs.send_event([("/static_transform_publisher_node/event_in", "e_start")]), + transitions={"success": "SET_DBC_PARAMS"}, + ) + + smach.StateMachine.add( + "SET_DBC_PARAMS", + gbs.set_named_config("dbc_pick_object"), + transitions={ + "success": "MOVE_FORWARD", + 
"timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "MOVE_FORWARD", + MoveDBC(forward=True), + transitions={"succeeded": "MOVE_BASE_USING_DBC"}, + ) + + # Move base using direct base controller + smach.StateMachine.add( + "MOVE_BASE_USING_DBC", + gbs.send_and_wait_events_combined( + event_in_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_in", + "e_start", + ) + ], + event_out_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_out", + "e_success", + True, + ) + ], + timeout_duration=10, + ), + transitions={ + "success": "SET_SHELF_PLACE_POSE", + "timeout": "SET_SHELF_PLACE_POSE", + "failure": "SET_SHELF_PLACE_POSE", + }, + ) + + smach.StateMachine.add( + "SET_SHELF_PLACE_POSE", + DefineShelfPlacePose(), + transitions={ + "succeeded": "MOVE_ARM_TO_SHELF_PLACE_FINAL", + "failed": "SET_SHELF_PLACE_POSE", + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_SHELF_PLACE_FINAL", + gms.move_arm(), + transitions={ + "succeeded": "OPEN_GRIPPER_SHELF", + "failed": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "OPEN_GRIPPER_SHELF", + gms.control_gripper("open"), + transitions={"succeeded": "MOVE_BACKWARD", + "timeout": "MOVE_BACKWARD"} + ) + + smach.StateMachine.add( + "MOVE_BACKWARD", + MoveDBC(forward=False), + transitions={"succeeded": "MOVE_BASE_USING_DBC_BACK"}, + ) + + # Move base using direct base controller + smach.StateMachine.add( + "MOVE_BASE_USING_DBC_BACK", + gbs.send_and_wait_events_combined( + event_in_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_in", + "e_start", + ) + ], + event_out_list=[ + ( + "/mcr_navigation/direct_base_controller/coordinator/event_out", + "e_success", + True, + ) + ], + timeout_duration=10, + ), + transitions={ + "success": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + "timeout": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + "failure": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + gms.move_arm("shelf_intermediate"), + transitions={ + "succeeded": "MOVE_ARM_TO_NEUTRAL", + "failed": "MOVE_ARM_TO_SHELF_INTERMEDIATE_RETRACT", + }, + ) + +# below states are for default pose placing + + smach.StateMachine.add( + "START_PLACE_POSE_SELECTOR", + gbs.send_event( + [("/mcr_perception/place_pose_selector/event_in", "e_start")] + ), + transitions={"success": "GET_POSE_TO_PLACE_OBJECT"}, + ) + + smach.StateMachine.add( + "GET_POSE_TO_PLACE_OBJECT", + GetPoseToPlaceOject( + "/mcr_perception/place_pose_selector/platform_name", + "/mcr_perception/place_pose_selector/place_pose", + "/mcr_perception/place_pose_selector/event_out", + 10.0, + ), + transitions={ + "succeeded": "MOVE_ARM_TO_PLACE_OBJECT", + "failed": "MOVE_ARM_TO_DEFAULT_PLACE", + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_DEFAULT_PLACE", + DefalutSafePose(), + transitions={ + "succeeded": "MOVE_ARM_TO_PLACE_OBJECT", + "failed": "MOVE_ARM_TO_DEFAULT_PLACE", + }, + ) + smach.StateMachine.add( + "MOVE_ARM_TO_PLACE_OBJECT", + gms.move_arm(), + transitions={"succeeded": "STOP_PLACE_POSE_SELECTOR", + "failed": "STOP_PLACE_POSE_SELECTOR", + }, + ) + + smach.StateMachine.add( + "STOP_PLACE_POSE_SELECTOR", + gbs.send_event( + [("/mcr_perception/place_pose_selector/event_in", "e_stop")] + ), + transitions={"success": "OPEN_GRIPPER"}, + ) + + +# below states for empty space placing-- + + + smach.StateMachine.add( + "CHECK_MAX_TRY_THRESHOLD", + CheckRetries(), + transitions={ + "retry": "EMPTY_SPACE_CLOUD_ADD", + "no_retry": 
"START_PLACE_POSE_SELECTOR", + }, + ) + + + smach.StateMachine.add( + "EMPTY_SPACE_CLOUD_ADD", + gbs.send_and_wait_events_combined( + event_in_list = [ + ("/mir_perception/empty_space_detector/event_in","e_add_cloud"), + ], + event_out_list = [("/mir_perception/empty_space_detector/event_out","e_added_cloud", True)], + timeout_duration=10, + ), + transitions={ + "success": "EMPTY_SPACE_TRIGGER", + "timeout": "CHECK_MAX_TRY_THRESHOLD", + "failure": "EMPTY_SPACE_CLOUD_ADD", + }, + ) + + + smach.StateMachine.add( + "EMPTY_SPACE_TRIGGER", + gbs.send_and_wait_events_combined( + event_in_list = [("/mir_perception/empty_space_detector/event_in","e_trigger")], + event_out_list = [("/mir_perception/empty_space_detector/event_out","e_success",True)], + timeout_duration = 10, + ), + transitions={ + "success": "EMPTY_POSE_RECEIVE", + "timeout": "EMPTY_SPACE_CLOUD_ADD", + "failure": "CHECK_MAX_TRY_THRESHOLD", + }, + ) + + + smach.StateMachine.add( + "EMPTY_POSE_RECEIVE", + GetEmptyPositionOnTable("/mir_perception/empty_space_detector/empty_spaces"), + + transitions={ + "success": "PUBLISH_OBJECT_POSE_FOR_VERIFICATION", + "failure": "EMPTY_SPACE_CLOUD_ADD", + }, + ) + + smach.StateMachine.add( + "PUBLISH_OBJECT_POSE_FOR_VERIFICATION", + PublishObjectPose(), + transitions={ + "success": "CHECK_PRE_GRASP_POSE_IK", + "failed": "PUBLISH_OBJECT_POSE_FOR_VERIFICATION" + } + + ) + + smach.StateMachine.add( + "CHECK_PRE_GRASP_POSE_IK", + gbs.send_and_wait_events_combined( + event_in_list=[ + ("/pregrasp_planner_node/event_in", "e_start") + ], + event_out_list=[ + ( + "/pregrasp_planner_node/event_out", + "e_success", + True, + ) + ], + timeout_duration=20, + ), + transitions={ + "success": "GO_TO_PRE_GRASP_POSE", + "timeout": "CHECK_MAX_TRY_THRESHOLD", + "failure": "CHECK_MAX_TRY_THRESHOLD", + }, + ) + + smach.StateMachine.add( + "GO_TO_PRE_GRASP_POSE", + gbs.send_and_wait_events_combined( + event_in_list=[ + ("/waypoint_trajectory_generation/event_in", "e_start")], + event_out_list=[ + ( + "/waypoint_trajectory_generation/event_out", + "e_success", + True, + )], + timeout_duration=20, + ), + transitions={ + "success": "OPEN_GRIPPER", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + + smach.StateMachine.add( + "OPEN_GRIPPER", + gms.control_gripper(0.25), + transitions={ + "succeeded": "MOVE_ARM_UP", + "timeout": "MOVE_ARM_UP"} + ) + + smach.StateMachine.add( + "MOVE_ARM_UP", + MoveArmUp(), + transitions={ + "succeeded": "MOVE_ARM_TO_NEUTRAL", + "timeout": "MOVE_ARM_TO_NEUTRAL"} + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_NEUTRAL", + gms.move_arm("pre_place", use_moveit=False), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "failed": "MOVE_ARM_TO_NEUTRAL", + }, + ) + + sm.register_transition_cb(transition_cb) + sm.register_start_cb(start_cb) + sm.userdata.threshold_counter = 0 + sm.userdata.current_try = 0 + + # smach viewer + if rospy.get_param("~viewer_enabled", True): + sis = IntrospectionServer( + "place_object_smach_viewer", sm, "/STAGE_OBJECT_SMACH_VIEWER" + ) + sis.start() + + # Construct action server wrapper + asw = ActionServerWrapper( + server_name="place_object_server", + action_spec=GenericExecuteAction, + wrapped_container=sm, + succeeded_outcomes=["OVERALL_SUCCESS"], + aborted_outcomes=["OVERALL_FAILED"], + preempted_outcomes=["PREEMPTED"], + goal_key="goal", + feedback_key="feedback", + result_key="result", + ) + # Run the server in a background thread + asw.run_server() + rospy.spin() + + +if __name__ == "__main__": + main() diff --git 
a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_unstage_object_server.py b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_unstage_object_server.py index af83e379a..43d25afd4 100755 --- a/mir_planning/mir_actions/mir_place_object/ros/scripts/place_unstage_object_server.py +++ b/mir_planning/mir_actions/mir_place_object/ros/scripts/place_unstage_object_server.py @@ -311,8 +311,8 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER_SHELF", gms.control_gripper("open"), - transitions={"succeeded": "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT" - }, + transitions={"succeeded": "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT", + "timeout": "MOVE_ARM_TO_SHELF_PLACE_FINAL_RETRACT"}, ) smach.StateMachine.add( @@ -365,8 +365,9 @@ def main(): "CLOSE_GRIPPER", gms.control_gripper("close"), transitions={ - "succeeded": "MOVE_ARM_TO_PRE_DEFAULT" - }, + "succeeded": "MOVE_ARM_TO_PRE_DEFAULT", + "timeout": "MOVE_ARM_TO_PRE_DEFAULT" + }, ) @@ -505,7 +506,8 @@ def main(): "OPEN_GRIPPER", gms.control_gripper("open"), transitions={ - "succeeded": "MOVE_ARM_TO_NEUTRAL" + "succeeded": "MOVE_ARM_TO_NEUTRAL", + "timeout": "MOVE_ARM_TO_NEUTRAL", }, ) diff --git a/mir_planning/mir_actions/mir_place_object/ros/scripts/shelf_empty_space b/mir_planning/mir_actions/mir_place_object/ros/scripts/shelf_empty_space new file mode 100755 index 000000000..301818134 --- /dev/null +++ b/mir_planning/mir_actions/mir_place_object/ros/scripts/shelf_empty_space @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +import os +import random +import time +import cv2 +import numpy as np +import roslib +import rospy +import torch +import math +import csv +from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion +from cv_bridge import CvBridge, CvBridgeError +from std_msgs.msg import ColorRGBA, String +import message_filters +from mas_perception_msgs.msg import ImageList, Object, ObjectList, TimeStampedPose +from sensor_msgs.msg import Image, RegionOfInterest, PointCloud2, PointField +from sensor_msgs import point_cloud2 +from ultralytics import YOLO +import tf +from sklearn.decomposition import PCA +from geometry_msgs.msg import PoseStamped, Pose +from visualization_msgs.msg import Marker +from scipy.optimize import least_squares +from sklearn.mixture import GaussianMixture +import tf.transformations as tr +from scipy.spatial.transform import Rotation as R +import tf2_sensor_msgs +from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud +import tf2_ros +import tf2_py as tf2 +import ros_numpy +from scipy.optimize import least_squares +import pdb + +class ShelfPlacePose(): + def __init__(self, debug_mode=True): + + self.cvbridge = CvBridge() + self.debug = debug_mode + self.pub_debug = rospy.Publisher("/mir_perception/multimodal_object_recognition/recognizer/rgb/output/debug_image", Image, queue_size=1) + self.pub_pose = rospy.Publisher("/mir_perception/predicted_shelf_place_pose", PoseStamped, queue_size=1) + + rospy.Subscriber("/mir_perception/shelf_place_pose/event_in", String, self.event_in_cb) + self.event_out = rospy.Publisher("/mir_perception/shelf_place_pose/event_out", String, queue_size=1) + self.event = None + + self.pose_estimation_complete = False + + + def callback(self, img_msg, point_cloud_msg): + try: + self.run(img_msg, point_cloud_msg) + except Exception as e: + rospy.loginfo("[shelf_place_pose node]killing point cloud callback") + + def event_in_cb(self, msg): + """ + Starts a planned motion based on the specified arm position. 
+ + """ + self.event = msg.data + if self.event.startswith("e_start"): + # Subscribe to image topic + self.sub_img = message_filters.Subscriber("/tower_cam3d_front/color/image_raw", Image)#, self.image_recognition_cb) + + # subscribe to point cloud topic + self.sub_point_cloud = message_filters.Subscriber("/tower_cam3d_front/depth/color/points", PointCloud2)#, self.point_cloud_cb) + + # synchronize image and point cloud + self.ts = message_filters.ApproximateTimeSynchronizer([self.sub_img, self.sub_point_cloud], queue_size=10, slop=0.1) + self.ts.registerCallback(self.callback) + + if self.event.startswith("e_stop"): + self.sub_img.sub.unregister() + self.sub_point_cloud.sub.unregister() + rospy.loginfo("[shelf_place_pose node] Unregistered image and pointcloud subscribers") + + + def get_img_point(self, cv_img): + + if cv_img is None: + return None, None, None # Return None values if cv_img is None + + place_img = cv_img[:, :240] + + gray = cv2.cvtColor(place_img, cv2.COLOR_BGR2GRAY) + + # Convert image to binary + _, bw = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) + + # Find all the contours in the thresholded image + contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) + + # get contour with maximum area + max_area = 0 + best_cnt = None + for cnt in contours: + area = cv2.contourArea(cnt) + if area > max_area: + max_area = area + best_cnt = cnt + + # get center of the best_cnt + xi, yi, _, _ = cv2.boundingRect(best_cnt) if best_cnt is not None else (None, None, None, None) + + debug_img = cv_img + # mark best_cnt on cv_img (for visualization) + if best_cnt is not None: + cv2.drawContours(debug_img, [best_cnt], -1, (0, 255, 0), 2) + cv2.circle(debug_img, (xi + int(debug_img.shape[1] / 2), yi), 5, (0, 0, 255), -1) + + return debug_img, xi, yi + + + def get_pointcloud_position(self, pointcloud, center): + """ + pointcloud: pointcloud data of current frame with PointCloud2 data type + center: x and y coordinates of empty space in image + """ + + # Get the pointcloud data + pc = np.array(list(point_cloud2.read_points(pointcloud, skip_nans=False, field_names=("x", "y", "z"))), dtype=np.float32) + pc = pc.reshape((480,640,3)) + + center_x, center_y = center + + # Define the radius of the circle + radius = 7 + + # Calculate the coordinates of the circle points within the radius + min_x = max(0, center_x - radius) + max_x = min(pc.shape[1] - 1, center_x + radius) + min_y = max(0, center_y - radius) + max_y = min(pc.shape[0] - 1, center_y + radius) + + circle_points = [] + for y in range(min_y, max_y + 1): + for x in range(min_x, max_x + 1): + distance = math.sqrt((center_x - x) ** 2 + (center_y - y) ** 2) + if distance <= radius: + point = pc[y, x] + if not np.isnan(point).any(): + circle_points.append(point) + + # Determine the centroid of the non-NaN points + if circle_points: + circle_points_pc = np.array([point for point in circle_points]) + center_point = np.mean(circle_points_pc, axis=0) + + # Get the pose of the center point of the bounding box + pose = PoseStamped() + pose.header.frame_id = pointcloud.header.frame_id + pose.header.stamp = pointcloud.header.stamp + pose.pose.position.x = center_point[0] + pose.pose.position.y = center_point[1] + pose.pose.position.z = center_point[2] + pose.pose.orientation.x = 0.0 + pose.pose.orientation.y = 0.0 + pose.pose.orientation.z = 0.0 + pose.pose.orientation.w = 1.0 + + try: + t = self.tf_listener.getLatestCommonTime("base_link", pose.header.frame_id) + pose.header.stamp = t + pose = 
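# --- Editorial sketch (not part of the diff) -------------------------------
# get_pointcloud_position above averages the non-NaN 3D points of the organized
# cloud that fall inside a pixel disc around the detected empty spot. The nested
# loops can be expressed as a single NumPy mask; this sketch assumes the same
# (480, 640, 3) organized cloud used by the code above.
import numpy as np

def mean_point_in_disc(pc, center_x, center_y, radius=7):
    """Mean of the valid cloud points within `radius` pixels of (center_x, center_y)."""
    h, w, _ = pc.shape
    ys, xs = np.mgrid[0:h, 0:w]
    mask = (xs - center_x) ** 2 + (ys - center_y) ** 2 <= radius ** 2
    pts = pc[mask]
    pts = pts[~np.isnan(pts).any(axis=1)]
    return pts.mean(axis=0) if len(pts) else None
# ----------------------------------------------------------------------------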
self.tf_listener.transformPose("base_link", pose) + # if pose doesnot has nan values then append pose to the list + if not math.isnan(pose.pose.position.x): + center_wrt_BL = pose.pose.position.x, pose.pose.position.y + return center_wrt_BL + except ( + tf.LookupException, + tf.ConnectivityException, + tf.ExtrapolationException, + ) as e: + rospy.logerr("Tf error: %s" % str(e)) + + return None + + def run(self, image, pointcloud): + """ + image: image data of current frame with Image data type + """ + + if image: + try: + cv_img = self.cvbridge.imgmsg_to_cv2(image, "bgr8") + + # Add image processing code here.. to get x and y + debug_img, xi, yi = self.get_img_point(cv_img) + + img_coord = (xi, yi) + xe, ye = self.get_pointcloud_position(pointcloud, img_coord) + + place_pose = PoseStamped() + place_pose.header.stamp = pointcloud.header.stamp + place_pose.header.frame_id = "base_link" + place_pose.pose.position.x = xe #0.55 + place_pose.pose.position.y = ye #0.00 + place_pose.pose.position.z = 0.35 + + orientation = tr.quaternion_from_euler(0, -0.35, 0) + place_pose.pose.orientation.x = orientation[0] + place_pose.pose.orientation.y = orientation[1] + place_pose.pose.orientation.z = orientation[2] + place_pose.pose.orientation.w = orientation[3] + + self.pub_pose.publish(place_pose) + + if self.debug: + # Draw bounding boxes and labels of detections + self.publish_debug_img(debug_img) + + except CvBridgeError as e: + rospy.logerr(e) + return + else: + rospy.logwarn("No image received") + + def publish_debug_img(self, debug_img): + debug_img = np.array(debug_img, dtype=np.uint8) + debug_img = self.cvbridge.cv2_to_imgmsg(debug_img, "bgr8") + self.pub_debug.publish(debug_img) + + +if __name__ == '__main__': + rospy.init_node("shelf_place_pose") + rospy.loginfo('Started shelf place node.') + place_pose = ShelfPlacePose(debug_mode=True) + rospy.spin() diff --git a/mir_planning/mir_actions/mir_rtt_pick_object/CMakeLists.txt b/mir_planning/mir_actions/mir_rtt_pick_object/CMakeLists.txt new file mode 100644 index 000000000..62a5f1f9b --- /dev/null +++ b/mir_planning/mir_actions/mir_rtt_pick_object/CMakeLists.txt @@ -0,0 +1,12 @@ +cmake_minimum_required(VERSION 2.8.3) +project(mir_rtt_pick_object) + +find_package(catkin REQUIRED + COMPONENTS + mas_perception_msgs +) + +catkin_package( + CATKIN_DEPENDS + mas_perception_msgs +) diff --git a/mir_planning/mir_actions/mir_rtt_pick_object/package.xml b/mir_planning/mir_actions/mir_rtt_pick_object/package.xml new file mode 100644 index 000000000..e0e5f18e5 --- /dev/null +++ b/mir_planning/mir_actions/mir_rtt_pick_object/package.xml @@ -0,0 +1,22 @@ + + + mir_rtt_pick_object + 1.0.0 + + Assumes that the location is already perceived and objects were found, then triggers + pregrasp planner, calls inverse kinematics and moves the arm towards the object, then + closes the gripper and moves the arm to idle position + + + GPLv3 + + Oscar Lima + Oscar Lima + + catkin + + mas_perception_msgs + + mas_perception_msgs + + diff --git a/mir_planning/mir_actions/mir_rtt_pick_object/ros/launch/rtt_pick_object.launch b/mir_planning/mir_actions/mir_rtt_pick_object/ros/launch/rtt_pick_object.launch new file mode 100644 index 000000000..fe054b52c --- /dev/null +++ b/mir_planning/mir_actions/mir_rtt_pick_object/ros/launch/rtt_pick_object.launch @@ -0,0 +1,8 @@ + + + + + + + diff --git a/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_client_test.py b/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_client_test.py new file mode 
100755 index 000000000..2c3d575f4 --- /dev/null +++ b/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_client_test.py @@ -0,0 +1,47 @@ +#! /usr/bin/env python + +import sys + +import rospy +from actionlib import SimpleActionClient +from actionlib_msgs.msg import GoalStatus +from diagnostic_msgs.msg import KeyValue +from mir_planning_msgs.msg import GenericExecuteAction, GenericExecuteGoal + +if __name__ == "__main__": + rospy.init_node("rtt_client_tester") + + client = SimpleActionClient("rtt_server", GenericExecuteAction) + client.wait_for_server() + + if len(sys.argv) == 3: + goal = GenericExecuteGoal() + obj = str(sys.argv[1]).upper() + location = str(sys.argv[2]).upper() + goal.parameters.append(KeyValue(key="object", value=obj)) + goal.parameters.append(KeyValue(key="location", value=location)) + rospy.loginfo("Sending following goal to pick object server") + rospy.loginfo(goal) + + client.send_goal(goal) + + timeout = 500.0 + finished_within_time = client.wait_for_result( + rospy.Duration.from_sec(int(timeout)) + ) + if not finished_within_time: + client.cancel_goal() + + state = client.get_state() + result = client.get_result() + if state == GoalStatus.SUCCEEDED: + rospy.loginfo("Action SUCCESS") + rospy.loginfo(client.get_result()) + elif state == GoalStatus.ABORTED: + rospy.logerr("Action FAILED") + else: + rospy.logwarn("State: " + str(state)) + rospy.loginfo(client.get_result()) + else: + rospy.logerr("Arguments were not received in the proper format !") + rospy.loginfo("usage : pick OBJECT_NAME LOCATION") diff --git a/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_server.py b/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_server.py new file mode 100755 index 000000000..edf2a0dce --- /dev/null +++ b/mir_planning/mir_actions/mir_rtt_pick_object/ros/scripts/rtt_pick_object_server.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +import sys + +import mcr_states.common.basic_states as gbs +import mir_states.common.manipulation_states as gms +import mir_states.common.basic_states as mir_gbs +import rospy +import smach +from mir_actions.utils import Utils +from mir_planning_msgs.msg import ( + GenericExecuteAction, + GenericExecuteFeedback, + GenericExecuteResult, +) +from smach_ros import ActionServerWrapper, IntrospectionServer +from std_msgs.msg import String, Float64 +from mas_perception_msgs.msg import TimeStampedPose +import tf.transformations as tr +from geometry_msgs.msg import PoseStamped + +# =============================================================================== + +class GetPredictions(smach.State): + def __init__(self, topic_name): + smach.State.__init__( + self, + outcomes=["succeeded", "timeout"], + input_keys=["goal", "current_rtt_pick_retry"], + output_keys=["feedback", "result", "pose", "time"], + ) + self.publisher = rospy.Publisher(topic_name, String, queue_size=10) + + def execute(self, userdata): + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="PredictObjectPose", text="predicting object pose" + ) + + obj = Utils.get_value_of(userdata.goal.parameters, "object") + msg_str = f'e_start_{obj}' + self.publisher.publish(String(data=msg_str)) + rospy.sleep(0.2) # let the topic to survive for some time\ + try: + predicted_msg = rospy.wait_for_message("/mir_perception/rtt/time_stamped_pose", TimeStampedPose, timeout=90) + userdata.time = predicted_msg.timestamps + userdata.pose = predicted_msg.pose + return "succeeded" + except Exception as e: 
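+            # Editor's note (not part of this patch): GetPredictions triggers the RTT perception
+            # pipeline by publishing "e_start_<object>" on the event topic passed to the state and
+            # then blocks (up to the 90 s timeout above) for a TimeStampedPose prediction; in the
+            # state machine below, the "timeout" outcome is routed to the RETRY_PICK state.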
+ rospy.logwarn("Exception occured while waiting for object TimeStampedPose") + return "timeout" + +class WaitForObject(smach.State): + def __init__(self): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["time", "time_taken"], + output_keys=["feedback", "result", "time_taken"], + ) + # self.publisher = rospy.Publisher("/mir_perception/rtt_grasp/event_in", String, queue_size=10) + self.subscriber = rospy.Subscriber("/mir_perception/rtt/time_stamped_pose", TimeStampedPose, self.callback) + self.time = None + + def callback(self, msg): + self.time = msg.timestamps + + def execute(self, userdata): + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="WaitForObject", text="waiting for object" + ) + + # TODO: 0.75 is empirical value. This value will depend on object , radius of rotation and might also depend on orientation for large object. Add a config file for this. + userdata.time_taken = rospy.Time.now().to_sec() + if rospy.Time.now().to_sec() > userdata.time - 0.75: + return "failed" + while rospy.Time.now().to_sec() < userdata.time - 0.75: # This is the time to close the gripper for picking. The higher the time the earlier the gripper will close + rospy.sleep(0.01) + return "succeeded" + + + +class SetupObjectPose(smach.State): + def __init__(self, state): + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["pose"], + output_keys=["feedback", "result", "move_arm_to", "move_arm_to_pick"], + ) + + self.pick_pose = rospy.Publisher( + "/mcr_perception/object_selector/output/object_pose", + PoseStamped, + queue_size=10, + ) + + self.state = state + + def execute(self, userdata): + # #iCartesian pose (x, y, z, r, p, y, frame_id) + r, p, y = tr.euler_from_quaternion([userdata.pose.pose.orientation.x, userdata.pose.pose.orientation.y, userdata.pose.pose.orientation.z, userdata.pose.pose.orientation.w]) + + # convert rpy into quaternion + quaternion = tr.quaternion_from_euler(r, p, y) + + move_arm_to = PoseStamped() + move_arm_to.header.frame_id = userdata.pose.header.frame_id + move_arm_to.pose.position.x = userdata.pose.pose.position.x + move_arm_to.pose.position.y = userdata.pose.pose.position.y + move_arm_to.pose.position.z = userdata.pose.pose.position.z + 0.05 + + move_arm_to.pose.orientation.x = quaternion[0] + move_arm_to.pose.orientation.y = quaternion[1] + move_arm_to.pose.orientation.z = quaternion[2] + move_arm_to.pose.orientation.w = quaternion[3] + + move_arm_to_pick = PoseStamped() + move_arm_to_pick.header.frame_id = userdata.pose.header.frame_id + move_arm_to_pick.pose.position.x = userdata.pose.pose.position.x + move_arm_to_pick.pose.position.y = userdata.pose.pose.position.y + move_arm_to_pick.pose.position.z = userdata.pose.pose.position.z + + move_arm_to_pick.pose.orientation.x = quaternion[0] + move_arm_to_pick.pose.orientation.y = quaternion[1] + move_arm_to_pick.pose.orientation.z = quaternion[2] + move_arm_to_pick.pose.orientation.w = quaternion[3] + + + if self.state == "pre_pick_pose": + rospy.logwarn(move_arm_to) + self.pick_pose.publish(move_arm_to) + elif self.state == "pick_pose": + rospy.logwarn(move_arm_to_pick) + self.pick_pose.publish(move_arm_to_pick) + + + # TODO: to check if position is not nan + userdata.result = GenericExecuteResult() + userdata.feedback = GenericExecuteFeedback( + current_state="SetupObjectPose", text="Setting pose for RTT" + ) + return "succeeded" + +# 
=============================================================================== + +class CheckRetries(smach.State): + def __init__(self, state=False): + smach.State.__init__( + self, + outcomes=["retry", "no_retry", "succeeded"], + input_keys=["current_rtt_pick_retry", "rtt_pick_retries"], + output_keys=["current_rtt_pick_retry"], + ) + self.publisher = rospy.Publisher("/mir_perception/rtt/event_in", String, queue_size=10) + self.state = state + + def execute(self, userdata): + if self.state: + msg_str = f'e_stop' + self.publisher.publish(String(data=msg_str)) + return "succeeded" + else: + if userdata.current_rtt_pick_retry < userdata.rtt_pick_retries: + userdata.current_rtt_pick_retry += 1 + msg_str = f'e_stop' + self.publisher.publish(String(data=msg_str)) + return "retry" + else: + userdata.current_rtt_pick_retry = 0 + msg_str = f'e_stop' + self.publisher.publish(String(data=msg_str)) + return "no_retry" + +# =============================================================================== + +def transition_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback + +def start_cb(*args, **kwargs): + userdata = args[0] + sm_state = args[1][0] + + feedback = GenericExecuteFeedback() + feedback.current_state = sm_state + userdata.feedback = feedback +# =============================================================================== + +def main(): + # Open the container + rospy.init_node("rtt_server") + # Construct state machine + sm = smach.StateMachine( + outcomes=["OVERALL_SUCCESS", "OVERALL_FAILED"], + input_keys=["goal"], + output_keys=["feedback", "result"], + ) + + # rtt retries + sm.userdata.rtt_pick_retries = 1 + sm.userdata.current_rtt_pick_retry = 0 + + with sm: + smach.StateMachine.add( + "SET_PREGRASP_PARAMS", + gbs.set_named_config("pregrasp_planner_no_sampling"), + transitions={ + "success": "GO_PRE_PERCEIVE", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "GO_PRE_PERCEIVE", + gms.move_arm("pre_place", use_moveit=False), + transitions={ + "succeeded": "SELECT_OBJECT", + "failed": "GO_PRE_PERCEIVE", + } + ) + + smach.StateMachine.add( + "SELECT_OBJECT", + GetPredictions("/mir_perception/rtt/event_in"), + transitions={"succeeded": "OPEN_GRIPPER", + "timeout": "RETRY_PICK"} + ) + # TODO: add timeout to this state + + smach.StateMachine.add( + "OPEN_GRIPPER", + gms.control_gripper(-1.75), + transitions={"succeeded": "SETUP_OBJECT_POSE_PICK", + "timeout": "SETUP_OBJECT_POSE_PICK"}, + ) + + # Set the pose to pre pose for picking + smach.StateMachine.add( + "SETUP_OBJECT_POSE_PICK", + SetupObjectPose("pick_pose"), + transitions={"succeeded": "CHECK_PICK_POSE_IK", + "failed": "SETUP_OBJECT_POSE_PICK"}, + ) + + smach.StateMachine.add( + "CHECK_PICK_POSE_IK", + gbs.send_and_wait_events_combined( + event_in_list=[ + ("/pregrasp_planner_node/event_in", "e_start") + ], + event_out_list=[ + ( + "/pregrasp_planner_node/event_out", + "e_success", + True, + ) + ], + timeout_duration=20, + ), + transitions={ + "success": "GO_TO_PICK_POSE", + "timeout": "CHECK_PICK_POSE_IK", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "GO_TO_PICK_POSE", + gbs.send_and_wait_events_combined( + event_in_list=[ + ("/waypoint_trajectory_generation/event_in", "e_start")], + event_out_list=[ + ( + "/waypoint_trajectory_generation/event_out", + "e_success", + True, + )], + timeout_duration=20, + ), + transitions={ + "success": 
"WAIT_FOR_OBJECT", + "timeout": "OVERALL_FAILED", + "failure": "OVERALL_FAILED", + }, + ) + + smach.StateMachine.add( + "WAIT_FOR_OBJECT", + WaitForObject(), + transitions={ + "succeeded": "CLOSE_GRIPPER", + "failed": "RETRY_PICK", + }, + ) + + smach.StateMachine.add( + "CLOSE_GRIPPER", + gms.control_gripper("close", timeout=0.5), + transitions={"succeeded": "MOVE_ARM_TO_STAGE_INTERMEDIATE", + "timeout": "MOVE_ARM_TO_STAGE_INTERMEDIATE"}, + ) + + # move arm to stage_intemediate position + smach.StateMachine.add( + "MOVE_ARM_TO_STAGE_INTERMEDIATE", + gms.move_arm("barrier_tape", use_moveit=False), + transitions={ + "succeeded": "VERIFY_OBJECT_GRASPED", + "failed": "MOVE_ARM_TO_STAGE_INTERMEDIATE", + }, + ) + + smach.StateMachine.add( + "VERIFY_OBJECT_GRASPED", + gms.verify_object_grasped(2), + transitions={ + "succeeded": "OBJECT_GRASPED", + "timeout": "OBJECT_GRASPED", + "failed": "RETRY_PICK", + }, + ) + + # retry if failed + smach.StateMachine.add( + "OBJECT_GRASPED", + CheckRetries(state=True), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "retry": "OVERALL_SUCCESS", + "no_retry": "OVERALL_SUCCESS", + }, + ) + + # retry if failed + smach.StateMachine.add( + "RETRY_PICK", + CheckRetries(state=False), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "retry": "SELECT_OBJECT", + "no_retry": "OVERALL_FAILED", + }, + ) + + sm.userdata.current_rtt_pick_retry = 0 + sm.register_transition_cb(transition_cb) + sm.register_start_cb(start_cb) + + + # # smach viewer + # if rospy.get_param("~viewer_enabled", True): + # sis = IntrospectionServer( + # "rtt_pick_object_smach_viewer", sm, "/RTT_PICK_OBJECT_SMACH_VIEWER" + # ) + # sis.start() + + # Construct action server wrapper + asw = ActionServerWrapper( + server_name="rtt_server", + action_spec=GenericExecuteAction, + wrapped_container=sm, + succeeded_outcomes=["OVERALL_SUCCESS"], + aborted_outcomes=["OVERALL_FAILED"], + preempted_outcomes=["PREEMPTED"], + goal_key="goal", + feedback_key="feedback", + result_key="result", + ) + # Run the server in a background thread + asw.run_server() + rospy.spin() + + +if __name__ == "__main__": + main() diff --git a/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server.py b/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server.py index 441c62c06..493ee92e1 100755 --- a/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server.py +++ b/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server.py @@ -16,7 +16,7 @@ # =============================================================================== class SetupMoveArm(smach.State): - def __init__(self, arm_target, is_heavy=False): + def __init__(self, arm_target): smach.State.__init__( self, outcomes=["succeeded", "failed"], @@ -24,20 +24,17 @@ def __init__(self, arm_target, is_heavy=False): output_keys=["feedback", "result", "move_arm_to"], ) self.arm_target = arm_target - self.is_heavy = is_heavy def execute(self, userdata): platform = Utils.get_value_of(userdata.goal.parameters, "platform") if platform is None: rospy.logwarn('Missing parameter "platform". 
Using default.') - platform = "PLATFORM_MIDDLE" + platform = "PLATFORM_LEFT" platform = platform.lower() if self.arm_target == "pre": platform += "_pre" - if self.is_heavy: - platform += "_heavy" userdata.move_arm_to = platform # Add empty result msg (because if none of the state do it, action server gives error) @@ -76,14 +73,12 @@ def main(): output_keys=["feedback", "result"], ) - # read heavy object list - sm.userdata.heavy_objects = rospy.get_param("~heavy_objects", ["m20_100"]) with sm: # add states to the container smach.StateMachine.add( "MOVE_ARM_TO_STAGE_INTERMEDIATE", - gms.move_arm("stage_intermediate"), + gms.move_arm("pre_place", use_moveit=False), transitions={ "succeeded": "SETUP_MOVE_ARM_PRE_STAGE", "failed": "MOVE_ARM_TO_STAGE_INTERMEDIATE", @@ -101,7 +96,7 @@ def main(): smach.StateMachine.add( "MOVE_ARM_PRE_STAGE", - gms.move_arm(), + gms.move_arm(use_moveit=False), transitions={ "succeeded": "SETUP_MOVE_ARM_STAGE", "failed": "MOVE_ARM_PRE_STAGE", @@ -119,7 +114,7 @@ def main(): smach.StateMachine.add( "MOVE_ARM_STAGE", - gms.move_arm(), + gms.move_arm(use_moveit=False), transitions={ "succeeded": "OPEN_GRIPPER", "failed": "MOVE_ARM_STAGE" @@ -129,7 +124,34 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER", gms.control_gripper("open_narrow"), - transitions={"succeeded": "OVERALL_SUCCESS"}, + transitions={"succeeded": "SETUP_MOVE_ARM_RETRACT", + "timeout": "SETUP_MOVE_ARM_RETRACT"}, + ) + + smach.StateMachine.add( + "SETUP_MOVE_ARM_RETRACT", + SetupMoveArm("pre"), + transitions={ + "succeeded": "MOVE_ARM_RETRACT", + "failed": "SETUP_MOVE_ARM_RETRACT", + }, + ) + smach.StateMachine.add( + "MOVE_ARM_RETRACT", + gms.move_arm(use_moveit=False), + transitions={ + "succeeded": "MOVE_ARM_TO_STAGE_INTERMEDIATE_RETRACT", + "failed": "MOVE_ARM_RETRACT" + }, + ) + + smach.StateMachine.add( + "MOVE_ARM_TO_STAGE_INTERMEDIATE_RETRACT", + gms.move_arm("pre_place", use_moveit=False), + transitions={ + "succeeded": "OVERALL_SUCCESS", + "failed": "MOVE_ARM_TO_STAGE_INTERMEDIATE_RETRACT", + }, ) sm.register_transition_cb(transition_cb) diff --git a/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server_yb4.py b/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server_yb4.py index a7a763faf..f68002d90 100644 --- a/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server_yb4.py +++ b/mir_planning/mir_actions/mir_stage_object/ros/scripts/stage_object_server_yb4.py @@ -206,7 +206,8 @@ def main(): smach.StateMachine.add( "OPEN_GRIPPER", gms.control_gripper("open_narrow"), - transitions={"succeeded": "CHECK_IF_OBJECT_HEAVY_AGAIN"}, + transitions={"succeeded": "CHECK_IF_OBJECT_HEAVY_AGAIN", + "timeout": "CHECK_IF_OBJECT_HEAVY_AGAIN"}, ) smach.StateMachine.add( diff --git a/mir_planning/mir_actions/mir_unstage_object/ros/scripts/unstage_object_server.py b/mir_planning/mir_actions/mir_unstage_object/ros/scripts/unstage_object_server.py index 87e53a2d7..1e7cc233b 100755 --- a/mir_planning/mir_actions/mir_unstage_object/ros/scripts/unstage_object_server.py +++ b/mir_planning/mir_actions/mir_unstage_object/ros/scripts/unstage_object_server.py @@ -15,7 +15,7 @@ # =============================================================================== class SetupMoveArm(smach.State): - def __init__(self, arm_target, is_heavy=False): + def __init__(self, arm_target): smach.State.__init__( self, outcomes=["succeeded", "failed"], @@ -23,20 +23,19 @@ def __init__(self, arm_target, is_heavy=False): output_keys=["feedback", "result", "move_arm_to"], ) 
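        # Editor's note (not part of this patch): SetupMoveArm only resolves the name of a
        # pre-taught arm configuration from the goal's "platform" parameter; the actual motion
        # is performed by the gms.move_arm() states that read userdata.move_arm_to. The naming
        # convention assumed here is "<platform>" for the stage pose and "<platform>_pre" for
        # the approach pose, e.g. "platform_left" and "platform_left_pre".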
self.arm_target = arm_target - self.is_heavy = is_heavy + def execute(self, userdata): platform = Utils.get_value_of(userdata.goal.parameters, "platform") + obj = Utils.get_value_of(userdata.goal.parameters, "object") if platform is None: rospy.logwarn('Missing parameter "platform". Using default.') - platform = "PLATFORM_MIDDLE" + platform = "PLATFORM_LEFT" platform = platform.lower() if self.arm_target == "pre": platform += "_pre" - if self.is_heavy: - platform += "_heavy" userdata.move_arm_to = platform # Add empty result msg (because if none of the state do it, action server gives error) @@ -45,29 +44,23 @@ def execute(self, userdata): current_state="SetupMoveArm", text="Moving arm to " + platform ) return "succeeded" - - -# =============================================================================== - -class IsObjectHeavy(smach.State): + +class CheckRetries(smach.State): def __init__(self): smach.State.__init__( self, - outcomes=["heavy", "light"], - input_keys=["goal", "heavy_objects"], - output_keys=[], + outcomes=["retry", "no_retry"], + input_keys=["current_retry", "max_retries"], + output_keys=["current_retry"], ) def execute(self, userdata): - obj = Utils.get_value_of(userdata.goal.parameters, "object") - if obj is None: - rospy.logwarn('Missing parameter "object". Using default.') - return "light" - for heavy_object in userdata.heavy_objects: - if heavy_object.upper() in obj.upper(): - return "heavy" - return "light" - + if userdata.current_retry < userdata.max_retries: + userdata.current_retry += 1 + return "retry" + else: + userdata.current_retry = 0 + return "no_retry" # =============================================================================== @@ -98,14 +91,34 @@ def main(): output_keys=["feedback", "result"], ) - # read heavy object list - sm.userdata.heavy_objects = rospy.get_param("~heavy_objects", ["m20_100"]) + # retries + sm.userdata.max_retries = 1 + sm.userdata.current_retry = 0 with sm: smach.StateMachine.add( "OPEN_GRIPPER", gms.control_gripper("open_narrow"), - transitions={"succeeded": "SETUP_MOVE_ARM_STAGE"}, + transitions={"succeeded": "SETUP_MOVE_ARM_PRE_STAGE", + "timeout": "SETUP_MOVE_ARM_PRE_STAGE"}, + ) + + smach.StateMachine.add( + "SETUP_MOVE_ARM_PRE_STAGE", + SetupMoveArm("pre"), + transitions={ + "succeeded": "SETUP_ARM_PRE_STAGE", + "failed": "SETUP_MOVE_ARM_PRE_STAGE", + }, + ) + + smach.StateMachine.add( + "SETUP_ARM_PRE_STAGE", + gms.move_arm(use_moveit=False), + transitions={ + "succeeded": "SETUP_MOVE_ARM_STAGE", + "failed": "SETUP_ARM_PRE_STAGE" + }, ) # add states to the container @@ -120,7 +133,7 @@ def main(): smach.StateMachine.add( "MOVE_ARM_STAGE", - gms.move_arm(), + gms.move_arm(use_moveit=False), transitions={ "succeeded": "CLOSE_GRIPPER", "failed": "MOVE_ARM_STAGE" @@ -130,15 +143,27 @@ def main(): smach.StateMachine.add( "CLOSE_GRIPPER", gms.control_gripper("close"), - transitions={"succeeded": "VERIFY_OBJECT_GRASPED"}, + transitions={"succeeded": "VERIFY_OBJECT_GRASPED", + "timeout": "VERIFY_OBJECT_GRASPED"}, ) smach.StateMachine.add( "VERIFY_OBJECT_GRASPED", - gms.verify_object_grasped(5), + gms.verify_object_grasped(3), transitions={ "succeeded": "SETUP_MOVE_ARM_PRE_STAGE_AGAIN", - "failed": "OVERALL_FAILED", + "timeout": "SETUP_MOVE_ARM_PRE_STAGE_AGAIN", + "failed": "RETRY", + }, + ) + + # TODO: check if retry is working or not + smach.StateMachine.add( + "RETRY", + CheckRetries(), + transitions={ + "retry": "OPEN_GRIPPER", + "no_retry": "SETUP_MOVE_ARM_PRE_STAGE_AGAIN", }, ) @@ -153,24 +178,24 @@ def main(): 
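# Editor's sketch (not part of this patch): the RETRY wiring above is a generic smach
# retry pattern - a counter state reads and updates the machine's userdata and maps the
# remaining budget onto two outcomes. Minimal, self-contained illustration; "TryGrasp" is
# a hypothetical placeholder for the states being retried, and CheckRetries is the state
# defined earlier in this file.
import smach

class TryGrasp(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=["succeeded", "failed"])

    def execute(self, userdata):
        return "failed"  # always fail, just to exercise the retry path

def retry_pattern_example():
    sm = smach.StateMachine(outcomes=["DONE", "GIVE_UP"])
    sm.userdata.max_retries = 1
    sm.userdata.current_retry = 0
    with sm:
        smach.StateMachine.add(
            "TRY_GRASP", TryGrasp(),
            transitions={"succeeded": "DONE", "failed": "RETRY"},
        )
        smach.StateMachine.add(
            "RETRY", CheckRetries(),
            transitions={"retry": "TRY_GRASP", "no_retry": "GIVE_UP"},
        )
    return sm.execute()  # -> "GIVE_UP" after one retry with max_retries = 1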
smach.StateMachine.add( "MOVE_ARM_PRE_STAGE_AGAIN", - gms.move_arm(blocking=True), + gms.move_arm(blocking=True, use_moveit=False), transitions={ - "succeeded": "MOVE_ARM_TO_BARRIER_TAPE", + "succeeded": "MOVE_ARM_TO_PLATFORM_MIDDLE_PRE", "failed": "MOVE_ARM_PRE_STAGE_AGAIN", }, ) """ Barrier tape configuration is modified so while unstaging - the arm should go to barrier tape config in yb2-robot-configuration + the arm should go to pre place tape config in yb2-robot-configuration inorder to avoid any occlution in the camera """ smach.StateMachine.add( - "MOVE_ARM_TO_BARRIER_TAPE", - gms.move_arm("barrier_tape"), + "MOVE_ARM_TO_PLATFORM_MIDDLE_PRE", + gms.move_arm("platform_middle_pre", use_moveit=False), transitions={ "succeeded": "OVERALL_SUCCESS", - "failed": "MOVE_ARM_TO_BARRIER_TAPE", + "failed": "MOVE_ARM_TO_PLATFORM_MIDDLE_PRE", }, ) diff --git a/mir_planning/mir_atwork_commander_client/ros/launch/atwork_commander_client.launch b/mir_planning/mir_atwork_commander_client/ros/launch/atwork_commander_client.launch index 7e9db7eaa..4c7d53818 100644 --- a/mir_planning/mir_atwork_commander_client/ros/launch/atwork_commander_client.launch +++ b/mir_planning/mir_atwork_commander_client/ros/launch/atwork_commander_client.launch @@ -3,7 +3,7 @@ - + diff --git a/mir_planning/mir_atwork_commander_client/ros/src/mir_atwork_commander_client/atwork_commander_client.py b/mir_planning/mir_atwork_commander_client/ros/src/mir_atwork_commander_client/atwork_commander_client.py index 7354ae4b8..b00009aae 100644 --- a/mir_planning/mir_atwork_commander_client/ros/src/mir_atwork_commander_client/atwork_commander_client.py +++ b/mir_planning/mir_atwork_commander_client/ros/src/mir_atwork_commander_client/atwork_commander_client.py @@ -4,7 +4,7 @@ import copy import rospy - +import pdb from atwork_commander_msgs.msg import Task, Object, RobotState from mir_knowledge_ros.problem_uploader import ProblemUploader from rosplan_knowledge_msgs.srv import KnowledgeUpdateServiceRequest as Req @@ -38,6 +38,8 @@ def __init__(self): self._cavity_start_code = getattr(Object, "CAVITY_START") self._cavity_end_code = getattr(Object, "CAVITY_END") + self._large_objects = ["allenkey"] + def _timer_callback(self, event): self._robot_state.sender.header.stamp = rospy.Time.now() self._robot_state_pub.publish(self._robot_state) @@ -49,6 +51,9 @@ def _task_cb(self, msg): self._processed_task_ids.append(task.id) + task = self._change_workstation_names(task) + + start_obj_dicts = self._get_obj_dicts_from_workstations(task.arena_start_state) target_obj_dicts = self._get_obj_dicts_from_workstations(task.arena_target_state) @@ -71,7 +76,7 @@ def _task_cb(self, msg): # populate instances objects = [obj_dict["object_full_name"] for obj_dict in obj_dicts] - locations = [workstation.workstation_name for workstation in task.arena_start_state] + locations = [workstation.name for workstation in task.arena_start_state] instances = {"object": objects, "location": locations} # print(instances) @@ -85,6 +90,18 @@ def _task_cb(self, msg): self._print_task(obj_dicts) + def _change_workstation_names(self, taskmsg): + """ + REFBOX notation for rotating table keeps varying between RT and TT. 
+ Parse the task msg and change it to TT evertime + """ + for start_state in taskmsg.arena_start_state: + start_state.name = start_state.name.replace("RT","TT") + for goal_state in taskmsg.arena_target_state: + goal_state.name = goal_state.name.replace("RT","TT") + + return taskmsg + def _get_entire_knowledge_from_obj_dicts(self, start_obj_dicts, target_obj_dicts): """TODO: @@ -215,6 +232,11 @@ def _get_facts_from_obj_dicts(self, obj_dicts): facts.append(self._get_fact_from_attr_and_values( "insertable", [obj_dict["object_full_name"]])) + + if obj_dict["object"] in self._large_objects: + facts.append(self._get_fact_from_attr_and_values( + "is_large", + [obj_dict["object_full_name"]])) facts.append(self._get_fact_from_attr_and_values( "on", @@ -277,7 +299,7 @@ def _get_obj_dicts_from_workstations(self, workstations): rospy.logwarn("Could not find " + str(obj.object) + " in object codes") continue - if self._cavity_start_code <= obj.object < self._cavity_end_code: + if self._cavity_start_code <= obj.object <= self._cavity_end_code: object_name = "pp01_cavity" else: object_name = self._obj_code_to_name[obj.object] @@ -286,14 +308,14 @@ def _get_obj_dicts_from_workstations(self, workstations): rospy.logwarn("Could not find " + str(obj.target) + " in object codes") continue - if self._cavity_start_code <= obj.target < self._cavity_end_code: + if self._cavity_start_code <= obj.target <= self._cavity_end_code: target_name = "pp01_cavity" else: target_name = self._obj_code_to_name[obj.target] obj_dict = { "object": object_name, - "location": workstation.workstation_name, + "location": workstation.name, "target": target_name, "decoy": obj.decoy } @@ -365,10 +387,18 @@ def _ignore_knowledge(self, obj_dicts): @staticmethod def get_obj_code_to_name_dict(): obj_code_to_name = {} + object_range_list = [] object_class_attributes = dir(Object) for attr in object_class_attributes: - if attr.isupper() and "START" not in attr and "END" not in attr: - obj_code_to_name[getattr(Object, attr)] = attr.lower() + if "START" in attr or "END" in attr: + object_range_list.append(getattr(Object, attr)) + for attr in object_class_attributes: + if "START" not in attr and "END" not in attr: + try: + if getattr(Object, attr) >= min(object_range_list) and getattr(Object, attr) <= max(object_range_list) or attr == "EMPTY": + obj_code_to_name[getattr(Object, attr)] = attr.lower() + except Exception as e: + pass return obj_code_to_name @staticmethod diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/all_without_rtt.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/all_without_rtt.pddl new file mode 100644 index 000000000..abda24026 --- /dev/null +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/all_without_rtt.pddl @@ -0,0 +1,79 @@ +;This PDDL problem definition was made automatically from a KB snapshot +(define (problem general_domain_task) +(:domain general_domain) + +(:objects + WS01 WS02 WS03 WS04 WS05 WS06 SH01 TT01 PP01 START EXIT - location + ALLENKEY AXIS2 BEARING2 DRILL F20_20_B F20_20_G HOUSING M20 M20_100 M30 MOTOR2 S40_40_B S40_40_G SCREWDRIVER SPACER WRENCH CONTAINER_BOX_RED CONTAINER_BOX_BLUE - object + YOUBOT-BRSU - robot + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - robot_platform +) + +(:init + ;Cost information starts + (= (total-cost) 0) + ;Cost information ends + + (at YOUBOT-BRSU START) + (gripper_is_free YOUBOT-BRSU) + (on ALLENKEY WS01) + (on HOUSING WS01) + (on S40_40_G WS01) + + (on AXIS2 WS02) + (on M20 WS02) + (on SCREWDRIVER WS02) + + (on BEARING2 WS03) + (on 
M20_100 WS03) + (on SPACER WS03) + + (on DRILL WS04) + (on M30 WS04) + (on WRENCH WS04) + + (on F20_20_B WS05) + (on F20_20_G WS05) + + (on MOTOR2 SH01) + (on S40_40_B SH01) + + (on CONTAINER_BOX_RED WS06) + (on CONTAINER_BOX_BLUE WS06) + (container CONTAINER_BOX_RED) + (heavy CONTAINER_BOX_RED) + (container CONTAINER_BOX_BLUE) + (heavy CONTAINER_BOX_BLUE) + (is_large ALLENKEY) + (is_large DRILL) + (is_big_enough PLATFORM_RIGHT) +) + +(:goal (and + (in ALLENKEY CONTAINER_BOX_BLUE) + (in AXIS2 CONTAINER_BOX_RED) + + (on HOUSING WS05) + (on MOTOR2 WS02) + (on SCREWDRIVER WS04) + (on BEARING2 WS01) + (on SPACER WS01) + (on DRILL WS03) + (on WRENCH WS03) + + (on F20_20_G SH01) + (on S40_40_G SH01) + + (in F20_20_B PP01_CAVITY-00) + (in S40_40_B PP01_CAVITY-00) + (in M30 PP01_CAVITY-00) + (in M20_100 PP01_CAVITY-00) + (in M20 PP01_CAVITY-00) + ) +) + +(:metric minimize (total-cost)) + +) + + diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/domain.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/domain.pddl index 709629e6c..ef602c29e 100644 --- a/mir_planning/mir_knowledge/common/pddl/general_domain/domain.pddl +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/domain.pddl @@ -55,6 +55,12 @@ ; draw ?d is located at location ?l (located_at ?d - drawer ?l - location) + + ; object ?o is large + (is_large ?o - object) + + ; is robot platform ?rp big enough + (is_big_enough ?rp - robot_platform) ) (:functions @@ -120,6 +126,7 @@ :effect (and (on ?o ?l) (not (holding ?r ?o)) (gripper_is_free ?r) + (not (perceived ?l)) (increase (total-cost) 2) ) ) @@ -127,11 +134,29 @@ ; stage an object ?o in a robot platform ?rp which is not occupied with a gripper ?g ; which is holding the object ?o - (:action stage + (:action stage_general + :parameters (?r - robot ?rp - robot_platform ?o - object) + :precondition (and (holding ?r ?o) + (not (occupied ?rp)) + (not (gripper_is_free ?r)) + (not (is_large ?o)) + ) + :effect (and (not (holding ?r ?o)) + (gripper_is_free ?r) + (stored ?o ?rp) + (occupied ?rp) + (increase (total-cost) 1) + ) + ) + + + (:action stage_large :parameters (?r - robot ?rp - robot_platform ?o - object) :precondition (and (holding ?r ?o) (not (occupied ?rp)) (not (gripper_is_free ?r)) + (is_big_enough ?rp) + (is_large ?o) ) :effect (and (not (holding ?r ?o)) (gripper_is_free ?r) @@ -168,7 +193,6 @@ (gripper_is_free ?r) (stored ?peg ?rp) (not (container ?peg)) ;a container cannot be inserted into a container - (perceived ?l) ) :effect (and (not (holding ?r ?peg)) (gripper_is_free ?r) @@ -176,6 +200,7 @@ (on ?peg ?l) (not (stored ?peg ?rp)) (not (occupied ?rp)) + (perceived ?l) (heavy ?hole) (heavy ?peg) ;it doesn't become heavy but it cannot be picked again (increase (total-cost) 5) diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/finals.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/finals.pddl new file mode 100644 index 000000000..086106f79 --- /dev/null +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/finals.pddl @@ -0,0 +1,73 @@ +;This PDDL problem definition was made automatically from a KB snapshot +(define (problem general_domain_task) + (:domain general_domain) + + (:objects + WS01 WS02 WS03 WS04 WS05 WS06 SH01 TT01 PP01 START EXIT - location + ALLENKEY AXIS2 BEARING2 DRILL F20_20_B F20_20_G HOUSING M20 M20_100 M30 MOTOR2 S40_40_B S40_40_G SCREWDRIVER SPACER WRENCH CONTAINER_BOX_RED CONTAINER_BOX_BLUE PP01_CAVITY-00 - object + YOUBOT-BRSU - robot + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - 
robot_platform + ) + + (:init + ;Cost information starts + (= (total-cost) 0) + ;Cost information ends + + (at YOUBOT-BRSU PP01) + (gripper_is_free YOUBOT-BRSU) + + (on ALLENKEY WS01) + + (on M20 WS02) + (on SCREWDRIVER WS02) + + (on M20_100 WS03) + + (on WRENCH WS04) + + (on BEARING2 TT01) + (on S40_40_G TT01) + + (on F20_20_G WS05) + + (on MOTOR2 SH01) + (on S40_40_B SH01) + + (on CONTAINER_BOX_RED WS06) + (on CONTAINER_BOX_BLUE WS06) + (container CONTAINER_BOX_RED) + (heavy CONTAINER_BOX_RED) + (container CONTAINER_BOX_BLUE) + (heavy CONTAINER_BOX_BLUE) + (container PP01_CAVITY-00) + (heavy PP01_CAVITY-00) + (on PP01_CAVITY-00 PP01) + (is_large ALLENKEY) + (is_large DRILL) + (is_big_enough PLATFORM_RIGHT) + ) + + (:goal + (and + (in ALLENKEY CONTAINER_BOX_BLUE) + (in M20_100 CONTAINER_BOX_BLUE) + (in SCREWDRIVER CONTAINER_BOX_RED) + (in WRENCH CONTAINER_BOX_RED) + + (on MOTOR2 WS04) + (on S40_40_B WS03) + + (on BEARING2 SH01) + (on S40_40_G SH01) + + (in F20_20_G PP01_CAVITY-00) + (in M20 PP01_CAVITY-00) + ) + ) + + (:metric minimize + (total-cost) + ) + +) diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/finals2.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/finals2.pddl new file mode 100644 index 000000000..81f77048a --- /dev/null +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/finals2.pddl @@ -0,0 +1,78 @@ +;This PDDL problem definition was made automatically from a KB snapshot +(define (problem general_domain_task) + (:domain general_domain) + + (:objects + WS01 WS02 WS03 WS04 WS05 WS06 SH01 TT01 PP01 START EXIT - location + ALLENKEY AXIS2 BEARING2 DRILL F20_20_B F20_20_G HOUSING M20 M20_100 M30 MOTOR2 S40_40_B S40_40_G SCREWDRIVER SPACER WRENCH CONTAINER_BOX_RED CONTAINER_BOX_BLUE PP01_CAVITY-00 - object + YOUBOT-BRSU - robot + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - robot_platform + ) + + (:init + ;Cost information starts + (= (total-cost) 0) + ;Cost information ends + + (at YOUBOT-BRSU PP01) + (gripper_is_free YOUBOT-BRSU) + + (on ALLENKEY WS01) + + (on M20 WS02) + (on SCREWDRIVER WS02) + + (on M20_100 WS03) + + (on WRENCH WS03) + + + (on BEARING2 TT01) + (on S40_40_G TT01) + + (on HOUSING WS04) + + (on F20_20_G WS05) + + (on MOTOR2 SH01) + (on S40_40_B SH01) + + (on CONTAINER_BOX_RED WS06) + (on CONTAINER_BOX_BLUE WS06) + (container CONTAINER_BOX_RED) + (heavy CONTAINER_BOX_RED) + (container CONTAINER_BOX_BLUE) + (heavy CONTAINER_BOX_BLUE) + (container PP01_CAVITY-00) + (heavy PP01_CAVITY-00) + (on PP01_CAVITY-00 PP01) + (is_large ALLENKEY) + (is_large DRILL) + (is_big_enough PLATFORM_RIGHT) + ) + + (:goal + (and + (in ALLENKEY CONTAINER_BOX_BLUE) + (in M20_100 CONTAINER_BOX_BLUE) + (in SCREWDRIVER CONTAINER_BOX_RED) + (in WRENCH CONTAINER_BOX_RED) + + (on MOTOR2 WS04) + (on S40_40_B WS03) + + (on BEARING2 SH01) + (on S40_40_G SH01) + + (on HOUSING WS01) + + (in F20_20_G PP01_CAVITY-00) + (in M20 PP01_CAVITY-00) + ) + ) + + (:metric minimize + (total-cost) + ) + +) diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/local_2022_problem_complete.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/local_2022_problem_complete.pddl index 672b85e48..79f280162 100644 --- a/mir_planning/mir_knowledge/common/pddl/general_domain/local_2022_problem_complete.pddl +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/local_2022_problem_complete.pddl @@ -3,7 +3,7 @@ (:domain general_domain) (:objects - WS00 WS03 WS05 WS01 START - location + WS03 WS05 WS01 START EXIT - location R20 F20_20_B INSULATION_TAPE 
BEARING_BOX SCREW_DRIVER MOTOR S40_40_B AXIS S40_40_G BRACKET TENNIS_BALL PRINGLES SPONGE TOWEL SPOON DISHWASHER_SOAP BROWN_BOX CUP EYE_GLASSES TOOTHBRUSH - object YOUBOT-BRSU - robot PLATFORM_MIDDLE - robot_platform @@ -14,10 +14,10 @@ (= (total-cost) 0) ;Cost information ends - (at YOUBOT-BRSU WS00) + (at YOUBOT-BRSU START) (gripper_is_free YOUBOT-BRSU) - (on F20_20_B WS00) - (on AXIS WS00) + (on F20_20_B WS03) + (on AXIS WS03) ) (:goal (and diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/mini_bmt_demo.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/mini_bmt_demo.pddl new file mode 100644 index 000000000..025d0a16b --- /dev/null +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/mini_bmt_demo.pddl @@ -0,0 +1,53 @@ +;This PDDL problem definition was made automatically from a KB snapshot +(define (problem general_domain_task) + (:domain general_domain) + + (:objects + WS01 WS02 WS03 WS04 WS05 WS06 SH01 TT01 PP01 START EXIT - location + ALLENKEY-00 AXIS2-00 BEARING2-00 DRILL-00 F20_20_B-00 F20_20_G-00 F20_20_G-01 HOUSING-00 M20-00 M20_100-00 M30-00 M30-01 MOTOR2-00 S40_40_B-00 S40_40_G-00 SCREWDRIVER-00 SPACER-00 WRENCH-00 CONTAINER_RED-00 CONTAINER_BLUE-00 PP01_CAVITY-00 - object + YOUBOT-BRSU - robot + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - robot_platform + ) + + (:init + ;Cost information starts + (= (total-cost) 0) + ;Cost information ends + + (at YOUBOT-BRSU START) + (gripper_is_free YOUBOT-BRSU) + (on MOTOR2-00 SH01) + + (on S40_40_G-00 WS01) + (on M30-00 WS01) + + (on BEARING2-00 WS02) + + (on F20_20_B-00 WS03) + + (on S40_40_B-00 TT01) + + (container PP01_CAVITY-00) + (heavy PP01_CAVITY-00) + (on PP01_CAVITY-00 PP01) + + (is_large ALLENKEY-00) + (is_big_enough PLATFORM_RIGHT) + ) + + (:goal + (and + (in S40_40_G-00 PP01_CAVITY-00) + (on MOTOR2-00 WS01) + (on M30-00 WS02) + (on BEARING2-00 WS03) + (on F20_20_B-00 SH01) + (on S40_40_B-00 WS04) + ) + ) + + (:metric minimize + (total-cost) + ) + +) diff --git a/mir_planning/mir_knowledge/common/pddl/general_domain/minimum_required_facts.pddl b/mir_planning/mir_knowledge/common/pddl/general_domain/minimum_required_facts.pddl index d2c3f799d..94c43794a 100644 --- a/mir_planning/mir_knowledge/common/pddl/general_domain/minimum_required_facts.pddl +++ b/mir_planning/mir_knowledge/common/pddl/general_domain/minimum_required_facts.pddl @@ -6,7 +6,7 @@ YOUBOT-BRSU - robot ; robot available platforms, to store objects inside the robot - PLATFORM_MIDDLE - robot_platform + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - robot_platform ; locations START END - location @@ -21,6 +21,7 @@ ; status of the gripper at the beginning (gripper_is_free YOUBOT-BRSU) + (is_big_enough PLATFORM_RIGHT) ;(on o1 s1) ;(on o2 s2) ;(on o3 s3) diff --git a/mir_planning/mir_pddl_problem_generator/ros/config/points.yaml b/mir_planning/mir_pddl_problem_generator/ros/config/points.yaml index d89587327..dfbfe8add 100644 --- a/mir_planning/mir_pddl_problem_generator/ros/config/points.yaml +++ b/mir_planning/mir_pddl_problem_generator/ros/config/points.yaml @@ -2,31 +2,34 @@ # ===================== workstations ========================================= # ============================================================================== +pl: 1000 # highest priority for objects already staged ws: 100 sh: 50 co: 20 # container_box_blue / container_box_red pp: 10 -cb: 0 +tt: 1 # turntable - for RTT +cb: 0 # converyor belt. 
not used anymore # ============================================================================== # ====================== objects ============================================= # ============================================================================== -# long objects +# objects - easy to grasp f20_20_b: 20 f20_20_g: 20 s40_40_b: 20 s40_40_g: 20 -m20_100: 20 -axis: 20 - -# medium objects -r20: 20 +bearing2: 20 +spacer: 20 +m20: 20 m30: 20 -motor: 20 -bearing_box: 20 +axis2: 20 +motor2: 20 +housing: 20 +m20_100: 20 +drill: 20 -# small objects -distance_tube: 10 -bearing: 10 -m20: 10 \ No newline at end of file +# long and heavy objects +allenkey: 10 +screwdriver: 10 +wrench: 10 diff --git a/mir_planning/mir_pddl_problem_generator/ros/src/pddl_problem_generator.cpp b/mir_planning/mir_pddl_problem_generator/ros/src/pddl_problem_generator.cpp index 01d38d637..c10b6164a 100644 --- a/mir_planning/mir_pddl_problem_generator/ros/src/pddl_problem_generator.cpp +++ b/mir_planning/mir_pddl_problem_generator/ros/src/pddl_problem_generator.cpp @@ -355,7 +355,9 @@ std::string PDDLProblemGenerator::getSourceLocation(const rosplan_knowledge_msgs nh_.serviceClient(state_proposition_service_); rosplan_knowledge_msgs::GetAttributeService attr_srv; - attr_srv.request.predicate_name = ki.attribute_name; + // source location can only be "on" + // i.e. we want to find the source of the object that is in the selected goal (ki) + attr_srv.request.predicate_name = "on"; if (!getPropsClient.call(attr_srv)) { ROS_ERROR("[mir_pddl_problem_generator] Failed to call service %s: %s", state_proposition_service_.c_str(), attr_srv.request.predicate_name.c_str()); @@ -368,6 +370,19 @@ std::string PDDLProblemGenerator::getSourceLocation(const rosplan_knowledge_msgs } } } + attr_srv.request.predicate_name = "stored"; + if (!getPropsClient.call(attr_srv)) { + ROS_ERROR("[mir_pddl_problem_generator] Failed to call service %s: %s", + state_proposition_service_.c_str(), attr_srv.request.predicate_name.c_str()); + return ""; + } else { + for (size_t i = 0; i < attr_srv.response.attributes.size(); i++) { + rosplan_knowledge_msgs::KnowledgeItem attr = attr_srv.response.attributes[i]; + if (attr.values[1].value.compare(obj) == 0) { + return attr.values[0].value; + } + } + } return "XXXX"; // dummy location } diff --git a/mir_planning/mir_planner_executor/CMakeLists.txt b/mir_planning/mir_planner_executor/CMakeLists.txt index d277eb22d..95e2a04cf 100644 --- a/mir_planning/mir_planner_executor/CMakeLists.txt +++ b/mir_planning/mir_planner_executor/CMakeLists.txt @@ -39,6 +39,7 @@ add_executable(planner_executor ros/src/actions/pick/pick_action.cpp ros/src/actions/pick/pick_from_shelf_action.cpp ros/src/actions/pick/combined_pick_action.cpp + ros/src/actions/pick/pick_from_tt_action.cpp ros/src/actions/place/place_action.cpp ros/src/actions/place_unstage/place_unstage_action.cpp diff --git a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/insert/base_insert_action.h b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/insert/base_insert_action.h index d540879cd..5d644cbff 100644 --- a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/insert/base_insert_action.h +++ b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/insert/base_insert_action.h @@ -15,4 +15,5 @@ class BaseInsertAction : public ExecutorAction BaseInsertAction(std::string server_topic) : ExecutorAction(server_topic){}; void updateParamsBasedOnContext(std::vector ¶ms) 
override; void update_knowledge_base(bool success, std::vector ¶ms) override; + std::map failure_count_; }; diff --git a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/perceive/base_perceive_action.h b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/perceive/base_perceive_action.h index 8e708deeb..fe46ec26e 100644 --- a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/perceive/base_perceive_action.h +++ b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/perceive/base_perceive_action.h @@ -18,4 +18,5 @@ class BasePerceiveAction : public ExecutorAction }; void updateParamsBasedOnContext(std::vector ¶ms) override; void update_knowledge_base(bool success, std::vector ¶ms) override; + std::map failure_count_; }; diff --git a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/combined_pick_action.h b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/combined_pick_action.h index 1238dd0e1..d25f8936a 100644 --- a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/combined_pick_action.h +++ b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/combined_pick_action.h @@ -15,6 +15,7 @@ class CombinedPickAction : public BaseExecutorAction private: ExecutorAction *default_pick_; ExecutorAction *pick_from_shelf_; + ExecutorAction *pick_from_tt_; public: CombinedPickAction(); diff --git a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/pick_from_tt_action.h b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/pick_from_tt_action.h new file mode 100644 index 000000000..d4b88c2ab --- /dev/null +++ b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/actions/pick/pick_from_tt_action.h @@ -0,0 +1,17 @@ +/* + * Copyright [2017] + * + * Author: Vivek Mannava + * + */ + +#pragma once + +#include + +class PickFromTurnTableAction : public BasePickAction +{ + public: + PickFromTurnTableAction(); + bool run(std::vector ¶ms); +}; diff --git a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/to_be_removed_constants.h b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/to_be_removed_constants.h index c5150387d..cb494f208 100644 --- a/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/to_be_removed_constants.h +++ b/mir_planning/mir_planner_executor/ros/include/mir_planner_executor/to_be_removed_constants.h @@ -3,3 +3,4 @@ #define CAVITY_LOCATION_NAME "PP01" #define CAVITY_OBJECT_NAME "PP01_CAVITY-00" #define SHELF_LOCATION_TYPE "SH" +#define TURN_TABLE_TYPE "TT" diff --git a/mir_planning/mir_planner_executor/ros/src/actions/insert/base_insert_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/insert/base_insert_action.cpp index 5aeace32c..2d2219c43 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/insert/base_insert_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/insert/base_insert_action.cpp @@ -16,6 +16,7 @@ void BaseInsertAction::update_knowledge_base(bool success, std::string location = getValueOf(params, "location"); std::string peg = getValueOf(params, "peg"); std::string hole = getValueOf(params, "hole"); + int N = 1; if (success) { knowledge_updater_->addKnowledge("in", {{"peg", peg}, {"hole", hole}}); knowledge_updater_->remGoal("in", {{"peg", peg}, {"hole", hole}}); @@ -29,13 +30,27 @@ void 
BaseInsertAction::update_knowledge_base(bool success, knowledge_updater_->addKnowledge("heavy", {{"hole", hole}}); knowledge_updater_->remGoal("heavy", {{"hole", hole}}); - knowledge_updater_->remKnowledge("stored", {{"o", peg}, {"rp", platform}}); + knowledge_updater_->remKnowledge("stored", {{"rp", platform},{"o", peg}}); knowledge_updater_->remKnowledge("occupied", {{"rp", platform}}); } else { - // knowledge_updater_->remKnowledge("perceived", {{"l", location}}); knowledge_updater_->remGoalsWithObject(peg); ROS_WARN("Insert failed, remove goals with object \"%s\"", peg.c_str()); + + int count = 1; + if (failure_count_.find(peg) != failure_count_.end()) { + count = failure_count_[peg] + 1; + } + + if (count > N) { + knowledge_updater_->remKnowledge("at", {{"r", robot}, {"l", location}}); + knowledge_updater_->addKnowledge("at", {{"r", robot}, {"l", "START"}}); + ROS_WARN("Insert for object \"%s\" failed %d times. Resetting robot location for replanning", peg.c_str(), count); + failure_count_[peg] = 0; + } + else { + failure_count_[peg] = count; + } } } diff --git a/mir_planning/mir_planner_executor/ros/src/actions/perceive/base_perceive_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/perceive/base_perceive_action.cpp index 8d04d4579..2c0bbd38d 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/perceive/base_perceive_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/perceive/base_perceive_action.cpp @@ -13,12 +13,38 @@ void BasePerceiveAction::update_knowledge_base(bool success, { std::string robot = getValueOf(params, "robot_name"); std::string location = getValueOf(params, "location"); + int N = 1; if (success) { knowledge_updater_->addKnowledge("perceived", {{"l", location}}); knowledge_updater_->remGoal("perceived", {{"l", location}}); } else { - // TODO move base? 
- return; + // get the next action and check if it is PICK + std::string next_action = getValueOf(params, "next_action"); + if (next_action == "PICK" || next_action == "pick") { + int count = 1; + std::string object = getValueOf(params, "next_object"); + if (failure_count_.find(object) != failure_count_.end()) { + count = failure_count_[object] + 1; + knowledge_updater_->remKnowledge("at", {{"r", robot}, {"l", location}}); + knowledge_updater_->addKnowledge("at", {{"r", robot}, {"l", "START"}}); + } + ROS_WARN("Perceive for object \"%s\" failed %d times", object.c_str(), count); + if (count > N) { + knowledge_updater_->remGoalsWithObject(object); + knowledge_updater_->remKnowledge("perceived", {{"l", location}}); + knowledge_updater_->remKnowledge("at", {{"r", robot}, {"l", location}}); + knowledge_updater_->addKnowledge("at", {{"r", robot}, {"l", "START"}}); + ROS_WARN("Pick failed %d times, remove goals with object \"%s\"", count, object.c_str()); + failure_count_[object] = 0; + } else { + knowledge_updater_->remKnowledge("perceived", {{"l", location}}); + knowledge_updater_->remKnowledge("at", {{"r", robot}, {"l", location}}); + knowledge_updater_->addKnowledge("at", {{"r", robot}, {"l", "START"}}); + failure_count_[object] = count; + } + } else { + return; + } } } diff --git a/mir_planning/mir_planner_executor/ros/src/actions/pick/base_pick_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/pick/base_pick_action.cpp index 520212f86..ff6c82c01 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/pick/base_pick_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/pick/base_pick_action.cpp @@ -14,7 +14,6 @@ void BasePickAction::update_knowledge_base(bool success, std::string robot = getValueOf(params, "robot_name"); std::string location = getValueOf(params, "location"); std::string object = getValueOf(params, "object"); - int N = 1; if (success) { knowledge_updater_->addKnowledge("holding", {{"r", robot}, {"o", object}}); knowledge_updater_->remGoal("holding", {{"r", robot}, {"o", object}}); @@ -26,12 +25,17 @@ void BasePickAction::update_knowledge_base(bool success, count = failure_count_[object] + 1; } ROS_WARN("Pick for object \"%s\" failed %d times", object.c_str(), count); - if (count > N) { + if (count >= 3) { // if failed to pick 3 times, remove goal and add to end of list knowledge_updater_->remGoalsWithObject(object); knowledge_updater_->remKnowledge("perceived", {{"l", location}}); ROS_WARN("Pick failed %d times, remove goals with object \"%s\"", count, object.c_str()); failure_count_[object] = 0; - } else { + } else if (count == 2) { // if failed to pick 2 times, move base to same location and retry + knowledge_updater_->remKnowledge("perceived", {{"l", location}}); + knowledge_updater_->remKnowledge("at", {{"r", robot}, {"l", location}}); + knowledge_updater_->addKnowledge("at", {{"r", robot}, {"l", "START"}}); + failure_count_[object] = count; + } else { // reperceive and try to pick knowledge_updater_->remKnowledge("perceived", {{"l", location}}); failure_count_[object] = count; } diff --git a/mir_planning/mir_planner_executor/ros/src/actions/pick/combined_pick_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/pick/combined_pick_action.cpp index 04399c6c2..d328c8e2a 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/pick/combined_pick_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/pick/combined_pick_action.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -15,12 
+16,14 @@ CombinedPickAction::CombinedPickAction() { default_pick_ = new PickAction(); pick_from_shelf_ = new PickFromShelfAction(); + pick_from_tt_ = new PickFromTurnTableAction(); } void CombinedPickAction::initialize(KnowledgeUpdater *knowledge_updater) { default_pick_->initialize(knowledge_updater); pick_from_shelf_->initialize(knowledge_updater); + pick_from_tt_->initialize(knowledge_updater); } bool CombinedPickAction::execute(std::string &name, std::vector ¶ms) @@ -31,7 +34,11 @@ bool CombinedPickAction::execute(std::string &name, std::vectorexecute(name, params); - } else { + } else if (location_type == TURN_TABLE_TYPE) { + ROS_INFO("CombinedPickAction: TurnTable"); + return pick_from_tt_->execute(name, params); + } + else { ROS_INFO("CombinedPickAction: DEFAULT"); return default_pick_->execute(name, params); } diff --git a/mir_planning/mir_planner_executor/ros/src/actions/pick/pick_from_tt_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/pick/pick_from_tt_action.cpp new file mode 100644 index 000000000..333a6c758 --- /dev/null +++ b/mir_planning/mir_planner_executor/ros/src/actions/pick/pick_from_tt_action.cpp @@ -0,0 +1,27 @@ +/* + * Copyright [2017] + * + * Author: Vivek Mannava + * + */ + +#include + +PickFromTurnTableAction::PickFromTurnTableAction() : BasePickAction("/rtt_server") +{ + // client_.waitForServer(); +} + +bool PickFromTurnTableAction::run(std::vector ¶ms) +{ + mir_planning_msgs::GenericExecuteGoal goal; + goal.parameters = params; + actionlib::SimpleClientGoalState state = + client_.sendGoalAndWait(goal, ros::Duration(250.0), ros::Duration(5.0)); + if (state == actionlib::SimpleClientGoalState::StateEnum::SUCCEEDED) { + /* auto res = client_.getResult(); */ + /* ROS_INFO_STREAM(*res); */ + return true; + } else + return false; +} diff --git a/mir_planning/mir_planner_executor/ros/src/actions/place/place_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/place/place_action.cpp index b01f6d44d..5baee8fb8 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/place/place_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/place/place_action.cpp @@ -25,7 +25,6 @@ void PlaceAction::update_knowledge_base(bool success, knowledge_updater_->remKnowledge("holding", {{"r", robot}, {"o", object}}); knowledge_updater_->addKnowledge("gripper_is_free", {{"r", robot}}); knowledge_updater_->remGoal("gripper_is_free", {{"r", robot}}); - knowledge_updater_->addKnowledge("perceived", {{"l", location}}); knowledge_updater_->remGoal("perceived", {{"l", location}}); } else { return; diff --git a/mir_planning/mir_planner_executor/ros/src/actions/place_unstage/place_unstage_action.cpp b/mir_planning/mir_planner_executor/ros/src/actions/place_unstage/place_unstage_action.cpp index 5411105d8..fc5615f09 100644 --- a/mir_planning/mir_planner_executor/ros/src/actions/place_unstage/place_unstage_action.cpp +++ b/mir_planning/mir_planner_executor/ros/src/actions/place_unstage/place_unstage_action.cpp @@ -17,7 +17,6 @@ void PlaceUnstageAction::update_knowledge_base(bool success, knowledge_updater_->remKnowledge("stored",{{"rp",platform},{"o",object}}); knowledge_updater_->remKnowledge("occupied",{{"rp",platform}}); knowledge_updater_->addKnowledge("on", {{"o", object}, {"l", location}}); - knowledge_updater_->addKnowledge("perceived", {{"l", location}}); knowledge_updater_->addKnowledge("gripper_is_free", {{"r", robot}}); knowledge_updater_->remGoal("holding", {{"r", robot}, {"o", object}}); knowledge_updater_->remGoal("on", {{"o", object}, {"l", 
location}}); diff --git a/mir_planning/mir_planner_executor/ros/src/planner_executor.cpp b/mir_planning/mir_planner_executor/ros/src/planner_executor.cpp index ff611a892..19713d939 100644 --- a/mir_planning/mir_planner_executor/ros/src/planner_executor.cpp +++ b/mir_planning/mir_planner_executor/ros/src/planner_executor.cpp @@ -33,7 +33,8 @@ PlannerExecutor::PlannerExecutor(ros::NodeHandle &nh) : server_(nh, "execute_pla addActionExecutor("PICK", new CombinedPickAction()); addActionExecutor("PLACE", new PlaceAction()); - addActionExecutor("STAGE", new StageAction()); + addActionExecutor("STAGE_GENERAL", new StageAction()); + addActionExecutor("STAGE_LARGE", new StageAction()); addActionExecutor("UNSTAGE", new UnstageAction()); addActionExecutor("MOVE_BASE", new MoveAction()); addActionExecutor("INSERT", new CombinedInsertAction()); @@ -87,13 +88,20 @@ void PlannerExecutor::executeCallback() next_action.key = "next_action"; next_action.value = next_action_name; params.push_back(next_action); - if (next_action_name == "PICK" || next_action_name == "INSERT") { + if (next_action_name == "PICK") { std::string next_object = actions[i + 1].parameters[2].value; diagnostic_msgs::KeyValue next_object_param; next_object_param.key = "next_object"; next_object_param.value = next_object; params.push_back(next_object_param); } + else if (next_action_name == "INSERT"){ + std::string next_object = actions[i + 1].parameters[3].value; + diagnostic_msgs::KeyValue next_object_param; + next_object_param.key = "next_object"; + next_object_param.value = next_object; + params.push_back(next_object_param); + } } std::string action_name = toUpper(action.name); diff --git a/mir_planning/mir_planning_bringup/ros/config/logging_topics.yaml b/mir_planning/mir_planning_bringup/ros/config/logging_topics.yaml index bf96e0232..58c424f9d 100644 --- a/mir_planning/mir_planning_bringup/ros/config/logging_topics.yaml +++ b/mir_planning/mir_planning_bringup/ros/config/logging_topics.yaml @@ -1,7 +1,7 @@ /rockin/robot_pose /rockin/marker_pose /rockin/trajectory -/rockin/rgb/camera_info +/rockin/color/camera_info /rockin/depth/camera_info /rockin/depth/pointcloud /rockin/scan_0 diff --git a/mir_planning/mir_planning_bringup/ros/launch/logging.launch b/mir_planning/mir_planning_bringup/ros/launch/logging.launch index bac755c48..347f529cb 100644 --- a/mir_planning/mir_planning_bringup/ros/launch/logging.launch +++ b/mir_planning/mir_planning_bringup/ros/launch/logging.launch @@ -7,8 +7,8 @@ - - + + diff --git a/mir_planning/mir_planning_bringup/ros/launch/robot.launch b/mir_planning/mir_planning_bringup/ros/launch/robot.launch index 6b9d90447..a0392256d 100644 --- a/mir_planning/mir_planning_bringup/ros/launch/robot.launch +++ b/mir_planning/mir_planning_bringup/ros/launch/robot.launch @@ -26,7 +26,10 @@ - + + + + @@ -102,7 +105,8 @@ - + + @@ -145,9 +149,9 @@ - + @@ -155,6 +159,9 @@ + + + @@ -173,6 +180,9 @@ + + +
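Editor's note (not part of this patch): taken together, the changes above route a PICK action by the prefix of its target location - SH goes to the shelf pick server, the new TT prefix goes to the rtt_server added in this patch, and everything else falls back to the default pick. A rough Python sketch of that routing, using the GenericExecuteGoal/KeyValue parameter convention seen in the action clients above (server names other than rtt_server are illustrative):

    from diagnostic_msgs.msg import KeyValue

    SHELF_LOCATION_TYPE = "SH"   # from to_be_removed_constants.h
    TURN_TABLE_TYPE = "TT"       # added by this patch

    def route_pick(params):
        """Return the action server that should handle this pick, based on the location prefix."""
        location = next((p.value for p in params if p.key == "location"), "")
        if location.startswith(SHELF_LOCATION_TYPE):
            return "pick_from_shelf_server"      # illustrative name
        if location.startswith(TURN_TABLE_TYPE):
            return "rtt_server"                  # the server added by this patch
        return "wbc_pick_object_server"          # illustrative name for the default pick

    params = [KeyValue(key="object", value="M20_100-00"),
              KeyValue(key="location", value="TT01")]
    assert route_pick(params) == "rtt_server"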
diff --git a/mir_planning/mir_planning_core/ros/launch/task_planning_components.launch b/mir_planning/mir_planning_core/ros/launch/task_planning_components.launch
index 42644d173..45c93bd59 100644
--- a/mir_planning/mir_planning_core/ros/launch/task_planning_components.launch
+++ b/mir_planning/mir_planning_core/ros/launch/task_planning_components.launch
@@ -2,7 +2,7 @@
@@ -11,6 +11,10 @@
@@ -39,8 +43,8 @@
[XML content of these hunks was lost in extraction; only the hunk headers remain]
diff --git a/mir_planning/mir_planning_core/ros/scripts/planning_coordinator_sm.py b/mir_planning/mir_planning_core/ros/scripts/planning_coordinator_sm.py
index 0b1e5921e..ba2ec0b54 100755
--- a/mir_planning/mir_planning_core/ros/scripts/planning_coordinator_sm.py
+++ b/mir_planning/mir_planning_core/ros/scripts/planning_coordinator_sm.py
@@ -20,7 +20,7 @@
 from mir_planning_msgs.srv import ReAddGoals
 from std_msgs.msg import String
 
-from mir_audio_receiver.msg import AudioMessage
+# from mir_audio_receiver.msg import AudioMessage
 
 # ===============================================================================
 
@@ -44,12 +44,12 @@ def execute(self, userdata):
 
 class plan_task(smach.State):
     def __init__(self, mode=PlanGoal.NORMAL):
-        # publishing a Audio message at startup
-        audio_message = AudioMessage()
-        audio_pub = rospy.Publisher("/mir_audio_receiver/tts_request", AudioMessage, queue_size=1)
-        audio_message.message = "Hello, I am the mastermind. I am ready to plan your tasks."
-        audio_pub.publish(audio_message)
-        rospy.sleep(1.0)
+        # # publishing a Audio message at startup
+        # audio_message = AudioMessage()
+        # audio_pub = rospy.Publisher("/mir_audio_receiver/tts_request", AudioMessage, queue_size=1)
+        # audio_message.message = "Hello, I am the mastermind. I am ready to plan your tasks."
+        # audio_pub.publish(audio_message)
+        # rospy.sleep(1.0)
 
         smach.State.__init__(
             self,
@@ -159,11 +159,11 @@ def main():
     domain_file = rospy.get_param("~domain_file", None)
     planner = rospy.get_param("~planner", "mercury")
 
-    # publishing a Audio message at startup
-    audio_message = AudioMessage()
-    audio_pub = rospy.Publisher("/mir_audio_receiver/tts_request", AudioMessage, queue_size=1)
-    audio_message.message = "Hello, I am the planning coordinator"
-    audio_pub.publish(audio_message)
+    # # publishing a Audio message at startup
+    # audio_message = AudioMessage()
+    # audio_pub = rospy.Publisher("/mir_audio_receiver/tts_request", AudioMessage, queue_size=1)
+    # audio_message.message = "Hello, I am the planning coordinator"
+    # audio_pub.publish(audio_message)
 
     if problem_file is None or domain_file is None:
         rospy.logfatal("Either domain and/or problem file not provided. Exiting.")
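The audio announcements above are only commented out, not deleted. If they are ever re-enabled, publishing immediately after constructing a rospy.Publisher is racy because the TTS subscriber has usually not connected yet; a latched publisher (or an explicit wait) avoids losing the first message. A minimal sketch, assuming the same topic and message type as the commented-out code:

    # Sketch only: one way to restore the startup announcement without the
    # publish-before-subscribe race. Topic and message type are the ones used
    # by the commented-out code; this is not part of the changeset.
    import rospy
    from mir_audio_receiver.msg import AudioMessage

    def announce(text):
        pub = rospy.Publisher("/mir_audio_receiver/tts_request", AudioMessage,
                              queue_size=1, latch=True)  # latched: late subscribers still get it
        msg = AudioMessage()
        msg.message = text
        pub.publish(msg)

    if __name__ == "__main__":
        rospy.init_node("planning_coordinator_announce_example")
        announce("Hello, I am the planning coordinator")
        rospy.sleep(1.0)  # give the audio node time to receive before shutdown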
Exiting.") diff --git a/mir_planning/mir_task_planning/common/pddl/container_problem.pddl b/mir_planning/mir_task_planning/common/pddl/container_problem.pddl new file mode 100644 index 000000000..a024b613e --- /dev/null +++ b/mir_planning/mir_task_planning/common/pddl/container_problem.pddl @@ -0,0 +1,37 @@ +;This PDDL problem definition was made automatically from a KB snapshot +(define (problem general_domain_task) +(:domain general_domain) + +(:objects + WS01 WS02 WS03 WS04 WS05 WS06 SH01 RT01 PP01 START EXIT - location + S40_40_B CONTAINER_RED - object + YOUBOT-BRSU - robot + PLATFORM_LEFT PLATFORM_MIDDLE PLATFORM_RIGHT - robot_platform +) + +(:init + ;Cost information starts + (= (total-cost) 0) + ;Cost information ends + + (at YOUBOT-BRSU START) + (gripper_is_free YOUBOT-BRSU) + + (on CONTAINER_RED WS03) + (on S40_40_B WS04) + (container CONTAINER_RED) + (heavy CONTAINER_RED) + + (is_big_enough PLATFORM_RIGHT) +) + +(:goal (and + (in S40_40_B CONTAINER_RED) + + ) +) + +(:metric minimize (total-cost)) + +) + diff --git a/mir_robots/mir_bringup/components/realsense_camera.launch b/mir_robots/mir_bringup/components/realsense_camera.launch index 9ddc601c5..496b50182 100644 --- a/mir_robots/mir_bringup/components/realsense_camera.launch +++ b/mir_robots/mir_bringup/components/realsense_camera.launch @@ -1,12 +1,185 @@ - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /tower_cam3d_front/stereo_module/enable_auto_exposure: false + /tower_cam3d_front/stereo_module/hdr_enabled: false + /tower_cam3d_front/stereo_module/exposure: 850 + /tower_cam3d_front/stereo_module/laser_power: 360 + + /tower_cam3d_front/spatial/holes_fill: 2 + + /tower_cam3d_front/hole_filling/holes_fill: 2 + + diff --git a/mir_robots/mir_bringup/components/realsense_dual_camera.launch b/mir_robots/mir_bringup/components/realsense_dual_camera.launch new file mode 100644 index 000000000..10aac080c --- /dev/null +++ b/mir_robots/mir_bringup/components/realsense_dual_camera.launch @@ -0,0 +1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /tower_cam3d_front/stereo_module/enable_auto_exposure: false + /tower_cam3d_front/stereo_module/hdr_enabled: false + /tower_cam3d_front/stereo_module/exposure: 850 + /tower_cam3d_front/stereo_module/laser_power: 360 + + /tower_cam3d_front/spatial/holes_fill: 2 + + /tower_cam3d_front/hole_filling/holes_fill: 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch b/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch new file mode 100644 index 000000000..529cee84e --- /dev/null +++ b/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch @@ -0,0 +1,145 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /tower_cam3d_front/stereo_module/enable_auto_exposure: false + /tower_cam3d_front/stereo_module/hdr_enabled: false + /tower_cam3d_front/stereo_module/exposure: 850 + 
diff --git a/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch b/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch
new file mode 100644
index 000000000..529cee84e
--- /dev/null
+++ b/mir_robots/mir_bringup/components/realsense_dual_camera_go24.launch
@@ -0,0 +1,145 @@
[XML content of the new launch file was lost in extraction; only the rosparam overrides for the front and back cameras survive]
+      /tower_cam3d_front/stereo_module/enable_auto_exposure: false
+      /tower_cam3d_front/stereo_module/hdr_enabled: false
+      /tower_cam3d_front/stereo_module/exposure: 850
+      /tower_cam3d_front/stereo_module/laser_power: 360
+
+      /tower_cam3d_front/spatial/holes_fill: 2
+
+      /tower_cam3d_front/hole_filling/holes_fill: 2
+
+      /tower_cam3d_back/stereo_module/enable_auto_exposure: false
+      /tower_cam3d_back/stereo_module/hdr_enabled: false
+      /tower_cam3d_back/stereo_module/exposure: 850
+      /tower_cam3d_back/stereo_module/laser_power: 360
+
+      /tower_cam3d_back/spatial/holes_fill: 2
+
+      /tower_cam3d_back/hole_filling/holes_fill: 2
+
\ No newline at end of file
diff --git a/mir_robots/mir_bringup/robots/.youbot-brsu-2.launch.swp b/mir_robots/mir_bringup/robots/.youbot-brsu-2.launch.swp
new file mode 100644
index 000000000..aef2cc40f
Binary files /dev/null and b/mir_robots/mir_bringup/robots/.youbot-brsu-2.launch.swp differ
diff --git a/mir_robots/mir_bringup/robots/view_youbot.launch b/mir_robots/mir_bringup/robots/view_youbot.launch
new file mode 100644
index 000000000..c28727bdd
--- /dev/null
+++ b/mir_robots/mir_bringup/robots/view_youbot.launch
@@ -0,0 +1,21 @@
[XML content of the new launch file was lost in extraction]
\ No newline at end of file
diff --git a/mir_robots/mir_bringup/robots/youbot-brsu-2.launch b/mir_robots/mir_bringup/robots/youbot-brsu-2.launch
index 04df12ddd..389fcfcb7 100644
--- a/mir_robots/mir_bringup/robots/youbot-brsu-2.launch
+++ b/mir_robots/mir_bringup/robots/youbot-brsu-2.launch
@@ -33,10 +33,11 @@
@@ -56,48 +58,191 @@
[XML content of these hunks was lost in extraction; only the hunk headers remain]
diff --git a/mir_scenarios/mir_states/ros/src/mir_states/common/action_states.py b/mir_scenarios/mir_states/ros/src/mir_states/common/action_states.py
index cb9c2b1f8..9dbca020a 100644
--- a/mir_scenarios/mir_states/ros/src/mir_states/common/action_states.py
+++ b/mir_scenarios/mir_states/ros/src/mir_states/common/action_states.py
@@ -21,7 +21,7 @@ def __init__(self, platform_name):
     def execute(self, userdata):
         self.client.send_goal(self.goal)
-        self.client.wait_for_result(rospy.Duration.from_sec(15.0))
+        self.client.wait_for_result(rospy.Duration.from_sec(30.0))
         state = self.client.get_state()
         if state == GoalStatus.SUCCEEDED:
             return "success"
@@ -30,16 +30,41 @@
 
 class pick_object(smach.State):
-    def __init__(self):
-        smach.State.__init__(self, outcomes=["success", "failed"])
+    def __init__(self, obj_name = None):
+        smach.State.__init__(self, outcomes=["success", "failed"],
+                             input_keys=["pick_anything_object"])
         self.client = SimpleActionClient("wbc_pick_object_server", GenericExecuteAction)
         self.client.wait_for_server()
+        self.object_name = obj_name if obj_name is not None else "any"
+
+
+    def execute(self, userdata):
         self.goal = GenericExecuteGoal()
-        self.goal.parameters.append(KeyValue(key="object", value="any"))
+
+        pick_anything_object = userdata.pick_anything_object
+
+        if pick_anything_object is not None or pick_anything_object != "":
+            self.object_name = pick_anything_object
+
+        self.goal.parameters.append(KeyValue(key="object", value=str(self.object_name).upper()))
+        self.client.send_goal(self.goal)
+        self.client.wait_for_result(rospy.Duration.from_sec(60.0))
+        state = self.client.get_state()
+        if state == GoalStatus.SUCCEEDED:
+ return "success" + else: + return "failed" + +class tc_pick_object(smach.State): + def __init__(self): + smach.State.__init__(self, outcomes=["success", "failed"]) + self.client = SimpleActionClient("tc_wbc_pick_object_server", GenericExecuteAction) + self.client.wait_for_server() def execute(self, userdata): + self.goal = GenericExecuteGoal() + self.client.send_goal(self.goal) - self.client.wait_for_result(rospy.Duration.from_sec(30.0)) + self.client.wait_for_result(rospy.Duration.from_sec(60.0)) state = self.client.get_state() if state == GoalStatus.SUCCEEDED: return "success" @@ -48,26 +73,41 @@ def execute(self, userdata): class perceive_location(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=["success", "failed"]) + def __init__(self, obj_category="multimodal_object_recognition_atwork", timeout=30.0): + smach.State.__init__(self, outcomes=["success", "failed"], + output_keys=["perceived_objects"]) self.client = SimpleActionClient( "perceive_location_server", GenericExecuteAction ) self.client.wait_for_server() self.goal = GenericExecuteGoal() + self.timeout = timeout + self.goal.parameters.append( + KeyValue(key="obj_category", value=str(obj_category))) def execute(self, userdata): self.client.send_goal(self.goal) - self.client.wait_for_result(rospy.Duration.from_sec(30.0)) + self.client.wait_for_result(rospy.Duration.from_sec(self.timeout)) state = self.client.get_state() + result = self.client.get_result() + objects = [] if state == GoalStatus.SUCCEEDED: + print("Perceived objects:") + for i in range(20): + obj_key = "obj_" + str(i+1) + obj_name = Utils.get_value_of(result.results, obj_key) + if obj_name == None: + break + print(obj_name.ljust(15)) + objects.append(obj_name) + userdata.perceived_objects = objects return "success" else: return "failed" class move_base(smach.State): - def __init__(self, destination_location): + def __init__(self, destination_location, timeout = 15.0): smach.State.__init__(self, outcomes=["success", "failed"], input_keys=["goal"]) @@ -75,6 +115,7 @@ def __init__(self, destination_location): self.client.wait_for_server() self.goal = GenericExecuteGoal() self.destination_location = destination_location + self.timeout = timeout def execute(self, userdata): @@ -104,7 +145,7 @@ def execute(self, userdata): ) self.client.send_goal(self.goal) - self.client.wait_for_result(rospy.Duration.from_sec(int(15.0))) + self.client.wait_for_result(rospy.Duration.from_sec(int(self.timeout))) state = self.client.get_state() if state == GoalStatus.SUCCEEDED: return "success" @@ -134,12 +175,13 @@ def execute(self, userdata): class stage_object(smach.State): - def __init__(self, platform=None): - smach.State.__init__(self, outcomes=["success", "failed"], input_keys=["goal"]) + def __init__(self, platform=None, timeout = 30.0): + smach.State.__init__(self, outcomes=["success", "failed"], input_keys=["goal", "pick_anything_stage_platform"]) self.client = SimpleActionClient("stage_object_server", GenericExecuteAction) self.client.wait_for_server() self.goal = GenericExecuteGoal() self.platform = platform + self.timeout = timeout def execute(self, userdata): self.goal.parameters = [] @@ -165,8 +207,16 @@ def execute(self, userdata): KeyValue(key="platform", value=str(platform).upper()) ) + # pick_anything_stage_platform + if userdata.pick_anything_stage_platform is not None or userdata.pick_anything_stage_platform != "": + self.goal.parameters = [] + rospy.loginfo("using pick anything stage platform") + self.goal.parameters.append( + 
KeyValue(key="platform", value=str(userdata.pick_anything_stage_platform).upper()) + ) + self.client.send_goal(self.goal) - self.client.wait_for_result(rospy.Duration.from_sec(30.0)) + self.client.wait_for_result(rospy.Duration.from_sec(self.timeout)) state = self.client.get_state() if state == GoalStatus.SUCCEEDED: return "success" @@ -175,12 +225,13 @@ def execute(self, userdata): class unstage_object(smach.State): - def __init__(self, platform=None): + def __init__(self, platform=None, timeout = 30.0): smach.State.__init__(self, outcomes=["success", "failed"], input_keys=["goal"]) self.client = SimpleActionClient("unstage_object_server", GenericExecuteAction) self.client.wait_for_server() self.goal = GenericExecuteGoal() self.platform = platform + self.timeout = timeout def execute(self, userdata): self.goal.parameters = [] @@ -207,7 +258,7 @@ def execute(self, userdata): ) self.client.send_goal(self.goal) - self.client.wait_for_result(rospy.Duration.from_sec(30.0)) + self.client.wait_for_result(rospy.Duration.from_sec(self.timeout)) state = self.client.get_state() if state == GoalStatus.SUCCEEDED: return "success" diff --git a/mir_scenarios/mir_states/ros/src/mir_states/common/manipulation_states.py b/mir_scenarios/mir_states/ros/src/mir_states/common/manipulation_states.py index 89af05cf1..9338f744b 100644 --- a/mir_scenarios/mir_states/ros/src/mir_states/common/manipulation_states.py +++ b/mir_scenarios/mir_states/ros/src/mir_states/common/manipulation_states.py @@ -18,6 +18,10 @@ from tf.transformations import euler_from_quaternion from mir_manipulation_msgs.msg import GripperCommand from std_msgs.msg import String +from brics_actuator.msg import JointPositions, JointValue +from srdfdom.srdf import SRDF +from sensor_msgs.msg import JointState +from mir_actions.utils import Utils class Bunch: def __init__(self, **kwds): @@ -33,7 +37,7 @@ class MoveitClient: :param move_arm_to: target where the arm should move. If it is a string, then it gives target name (should be availabile on the parameter server). If it as tuple or a list, then it is treated differently based on the length. If it - has 7 items, then it is cartesian pose (x, y, z, r, p ,y) + the corresponding frame. + has , then it is cartesian pose (x, y, z, r, p ,y) + the corresponding frame. If it has 5 items, then it is arm configuration in join space. 
@@ -86,7 +90,6 @@ def event_cb(self, msg):
 
     def execute(self, userdata, blocking=True):
         target = self.move_arm_to or userdata.move_arm_to
-        # do it twice because it probably fails the first time
         for i in range(2):
             self.client_event = None
@@ -169,25 +172,74 @@ def list_to_brics_joint_positions(self, joint_positions):
         return brics_joint_positions
 
+class ArmPositionCommand:
+    def __init__(self, target):
+        self.zero_vel_counter = 0
+        self.robot_srdf = SRDF.from_parameter_server('/robot_description_semantic')
+        self.pub_arm_position = rospy.Publisher('arm_1/arm_controller/position_command', JointPositions, queue_size=1)
+        self.joint_state_sub = rospy.Subscriber('joint_states', JointState, self.joint_state_cb)
+        self.move_arm_to = target
+        self.is_arm_moving = False
+
+    def joint_state_cb(self, msg: JointState):
+        self.joint_state = msg
+        # monitor the velocities
+        self.joint_velocities = msg.velocity
+        # if all velocities are 0.0, the arm is not moving
+        if "arm_joint_1" in msg.name and all([v == 0.0 for v in self.joint_velocities]):
+            self.zero_vel_counter += 1
+
+    def get_joint_values_from_group_state(self, group_state_name):
+        joint_configs = []
+        for group_state in self.robot_srdf.group_states:
+            if group_state.name == group_state_name:
+                for joint in group_state.joints:
+                    joint_configs.append((joint.name, joint.value[0]))
+                return joint_configs
+        return []
+
+    def execute(self, userdata):
+        joint_configs = self.get_joint_values_from_group_state(
+            self.move_arm_to or userdata.move_arm_to
+        )
+
+        if len(joint_configs) == 0:
+            rospy.logerr("No joint configuration found for group state " + self.move_arm_to)
+            return "failed"
+        else:
+            joint_positions = JointPositions()
+            joint_positions.positions = [
+                JointValue(
+                    rospy.Time.now(),
+                    joint_name,
+                    "rad",
+                    joint_value
+                )
+                for joint_name, joint_value in joint_configs
+            ]
+            self.pub_arm_position.publish(joint_positions)
+            self.zero_vel_counter = 0
+            while self.zero_vel_counter < 10:
+                rospy.sleep(0.1)
+            return "succeeded"
 
 class move_arm(smach.State):
-    def __init__(self, target=None, blocking=True, tolerance=None, timeout=10.0):
+    def __init__(self, target=None, blocking=True, tolerance=None, timeout=10.0, use_moveit=True):
         smach.State.__init__(
             self, outcomes=["succeeded", "failed"], input_keys=["move_arm_to"]
         )
-        joint_names = [
-            "arm_joint_1",
-            "arm_joint_2",
-            "arm_joint_3",
-            "arm_joint_4",
-            "arm_joint_5",
-        ]
+        joint_names = [f'arm_joint_{i}' for i in range(1, 6)]
         self.arm_moveit_client = MoveitClient("/arm_", target, timeout, joint_names)
+        self.arm_position_command = ArmPositionCommand(target)
         self.blocking = blocking
+        self.use_moveit = use_moveit
 
     def execute(self, userdata):
-        return self.arm_moveit_client.execute(userdata, self.blocking)
-
+        if self.use_moveit:
+            return self.arm_moveit_client.execute(userdata, self.blocking)
+        else:
+            print('Using arm position command *')
+            return self.arm_position_command.execute(userdata)
 
 class check_move_group_feedback(smach.State):
     def __init__(self, timeout=10.0):
@@ -196,7 +248,7 @@ def __init__(self, timeout=10.0):
         self.status = 1
 
     def status_cb(self, msg):
-        self.status = status
+        self.status = msg.status.status
 
     def execute(self):
         if self.status == 3:
@@ -207,16 +259,18 @@ def execute(self):
         return "failed"
 
 class control_gripper(smach.State):
-    def __init__(self, target=None, blocking=True, tolerance=None, timeout=10.0):
-        smach.State.__init__(self, outcomes=["succeeded"])
+    def __init__(self, target=None, blocking=True, tolerance=None, timeout=3):
+ smach.State.__init__(self, outcomes=["succeeded", "timeout"]) + self.timeout = rospy.Duration(timeout) self.command = GripperCommand() self.current_state = "GRIPPER_OPEN" self.grasped_counter = 0 - if 'open' in target: - self.command.command = GripperCommand.OPEN - elif 'close' in target: - self.command.command = GripperCommand.CLOSE + if type(target) == str: + if 'open' in target: + self.command.command = GripperCommand.OPEN + elif 'close' in target: + self.command.command = GripperCommand.CLOSE elif type(target) == float: self.command.command = target self.pub = rospy.Publisher('/arm_1/gripper_command', GripperCommand, queue_size=1) @@ -225,12 +279,17 @@ def __init__(self, target=None, blocking=True, tolerance=None, timeout=10.0): def feedback_cb(self, msg): json_obj = json.loads(msg.data) self.current_state = json_obj["state"] - if self.current_state == "OBJECT_GRASPED": - self.grasped_counter += 1 + # if self.current_state == "OBJECT_GRASPED": + # self.grasped_counter += 1 + # print('grasped counter 1: ', self.grasped_counter) def execute(self, userdata): self.pub.publish(self.command) - while True: + self.grasped_counter = 0 + start_time = rospy.Time.now() + while (rospy.Time.now() - start_time < self.timeout): + if self.current_state == "OBJECT_GRASPED": + self.grasped_counter += 1 if (self.current_state == "GRIPPER_OPEN" or self.current_state == "GRIPPER_INTER") and\ self.command.command != GripperCommand.CLOSE: self.grasped_counter = 0 @@ -243,12 +302,18 @@ def execute(self, userdata): self.grasped_counter > 4 and\ self.command.command == GripperCommand.CLOSE: self.grasped_counter = 0 + end_time = rospy.Time.now() + # print the time in seconds + print('grasped in: ', (end_time - start_time).to_sec()) return "succeeded" - rospy.sleep(0.1) + rospy.sleep(0.05) + rospy.logerr("Gripper open/close timed out") + return "timeout" class verify_object_grasped(smach.State): - def __init__(self, timeout=5.0): - smach.State.__init__(self, outcomes=["succeeded", "failed"]) + def __init__(self, timeout=3): + smach.State.__init__(self, outcomes=["succeeded", "failed", "timeout"], + input_keys=["goal"]) self.current_state = "OBJECT_GRASPED" self.grasped_counter = 0 @@ -262,18 +327,28 @@ def feedback_cb(self, msg): self.grasped_counter += 1 def execute(self, userdata): + # get the object name from userdata + object_name = Utils.get_value_of(userdata.goal.parameters, "object") + if object_name is not None and object_name != "": + object_name = object_name.split("-")[0] + else: + object_name = "" start_time = rospy.Time.now() while (rospy.Time.now() - start_time < self.timeout): rospy.sleep(0.1) if self.current_state == "GRIPPER_CLOSED" or\ self.current_state == "GRIPPER_INTER" or\ self.current_state == "GRIPPER_OPEN": + if object_name == "WRENCH": + # TODO: modify this + return "succeeded" return "failed" elif self.current_state == "OBJECT_GRASPED" and\ self.grasped_counter > 4: self.grasped_counter = 0 return "succeeded" - return "failed" + rospy.logerr('Grasp verification timeout') + return "timeout" class move_arm_and_gripper(smach.State): @@ -287,48 +362,31 @@ class move_arm_and_gripper(smach.State): take feedback from gripper_controller/state, and run move_arm state and the new gripper state concurrently. 
""" - def __init__(self, conf, target=None, blocking=True, tolerance=None, timeout=10.0): + def __init__(self, gripper_conf, target=None, blocking=True, tolerance=None, timeout=10.0, use_moveit=True): smach.State.__init__( self, outcomes=["succeeded", "failed"], input_keys=["move_arm_to"] ) - joint_names = [ - "arm_joint_1", - "arm_joint_2", - "arm_joint_3", - "arm_joint_4", - "arm_joint_5", - ] + joint_names = [f'arm_joint_{i}' for i in range(1, 6)] self.arm_moveit_client = MoveitClient("/arm_", target, timeout, joint_names) + self.arm_position_command = ArmPositionCommand(target) + self.blocking = blocking + self.use_moveit = use_moveit self.pub = rospy.Publisher('/arm_1/gripper_command', GripperCommand, queue_size=1) - self.conf = conf + self.gripper_conf = gripper_conf self.gripper_command = GripperCommand() - if 'open' in self.conf: + if 'open' in self.gripper_conf: self.gripper_command.command = GripperCommand.OPEN - elif 'close' in self.conf: + elif 'close' in self.gripper_conf: self.gripper_command.command = GripperCommand.CLOSE - elif type(self.conf) == float: - self.gripper_command.command = self.conf - - def get_targets(self, group_name): - text = rospy.get_param("/robot_description_semantic") - "Following code searches and extracts gripper configurations in youbot.srdf" - pattern = ( - 'group_state[ \\\\\n\r\f\v\t]*name="' - + self.conf - + '"[ \\\\\n\r\f\v\t]*group="arm_1_gripper">[ \\\\\n\r\f\v\t]*