Commit
CI対応 (CI fixes)
mizonon committed Oct 31, 2024
1 parent 4e0a275 commit 057338c
Showing 8 changed files with 159 additions and 175 deletions.
2 changes: 1 addition & 1 deletion crane_plus_examples/launch/demo.launch.py
@@ -30,7 +30,7 @@ def generate_launch_description():
default_value='/dev/ttyUSB0',
description='Set port name.'
)

declare_use_camera = DeclareLaunchArgument(
'use_camera',
default_value='false',
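The hunk above only shows the 'use_camera' argument being declared. As a rough illustration (not the repository's launch file), a launch argument like this is usually consumed through LaunchConfiguration and a condition; the package and executable names below are placeholders.

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node


def generate_launch_description():
    # Declare the launch argument (default 'false', as in the hunk above)
    declare_use_camera = DeclareLaunchArgument(
        'use_camera',
        default_value='false',
        description='Set true to launch the camera node.'
    )

    # Start the camera node only when use_camera:=true is passed.
    # Package and executable names are placeholders, not the repository's.
    camera_node = Node(
        package='usb_cam',
        executable='usb_cam_node_exe',
        condition=IfCondition(LaunchConfiguration('use_camera'))
    )

    return LaunchDescription([declare_use_camera, camera_node])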
56 changes: 24 additions & 32 deletions crane_plus_examples_py/crane_plus_examples_py/aruco_detection.py
@@ -1,77 +1,72 @@
# Copyright 2020 RT Corporation
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import rclpy
from rclpy.node import Node

import numpy as np

# from std_msgs.msg import String
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import CameraInfo, Image
import cv2
from cv2 import aruco
from cv_bridge import CvBridge
import tf2_ros

from crane_plus_examples_py.utils import rotation_matrix_to_quaternion
from geometry_msgs.msg import TransformStamped
import numpy as np
import rclpy
from rclpy.node import Node
import tf2_ros
from sensor_msgs.msg import CameraInfo, Image


class ImageSubscriber(Node):
def __init__(self):
super().__init__("aruco_detection")
super().__init__('aruco_detection')
self.image_subscription = self.create_subscription(
Image, "image_raw", self.image_callback, 10
Image, 'image_raw', self.image_callback, 10
)
self.camera_info_subscription = self.create_subscription(
CameraInfo, "camera_info", self.camera_info_callback, 10
CameraInfo, 'camera_info', self.camera_info_callback, 10
)
self.tf_broadcaster = tf2_ros.TransformBroadcaster()

self.camera_info = None
self.bridge = CvBridge()

def image_callback(self, msg):
# Convert the image data from a ROS message to an OpenCV array
cv_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding=msg.encoding)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2BGR)

if self.camera_info:
# Load the ArUco marker dictionary
# DICT_6X6_50 contains 50 markers made up of 6x6 bits
MARKER_DICT = aruco.getPredefinedDictionary(aruco.DICT_6X6_50)

# Marker IDs
ids = []

# Marker corner positions in the image coordinate frame
corners = []

# Detect the markers
corners, ids, _ = aruco.detectMarkers(cv_img, MARKER_DICT)

# Number of detected markers
n_markers = len(ids)

# Camera parameters
CAMERA_MATRIX = np.array(self.camera_info['k']).reshape(3, 3)
DIST_COEFFS = np.array(self.camera_info['d']).reshape(1, 5)

# Marker side length: 0.04 [m]
MARKER_LENGTH = 0.04

# If one or more markers are detected, broadcast each marker's pose via tf
if n_markers > 0:
for i in range(n_markers):
@@ -81,7 +76,7 @@ def image_callback(self, msg):
# Broadcast the tf
t = TransformStamped()
t.header = msg.header
t.child_frame_id = "target_" + str(ids[i][0])
t.child_frame_id = 'target_' + str(ids[i][0])
t.transform.translation.x = tvecs[i][0][0]
t.transform.translation.y = tvecs[i][0][1]
t.transform.translation.z = tvecs[i][0][2]
Expand All @@ -94,20 +89,17 @@ def image_callback(self, msg):
t.transform.rotation.z = q.z
t.transform.rotation.w = q.w
self.tf_broadcaster.sendTransform(t)

def camera_info_callback(self, msg):
self.camera_info = msg


def main(args=None):
rclpy.init(args=args)

image_subscriber = ImageSubscriber()
rclpy.spin(image_subscriber)

# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
image_subscriber.destroy_node()
rclpy.shutdown()

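The hunk above shows the camera parameters, the marker length, and the tf broadcast, but the call that actually estimates each marker's pose is elided. A minimal sketch of how that step is commonly done with OpenCV's (pre-4.7) aruco API follows; the function name, return shape, and helper are illustrative and not taken from the repository.

import cv2
from cv2 import aruco
import numpy as np


def estimate_marker_poses(cv_img, camera_matrix, dist_coeffs, marker_length=0.04):
    # Load the same 6x6, 50-marker dictionary used in the node above
    marker_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_50)
    corners, ids, _ = aruco.detectMarkers(cv_img, marker_dict)
    if ids is None:
        return []

    # One (rvec, tvec) pair per detected marker, expressed in the camera frame
    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
        corners, marker_length, camera_matrix, dist_coeffs)

    poses = []
    for i in range(len(ids)):
        # Rodrigues rotation vector -> 3x3 rotation matrix, which a helper such
        # as rotation_matrix_to_quaternion() can then convert for the tf message
        rot_mat, _ = cv2.Rodrigues(rvecs[i])
        poses.append((int(ids[i][0]), tvecs[i][0], rot_mat))
    return poses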
51 changes: 23 additions & 28 deletions crane_plus_examples_py/crane_plus_examples_py/color_detection.py
@@ -1,27 +1,25 @@
# Copyright 2020 RT Corporation
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import rclpy
from rclpy.node import Node

# from std_msgs.msg import String
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import CameraInfo, Image
from image_geometry import PinholeCameraModel
import cv2
from cv_bridge import CvBridge
from geometry_msgs.msg import TransformStamped
from image_geometry import PinholeCameraModel
import rclpy
from rclpy.node import Node
import tf2_ros
from sensor_msgs.msg import CameraInfo, Image


class ImageSubscriber(Node):
@@ -50,24 +48,24 @@ def image_callback(self, msg):
HIGH_S = 255
LOW_V = 50
HIGH_V = 255

# Receive the webcam image
cv_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding=msg.encoding)

# Convert the image from RGB to HSV (matching the format of the captured camera image)
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2HSV)

# Binarize the image
img_mask_1 = cv2.inRange(cv_img, (LOW_H_1, LOW_S, LOW_V), (HIGH_H_1, HIGH_S, HIGH_V))
img_mask_2 = cv2.inRange(cv_img, (LOW_H_2, LOW_S, LOW_V), (HIGH_H_2, HIGH_S, HIGH_V))

# Combine the mask images
img_thresholded = cv2.bitwise_or(img_mask_1, img_mask_2)

# Remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
img_thresholded = cv2.morphologyEx(img_thresholded, cv2.MORPH_OPEN, kernel)

# Fill in holes
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
img_thresholded = cv2.morphologyEx(img_thresholded, cv2.MORPH_CLOSE, kernel)
@@ -77,34 +75,34 @@ def image_callback(self, msg):
d_m01 = moment['m01']
d_m10 = moment['m10']
d_area = moment['m00']

# If the detected region contains more than 10000 pixels
if d_area > 10000:
# Create the camera model
camera_model = PinholeCameraModel()

# Set the camera parameters
camera_model.fromCameraInfo(self.camera_info)

# Position of the grasp target in the image coordinate frame (2D)
pixel_x = d_m10 / d_area
pixel_y = d_m01 / d_area
point = (pixel_x, pixel_y)

# Get the grasp target position in the rectified image coordinate frame (2D)
rect_point = camera_model.rectifyImage(point)

# Get the direction (ray) toward the grasp target as seen from the camera coordinate frame
ray = camera_model.projectPixelTo3dRay(rect_point)

# Compute the grasp target position, taking the camera height to be 0.46 [m]
CAMERA_HEIGHT = 0.46
object_position = {
'x': ray.x * CAMERA_HEIGHT,
'y': ray.y * CAMERA_HEIGHT,
'z': ray.z * CAMERA_HEIGHT,
}

# Broadcast the grasp target position via tf
t = TransformStamped()
t.header = msg.header
@@ -113,27 +111,24 @@ def image_callback(self, msg):
t.transform.translation.y = object_position['y']
t.transform.translation.z = object_position['z']
self.tf_broadcaster.sendTransform(t)

# Publish the thresholded binary image
img_thresholded_msg = self.bridge.cv2_to_imgmsg(img_thresholded, encoding="mono8")
self.image_thresholded_publisher.publish(img_thresholded_msg)

def camera_info_callback(self, msg):
self.camera_info = msg


def main(args=None):
rclpy.init(args=args)

image_subscriber = ImageSubscriber()
rclpy.spin(image_subscriber)

# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
image_subscriber.destroy_node()
rclpy.shutdown()


if __name__ == '__main__':
main()
main()
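The comments in color_detection.py describe projecting the detected blob's centroid to a 3D position: the pixel is rectified, projected to a ray in the camera frame, and the ray is scaled by the camera-to-object distance. A self-contained sketch of that step, assuming the standard image_geometry Python API (which returns the ray as a plain (x, y, z) tuple), is given below; it is an illustration, not the repository's implementation.

from image_geometry import PinholeCameraModel

CAMERA_HEIGHT = 0.46  # camera-to-table distance [m], matching the node above


def centroid_to_camera_frame(camera_info, pixel_x, pixel_y):
    camera_model = PinholeCameraModel()
    camera_model.fromCameraInfo(camera_info)

    # Undistort the raw pixel coordinates, then project them to a unit ray
    rect_point = camera_model.rectifyPoint((pixel_x, pixel_y))
    ray = camera_model.projectPixelTo3dRay(rect_point)

    # Scale the unit ray by the approximate distance to the object,
    # as the node above does with CAMERA_HEIGHT
    return {
        'x': ray[0] * CAMERA_HEIGHT,
        'y': ray[1] * CAMERA_HEIGHT,
        'z': ray[2] * CAMERA_HEIGHT,
    }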
30 changes: 15 additions & 15 deletions crane_plus_examples_py/crane_plus_examples_py/gripper_control.py
@@ -12,32 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from crane_plus_examples_py.utils import plan_and_execute
import math
import rclpy

# generic ros libraries
from rclpy.logging import get_logger

# moveit python library
from moveit.core.robot_state import RobotState
from moveit.planning import (
MoveItPy,
PlanRequestParameters,
)

from crane_plus_examples_py.utils import plan_and_execute
import rclpy
from rclpy.logging import get_logger


def main(args=None):
# Initialize ROS 2
rclpy.init(args=args)

# Create a logger
logger = get_logger("gripper_control")
logger = get_logger('gripper_control')

# Initialize MoveItPy
crane_plus = MoveItPy(node_name="moveit_py")
crane_plus_gripper = crane_plus.get_planning_component("gripper")
logger.info("MoveItPy instance created")
crane_plus = MoveItPy(node_name='moveit_py')
crane_plus_gripper = crane_plus.get_planning_component('gripper')
logger.info('MoveItPy instance created')

# instantiate a RobotState instance using the current robot model
robot_model = crane_plus.get_robot_model()
@@ -46,15 +44,15 @@ def main(args=None):
# Set the planning parameters
plan_request_params = PlanRequestParameters(
crane_plus,
"ompl_rrtc",
'ompl_rrtc',
)

# Set the velocity and acceleration scaling factors
plan_request_params.max_acceleration_scaling_factor = 1.0 # Set 0.0 ~ 1.0
plan_request_params.max_velocity_scaling_factor = 1.0 # Set 0.0 ~ 1.0

# Move the gripper to the +30 [deg] position
robot_state.set_joint_group_positions("gripper", [math.radians(30)])
robot_state.set_joint_group_positions('gripper', [math.radians(30)])
crane_plus_gripper.set_start_state_to_current_state()
crane_plus_gripper.set_goal_state(robot_state=robot_state)
plan_and_execute(
@@ -63,8 +61,9 @@ def main(args=None):
logger,
single_plan_parameters=plan_request_params,
)

# Move the gripper to the -30 [deg] position
robot_state.set_joint_group_positions("gripper", [math.radians(-30)])
robot_state.set_joint_group_positions('gripper', [math.radians(-30)])
crane_plus_gripper.set_start_state_to_current_state()
crane_plus_gripper.set_goal_state(robot_state=robot_state)
plan_and_execute(
@@ -73,8 +72,9 @@ def main(args=None):
logger,
single_plan_parameters=plan_request_params,
)

# Move the gripper to the 0 [deg] position
robot_state.set_joint_group_positions("gripper", [0.0])
robot_state.set_joint_group_positions('gripper', [0.0])
crane_plus_gripper.set_start_state_to_current_state()
crane_plus_gripper.set_goal_state(robot_state=robot_state)
plan_and_execute(
@@ -83,7 +83,7 @@ def main(args=None):
logger,
single_plan_parameters=plan_request_params,
)

# Shut down MoveItPy
crane_plus.shutdown()

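gripper_control.py imports plan_and_execute from crane_plus_examples_py.utils, whose implementation is not part of this diff. Such helpers are typically modeled on the MoveIt 2 tutorials: plan with the given parameters, execute on success, and log failures. The sketch below is an assumption about that pattern, not the repository's actual helper.

import time


def plan_and_execute(robot, planning_component, logger,
                     single_plan_parameters=None, sleep_time=0.0):
    logger.info('Planning trajectory')
    if single_plan_parameters is not None:
        plan_result = planning_component.plan(
            single_plan_parameters=single_plan_parameters)
    else:
        plan_result = planning_component.plan()

    if plan_result:
        logger.info('Executing plan')
        # Execute the planned trajectory with the default controllers
        robot.execute(plan_result.trajectory, controllers=[])
    else:
        logger.error('Planning failed')

    time.sleep(sleep_time)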
