
Commit

Merge branch 'master' into master
hollydinkel authored Nov 13, 2023
2 parents e08ab3b + 4f41f7b commit e3d684e
Showing 6 changed files with 87 additions and 86 deletions.
8 changes: 3 additions & 5 deletions .gitignore
@@ -1,7 +1,5 @@
__pycache__/
.vscode/
data/output/*.png
data/output/*/*.json
data/*.bag
data/
test.cpp
data/*
test.cpp
*.bag
2 changes: 1 addition & 1 deletion docs/RUN.md
@@ -16,7 +16,7 @@ Installation and execution of TrackDLO was verified with the below dependencies
* [Pillow](https://pillow.readthedocs.io/en/stable/installation.html)
* [ROS Numpy](https://pypi.org/project/rosnumpy/)

We also provide Docker files for compatibility with other system configurations. Refer to the [DOCKER.md](https://github.com/RMDLO/trackdlo/blob/master/docs/DOCKER.md) for more information on using docker, and see the docker [requirements.txt](https://github.com/RMDLO/trackdlo/blob/master/docker/requirements.txt) file for a list of the tested versions of TrackDLO package dependencies.
We also provide Docker files for compatibility with other system configurations. Refer to the ![DOCKER.md](https://github.com/RMDLO/trackdlo/blob/master/docs/DOCKER.md) for more information on using docker, and see the docker ![requirements.txt](https://github.com/RMDLO/trackdlo/blob/master/docker/requirements.txt) file for a list of the tested versions of TrackDLO package dependencies.

## Other Requirements

16 changes: 7 additions & 9 deletions launch/realsense_node.launch
@@ -2,25 +2,23 @@

<include file="$(find realsense2_camera)/launch/rs_camera.launch">
<arg name="json_file_path" value="$(find trackdlo)/config/preset_decimation_4.0_depth_step_100.json" />
<arg name="publish_tf" value="false" />
<arg name="filters" value="pointcloud,temporal,decimation" />
<arg name="depth_fps" value="15" />
<arg name="depth_fps" value="10" />
<arg name="depth_height" value="720" />
<arg name="depth_width" value="1280" />
<arg name="color_fps" value="15" />
<arg name="color_fps" value="10" />
<arg name="color_height" value="720" />
<arg name="color_width" value="1280" />
<arg name="align_depth" value="true" />
<arg name="ordered_pc" value="true" />
</include>

<node pkg="tf" type="static_transform_publisher" name="base_link_to_camera_color_optical_frame_tf" args="0.5308947503950723 0.030109485611943067 0.5874 -0.7071068 0.7071068 0 0 base_link camera_color_optical_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_color_optical_frame_to_camera_color_frame_tf" args="0 0 0 0.5 -0.5 0.5 0.5 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_color_frame_to_camera_link_tf" args="-0.000351057737134 -0.0148385819048 -0.000117231989861 0.00429561594501 0.000667857821099 -0.00226634810679 0.999987959862 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_link_to_camera_depth_frame_tf" args="0 0 0 0 0.0 0.0 1.0 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_depth_frame_to_camera_depth_optical_frame_tf" args="0 0 0 -0.5 0.5 -0.5 0.5 camera_depth_frame camera_depth_optical_frame 10" />
<!-- <node pkg="tf" type="static_transform_publisher" name="link_6_to_camera_color_optical_frame_tf" args="0.0131191 0.0345247 -0.0382688 0.708756, -0.7054088, -0.0062758, 0.0048991 link_6 camera_color_optical_frame 10" /> -->

<node pkg="tf" type="static_transform_publisher" name="link_6_to_camera_link_tf" args="0.01801587 0.0156162 -0.03926776 0.00038930373662151424 0.7130193860400215 -0.002601741316836596 0.7011393830871517 link_6 camera_link 10" />
<node pkg="tf" type="static_transform_publisher" name="base_link_to_link_6_tf" args="0.374, 0.000, 0.630 0 0 0 1 base_link link_6 10" />

<!-- tracking rviz file -->
<node type="rviz" name="rviz" pkg="rviz" args="-d $(find trackdlo)/rviz/tracking.rviz" />
<!-- <node type="rviz" name="rviz" pkg="rviz" args="-d $(find trackdlo)/rviz/tracking.rviz" /> -->

</launch>
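
For reference (not part of this commit): the static_transform_publisher arguments above follow the order "x y z qx qy qz qw parent_frame child_frame period_in_ms". A minimal Python sketch, assuming rospy and tf2_ros are available, that would broadcast the same base_link -> link_6 transform as the last static_transform_publisher line:

import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped

# Illustrative sketch only: broadcasts the same base_link -> link_6 transform
# as the static_transform_publisher line in the launch file above.
rospy.init_node('base_link_to_link_6_tf')
broadcaster = tf2_ros.StaticTransformBroadcaster()

t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = 'base_link'
t.child_frame_id = 'link_6'
t.transform.translation.x = 0.374
t.transform.translation.y = 0.000
t.transform.translation.z = 0.630
t.transform.rotation.w = 1.0   # identity rotation (qx = qy = qz = 0), as in the launch file

broadcaster.sendTransform(t)
rospy.spin()
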
6 changes: 3 additions & 3 deletions launch/trackdlo.launch
@@ -9,12 +9,12 @@
<!-- <arg name="hsv_threshold_lower_limit" default="100 200 60" /> -->
<arg name="hsv_threshold_lower_limit" default="90 90 30" />
<!-- <arg name="hsv_threshold_lower_limit" default="90 90 90" /> -->
<arg name="num_of_nodes" default="45" />
<arg name="num_of_nodes" default="35" />
<arg name="visualize_initialization_process" default="false" />
<arg name="multi_color_dlo" default="false" />

<!-- load parameters to corresponding nodes -->
<node name="trackdlo" pkg="trackdlo" type="trackdlo" output="screen">
<node name="trackdlo" pkg="trackdlo" type="trackdlo" output="screen" respawn="true">
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
@@ -61,7 +61,7 @@
</node>

<!-- launch python node for initialization -->
<node name="init_tracker" pkg="trackdlo" type="initialize.py" output="screen">
<node name="init_tracker" pkg="trackdlo" type="initialize.py" output="screen" respawn="true">
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
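
Each <param> tag above becomes a key under the node's private namespace (for example /init_tracker/num_of_nodes), and respawn="true" tells roslaunch to restart the node automatically if it exits. A minimal sketch (not part of this commit) of how these parameters are read on the Python side, using only parameter names that appear elsewhere in this commit:

import rospy

# Minimal sketch: read the parameters set by trackdlo.launch above.
rospy.init_node('init_tracker', anonymous=True)
num_of_nodes = rospy.get_param('/init_tracker/num_of_nodes')
multi_color_dlo = rospy.get_param('/init_tracker/multi_color_dlo')
camera_info_topic = rospy.get_param('/init_tracker/camera_info_topic')
rgb_topic = rospy.get_param('/init_tracker/rgb_topic')
depth_topic = rospy.get_param('/init_tracker/depth_topic')
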
137 changes: 71 additions & 66 deletions trackdlo/src/initialize.py
@@ -11,7 +11,6 @@
import time
import cv2
import numpy as np
import time

from visualization_msgs.msg import MarkerArray
from scipy import interpolate
@@ -68,75 +67,81 @@ def callback (rgb, depth):
# color thresholding
mask = color_thresholding(hsv_image, cur_depth)

start_time = time.time()
mask = cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)

# returns the pixel coord of points (in order). a list of lists
img_scale = 1
extracted_chains = extract_connected_skeleton(visualize_initialization_process, mask, img_scale=img_scale, seg_length=8, max_curvature=25)

all_pixel_coords = []
for chain in extracted_chains:
all_pixel_coords += chain
print('Finished extracting chains. Time taken:', time.time()-start_time)

all_pixel_coords = np.array(all_pixel_coords) * img_scale
all_pixel_coords = np.flip(all_pixel_coords, 1)

pc_z = cur_depth[tuple(map(tuple, all_pixel_coords.T))] / 1000.0
f = proj_matrix[0, 0]
cx = proj_matrix[0, 2]
cy = proj_matrix[1, 2]
pixel_x = all_pixel_coords[:, 1]
pixel_y = all_pixel_coords[:, 0]

pc_x = (pixel_x - cx) * pc_z / f
pc_y = (pixel_y - cy) * pc_z / f
extracted_chains_3d = np.vstack((pc_x, pc_y))
extracted_chains_3d = np.vstack((extracted_chains_3d, pc_z))
extracted_chains_3d = extracted_chains_3d.T

# do not include those without depth values
extracted_chains_3d = extracted_chains_3d[((extracted_chains_3d[:, 0] != 0) | (extracted_chains_3d[:, 1] != 0) | (extracted_chains_3d[:, 2] != 0))]

if multi_color_dlo:
depth_threshold = 0.58 # m
extracted_chains_3d = extracted_chains_3d[extracted_chains_3d[:, 2] > depth_threshold]

# tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.001)
tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.0005)
# 1st fit, less points
u_fine = np.linspace(0, 1, 300) # <-- num fit points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T

# 2nd fit, higher accuracy
num_true_pts = int(np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1))) * 1000)
u_fine = np.linspace(0, 1, num_true_pts) # <-- num true points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T
total_spline_len = np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1)))

init_nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results_pub.publish(results)

# add color
pc_rgba = struct.unpack('I', struct.pack('BBBB', 255, 40, 40, 255))[0]
pc_rgba_arr = np.full((len(init_nodes), 1), pc_rgba)
pc_colored = np.hstack((init_nodes, pc_rgba_arr)).astype(object)
pc_colored[:, 3] = pc_colored[:, 3].astype(int)

header.stamp = rospy.Time.now()
converted_points = pcl2.create_cloud(header, fields, pc_colored)
pc_pub.publish(converted_points)

rospy.signal_shutdown('Finished initial node set computation.')
try:
start_time = time.time()
mask = cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)

# returns the pixel coord of points (in order). a list of lists
img_scale = 1
extracted_chains = extract_connected_skeleton(visualize_initialization_process, mask, img_scale=img_scale, seg_length=8, max_curvature=25)

all_pixel_coords = []
for chain in extracted_chains:
all_pixel_coords += chain
print('Finished extracting chains. Time taken:', time.time()-start_time)

all_pixel_coords = np.array(all_pixel_coords) * img_scale
all_pixel_coords = np.flip(all_pixel_coords, 1)

pc_z = cur_depth[tuple(map(tuple, all_pixel_coords.T))] / 1000.0
f = proj_matrix[0, 0]
cx = proj_matrix[0, 2]
cy = proj_matrix[1, 2]
pixel_x = all_pixel_coords[:, 1]
pixel_y = all_pixel_coords[:, 0]

pc_x = (pixel_x - cx) * pc_z / f
pc_y = (pixel_y - cy) * pc_z / f
extracted_chains_3d = np.vstack((pc_x, pc_y))
extracted_chains_3d = np.vstack((extracted_chains_3d, pc_z))
extracted_chains_3d = extracted_chains_3d.T

# do not include those without depth values
extracted_chains_3d = extracted_chains_3d[((extracted_chains_3d[:, 0] != 0) | (extracted_chains_3d[:, 1] != 0) | (extracted_chains_3d[:, 2] != 0))]

if multi_color_dlo:
depth_threshold = 0.58 # m
extracted_chains_3d = extracted_chains_3d[extracted_chains_3d[:, 2] > depth_threshold]

# tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.001)
tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.0005)
# 1st fit, less points
u_fine = np.linspace(0, 1, 300) # <-- num fit points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T

# 2nd fit, higher accuracy
num_true_pts = int(np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1))) * 1000)
u_fine = np.linspace(0, 1, num_true_pts) # <-- num true points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T
total_spline_len = np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1)))

init_nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results_pub.publish(results)

# add color
pc_rgba = struct.unpack('I', struct.pack('BBBB', 255, 40, 40, 255))[0]
pc_rgba_arr = np.full((len(init_nodes), 1), pc_rgba)
pc_colored = np.hstack((init_nodes, pc_rgba_arr)).astype(object)
pc_colored[:, 3] = pc_colored[:, 3].astype(int)

header.stamp = rospy.Time.now()
converted_points = pcl2.create_cloud(header, fields, pc_colored)
pc_pub.publish(converted_points)
# rospy.signal_shutdown('Finished initial node set computation.')
except:
rospy.logerr("Failed to extract splines.")
rospy.signal_shutdown('Stopping initialization.')
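
The callback above back-projects each masked pixel into 3D with the standard pinhole model: depth is converted from millimeters to meters, then x = (u - cx) * z / f and y = (v - cy) * z / f, with f, cx, and cy taken from the camera projection matrix. A standalone sketch of that step (the function name and inputs are illustrative, not from the repository):

import numpy as np

def deproject(pixel_coords, depth_mm, proj_matrix):
    # pixel_coords: (N, 2) array of (row, col) indices, as produced above after np.flip
    # depth_mm:     (H, W) depth image in millimeters
    # proj_matrix:  3x4 camera projection matrix from the CameraInfo message
    z = depth_mm[tuple(map(tuple, pixel_coords.T))] / 1000.0   # mm -> m
    f = proj_matrix[0, 0]    # focal length in pixels
    cx = proj_matrix[0, 2]   # principal point, x
    cy = proj_matrix[1, 2]   # principal point, y
    u = pixel_coords[:, 1]   # column index = image x
    v = pixel_coords[:, 0]   # row index = image y
    x = (u - cx) * z / f
    y = (v - cy) * z / f
    return np.column_stack((x, y, z))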

if __name__=='__main__':
rospy.init_node('init_tracker', anonymous=True)

global new_messages
new_messages=False

num_of_nodes = rospy.get_param('/init_tracker/num_of_nodes')
multi_color_dlo = rospy.get_param('/init_tracker/multi_color_dlo')
camera_info_topic = rospy.get_param('/init_tracker/camera_info_topic')
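
Both versions of the callback derive the initial node set with the same spline step: splprep fits a smoothing B-spline to the ordered 3D chain, a coarse evaluation estimates the curve length, a dense second evaluation places roughly one sample per millimeter, and num_of_nodes evenly spaced samples become the initial nodes. A condensed sketch (the function name and the points3d input are illustrative):

import numpy as np
from scipy import interpolate

def resample_nodes(points3d, num_of_nodes, smoothing=0.0005):
    # points3d: ordered (N, 3) array of 3D points along the DLO
    tck, _ = interpolate.splprep(points3d.T, s=smoothing)

    # first pass: coarse evaluation to estimate the total curve length
    u = np.linspace(0, 1, 300)
    coarse = np.vstack(interpolate.splev(u, tck)).T
    length = np.sum(np.linalg.norm(np.diff(coarse, axis=0), axis=1))

    # second pass: about one sample per millimeter, then pick evenly spaced nodes
    num_true_pts = int(length * 1000)
    u = np.linspace(0, 1, num_true_pts)
    fine = np.vstack(interpolate.splev(u, tck)).T
    return fine[np.linspace(0, num_true_pts - 1, num_of_nodes).astype(int)]
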
4 changes: 2 additions & 2 deletions trackdlo/src/utils.py
@@ -40,7 +40,7 @@ def orientation(p, q, r):
# See https://www.geeksforgeeks.org/orientation-3-ordered-points/amp/
# for details of below formula.

val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))
val = (np.float64(q.y - p.y) * (r.x - q.x)) - (np.float64(q.x - p.x) * (r.y - q.y))
if (val > 0):

# Clockwise orientation
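
The val expression above is the negated z-component of the cross product of the segments p->q and q->r, so its sign classifies the turn: positive is clockwise, negative counter-clockwise, and zero collinear (in a conventional y-up frame). A tiny self-contained example (the Point type and return codes are illustrative, following the convention of the linked article):

import numpy as np
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])

def orientation(p, q, r):
    val = (np.float64(q.y - p.y) * (r.x - q.x)) - (np.float64(q.x - p.x) * (r.y - q.y))
    if val > 0:
        return 1   # clockwise
    elif val < 0:
        return 2   # counter-clockwise
    return 0       # collinear

print(orientation(Point(0, 0), Point(1, 0), Point(1, 1)))   # prints 2: a left (counter-clockwise) turn
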
@@ -213,7 +213,7 @@ def extract_connected_skeleton (visualize_process, mask, img_scale=10, seg_lengt
break

# graph for visualization
mask = cv2.line(mask, contour[i][0], contour[i+1][0], 255, 1)
mask = cv2.line(mask, tuple(contour[i][0]), tuple(contour[i+1][0]), 255, 1)

# if haven't initialized, perform initialization
if cur_seg_start_point is None:
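
Wrapping the contour endpoints in tuple(...) matters because recent OpenCV releases are stricter about the point arguments of drawing functions: a NumPy array of shape (2,) may be rejected where a plain tuple of integers is accepted. A small sketch of the same pattern (the mask and contour here are made up):

import numpy as np
import cv2

# Illustrative only: draw a line between two contour points on a blank mask.
# OpenCV stores contours with shape (N, 1, 2), so contour[i][0] is one (x, y) point.
mask = np.zeros((100, 100), dtype=np.uint8)
contour = np.array([[[10, 10]], [[80, 40]]])

p1 = tuple(int(v) for v in contour[0][0])
p2 = tuple(int(v) for v in contour[1][0])
mask = cv2.line(mask, p1, p2, 255, 1)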
