Merge branch 'master' into master

hollydinkel authored Feb 10, 2024
2 parents 313f4bf + f81157c commit 9d689b1
Showing 7 changed files with 45 additions and 59 deletions.
8 changes: 3 additions & 5 deletions .gitignore
@@ -1,7 +1,5 @@
__pycache__/
.vscode/
data/output/*.png
data/output/*/*.json
data/*.bag
data/
test.cpp
data/*
test.cpp
*.bag
2 changes: 1 addition & 1 deletion docs/RUN.md
@@ -124,4 +124,4 @@ The ROS bag files used in our paper and the supplementary video can be found [he
### Notes on Running the Bag Files

* The rope and the rubber tubing require different HSV thresholding values. Both objects use the `hsv_threshold_upper_limit` default value = `130 255 255`; however, the rope uses the `hsv_threshold_lower_limit` default value = `90 90 30` and the rubber tubing uses the `hsv_threshold_lower_limit` default value = `100 200 60`.
* For `.bag` files in `rope/`, `rubber_tubing/`, and `failure_cases/`, the `camera_info_topic` is published under `/camera/aligned_depth_to_color/camera_info`. For `.bag` files in `quantitative_eval/`, the `camera_info_topic` is published under `/camera/color/camera_info`.
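
The two thresholds map directly onto an OpenCV `cv2.inRange` call, as in `utils/color_picker.py` and `trackdlo_node.cpp` below. A minimal standalone sketch for trying the values outside the node (the file name `frame.png` is only a placeholder):

```python
import cv2
import numpy as np

# Shared upper limit and the rope's lower limit from the note above;
# swap the lower limit for (100, 200, 60) when working with the rubber tubing.
hsv_lower = np.array([90, 90, 30])
hsv_upper = np.array([130, 255, 255])

bgr = cv2.imread("frame.png")                  # placeholder: any color frame from a bag
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)     # threshold in HSV space, not BGR
mask = cv2.inRange(hsv, hsv_lower, hsv_upper)  # 255 where the pixel falls inside the range
segmented = cv2.bitwise_and(bgr, bgr, mask=mask)
```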
16 changes: 7 additions & 9 deletions launch/realsense_node.launch
@@ -2,25 +2,23 @@

<include file="$(find realsense2_camera)/launch/rs_camera.launch">
<arg name="json_file_path" value="$(find trackdlo)/config/preset_decimation_4.0_depth_step_100.json" />
<arg name="publish_tf" value="false" />
<arg name="filters" value="pointcloud,temporal,decimation" />
<arg name="depth_fps" value="15" />
<arg name="depth_fps" value="10" />
<arg name="depth_height" value="720" />
<arg name="depth_width" value="1280" />
<arg name="color_fps" value="15" />
<arg name="color_fps" value="10" />
<arg name="color_height" value="720" />
<arg name="color_width" value="1280" />
<arg name="align_depth" value="true" />
<arg name="ordered_pc" value="true" />
</include>

<node pkg="tf" type="static_transform_publisher" name="base_link_to_camera_color_optical_frame_tf" args="0.5308947503950723 0.030109485611943067 0.5874 -0.7071068 0.7071068 0 0 base_link camera_color_optical_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_color_optical_frame_to_camera_color_frame_tf" args="0 0 0 0.5 -0.5 0.5 0.5 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_color_frame_to_camera_link_tf" args="-0.000351057737134 -0.0148385819048 -0.000117231989861 0.00429561594501 0.000667857821099 -0.00226634810679 0.999987959862 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_link_to_camera_depth_frame_tf" args="0 0 0 0 0.0 0.0 1.0 camera_color_optical_frame camera_color_frame 10" />
<node pkg="tf" type="static_transform_publisher" name="camera_depth_frame_to_camera_depth_optical_frame_tf" args="0 0 0 -0.5 0.5 -0.5 0.5 camera_depth_frame camera_depth_optical_frame 10" />
<!-- <node pkg="tf" type="static_transform_publisher" name="link_6_to_camera_color_optical_frame_tf" args="0.0131191 0.0345247 -0.0382688 0.708756, -0.7054088, -0.0062758, 0.0048991 link_6 camera_color_optical_frame 10" /> -->

<node pkg="tf" type="static_transform_publisher" name="link_6_to_camera_link_tf" args="0.01801587 0.0156162 -0.03926776 0.00038930373662151424 0.7130193860400215 -0.002601741316836596 0.7011393830871517 link_6 camera_link 10" />
<node pkg="tf" type="static_transform_publisher" name="base_link_to_link_6_tf" args="0.374, 0.000, 0.630 0 0 0 1 base_link link_6 10" />

<!-- tracking rviz file -->
<node type="rviz" name="rviz" pkg="rviz" args="-d $(find trackdlo)/rviz/tracking.rviz" />
<!-- <node type="rviz" name="rviz" pkg="rviz" args="-d $(find trackdlo)/rviz/tracking.rviz" /> -->

</launch>
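
For reference, the `static_transform_publisher` arguments above follow the order `x y z qx qy qz qw parent_frame child_frame period_in_ms`. A quick sanity check (a throwaway sketch, not part of the launch) is to confirm the quaternions are unit length:

```python
import math

# Quaternion (qx, qy, qz, qw) from the link_6 -> camera_link static transform above.
q = (0.00038930373662151424, 0.7130193860400215, -0.002601741316836596, 0.7011393830871517)

norm = math.sqrt(sum(c * c for c in q))
print(f"|q| = {norm:.6f}")  # should be ~1.0 for a valid rotation
```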
14 changes: 6 additions & 8 deletions launch/trackdlo.launch
@@ -1,20 +1,18 @@
<launch>
<!-- change the below parameters to match your camera/scene setup -->
<!-- <arg name="camera_info_topic" default="/camera/color/camera_info" /> -->
<arg name="camera_info_topic" default="/camera/aligned_depth_to_color/camera_info" />
<arg name="rgb_topic" default="/camera/color/image_raw" />
<arg name="depth_topic" default="/camera/aligned_depth_to_color/image_raw" />
<arg name="result_frame_id" default="camera_color_optical_frame" />
<arg name="result_frame_id" default="camera_color_optical_frame" />
<arg name="hsv_threshold_upper_limit" default="130 255 255" />
<!-- <arg name="hsv_threshold_lower_limit" default="100 200 60" /> -->
<arg name="hsv_threshold_lower_limit" default="90 90 30" />
<!-- <arg name="hsv_threshold_lower_limit" default="90 90 90" /> -->
<arg name="num_of_nodes" default="45" />
<arg name="num_of_nodes" default="31" />
<arg name="visualize_initialization_process" default="false" />
<arg name="multi_color_dlo" default="false" />
<arg name="multi_color_dlo" default="true" />
<arg name="output" default="screen" />

<!-- load parameters to corresponding nodes -->
<node name="trackdlo" pkg="trackdlo" type="trackdlo" output="screen">
<node name="trackdlo" pkg="trackdlo" type="trackdlo" output="$(arg output)" respawn="true">
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
@@ -61,7 +59,7 @@
</node>

<!-- launch python node for initialization -->
<node name="init_tracker" pkg="trackdlo" type="initialize.py" output="screen">
<node name="init_tracker" pkg="trackdlo" type="initialize.py" output="$(arg output)" respawn="true">
<param name="camera_info_topic" type="string" value="$(arg camera_info_topic)" />
<param name="rgb_topic" type="string" value="$(arg rgb_topic)" />
<param name="depth_topic" type="string" value="$(arg depth_topic)" />
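
The `<param>` tags above expose each launch argument as a private ROS parameter of the corresponding node. As a sketch of how a node consumes them (a hypothetical snippet mirroring `initialize.py`, which also sets `/init_tracker/num_of_nodes` at runtime):

```python
import rospy

rospy.init_node("init_tracker")

# Private parameters set by the <param> tags in trackdlo.launch.
camera_info_topic = rospy.get_param("~camera_info_topic")
multi_color_dlo = rospy.get_param("~multi_color_dlo", False)

# Global parameter updated by initialize.py after deduplicating the spline nodes.
num_of_nodes = rospy.get_param("/init_tracker/num_of_nodes", 31)
```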
2 changes: 2 additions & 0 deletions trackdlo/src/initialize.py
@@ -47,6 +47,7 @@ def remove_duplicate_rows(array):
_, idx = np.unique(array, axis=0, return_index=True)
data = array[np.sort(idx)]
rospy.set_param('/init_tracker/num_of_nodes', len(data))

return data

def callback (rgb, depth):
@@ -124,6 +125,7 @@ def callback (rgb, depth):
nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

init_nodes = remove_duplicate_rows(nodes)

# results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results = ndarray2MarkerArray(init_nodes, result_frame_id, [0, 149/255, 203/255, 0.75], [0, 149/255, 203/255, 0.75])
results_pub.publish(results)
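
The `remove_duplicate_rows` helper touched above relies on `np.unique` returning first-occurrence indices; sorting those indices preserves the original node ordering. A self-contained sketch of that behavior (the ROS parameter update is omitted here):

```python
import numpy as np

def remove_duplicate_rows(array):
    # np.unique sorts its output, so keep each row's first-occurrence index
    # and re-sort by it to keep the nodes in their original order.
    _, idx = np.unique(array, axis=0, return_index=True)
    return array[np.sort(idx)]

nodes = np.array([[0.0, 0.1], [0.2, 0.3], [0.0, 0.1], [0.4, 0.5]])
print(remove_duplicate_rows(nodes))  # -> [[0.  0.1] [0.2 0.3] [0.4 0.5]]
```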
49 changes: 19 additions & 30 deletions trackdlo/src/trackdlo_node.cpp
@@ -86,34 +86,21 @@ double pub_data_total = 0;
int frames = 0;

Mat color_thresholding (Mat cur_image_hsv) {
std::vector<int> lower_blue = {90, 90, 60};
std::vector<int> lower_blue = {90, 90, 30};
std::vector<int> upper_blue = {130, 255, 255};

std::vector<int> lower_red_1 = {130, 60, 50};
std::vector<int> upper_red_1 = {255, 255, 255};
std::vector<int> lower_green = {58, 130, 50};
std::vector<int> upper_green = {90, 255, 255};

std::vector<int> lower_red_2 = {0, 60, 50};
std::vector<int> upper_red_2 = {10, 255, 255};

std::vector<int> lower_yellow = {15, 100, 80};
std::vector<int> upper_yellow = {40, 255, 255};
Mat mask_blue, mask_green, mask;

Mat mask_blue, mask_red_1, mask_red_2, mask_red, mask_yellow, mask;
// filter blue
cv::inRange(cur_image_hsv, cv::Scalar(lower_blue[0], lower_blue[1], lower_blue[2]), cv::Scalar(upper_blue[0], upper_blue[1], upper_blue[2]), mask_blue);

// filter red
cv::inRange(cur_image_hsv, cv::Scalar(lower_red_1[0], lower_red_1[1], lower_red_1[2]), cv::Scalar(upper_red_1[0], upper_red_1[1], upper_red_1[2]), mask_red_1);
cv::inRange(cur_image_hsv, cv::Scalar(lower_red_2[0], lower_red_2[1], lower_red_2[2]), cv::Scalar(upper_red_2[0], upper_red_2[1], upper_red_2[2]), mask_red_2);
cv::inRange(cur_image_hsv, cv::Scalar(lower_green[0], lower_green[1], lower_green[2]), cv::Scalar(upper_green[0], upper_green[1], upper_green[2]), mask_green);

// filter yellow
cv::inRange(cur_image_hsv, cv::Scalar(lower_yellow[0], lower_yellow[1], lower_yellow[2]), cv::Scalar(upper_yellow[0], upper_yellow[1], upper_yellow[2]), mask_yellow);

// combine red mask
cv::bitwise_or(mask_red_1, mask_red_2, mask_red);
// combine overall mask
cv::bitwise_or(mask_red, mask_blue, mask);
cv::bitwise_or(mask_yellow, mask, mask);
cv::bitwise_or(mask_green, mask_blue, mask);

return mask;
}
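
The revised mask keeps only the blue and green HSV ranges and combines them with a bitwise OR. A rough Python equivalent (a sketch for experimenting outside the node, not the node's code path):

```python
import cv2

def color_thresholding(cur_image_hsv):
    # Same HSV ranges as the updated C++ above: blue plus green, merged into one mask.
    mask_blue = cv2.inRange(cur_image_hsv, (90, 90, 30), (130, 255, 255))
    mask_green = cv2.inRange(cur_image_hsv, (58, 130, 50), (90, 255, 255))
    return cv2.bitwise_or(mask_green, mask_blue)
```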
@@ -335,11 +322,14 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
}

// add edges for checking overlap with upcoming nodes
double x1 = col_1;
double y1 = row_1;
double x2 = col_2;
double y2 = row_2;
cv::line(projected_edges, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 255, 255), dlo_pixel_width);
cv::line(projected_edges, cv::Point(col_1, row_1), cv::Point(col_2, row_2), cv::Scalar(255, 255, 255), dlo_pixel_width);
}

// obtain self-occluded nodes
for (int i = 0; i < Y.rows(); i ++) {
if (std::find(not_self_occluded_nodes.begin(), not_self_occluded_nodes.end(), i) == not_self_occluded_nodes.end()) {
self_occluded_nodes.push_back(i);
}
}

// sort visible nodes to preserve the original connectivity
@@ -410,8 +400,8 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
cv::Scalar line_color;

if (std::find(vis.begin(), vis.end(), idx) != vis.end()) {
point_color = cv::Scalar(0, 150, 255);
line_color = cv::Scalar(0, 255, 0);
point_color = cv::Scalar(203, 149, 0);
line_color = cv::Scalar(203, 149, 0);
}
else {
point_color = cv::Scalar(0, 0, 255);
@@ -421,7 +411,7 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
line_color = cv::Scalar(0, 0, 255);
}
else {
line_color = cv::Scalar(0, 255, 0);
line_color = cv::Scalar(203, 149, 0);
}
}

@@ -433,7 +423,7 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
cv::circle(tracking_img, cv::Point(x, y), 7, point_color, -1);

if (std::find(vis.begin(), vis.end(), idx+1) != vis.end()) {
point_color = cv::Scalar(0, 150, 255);
point_color = cv::Scalar(203, 149, 0);
}
else {
point_color = cv::Scalar(0, 0, 255);
@@ -452,7 +442,7 @@ sensor_msgs::ImagePtr Callback(const sensor_msgs::ImageConstPtr& image_msg, cons
tracking_img_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", tracking_img).toImageMsg();

// publish the results as a marker array
visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, result_frame_id, "node_results", {1.0, 150.0/255.0, 0.0, 1.0}, {0.0, 1.0, 0.0, 1.0}, 0.01, 0.005, vis, {1.0, 0.0, 0.0, 1.0}, {1.0, 0.0, 0.0, 1.0});
visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, result_frame_id, "node_results", {0.0, 149.0/255.0, 203.0/255.0, 1.0}, {0.0, 149.0/255.0, 203.0/255.0, 1.0}, 0.01, 0.005, vis, {1.0, 0.0, 0.0, 1.0}, {1.0, 0.0, 0.0, 1.0});
// visualization_msgs::MarkerArray results = MatrixXd2MarkerArray(Y, result_frame_id, "node_results", {1.0, 150.0/255.0, 0.0, 1.0}, {0.0, 1.0, 0.0, 1.0}, 0.01, 0.005);
visualization_msgs::MarkerArray guide_nodes_results = MatrixXd2MarkerArray(guide_nodes, result_frame_id, "guide_node_results", {0.0, 0.0, 0.0, 0.5}, {0.0, 0.0, 1.0, 0.5});
visualization_msgs::MarkerArray corr_priors_results = MatrixXd2MarkerArray(priors, result_frame_id, "corr_prior_results", {0.0, 0.0, 0.0, 0.5}, {1.0, 0.0, 0.0, 0.5});
@@ -600,7 +590,6 @@ int main(int argc, char **argv) {
init_nodes_sub = nh.subscribe("/trackdlo/init_nodes", 1, update_init_nodes);
camera_info_sub = nh.subscribe(camera_info_topic, 1, update_camera_info);

image_transport::Publisher mask_pub = it.advertise("/trackdlo/mask", pub_queue_size);
image_transport::Publisher tracking_img_pub = it.advertise("/trackdlo/results_img", pub_queue_size);
pc_pub = nh.advertise<sensor_msgs::PointCloud2>("/trackdlo/filtered_pointcloud", pub_queue_size);
results_pub = nh.advertise<visualization_msgs::MarkerArray>("/trackdlo/results_marker", pub_queue_size);
13 changes: 7 additions & 6 deletions utils/color_picker.py
@@ -51,21 +51,22 @@ def nothing(x):
lower = np.array([hMin, sMin, vMin])
upper = np.array([hMax, sMax, vMax])

# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(img,img, mask= mask)

# Print if there is a change in HSV value
if( (phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax) ):
print("(hMin = %d , sMin = %d, vMin = %d), (hMax = %d , sMax = %d, vMax = %d)" % (hMin , sMin , vMin, hMax, sMax , vMax))
print("HSV min: (%d %d %d), HSV max: (%d %d %d)" % (hMin, sMin, vMin, hMax, sMax, vMax))
phMin = hMin
psMin = sMin
pvMin = vMin
phMax = hMax
psMax = sMax
pvMax = vMax

# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv.copy(), lower, upper)

output = cv2.bitwise_and(img, img, mask= mask)

# Display output image
cv2.imshow('image',output)

