diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml index d4a0a8b5abdd8..eac82e18f4d35 100644 --- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml +++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml @@ -4,10 +4,10 @@ - - - - + + + + @@ -60,16 +60,16 @@ - + - - - + + + @@ -77,16 +77,16 @@ - - + + - - + + diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py index 9603570b3cfe7..74b577c701f1d 100644 --- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py +++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py @@ -62,9 +62,9 @@ def create_traffic_light_node_container(namespace, context, *args, **kwargs): camera_arguments = { "input/image": f"/sensing/camera/{namespace}/image_raw", "output/rois": f"/perception/traffic_light_recognition/{namespace}/detection/rois", - "output/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/traffic_signals", - "output/car/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/car/traffic_signals", - "output/pedestrian/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/pedestrian/traffic_signals", + "output/traffic_lights": f"/perception/traffic_light_recognition/{namespace}/classification/traffic_lights", + "output/car/traffic_lights": f"/perception/traffic_light_recognition/{namespace}/classification/car/traffic_lights", + "output/pedestrian/traffic_lights": f"/perception/traffic_light_recognition/{namespace}/classification/pedestrian/traffic_lights", } def create_parameter_dict(*args): @@ -101,7 +101,7 @@ def create_parameter_dict(*args): remappings=[ ("~/input/image", camera_arguments["input/image"]), ("~/input/rois", camera_arguments["output/rois"]), - ("~/output/traffic_signals", "classified/car/traffic_signals"), + ("~/output/traffic_lights", "classified/car/traffic_lights"), ], extra_arguments=[ {"use_intra_process_comms": LaunchConfiguration("use_intra_process")} @@ -116,7 +116,7 @@ def create_parameter_dict(*args): remappings=[ ("~/input/image", camera_arguments["input/image"]), ("~/input/rois", camera_arguments["output/rois"]), - ("~/output/traffic_signals", "classified/pedestrian/traffic_signals"), + ("~/output/traffic_lights", "classified/pedestrian/traffic_lights"), ], extra_arguments=[ {"use_intra_process_comms": LaunchConfiguration("use_intra_process")} @@ -132,8 +132,8 @@ def create_parameter_dict(*args): ("~/input/rois", camera_arguments["output/rois"]), ("~/input/rough/rois", "detection/rough/rois"), ( - "~/input/traffic_signals", - camera_arguments["output/traffic_signals"], + "~/input/traffic_lights", + camera_arguments["output/traffic_lights"], ), ("~/output/image", "debug/rois"), ("~/output/image/compressed", "debug/rois/compressed"), diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_occlusion_predictor.launch.py b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_occlusion_predictor.launch.py index b4a95165758b7..16dda3ae8c99d 100644 --- 
a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_occlusion_predictor.launch.py +++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_occlusion_predictor.launch.py @@ -33,9 +33,9 @@ def create_traffic_light_occlusion_predictor(namespace): "input/camera_info": f"/sensing/camera/{namespace}/camera_info", "input/cloud": LaunchConfiguration("input/cloud"), "input/rois": f"/perception/traffic_light_recognition/{namespace}/detection/rois", - "input/car/traffic_signals": "classified/car/traffic_signals", - "input/pedestrian/traffic_signals": "classified/pedestrian/traffic_signals", - "output/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/traffic_signals", + "input/car/traffic_lights": "classified/car/traffic_lights", + "input/pedestrian/traffic_lights": "classified/pedestrian/traffic_lights", + "output/traffic_lights": f"/perception/traffic_light_recognition/{namespace}/classification/traffic_lights", }.items() group = GroupAction( diff --git a/perception/autoware_crosswalk_traffic_light_estimator/README.md b/perception/autoware_crosswalk_traffic_light_estimator/README.md index b14fefbd43beb..82aa12c124029 100644 --- a/perception/autoware_crosswalk_traffic_light_estimator/README.md +++ b/perception/autoware_crosswalk_traffic_light_estimator/README.md @@ -2,23 +2,23 @@ ## Purpose -`crosswalk_traffic_light_estimator` is a module that estimates pedestrian traffic signals from HDMap and detected vehicle traffic signals. +`crosswalk_traffic_light_estimator` is a module that estimates pedestrian traffic signals from HDMap and detected traffic signals. ## Inputs / Outputs ### Input -| Name | Type | Description | -| ------------------------------------ | ------------------------------------------------ | ------------------ | -| `~/input/vector_map` | `autoware_map_msgs::msg::LaneletMapBin` | vector map | -| `~/input/route` | `autoware_planning_msgs::msg::LaneletRoute` | route | -| `~/input/classified/traffic_signals` | `tier4_perception_msgs::msg::TrafficSignalArray` | classified signals | +| Name | Type | Description | +| ----------------------------------- | ------------------------------------------------------- | ------------------ | +| `~/input/vector_map` | `autoware_map_msgs::msg::LaneletMapBin` | vector map | +| `~/input/route` | `autoware_planning_msgs::msg::LaneletRoute` | route | +| `~/input/classified/traffic_lights` | `autoware_perception_msgs::msg::TrafficLightGroupArray` | classified signals | ### Output -| Name | Type | Description | -| -------------------------- | ------------------------------------------------------- | --------------------------------------------------------- | -| `~/output/traffic_signals` | `autoware_perception_msgs::msg::TrafficLightGroupArray` | output that contains estimated pedestrian traffic signals | +| Name | Type | Description | +| ------------------------- | ------------------------------------------------------- | --------------------------------------------------------- | +| `~/output/traffic_lights` | `autoware_perception_msgs::msg::TrafficLightGroupArray` | output that contains estimated pedestrian traffic signals | ## Parameters @@ -26,9 +26,15 @@ | :---------------------------- | :------- | 
:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------ | | `use_last_detect_color` | `bool` | If this parameter is `true`, this module estimates pedestrian's traffic signal as RED not only when vehicle's traffic signal is detected as GREEN/AMBER but also when detection results change GREEN/AMBER to UNKNOWN. (If detection results change RED or AMBER to UNKNOWN, this module estimates pedestrian's traffic signal as UNKNOWN.) If this parameter is `false`, this module uses only the latest detection results for estimation. (Only when the detection result is GREEN/AMBER, this module estimates pedestrian's traffic signal as RED.) | `true` | | `last_detect_color_hold_time` | `double` | The time threshold to hold for the last detected color. | `2.0` | +| `last_colors_hold_time` | `double` | The time threshold to hold the history of detected pedestrian traffic light colors. The unit is seconds. | `1.0` | ## Inner-workings / Algorithms +1. Estimate the color of the pedestrian traffic light from the HDMap and the detected vehicle traffic signals. +2. If pedestrian traffic light recognition is available, determine the final state based on the classification result and the estimation result. + +### Estimation + ```plantuml start @@ -58,7 +64,7 @@ end If traffic between pedestrians and vehicles is controlled by traffic signals, the crosswalk traffic signal may be **RED** in order to prevent pedestrians from crossing when the following conditions are satisfied. -### Situation1 +#### Situation1 - crosswalk conflicts **STRAIGHT** lanelet - the lanelet refers **GREEN** or **AMBER** traffic signal (The following pictures show only **GREEN** case) @@ -70,7 +76,7 @@ If traffic between pedestrians and vehicles is controlled by traffic signals, th -### Situation2 +#### Situation2 - crosswalk conflicts different turn direction lanelets (STRAIGHT and LEFT, LEFT and RIGHT, RIGHT and STRAIGHT) - the lanelets refer **GREEN** or **AMBER** traffic signal (The following pictures show only **GREEN** case) @@ -79,6 +85,28 @@ If traffic between pedestrians and vehicles is controlled by traffic signals, th +### Final state + +```plantuml
+start
+if (the pedestrian traffic light classification result exists?) then (yes)
+  :update the flashing flag according to the classification result (in_signal) and last_signals;
+  if (the traffic light is flashing?) then (yes)
+    :update the traffic light state;
+  else (no)
+    :the traffic light state is the same as the classification result;
+  endif
+else (no)
+  :the traffic light state is the same as the estimation;
+endif
+:output the current traffic light state;
+end
+```
+
+#### Update flashing flag
+
+![flashing_state](./images/flashing_state.png)
+
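+The following is a minimal Python sketch of the flashing-flag idea described above, not the node's actual C++ implementation; the history format and the GREEN/UNKNOWN alternation rule are illustrative assumptions.
+
+```python
+# Hypothetical sketch: a pedestrian light whose recent color history,
+# kept for `last_colors_hold_time` seconds, alternates between GREEN and
+# UNKNOWN is treated as flashing.
+GREEN, UNKNOWN = "GREEN", "UNKNOWN"
+
+
+def is_flashing(history, now, last_colors_hold_time):
+    """history: list of (stamp_sec, color) pairs, newest last."""
+    recent = [color for stamp, color in history if now - stamp <= last_colors_hold_time]
+    # Both colors observed within the hold time -> alternation -> flashing.
+    return GREEN in recent and UNKNOWN in recent
+```
+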
+ ## Assumptions / Known limits ## Future extensions / Unimplemented parts diff --git a/perception/autoware_crosswalk_traffic_light_estimator/images/flashing_state.png b/perception/autoware_crosswalk_traffic_light_estimator/images/flashing_state.png new file mode 100644 index 0000000000000..7686f3842e75c Binary files /dev/null and b/perception/autoware_crosswalk_traffic_light_estimator/images/flashing_state.png differ diff --git a/perception/autoware_crosswalk_traffic_light_estimator/launch/crosswalk_traffic_light_estimator.launch.xml b/perception/autoware_crosswalk_traffic_light_estimator/launch/crosswalk_traffic_light_estimator.launch.xml index 2e1437ecd7d93..9fb9c1346339f 100644 --- a/perception/autoware_crosswalk_traffic_light_estimator/launch/crosswalk_traffic_light_estimator.launch.xml +++ b/perception/autoware_crosswalk_traffic_light_estimator/launch/crosswalk_traffic_light_estimator.launch.xml @@ -16,8 +16,8 @@ - - + + diff --git a/perception/autoware_crosswalk_traffic_light_estimator/src/node.cpp b/perception/autoware_crosswalk_traffic_light_estimator/src/node.cpp index b0ec4d0e5ffd0..bfa8d7c064c55 100644 --- a/perception/autoware_crosswalk_traffic_light_estimator/src/node.cpp +++ b/perception/autoware_crosswalk_traffic_light_estimator/src/node.cpp @@ -95,11 +95,11 @@ CrosswalkTrafficLightEstimatorNode::CrosswalkTrafficLightEstimatorNode( "~/input/route", rclcpp::QoS{1}.transient_local(), std::bind(&CrosswalkTrafficLightEstimatorNode::onRoute, this, _1)); sub_traffic_light_array_ = create_subscription( - "~/input/classified/traffic_signals", rclcpp::QoS{1}, + "~/input/classified/traffic_lights", rclcpp::QoS{1}, std::bind(&CrosswalkTrafficLightEstimatorNode::onTrafficLightArray, this, _1)); pub_traffic_light_array_ = - this->create_publisher("~/output/traffic_signals", rclcpp::QoS{1}); + this->create_publisher("~/output/traffic_lights", rclcpp::QoS{1}); pub_processing_time_ = std::make_shared(this, "~/debug"); } diff --git a/perception/autoware_map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp b/perception/autoware_map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp index 1675f8b1fbfa2..8eccee58ca4c0 100644 --- a/perception/autoware_map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp +++ b/perception/autoware_map_based_prediction/include/map_based_prediction/map_based_prediction_node.hpp @@ -87,7 +87,7 @@ class MapBasedPredictionNode : public rclcpp::Node rclcpp::Subscription::SharedPtr sub_objects_; rclcpp::Subscription::SharedPtr sub_map_; autoware::universe_utils::InterProcessPollingSubscriber - sub_traffic_signals_{this, "/traffic_signals"}; + sub_traffic_signals_{this, "/traffic_lights"}; // debug publisher std::unique_ptr> stop_watch_ptr_; diff --git a/perception/autoware_map_based_prediction/launch/map_based_prediction.launch.xml b/perception/autoware_map_based_prediction/launch/map_based_prediction.launch.xml index 2c668639c2a56..00b839e07355e 100644 --- a/perception/autoware_map_based_prediction/launch/map_based_prediction.launch.xml +++ b/perception/autoware_map_based_prediction/launch/map_based_prediction.launch.xml @@ -10,7 +10,7 @@ - + diff --git a/perception/autoware_traffic_light_arbiter/README.md b/perception/autoware_traffic_light_arbiter/README.md index 619154e1e183b..2d185bfc4f9e3 100644 --- a/perception/autoware_traffic_light_arbiter/README.md +++ b/perception/autoware_traffic_light_arbiter/README.md @@ -25,17 +25,17 @@ The table below outlines how the matching process 
determines the output based on #### Input -| Name | Type | Description | -| -------------------------------- | ----------------------------------------------------- | -------------------------------------------------------- | -| ~/sub/vector_map | autoware_map_msgs::msg::LaneletMapBin | The vector map to get valid traffic signal ids. | -| ~/sub/perception_traffic_signals | autoware_perception_msgs::msg::TrafficLightGroupArray | The traffic signals from the image recognition pipeline. | -| ~/sub/external_traffic_signals | autoware_perception_msgs::msg::TrafficLightGroupArray | The traffic signals from an external system. | +| Name | Type | Description | +| ------------------------------- | ----------------------------------------------------- | -------------------------------------------------------- | +| ~/sub/vector_map | autoware_map_msgs::msg::LaneletMapBin | The vector map to get valid traffic signal ids. | +| ~/sub/perception_traffic_lights | autoware_perception_msgs::msg::TrafficLightGroupArray | The traffic signals from the image recognition pipeline. | +| ~/sub/external_traffic_lights | autoware_perception_msgs::msg::TrafficLightGroupArray | The traffic signals from an external system. | #### Output -| Name | Type | Description | -| --------------------- | ----------------------------------------------------- | -------------------------------- | -| ~/pub/traffic_signals | autoware_perception_msgs::msg::TrafficLightGroupArray | The merged traffic signal state. | +| Name | Type | Description | +| -------------------- | ----------------------------------------------------- | -------------------------------- | +| ~/pub/traffic_lights | autoware_perception_msgs::msg::TrafficLightGroupArray | The merged traffic signal state. | ## Parameters diff --git a/perception/autoware_traffic_light_arbiter/launch/traffic_light_arbiter.launch.xml b/perception/autoware_traffic_light_arbiter/launch/traffic_light_arbiter.launch.xml index 8e2b9e8cf02d3..3ec9c13a4d1e5 100644 --- a/perception/autoware_traffic_light_arbiter/launch/traffic_light_arbiter.launch.xml +++ b/perception/autoware_traffic_light_arbiter/launch/traffic_light_arbiter.launch.xml @@ -1,16 +1,16 @@ - - - + + + - - - + + + diff --git a/perception/autoware_traffic_light_arbiter/src/traffic_light_arbiter.cpp b/perception/autoware_traffic_light_arbiter/src/traffic_light_arbiter.cpp index e71629fa5dd28..9898840089fea 100644 --- a/perception/autoware_traffic_light_arbiter/src/traffic_light_arbiter.cpp +++ b/perception/autoware_traffic_light_arbiter/src/traffic_light_arbiter.cpp @@ -85,14 +85,14 @@ TrafficLightArbiter::TrafficLightArbiter(const rclcpp::NodeOptions & options) std::bind(&TrafficLightArbiter::onMap, this, std::placeholders::_1)); perception_tlr_sub_ = create_subscription( - "~/sub/perception_traffic_signals", rclcpp::QoS(1), + "~/sub/perception_traffic_lights", rclcpp::QoS(1), std::bind(&TrafficLightArbiter::onPerceptionMsg, this, std::placeholders::_1)); external_tlr_sub_ = create_subscription( - "~/sub/external_traffic_signals", rclcpp::QoS(1), + "~/sub/external_traffic_lights", rclcpp::QoS(1), std::bind(&TrafficLightArbiter::onExternalMsg, this, std::placeholders::_1)); - pub_ = create_publisher("~/pub/traffic_signals", rclcpp::QoS(1)); + pub_ = create_publisher("~/pub/traffic_lights", rclcpp::QoS(1)); } void TrafficLightArbiter::onMap(const LaneletMapBin::ConstSharedPtr msg) diff --git a/perception/autoware_traffic_light_arbiter/test/test_node.cpp b/perception/autoware_traffic_light_arbiter/test/test_node.cpp index 
f993ad7cec84d..7e0e29541cb53 100644 --- a/perception/autoware_traffic_light_arbiter/test/test_node.cpp +++ b/perception/autoware_traffic_light_arbiter/test/test_node.cpp @@ -175,9 +175,8 @@ TEST(TrafficLightArbiterTest, testTrafficSignalOnlyPerceptionMsg) { rclcpp::init(0, nullptr); const std::string input_map_topic = "/traffic_light_arbiter/sub/vector_map"; - const std::string input_perception_topic = - "/traffic_light_arbiter/sub/perception_traffic_signals"; - const std::string output_topic = "/traffic_light_arbiter/pub/traffic_signals"; + const std::string input_perception_topic = "/traffic_light_arbiter/sub/perception_traffic_lights"; + const std::string output_topic = "/traffic_light_arbiter/pub/traffic_lights"; auto test_manager = generateTestManager(); auto test_target_node = generateNode(); @@ -209,8 +208,8 @@ TEST(TrafficLightArbiterTest, testTrafficSignalOnlyExternalMsg) { rclcpp::init(0, nullptr); const std::string input_map_topic = "/traffic_light_arbiter/sub/vector_map"; - const std::string input_external_topic = "/traffic_light_arbiter/sub/external_traffic_signals"; - const std::string output_topic = "/traffic_light_arbiter/pub/traffic_signals"; + const std::string input_external_topic = "/traffic_light_arbiter/sub/external_traffic_lights"; + const std::string output_topic = "/traffic_light_arbiter/pub/traffic_lights"; auto test_manager = generateTestManager(); auto test_target_node = generateNode(); @@ -242,10 +241,9 @@ TEST(TrafficLightArbiterTest, testTrafficSignalBothMsg) { rclcpp::init(0, nullptr); const std::string input_map_topic = "/traffic_light_arbiter/sub/vector_map"; - const std::string input_perception_topic = - "/traffic_light_arbiter/sub/perception_traffic_signals"; - const std::string input_external_topic = "/traffic_light_arbiter/sub/external_traffic_signals"; - const std::string output_topic = "/traffic_light_arbiter/pub/traffic_signals"; + const std::string input_perception_topic = "/traffic_light_arbiter/sub/perception_traffic_lights"; + const std::string input_external_topic = "/traffic_light_arbiter/sub/external_traffic_lights"; + const std::string output_topic = "/traffic_light_arbiter/pub/traffic_lights"; auto test_manager = generateTestManager(); auto test_target_node = generateNode(); diff --git a/perception/autoware_traffic_light_classifier/README.md b/perception/autoware_traffic_light_classifier/README.md index 6e720aabc7593..fd864b4cf27dd 100644 --- a/perception/autoware_traffic_light_classifier/README.md +++ b/perception/autoware_traffic_light_classifier/README.md @@ -8,8 +8,9 @@ traffic_light_classifier is a package for classifying traffic light labels using ### cnn_classifier -Traffic light labels are classified by EfficientNet-b1 or MobileNet-v2. -Totally 83400 (58600 for training, 14800 for evaluation and 10000 for test) TIER IV internal images of Japanese traffic lights were used for fine-tuning. +Traffic light labels are classified by EfficientNet-b1 or MobileNet-v2. +We trained classifiers for vehicular signals and pedestrian signals separately. +For vehicular signals, a total of 83400 (58600 for training, 14800 for evaluation and 10000 for test) TIER IV internal images of Japanese traffic lights were used for fine-tuning. 
The information of the models is listed here: | Name | Input Size | Test Accuracy | | --------------- | ---------- | ------------- | | EfficientNet-b1 | 128 x 128 | 99.76% | | MobileNet-v2 | 224 x 224 | 99.81% | +For pedestrian signals, a total of 21199 (17860 for training, 2114 for evaluation and 1225 for test) TIER IV internal images of Japanese traffic lights were used for fine-tuning. +The information of the models is listed here: + +| Name | Input Size | Test Accuracy | +| --------------- | ---------- | ------------- | +| EfficientNet-b1 | 128 x 128 | 97.89% | +| MobileNet-v2 | 224 x 224 | 99.10% | + ### hsv_classifier Traffic light colors (green, yellow and red) are classified in HSV model. @@ -43,20 +52,21 @@ These colors and shapes are assigned to the message as follows: ### Output -| Name | Type | Description | -| -------------------------- | ----------------------------------------------- | ------------------- | -| `~/output/traffic_signals` | `tier4_perception_msgs::msg::TrafficLightArray` | classified signals | -| `~/output/debug/image` | `sensor_msgs::msg::Image` | image for debugging | +| Name | Type | Description | +| ------------------------- | ----------------------------------------------- | ------------------- | +| `~/output/traffic_lights` | `tier4_perception_msgs::msg::TrafficLightArray` | classified signals | +| `~/output/debug/image` | `sensor_msgs::msg::Image` | image for debugging | ## Parameters ### Node Parameters | Name | Type | Description | | --------------------- | ----- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `classifier_type` | int | if the value is `1`, cnn_classifier is used | | `data_path` | str | packages data and artifacts directory path | | `backlight_threshold` | float | If the intensity get grater than this overwrite with UNKNOWN in corresponding RoI. Note that, if the value is much higher, the node only overwrites in the harsher backlight situations. Therefore, If you wouldn't like to use this feature set this value to `1.0`. The value can be `[0.0, 1.0]`. The confidence of overwritten signal is set to `0.0`. | +| Name | Type | Description | +| ----------------------------- | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `classifier_type` | int | If the value is `1`, cnn_classifier is used | +| `data_path` | str | package's data and artifacts directory path | +| `backlight_threshold` | float | If the intensity of light is greater than this threshold, the class of the corresponding RoI will be overwritten with UNKNOWN, and the confidence of the overwritten signal will be set to `0.0`. The value should be set in the range of `[0.0, 1.0]`. If you do not want to use this feature, set it to `1.0`. | +| `classify_traffic_light_type` | int | If the value is `0`, the classifier classifies vehicular signals; if the value is `1`, it classifies pedestrian signals. 
| ### Core Parameters diff --git a/perception/autoware_traffic_light_classifier/launch/traffic_light_classifier.launch.xml b/perception/autoware_traffic_light_classifier/launch/traffic_light_classifier.launch.xml index d0cbbd3dcae9b..42ffff49848a1 100644 --- a/perception/autoware_traffic_light_classifier/launch/traffic_light_classifier.launch.xml +++ b/perception/autoware_traffic_light_classifier/launch/traffic_light_classifier.launch.xml @@ -1,14 +1,14 @@ - + - + diff --git a/perception/autoware_traffic_light_classifier/src/traffic_light_classifier_node.cpp b/perception/autoware_traffic_light_classifier/src/traffic_light_classifier_node.cpp index 796a144bf8266..cca9c810fdc6b 100644 --- a/perception/autoware_traffic_light_classifier/src/traffic_light_classifier_node.cpp +++ b/perception/autoware_traffic_light_classifier/src/traffic_light_classifier_node.cpp @@ -43,7 +43,7 @@ TrafficLightClassifierNodelet::TrafficLightClassifierNodelet(const rclcpp::NodeO } traffic_signal_array_pub_ = this->create_publisher( - "~/output/traffic_signals", rclcpp::QoS{1}); + "~/output/traffic_lights", rclcpp::QoS{1}); using std::chrono_literals::operator""ms; timer_ = rclcpp::create_timer( diff --git a/perception/autoware_traffic_light_map_based_detector/README.md b/perception/autoware_traffic_light_map_based_detector/README.md index 8a59db19ae64d..201fa791b378e 100644 --- a/perception/autoware_traffic_light_map_based_detector/README.md +++ b/perception/autoware_traffic_light_map_based_detector/README.md @@ -9,7 +9,7 @@ Calibration and vibration errors can be entered as parameters, and the size of t ![traffic_light_map_based_detector_result](./docs/traffic_light_map_based_detector_result.svg) If the node receives route information, it only looks at traffic lights on that route. -If the node receives no route information, it looks at a radius of 200 meters and the angle between the traffic light and the camera is less than 40 degrees. +If the node receives no route information, it looks at traffic lights within a radius of `max_detection_range`. If the angle between a traffic light and the camera exceeds the corresponding limit (`car_traffic_light_max_angle_range` or `pedestrian_traffic_light_max_angle_range`), that traffic light is filtered out. ## Input topics @@ -29,14 +29,16 @@ If the node receives no route information, it looks at a radius of 200 meters an ## Node parameters -| Parameter | Type | Description | -| ---------------------- | ------ | --------------------------------------------------------------------- | -| `max_vibration_pitch` | double | Maximum error in pitch direction. If -5~+5, it will be 10. | -| `max_vibration_yaw` | double | Maximum error in yaw direction. If -5~+5, it will be 10. | -| `max_vibration_height` | double | Maximum error in height direction. If -5~+5, it will be 10. | -| `max_vibration_width` | double | Maximum error in width direction. If -5~+5, it will be 10. | -| `max_vibration_depth` | double | Maximum error in depth direction. If -5~+5, it will be 10. | -| `max_detection_range` | double | Maximum detection range in meters. 
Must be positive | -| `min_timestamp_offset` | double | Minimum timestamp offset when searching for corresponding tf | -| `max_timestamp_offset` | double | Maximum timestamp offset when searching for corresponding tf | -| `timestamp_sample_len` | double | sampling length between min_timestamp_offset and max_timestamp_offset | +| Parameter | Type | Description | +| ------------------------------------------ | ------ | ---------------------------------------------------------------------------------------------- | +| `max_vibration_pitch` | double | Maximum error in pitch direction. If -5~+5, it will be 10. | +| `max_vibration_yaw` | double | Maximum error in yaw direction. If -5~+5, it will be 10. | +| `max_vibration_height` | double | Maximum error in height direction. If -5~+5, it will be 10. | +| `max_vibration_width` | double | Maximum error in width direction. If -5~+5, it will be 10. | +| `max_vibration_depth` | double | Maximum error in depth direction. If -5~+5, it will be 10. | +| `max_detection_range` | double | Maximum detection range in meters. Must be positive | +| `car_traffic_light_max_angle_range` | double | Maximum angle between the vehicular traffic light and the camera in degrees. Must be positive | +| `pedestrian_traffic_light_max_angle_range` | double | Maximum angle between the pedestrian traffic light and the camera in degrees. Must be positive | +| `min_timestamp_offset` | double | Minimum timestamp offset when searching for corresponding tf | +| `max_timestamp_offset` | double | Maximum timestamp offset when searching for corresponding tf | +| `timestamp_sample_len` | double | Sampling length between min_timestamp_offset and max_timestamp_offset | diff --git a/perception/autoware_traffic_light_multi_camera_fusion/README.md b/perception/autoware_traffic_light_multi_camera_fusion/README.md index f7ee294cda147..6a4f26585a551 100644 --- a/perception/autoware_traffic_light_multi_camera_fusion/README.md +++ b/perception/autoware_traffic_light_multi_camera_fusion/README.md @@ -11,19 +11,19 @@ For every camera, the following three topics are subscribed: -| Name | Type | Description | -| -------------------------------------- | ---------------------------------------------- | --------------------------------------------------- | -| `~/<camera_ns>/camera_info` | sensor_msgs::CameraInfo | camera info from traffic_light_map_based_detector | -| `~/<camera_ns>/rois` | tier4_perception_msgs::TrafficLightRoiArray | detection roi from traffic_light_fine_detector | -| `~/<camera_ns>/traffic_signals` | tier4_perception_msgs::TrafficLightSignalArray | classification result from traffic_light_classifier | +| Name | Type | Description | +| ------------------------------------- | ---------------------------------------------- | --------------------------------------------------- | +| `~/<camera_ns>/camera_info` | sensor_msgs::CameraInfo | camera info from traffic_light_map_based_detector | +| `~/<camera_ns>/rois` | tier4_perception_msgs::TrafficLightRoiArray | detection roi from traffic_light_fine_detector | +| `~/<camera_ns>/traffic_lights` | tier4_perception_msgs::TrafficLightSignalArray | classification result from traffic_light_classifier | You don't need to configure these topics manually. Just provide the `camera_namespaces` parameter and the node will automatically extract the `<camera_ns>` and create the subscribers. 
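+As a usage illustration, the sketch below mirrors the subscriber setup in `traffic_light_multi_camera_fusion_node.cpp` from this diff and shows how the three per-camera topic names are derived from `camera_namespaces`; the namespace value is only an example, not a required name.
+
+```python
+# Python rendering of the C++ topic-name derivation (illustrative only).
+camera_namespaces = ["/perception/traffic_light_recognition/camera6"]  # example value
+
+for camera_ns in camera_namespaces:
+    signal_topic = camera_ns + "/classification/traffic_lights"
+    roi_topic = camera_ns + "/detection/rois"
+    cam_info_topic = camera_ns + "/camera_info"
+    print(signal_topic, roi_topic, cam_info_topic)
+```
+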
## Output topics -| Name | Type | Description | -| -------------------------- | ------------------------------------------------- | ---------------------------------- | -| `~/output/traffic_signals` | autoware_perception_msgs::TrafficLightSignalArray | traffic light signal fusion result | +| Name | Type | Description | +| ------------------------- | ------------------------------------------------- | ---------------------------------- | +| `~/output/traffic_lights` | autoware_perception_msgs::TrafficLightSignalArray | traffic light signal fusion result | ## Node parameters diff --git a/perception/autoware_traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml b/perception/autoware_traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml index 32e3417cf9029..430aa0056d9ec 100644 --- a/perception/autoware_traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml +++ b/perception/autoware_traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml @@ -2,11 +2,11 @@ - + - + diff --git a/perception/autoware_traffic_light_multi_camera_fusion/src/traffic_light_multi_camera_fusion_node.cpp b/perception/autoware_traffic_light_multi_camera_fusion/src/traffic_light_multi_camera_fusion_node.cpp index 15211920bc7f2..5ade76fc8d8fa 100644 --- a/perception/autoware_traffic_light_multi_camera_fusion/src/traffic_light_multi_camera_fusion_node.cpp +++ b/perception/autoware_traffic_light_multi_camera_fusion/src/traffic_light_multi_camera_fusion_node.cpp @@ -86,6 +86,9 @@ int compareRecord( int visible_score_1 = calVisibleScore(r1); int visible_score_2 = calVisibleScore(r2); if (visible_score_1 == visible_score_2) { + /* + if the visible scores are the same, the one with higher confidence is of higher priority + */ double confidence_1 = r1.signal.elements[0].confidence; double confidence_2 = r2.signal.elements[0].confidence; return confidence_1 < confidence_2 ? 
-1 : 1; @@ -152,7 +155,7 @@ MultiCameraFusion::MultiCameraFusion(const rclcpp::NodeOptions & node_options) is_approximate_sync_ = this->declare_parameter("approximate_sync", false); message_lifespan_ = this->declare_parameter("message_lifespan", 0.09); for (const std::string & camera_ns : camera_namespaces) { - std::string signal_topic = camera_ns + "/classification/traffic_signals"; + std::string signal_topic = camera_ns + "/classification/traffic_lights"; std::string roi_topic = camera_ns + "/detection/rois"; std::string cam_info_topic = camera_ns + "/camera_info"; roi_subs_.emplace_back( @@ -179,7 +182,7 @@ MultiCameraFusion::MultiCameraFusion(const rclcpp::NodeOptions & node_options) map_sub_ = create_subscription( "~/input/vector_map", rclcpp::QoS{1}.transient_local(), std::bind(&MultiCameraFusion::mapCallback, this, _1)); - signal_pub_ = create_publisher("~/output/traffic_signals", rclcpp::QoS{1}); + signal_pub_ = create_publisher("~/output/traffic_lights", rclcpp::QoS{1}); } void MultiCameraFusion::trafficSignalRoiCallback( diff --git a/perception/autoware_traffic_light_occlusion_predictor/README.md b/perception/autoware_traffic_light_occlusion_predictor/README.md index bc57dbea76c97..d2473c6acf701 100644 --- a/perception/autoware_traffic_light_occlusion_predictor/README.md +++ b/perception/autoware_traffic_light_occlusion_predictor/README.md @@ -8,22 +8,24 @@ For each traffic light roi, hundreds of pixels would be selected and projected i ![image](images/occlusion.png) -If no point cloud is received or all point clouds have very large stamp difference with the camera image, the occlusion ratio of each roi would be set as 0. +If no point cloud is received or all point clouds have a very large stamp difference with the camera image, the occlusion ratio of each roi will be set to 0. Any signal whose occlusion ratio is larger than `max_occlusion_ratio` will be set to the unknown type. 
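+To make the `max_occlusion_ratio` behavior concrete, here is a minimal Python sketch, not the node's implementation; the message layout (an `elements` list with `color` and `confidence`) and the UNKNOWN constant are assumed for illustration.
+
+```python
+UNKNOWN = "UNKNOWN"  # placeholder for the message's unknown color constant
+
+
+def apply_occlusion_filter(signals, occlusion_ratios, max_occlusion_ratio):
+    """Reset any signal whose RoI occlusion ratio exceeds the threshold."""
+    for signal, ratio in zip(signals, occlusion_ratios):
+        if ratio > max_occlusion_ratio:
+            for element in signal["elements"]:
+                element["color"] = UNKNOWN
+                element["confidence"] = 0.0
+    return signals
+```
+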
## Input topics -| Name | Type | Description | -| -------------------- | ---------------------------------------------- | ------------------------ | -| `~input/vector_map` | autoware_map_msgs::msg::LaneletMapBin | vector map | -| `~/input/rois` | autoware_perception_msgs::TrafficLightRoiArray | traffic light detections | -| `~input/camera_info` | sensor_msgs::CameraInfo | target camera parameter | -| `~/input/cloud` | sensor_msgs::PointCloud2 | LiDAR point cloud | +| Name | Type | Description | +| ----------------------------------- | ------------------------------------------------ | -------------------------------- | +| `~/input/vector_map` | autoware_map_msgs::msg::LaneletMapBin | vector map | +| `~/input/car/traffic_lights` | tier4_perception_msgs::msg::TrafficLightArray | vehicular traffic light signals | +| `~/input/pedestrian/traffic_lights` | tier4_perception_msgs::msg::TrafficLightArray | pedestrian traffic light signals | +| `~/input/rois` | tier4_perception_msgs::msg::TrafficLightRoiArray | traffic light detections | +| `~/input/camera_info` | sensor_msgs::CameraInfo | target camera parameter | +| `~/input/cloud` | sensor_msgs::PointCloud2 | LiDAR point cloud | ## Output topics -| Name | Type | Description | -| -------------------- | ---------------------------------------------------- | ---------------------------- | -| `~/output/occlusion` | autoware_perception_msgs::TrafficLightOcclusionArray | occlusion ratios of each roi | +| Name | Type | Description | +| ------------------------- | --------------------------------------------- | ------------------------------------------------------------ | +| `~/output/traffic_lights` | tier4_perception_msgs::msg::TrafficLightArray | traffic light signals reset according to the occlusion ratio | ## Node parameters @@ -34,3 +36,4 @@ If no point cloud is received or all point clouds have very large stamp differen | `max_valid_pt_dist` | double | The points within this distance would be used for calculation | | `max_image_cloud_delay` | double | The maximum delay between LiDAR point cloud and camera image | | `max_wait_t` | double | The maximum time waiting for the LiDAR point cloud | +| `max_occlusion_ratio` | int | The maximum occlusion ratio above which the signal is set to unknown | diff --git a/perception/autoware_traffic_light_occlusion_predictor/launch/traffic_light_occlusion_predictor.launch.xml b/perception/autoware_traffic_light_occlusion_predictor/launch/traffic_light_occlusion_predictor.launch.xml index d59d5a7717297..d9fae26b7fd55 100644 --- a/perception/autoware_traffic_light_occlusion_predictor/launch/traffic_light_occlusion_predictor.launch.xml +++ b/perception/autoware_traffic_light_occlusion_predictor/launch/traffic_light_occlusion_predictor.launch.xml @@ -4,9 +4,9 @@ - - - + + + @@ -14,9 +14,9 @@ - - - + + + diff --git a/perception/autoware_traffic_light_occlusion_predictor/src/node.cpp b/perception/autoware_traffic_light_occlusion_predictor/src/node.cpp index 16498eb4d7094..99d76e2004c90 100644 --- a/perception/autoware_traffic_light_occlusion_predictor/src/node.cpp +++ b/perception/autoware_traffic_light_occlusion_predictor/src/node.cpp @@ -58,7 +58,7 @@ TrafficLightOcclusionPredictorNode::TrafficLightOcclusionPredictorNode( // publishers signal_pub_ = - create_publisher("~/output/traffic_signals", 1); + create_publisher("~/output/traffic_lights", 1); // configuration parameters config_.azimuth_occlusion_resolution_deg = @@ -75,7 +75,7 @@ TrafficLightOcclusionPredictorNode::TrafficLightOcclusionPredictorNode( 
config_.elevation_occlusion_resolution_deg); const std::vector topics{ - "~/input/car/traffic_signals", "~/input/rois", "~/input/camera_info", "~/input/cloud"}; + "~/input/car/traffic_lights", "~/input/rois", "~/input/camera_info", "~/input/cloud"}; const std::vector qos(topics.size(), rclcpp::SensorDataQoS()); synchronizer_ = std::make_shared( this, topics, qos, @@ -85,7 +85,7 @@ TrafficLightOcclusionPredictorNode::TrafficLightOcclusionPredictorNode( config_.max_image_cloud_delay, config_.max_wait_t); const std::vector topics_ped{ - "~/input/pedestrian/traffic_signals", "~/input/rois", "~/input/camera_info", "~/input/cloud"}; + "~/input/pedestrian/traffic_lights", "~/input/rois", "~/input/camera_info", "~/input/cloud"}; const std::vector qos_ped(topics_ped.size(), rclcpp::SensorDataQoS()); synchronizer_ped_ = std::make_shared( this, topics_ped, qos_ped, diff --git a/perception/autoware_traffic_light_visualization/launch/traffic_light_map_visualizer.launch.xml b/perception/autoware_traffic_light_visualization/launch/traffic_light_map_visualizer.launch.xml index 8ff56915766aa..1c580cd7ecbdb 100644 --- a/perception/autoware_traffic_light_visualization/launch/traffic_light_map_visualizer.launch.xml +++ b/perception/autoware_traffic_light_visualization/launch/traffic_light_map_visualizer.launch.xml @@ -1,7 +1,7 @@ - + - + diff --git a/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml b/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml index d4af7a27636df..be61276d58d7b 100644 --- a/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml +++ b/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml @@ -2,7 +2,7 @@ - + @@ -11,7 +11,7 @@ - + diff --git a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp index 6ab9f09064f58..c797399f07959 100644 --- a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp +++ b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp @@ -76,7 +76,7 @@ void TrafficLightRoiVisualizerNode::connectCb() image_sub_.subscribe(this, "~/input/image", "raw", rmw_qos_profile_sensor_data); roi_sub_.subscribe(this, "~/input/rois", rclcpp::QoS{1}.get_rmw_qos_profile()); traffic_signals_sub_.subscribe( - this, "~/input/traffic_signals", rclcpp::QoS{1}.get_rmw_qos_profile()); + this, "~/input/traffic_lights", rclcpp::QoS{1}.get_rmw_qos_profile()); if (enable_fine_detection_) { rough_roi_sub_.subscribe(this, "~/input/rough/rois", rclcpp::QoS{1}.get_rmw_qos_profile()); }