diff --git a/scenario_runner/srunner/autoagents/__init__.py b/scenario_runner/srunner/autoagents/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scenario_runner/srunner/autoagents/agent_wrapper.py b/scenario_runner/srunner/autoagents/agent_wrapper.py
new file mode 100644
index 0000000..e5bc6ad
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/agent_wrapper.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2019 Intel Corporation
+#
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+Wrapper for autonomous agents required for tracking and checking of used sensors
+"""
+
+from __future__ import print_function
+
+import carla
+
+from srunner.autoagents.sensor_interface import CallBack
+from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
+
+
+class AgentWrapper(object):
+
+    """
+    Wrapper for autonomous agents required for tracking and checking of used sensors
+    """
+
+    _agent = None
+    _sensors_list = []
+
+    def __init__(self, agent):
+        """
+        Set the autonomous agent
+        """
+        self._agent = agent
+
+    def __call__(self):
+        """
+        Pass the call directly to the agent
+        """
+        return self._agent()
+
+    def setup_sensors(self, vehicle, debug_mode=False):
+        """
+        Create the sensors defined by the user and attach them to the ego-vehicle
+        :param vehicle: ego vehicle
+        :return:
+        """
+        bp_library = CarlaDataProvider.get_world().get_blueprint_library()
+        for sensor_spec in self._agent.sensors():
+            # These are the sensors spawned on the carla world
+            bp = bp_library.find(str(sensor_spec['type']))
+            if sensor_spec['type'].startswith('sensor.camera'):
+                bp.set_attribute('image_size_x', str(sensor_spec['width']))
+                bp.set_attribute('image_size_y', str(sensor_spec['height']))
+                bp.set_attribute('fov', str(sensor_spec['fov']))
+                sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                 z=sensor_spec['z'])
+                sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                 roll=sensor_spec['roll'],
+                                                 yaw=sensor_spec['yaw'])
+            elif sensor_spec['type'].startswith('sensor.lidar'):
+                bp.set_attribute('range', str(sensor_spec['range']))
+                bp.set_attribute('rotation_frequency', str(sensor_spec['rotation_frequency']))
+                bp.set_attribute('channels', str(sensor_spec['channels']))
+                bp.set_attribute('upper_fov', str(sensor_spec['upper_fov']))
+                bp.set_attribute('lower_fov', str(sensor_spec['lower_fov']))
+                bp.set_attribute('points_per_second', str(sensor_spec['points_per_second']))
+                sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                 z=sensor_spec['z'])
+                sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
+                                                 roll=sensor_spec['roll'],
+                                                 yaw=sensor_spec['yaw'])
+            elif sensor_spec['type'].startswith('sensor.other.gnss'):
+                sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
+                                                 z=sensor_spec['z'])
+                sensor_rotation = carla.Rotation()
+
+            # create sensor
+            sensor_transform = carla.Transform(sensor_location, sensor_rotation)
+            sensor = CarlaDataProvider.get_world().spawn_actor(bp, sensor_transform, vehicle)
+            # setup callback
+            sensor.listen(CallBack(sensor_spec['id'], sensor, self._agent.sensor_interface))
+            self._sensors_list.append(sensor)
+
+        while not self._agent.all_sensors_ready():
+            if debug_mode:
+                print(" waiting for one data reading from sensors...")
+            CarlaDataProvider.get_world().tick()
+
+    def cleanup(self):
+        """
+        Remove and destroy all sensors
+        """
+        for i, _ in enumerate(self._sensors_list):
+            if self._sensors_list[i] is not None:
+                self._sensors_list[i].stop()
+                self._sensors_list[i].destroy()
+                self._sensors_list[i] = None
+        self._sensors_list = []
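For orientation, here is a minimal usage sketch of the wrapper above. It is not part of the diff and makes several assumptions: a CARLA server is running, CarlaDataProvider has already been initialized with the world, `MyAgent` is some AutonomousAgent subclass, and `ego_vehicle` is a previously spawned carla.Vehicle.

    # Hypothetical wiring of an agent through AgentWrapper (illustration only).
    from srunner.autoagents.agent_wrapper import AgentWrapper

    agent = MyAgent(path_to_conf_file=None)  # MyAgent: any AutonomousAgent subclass (assumption)
    wrapper = AgentWrapper(agent)
    wrapper.setup_sensors(ego_vehicle)       # spawns and attaches every sensor from agent.sensors()
    control = wrapper()                      # forwards to agent.__call__() -> carla.VehicleControl
    ego_vehicle.apply_control(control)
    wrapper.cleanup()                        # stops and destroys all spawned sensors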
diff --git a/scenario_runner/srunner/autoagents/autonomous_agent.py b/scenario_runner/srunner/autoagents/autonomous_agent.py
new file mode 100644
index 0000000..d1178ff
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/autonomous_agent.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+This module provides the base class for all autonomous agents
+"""
+
+from __future__ import print_function
+
+import carla
+
+from srunner.autoagents.sensor_interface import SensorInterface
+from srunner.scenariomanager.timer import GameTime
+from srunner.tools.route_manipulation import downsample_route
+
+
+class AutonomousAgent(object):
+
+    """
+    Autonomous agent base class. All user agents have to be derived from this class
+    """
+
+    def __init__(self, path_to_conf_file):
+        # current global plans to reach a destination
+        self._global_plan = None
+        self._global_plan_world_coord = None
+
+        # this data structure will contain all sensor data
+        self.sensor_interface = SensorInterface()
+
+        # agent's initialization
+        self.setup(path_to_conf_file)
+
+    def setup(self, path_to_conf_file):
+        """
+        Initialize everything needed by your agent
+        """
+        pass
+
+    def sensors(self):  # pylint: disable=no-self-use
+        """
+        Define the sensor suite required by the agent
+
+        :return: a list containing the required sensors in the following format:
+
+        [
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
+
+            {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,
+             'id': 'LIDAR'}
+        ]
+
+        """
+        sensors = []
+
+        return sensors
+
+    def run_step(self, input_data, timestamp):
+        """
+        Execute one step of navigation.
+        :return: control
+        """
+        control = carla.VehicleControl()
+        control.steer = 0.0
+        control.throttle = 0.0
+        control.brake = 0.0
+        control.hand_brake = False
+
+        return control
+
+    def destroy(self):
+        """
+        Destroy (clean-up) the agent
+        :return:
+        """
+        pass
+
+    def __call__(self):
+        """
+        Execute the agent call, e.g. agent()
+        Returns the next vehicle controls
+        """
+        input_data = self.sensor_interface.get_data()
+
+        timestamp = GameTime.get_time()
+        wallclock = GameTime.get_wallclocktime()
+        print('======[Agent] Wallclock_time = {} / Sim_time = {}'.format(wallclock, timestamp))
+
+        control = self.run_step(input_data, timestamp)
+        control.manual_gear_shift = False
+
+        return control
+
+    def all_sensors_ready(self):
+        """
+        Check if all sensors are ready
+        Returns true if sensors are ready
+        """
+        return self.sensor_interface.all_sensors_ready()
+
+    def set_global_plan(self, global_plan_gps, global_plan_world_coord):
+        """
+        Set the plan (route) for the agent
+        """
+
+        ds_ids = downsample_route(global_plan_world_coord, 1)
+        self._global_plan_world_coord = [(global_plan_world_coord[x][0], global_plan_world_coord[x][1])
+                                         for x in ds_ids]
+        self._global_plan = [global_plan_gps[x] for x in ds_ids]
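As a quick illustration of the contract this base class defines, the following sketch (not part of the diff) derives a trivial agent. The throttle constant and the 'Center' sensor id are made-up values.

    # Hypothetical subclass of AutonomousAgent (illustration only).
    import carla

    from srunner.autoagents.autonomous_agent import AutonomousAgent


    class ConstantThrottleAgent(AutonomousAgent):

        def setup(self, path_to_conf_file):
            self._target_throttle = 0.3  # invented constant

        def sensors(self):
            # One front camera; 'id' is the key under which its data arrives in input_data
            return [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60,
                     'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
                     'width': 300, 'height': 200, 'fov': 100, 'id': 'Center'}]

        def run_step(self, input_data, timestamp):
            # input_data['Center'] is a (frame, numpy image) tuple filled by SensorInterface
            control = carla.VehicleControl()
            control.throttle = self._target_throttle
            return control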
diff --git a/scenario_runner/srunner/autoagents/dummy_agent.py b/scenario_runner/srunner/autoagents/dummy_agent.py
new file mode 100644
index 0000000..0367867
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/dummy_agent.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+This module provides a dummy agent to control the ego vehicle
+"""
+
+from __future__ import print_function
+
+import carla
+
+from srunner.autoagents.autonomous_agent import AutonomousAgent
+
+
+class DummyAgent(AutonomousAgent):
+
+    """
+    Dummy autonomous agent to control the ego vehicle
+    """
+
+    def setup(self, path_to_conf_file):
+        """
+        Setup the agent parameters
+        """
+
+    def sensors(self):
+        """
+        Define the sensor suite required by the agent
+
+        :return: a list containing the required sensors in the following format:
+
+        [
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
+
+            {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,
+             'id': 'LIDAR'}
+        ]
+
+        """
+        sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'width': 800, 'height': 600, 'fov': 100, 'id': 'Center'},
+                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
+                    'yaw': -45.0, 'width': 800, 'height': 600, 'fov': 100, 'id': 'Left'},
+                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,
+                    'width': 800, 'height': 600, 'fov': 100, 'id': 'Right'},
+                   {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
+                    'yaw': -45.0, 'id': 'LIDAR'},
+                   {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'},
+                   {'type': 'sensor.can_bus', 'reading_frequency': 25, 'id': 'can_bus'},
+                   {'type': 'sensor.hd_map', 'reading_frequency': 1, 'id': 'hdmap'},
+                   ]
+
+        return sensors
+
+    def run_step(self, input_data, timestamp):
+        """
+        Execute one step of navigation.
+        """
+        print("=====================>")
+        for key, val in input_data.items():
+            if hasattr(val[1], 'shape'):
+                shape = val[1].shape
+                print("[{} -- {:06d}] with shape {}".format(key, val[0], shape))
+            else:
+                print("[{} -- {:06d}] ".format(key, val[0]))
+        print("<=====================")
+
+        # DO SOMETHING SMART
+
+        # RETURN CONTROL
+        control = carla.VehicleControl()
+        control.steer = 0.0
+        control.throttle = 0.0
+        control.brake = 0.0
+        control.hand_brake = False
+
+        return control
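The print loop above relies on the layout produced by SensorInterface.get_data() (defined at the end of this diff): a dict mapping each sensor id to a (frame, data) tuple. A self-contained sketch of what DummyAgent would receive, with invented frame numbers and zeroed data:

    import numpy as np

    # Illustrative layout of input_data (frame number 1042 is invented).
    input_data = {
        'Center': (1042, np.zeros((600, 800, 4), dtype=np.uint8)),  # BGRA camera image
        'LIDAR': (1042, np.zeros((1000, 3), dtype=np.float32)),     # x, y, z points
        'GPS': (1042, np.zeros(3)),                                 # latitude, longitude, altitude
    }
    for key, (frame, data) in input_data.items():
        print(key, frame, data.shape)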
+ """ + print("=====================>") + for key, val in input_data.items(): + if hasattr(val[1], 'shape'): + shape = val[1].shape + print("[{} -- {:06d}] with shape {}".format(key, val[0], shape)) + else: + print("[{} -- {:06d}] ".format(key, val[0])) + print("<=====================") + + # DO SOMETHING SMART + + # RETURN CONTROL + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + control.hand_brake = False + + return control diff --git a/scenario_runner/srunner/autoagents/human_agent.py b/scenario_runner/srunner/autoagents/human_agent.py new file mode 100644 index 0000000..982da33 --- /dev/null +++ b/scenario_runner/srunner/autoagents/human_agent.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python + +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This module provides a human agent to control the ego vehicle via keyboard +""" + +import time +from threading import Thread +import cv2 +import numpy as np + +try: + import pygame + from pygame.locals import K_DOWN + from pygame.locals import K_LEFT + from pygame.locals import K_RIGHT + from pygame.locals import K_SPACE + from pygame.locals import K_UP + from pygame.locals import K_a + from pygame.locals import K_d + from pygame.locals import K_s + from pygame.locals import K_w +except ImportError: + raise RuntimeError('cannot import pygame, make sure pygame package is installed') + +import carla + +from srunner.autoagents.autonomous_agent import AutonomousAgent + + +class HumanInterface(object): + + """ + Class to control a vehicle manually for debugging purposes + """ + + def __init__(self, parent): + self.quit = False + self._parent = parent + self._width = 800 + self._height = 600 + self._throttle_delta = 0.05 + self._steering_delta = 0.01 + self._surface = None + + pygame.init() + pygame.font.init() + self._clock = pygame.time.Clock() + self._display = pygame.display.set_mode((self._width, self._height), pygame.HWSURFACE | pygame.DOUBLEBUF) + pygame.display.set_caption("Human Agent") + + def run(self): + """ + Run the GUI + """ + while not self._parent.agent_engaged and not self.quit: + time.sleep(0.5) + + controller = KeyboardControl() + while not self.quit: + self._clock.tick_busy_loop(20) + controller.parse_events(self._parent.current_control, self._clock) + # Process events + pygame.event.pump() + + # process sensor data + input_data = self._parent.sensor_interface.get_data() + image_center = input_data['Center'][1][:, :, -2::-1] + image_left = input_data['Left'][1][:, :, -2::-1] + image_right = input_data['Right'][1][:, :, -2::-1] + image_rear = input_data['Rear'][1][:, :, -2::-1] + + top_row = np.hstack((image_left, image_center, image_right)) + bottom_row = np.hstack((0 * image_rear, image_rear, 0 * image_rear)) + comp_image = np.vstack((top_row, bottom_row)) + # resize image + image_rescaled = cv2.resize(comp_image, dsize=(self._width, self._height), interpolation=cv2.INTER_CUBIC) + + # display image + self._surface = pygame.surfarray.make_surface(image_rescaled.swapaxes(0, 1)) + if self._surface is not None: + self._display.blit(self._surface, (0, 0)) + pygame.display.flip() + + pygame.quit() + + +class HumanAgent(AutonomousAgent): + + """ + Human agent to control the ego vehicle via keyboard + """ + + current_control = None + agent_engaged = False + + def setup(self, path_to_conf_file): + """ + Setup the agent parameters + """ + + self.agent_engaged = False + self.current_control = carla.VehicleControl() + self.current_control.steer = 0.0 + 
+        self.current_control.throttle = 0.0
+        self.current_control.brake = 0.0
+        self.current_control.hand_brake = False
+        self._hic = HumanInterface(self)
+        self._thread = Thread(target=self._hic.run)
+        self._thread.start()
+
+    def sensors(self):
+        """
+        Define the sensor suite required by the agent
+
+        :return: a list containing the required sensors in the following format:
+
+        [
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
+
+            {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,
+             'id': 'LIDAR'}
+        ]
+
+        """
+        sensors = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+                    'width': 300, 'height': 200, 'fov': 100, 'id': 'Center'},
+
+                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
+                    'yaw': -45.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+
+                   {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,
+                    'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
+
+                   {'type': 'sensor.camera.rgb', 'x': -1.8, 'y': 0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
+                    'yaw': 180.0, 'width': 300, 'height': 200, 'fov': 130, 'id': 'Rear'},
+
+                   {'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}
+                   ]
+
+        return sensors
+
+    def run_step(self, input_data, timestamp):
+        """
+        Execute one step of navigation.
+        """
+        self.agent_engaged = True
+        time.sleep(0.1)
+        return self.current_control
+
+    def destroy(self):
+        """
+        Cleanup
+        """
+        self._hic.quit = True
+        self._thread.join()
+
+
+class KeyboardControl(object):
+
+    """
+    Keyboard control for the human agent
+    """
+
+    def __init__(self):
+        """
+        Init
+        """
+        self._control = carla.VehicleControl()
+        self._steer_cache = 0.0
+
+    def parse_events(self, control, clock):
+        """
+        Parse the keyboard events and set the vehicle controls accordingly
+        """
+        for event in pygame.event.get():
+            if event.type == pygame.QUIT:
+                return
+
+        self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
+        control.steer = self._control.steer
+        control.throttle = self._control.throttle
+        control.brake = self._control.brake
+        control.hand_brake = self._control.hand_brake
+
+    def _parse_vehicle_keys(self, keys, milliseconds):
+        """
+        Calculate new vehicle controls based on input keys
+        """
+        self._control.throttle = 0.6 if keys[K_UP] or keys[K_w] else 0.0
+        steer_increment = 15.0 * 5e-4 * milliseconds
+        if keys[K_LEFT] or keys[K_a]:
+            self._steer_cache -= steer_increment
+        elif keys[K_RIGHT] or keys[K_d]:
+            self._steer_cache += steer_increment
+        else:
+            self._steer_cache = 0.0
+
+        self._steer_cache = min(0.95, max(-0.95, self._steer_cache))
+        self._control.steer = round(self._steer_cache, 1)
+        self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
+        self._control.hand_brake = keys[K_SPACE]
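One detail worth noting in HumanInterface.run(): CARLA camera buffers arrive as BGRA, and the slice [:, :, -2::-1] both drops the alpha channel and reverses the colour order to RGB (channel indices 2, 1, 0). A standalone check, with a made-up pixel value:

    import numpy as np

    # A fake 1x1 BGRA pixel: blue=10, green=20, red=30, alpha=255 (values invented)
    bgra = np.array([[[10, 20, 30, 255]]], dtype=np.uint8)
    rgb = bgra[:, :, -2::-1]  # takes channels 2,1,0 -> (red, green, blue)
    assert rgb.tolist() == [[[30, 20, 10]]]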
diff --git a/scenario_runner/srunner/autoagents/npc_agent.py b/scenario_runner/srunner/autoagents/npc_agent.py
new file mode 100644
index 0000000..dbcaa42
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/npc_agent.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+This module provides an NPC agent to control the ego vehicle
+"""
+
+from __future__ import print_function
+
+import carla
+from agents.navigation.basic_agent import BasicAgent
+
+from srunner.autoagents.autonomous_agent import AutonomousAgent
+from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
+
+
+class NpcAgent(AutonomousAgent):
+
+    """
+    NPC autonomous agent to control the ego vehicle
+    """
+
+    _agent = None
+    _route_assigned = False
+
+    def setup(self, path_to_conf_file):
+        """
+        Setup the agent parameters
+        """
+
+        self._route_assigned = False
+        self._agent = None
+
+    def sensors(self):
+        """
+        Define the sensor suite required by the agent
+
+        :return: a list containing the required sensors in the following format:
+
+        [
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Right'},
+
+            {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0,
+             'id': 'LIDAR'}
+        ]
+
+        """
+
+        sensors = [
+            {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
+             'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
+        ]
+
+        return sensors
+
+    def run_step(self, input_data, timestamp):
+        """
+        Execute one step of navigation.
+        """
+        control = carla.VehicleControl()
+        control.steer = 0.0
+        control.throttle = 0.0
+        control.brake = 0.0
+        control.hand_brake = False
+
+        if not self._agent:
+            hero_actor = None
+            for actor in CarlaDataProvider.get_world().get_actors():
+                if 'role_name' in actor.attributes and actor.attributes['role_name'] == 'hero':
+                    hero_actor = actor
+                    break
+            if hero_actor:
+                self._agent = BasicAgent(hero_actor)
+
+            return control
+
+        if not self._route_assigned:
+            if self._global_plan:
+                plan = []
+
+                for transform, road_option in self._global_plan_world_coord:
+                    wp = CarlaDataProvider.get_map().get_waypoint(transform.location)
+                    plan.append((wp, road_option))
+
+                self._agent._local_planner.set_global_plan(plan)  # pylint: disable=protected-access
+                self._route_assigned = True
+
+        else:
+            control = self._agent.run_step()
+
+        return control
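The route that NpcAgent converts into waypoints is handed in through set_global_plan(), which pairs each pose with a RoadOption. A sketch of how a caller might construct one (coordinates are invented, and the GPS dict keys follow srunner.tools.route_manipulation as an assumption):

    # Hypothetical route construction for set_global_plan() (values invented).
    import carla
    from agents.navigation.local_planner import RoadOption

    route_world = [
        (carla.Transform(carla.Location(x=10.0, y=2.0, z=0.0)), RoadOption.LANEFOLLOW),
        (carla.Transform(carla.Location(x=20.0, y=2.0, z=0.0)), RoadOption.STRAIGHT),
    ]
    route_gps = [
        ({'lat': 0.0001, 'lon': 0.0002, 'z': 0.0}, RoadOption.LANEFOLLOW),
        ({'lat': 0.0002, 'lon': 0.0004, 'z': 0.0}, RoadOption.STRAIGHT),
    ]

    agent.set_global_plan(route_gps, route_world)  # agent: any AutonomousAgent subclass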
diff --git a/scenario_runner/srunner/autoagents/ros_agent.py b/scenario_runner/srunner/autoagents/ros_agent.py
new file mode 100644
index 0000000..deb537d
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/ros_agent.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+#
+
+"""
+This module provides a ROS autonomous agent interface to control the ego vehicle via a ROS stack
+"""
+
+import math
+import os
+import subprocess
+import signal
+import threading
+import time
+
+import numpy
+
+import carla
+
+import rospy
+from cv_bridge import CvBridge
+from geometry_msgs.msg import PoseStamped
+from nav_msgs.msg import Odometry, Path
+from rosgraph_msgs.msg import Clock
+from sensor_msgs.msg import Image, PointCloud2, NavSatFix, NavSatStatus, CameraInfo
+from sensor_msgs.point_cloud2 import create_cloud_xyz32
+from std_msgs.msg import Header, String
+import tf
+# pylint: disable=line-too-long
+from carla_msgs.msg import CarlaEgoVehicleStatus, CarlaEgoVehicleInfo, CarlaEgoVehicleInfoWheel, CarlaEgoVehicleControl, CarlaWorldInfo
+# pylint: enable=line-too-long
+
+from srunner.autoagents.autonomous_agent import AutonomousAgent
+
+
+class RosAgent(AutonomousAgent):
+
+    """
+    Base class for ROS-based stacks.
+
+    Derive from it and implement the sensors() method.
+
+    Please define TEAM_CODE_ROOT in your environment.
+    The stack is started by executing $TEAM_CODE_ROOT/start.sh
+
+    The sensor data is published on similar topics as with the carla-ros-bridge. You can find details about
+    the utilized datatypes there.
+
+    This agent expects a roscore to be running.
+    """
+
+    speed = None
+    current_control = None
+    stack_process = None
+    timestamp = None
+    current_map_name = None
+    step_mode_possible = None
+    vehicle_info_publisher = None
+    global_plan_published = None
+
+    def setup(self, path_to_conf_file):
+        """
+        setup agent
+        """
+        self.stack_thread = None
+
+        # get start_script from environment
+        team_code_path = os.environ['TEAM_CODE_ROOT']
+        if not team_code_path or not os.path.exists(team_code_path):
+            raise IOError("Path '{}' defined by TEAM_CODE_ROOT invalid".format(team_code_path))
+        start_script = "{}/start.sh".format(team_code_path)
+        if not os.path.exists(start_script):
+            raise IOError("File '{}' defined by TEAM_CODE_ROOT invalid".format(start_script))
+
+        # set use_sim_time via commandline before init-node
+        process = subprocess.Popen(
+            "rosparam set use_sim_time true", shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+        process.wait()
+        if process.returncode:
+            raise RuntimeError("Could not set use_sim_time")
+
+        # initialize ros node
+        rospy.init_node('ros_agent', anonymous=True)
+
+        # publish first clock value '0'
+        self.clock_publisher = rospy.Publisher('clock', Clock, queue_size=10, latch=True)
+        self.clock_publisher.publish(Clock(rospy.Time.from_sec(0)))
+
+        # execute script that starts the ad stack (remains running)
+        rospy.loginfo("Executing stack...")
+        self.stack_process = subprocess.Popen(start_script, shell=True, preexec_fn=os.setpgrp)
+
+        self.vehicle_control_event = threading.Event()
+        self.timestamp = None
+        self.speed = 0
+        self.global_plan_published = False
+
+        self.vehicle_info_publisher = None
+        self.vehicle_status_publisher = None
+        self.odometry_publisher = None
+        self.world_info_publisher = None
+        self.map_file_publisher = None
+        self.current_map_name = None
+        self.tf_broadcaster = None
+        self.step_mode_possible = False
+
+        self.vehicle_control_subscriber = rospy.Subscriber(
+            '/carla/ego_vehicle/vehicle_control_cmd', CarlaEgoVehicleControl, self.on_vehicle_control)
+
+        self.current_control = carla.VehicleControl()
+
+        self.waypoint_publisher = rospy.Publisher(
+            '/carla/ego_vehicle/waypoints', Path, queue_size=1, latch=True)
+
+        self.publisher_map = {}
+        self.id_to_sensor_type_map = {}
+        self.id_to_camera_info_map = {}
+        self.cv_bridge = CvBridge()
+
+        # setup ros publishers for sensors
+        # pylint: disable=line-too-long
+        for sensor in self.sensors():
+            self.id_to_sensor_type_map[sensor['id']] = sensor['type']
+            if sensor['type'] == 'sensor.camera.rgb':
+                self.publisher_map[sensor['id']] = rospy.Publisher(
+                    '/carla/ego_vehicle/camera/rgb/' + sensor['id'] + "/image_color", Image, queue_size=1, latch=True)
+                self.id_to_camera_info_map[sensor['id']] = self.build_camera_info(sensor)
+                self.publisher_map[sensor['id'] + '_info'] = rospy.Publisher(
+                    '/carla/ego_vehicle/camera/rgb/' + sensor['id'] + "/camera_info", CameraInfo, queue_size=1, latch=True)
+            elif sensor['type'] == 'sensor.lidar.ray_cast':
+                self.publisher_map[sensor['id']] = rospy.Publisher(
+                    '/carla/ego_vehicle/lidar/' + sensor['id'] + "/point_cloud", PointCloud2, queue_size=1, latch=True)
+            elif sensor['type'] == 'sensor.other.gnss':
+                self.publisher_map[sensor['id']] = rospy.Publisher(
+                    '/carla/ego_vehicle/gnss/' + sensor['id'] + "/fix", NavSatFix, queue_size=1, latch=True)
+            elif sensor['type'] == 'sensor.can_bus':
+                if not self.vehicle_info_publisher:
+                    self.vehicle_info_publisher = rospy.Publisher(
+                        '/carla/ego_vehicle/vehicle_info', CarlaEgoVehicleInfo, queue_size=1, latch=True)
+                if not self.vehicle_status_publisher:
+                    self.vehicle_status_publisher = rospy.Publisher(
+                        '/carla/ego_vehicle/vehicle_status', CarlaEgoVehicleStatus, queue_size=1, latch=True)
+            elif sensor['type'] == 'sensor.hd_map':
+                if not self.odometry_publisher:
+                    self.odometry_publisher = rospy.Publisher(
+                        '/carla/ego_vehicle/odometry', Odometry, queue_size=1, latch=True)
+                if not self.world_info_publisher:
+                    self.world_info_publisher = rospy.Publisher(
+                        '/carla/world_info', CarlaWorldInfo, queue_size=1, latch=True)
+                if not self.map_file_publisher:
+                    self.map_file_publisher = rospy.Publisher('/carla/map_file', String, queue_size=1, latch=True)
+                if not self.tf_broadcaster:
+                    self.tf_broadcaster = tf.TransformBroadcaster()
+            else:
+                raise TypeError("Invalid sensor type: {}".format(sensor['type']))
+        # pylint: enable=line-too-long
+
+    def destroy(self):
+        """
+        Cleanup of all ROS publishers
+        """
+        if self.stack_process and self.stack_process.poll() is None:
+            rospy.loginfo("Sending SIGTERM to stack...")
+            os.killpg(os.getpgid(self.stack_process.pid), signal.SIGTERM)
+            rospy.loginfo("Waiting for termination of stack...")
+            self.stack_process.wait()
+            time.sleep(5)
+            rospy.loginfo("Terminated stack.")
+
+        rospy.loginfo("Stack is no longer running")
+        self.world_info_publisher.unregister()
+        self.map_file_publisher.unregister()
+        self.vehicle_status_publisher.unregister()
+        self.vehicle_info_publisher.unregister()
+        self.waypoint_publisher.unregister()
+        self.stack_process = None
+        rospy.loginfo("Cleanup finished")
+
+    def on_vehicle_control(self, data):
+        """
+        callback if a new vehicle control command is received
+        """
+        cmd = carla.VehicleControl()
+        cmd.throttle = data.throttle
+        cmd.steer = data.steer
+        cmd.brake = data.brake
+        cmd.hand_brake = data.hand_brake
+        cmd.reverse = data.reverse
+        cmd.gear = data.gear
+        cmd.manual_gear_shift = data.manual_gear_shift
+        self.current_control = cmd
+        if not self.vehicle_control_event.is_set():
+            self.vehicle_control_event.set()
+        # After the first vehicle control is sent out, it is possible to use the stepping mode
+        self.step_mode_possible = True
+
+    def build_camera_info(self, attributes):  # pylint: disable=no-self-use
+        """
+        Private function to compute camera info
+
+        camera info doesn't change over time
+        """
+        camera_info = CameraInfo()
+        # store info without header
+        camera_info.header = None
+        camera_info.width = int(attributes['width'])
+        camera_info.height = int(attributes['height'])
+        camera_info.distortion_model = 'plumb_bob'
+        cx = camera_info.width / 2.0
+        cy = camera_info.height / 2.0
+        fx = camera_info.width / (
+            2.0 * math.tan(float(attributes['fov']) * math.pi / 360.0))
+        fy = fx
+        camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
+        camera_info.D = [0, 0, 0, 0, 0]
+        camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
+        camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
+        return camera_info
+
+    def publish_plan(self):
+        """
+        publish the global plan
+        """
+        msg = Path()
+        msg.header.frame_id = "/map"
+        msg.header.stamp = rospy.Time.now()
+        for wp in self._global_plan_world_coord:
+            pose = PoseStamped()
+            pose.pose.position.x = wp[0].location.x
+            pose.pose.position.y = -wp[0].location.y
+            pose.pose.position.z = wp[0].location.z
+            quaternion = tf.transformations.quaternion_from_euler(
+                0, 0, -math.radians(wp[0].rotation.yaw))
+            pose.pose.orientation.x = quaternion[0]
+            pose.pose.orientation.y = quaternion[1]
+            pose.pose.orientation.z = quaternion[2]
+            pose.pose.orientation.w = quaternion[3]
+            msg.poses.append(pose)
+
+        rospy.loginfo("Publishing Plan...")
+        self.waypoint_publisher.publish(msg)
+
+    def sensors(self):
+        """
+        Define the sensor suite required by the agent
+
+        :return: a list containing the required sensors
+        """
+        raise NotImplementedError(
+            "This function has to be implemented by the derived classes")
+
+    def get_header(self):
+        """
+        Returns ROS message header
+        """
+        header = Header()
+        header.stamp = rospy.Time.from_sec(self.timestamp)
+        return header
+
+    def publish_lidar(self, sensor_id, data):
+        """
+        Function to publish lidar data
+        """
+        header = self.get_header()
+        header.frame_id = 'ego_vehicle/lidar/{}'.format(sensor_id)
+
+        lidar_data = numpy.frombuffer(
+            data, dtype=numpy.float32)
+        lidar_data = numpy.reshape(
+            lidar_data, (int(lidar_data.shape[0] / 3), 3))
+        # we take the opposite of the y axis
+        # (as lidar points are expressed in a left-handed coordinate system, while ROS needs right-handed)
+        # we need a copy here, because the numpy array coming from carla is read-only
+        lidar_data = -1.0 * lidar_data
+        # we also need to permute x and y
+        lidar_data = lidar_data[..., [1, 0, 2]]
+        msg = create_cloud_xyz32(header, lidar_data)
+        self.publisher_map[sensor_id].publish(msg)
+
+    def publish_gnss(self, sensor_id, data):
+        """
+        Function to publish gnss data
+        """
+        msg = NavSatFix()
+        msg.header = self.get_header()
+        msg.header.frame_id = 'gps'
+        msg.latitude = data[0]
+        msg.longitude = data[1]
+        msg.altitude = data[2]
+        msg.status.status = NavSatStatus.STATUS_SBAS_FIX
+        # pylint: disable=line-too-long
+        msg.status.service = NavSatStatus.SERVICE_GPS | NavSatStatus.SERVICE_GLONASS | NavSatStatus.SERVICE_COMPASS | NavSatStatus.SERVICE_GALILEO
+        # pylint: enable=line-too-long
+        self.publisher_map[sensor_id].publish(msg)
+
+    def publish_camera(self, sensor_id, data):
+        """
+        Function to publish camera data
+        """
+        msg = self.cv_bridge.cv2_to_imgmsg(data, encoding='bgra8')
+        # the camera data is in respect to the camera's own frame
+        msg.header = self.get_header()
+        msg.header.frame_id = 'ego_vehicle/camera/rgb/{}'.format(sensor_id)
+
+        cam_info = self.id_to_camera_info_map[sensor_id]
+        cam_info.header = msg.header
+        self.publisher_map[sensor_id + '_info'].publish(cam_info)
+        self.publisher_map[sensor_id].publish(msg)
+
+    def publish_can(self, sensor_id, data):
+        """
+        publish can data
+        """
+        if not self.vehicle_info_publisher:
+            self.vehicle_info_publisher = rospy.Publisher(
+                '/carla/ego_vehicle/vehicle_info', CarlaEgoVehicleInfo, queue_size=1, latch=True)
+        info_msg = CarlaEgoVehicleInfo()
+        for wheel in data['wheels']:
+            wheel_info = CarlaEgoVehicleInfoWheel()
+            wheel_info.tire_friction = wheel['tire_friction']
+            wheel_info.damping_rate = wheel['damping_rate']
+            wheel_info.steer_angle = wheel['steer_angle']
+            wheel_info.disable_steering = wheel['disable_steering']
+            info_msg.wheels.append(wheel_info)
+        info_msg.max_rpm = data['max_rpm']
+        info_msg.moi = data['moi']
+        info_msg.damping_rate_full_throttle = data['damping_rate_full_throttle']
+        info_msg.damping_rate_zero_throttle_clutch_disengaged = data['damping_rate_zero_throttle_clutch_disengaged']
+        info_msg.use_gear_autobox = data['use_gear_autobox']
+        info_msg.clutch_strength = data['clutch_strength']
+        info_msg.mass = data['mass']
+        info_msg.drag_coefficient = data['drag_coefficient']
+        info_msg.center_of_mass.x = data['center_of_mass']['x']
+        info_msg.center_of_mass.y = data['center_of_mass']['y']
+        info_msg.center_of_mass.z = data['center_of_mass']['z']
+        self.vehicle_info_publisher.publish(info_msg)
+        msg = CarlaEgoVehicleStatus()
+        msg.header = self.get_header()
+        msg.velocity = data['speed']
+        self.speed = data['speed']
+        # todo msg.acceleration
+        msg.control.throttle = self.current_control.throttle
+        msg.control.steer = self.current_control.steer
+        msg.control.brake = self.current_control.brake
+        msg.control.hand_brake = self.current_control.hand_brake
+        msg.control.reverse = self.current_control.reverse
+        msg.control.gear = self.current_control.gear
+        msg.control.manual_gear_shift = self.current_control.manual_gear_shift
+
+        self.vehicle_status_publisher.publish(msg)
+
+    def publish_hd_map(self, sensor_id, data):
+        """
+        publish hd map data
+        """
+        roll = -math.radians(data['transform']['roll'])
+        pitch = -math.radians(data['transform']['pitch'])
+        yaw = -math.radians(data['transform']['yaw'])
+        quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
+        x = data['transform']['x']
+        y = -data['transform']['y']
+        z = data['transform']['z']
+
+        if self.odometry_publisher:
+
+            odometry = Odometry()
+            odometry.header.frame_id = 'map'
+            odometry.header.stamp = rospy.Time.from_sec(self.timestamp)
+            odometry.child_frame_id = 'base_link'
+            odometry.pose.pose.position.x = x
+            odometry.pose.pose.position.y = y
+            odometry.pose.pose.position.z = z
+
+            odometry.pose.pose.orientation.x = quat[0]
+            odometry.pose.pose.orientation.y = quat[1]
+            odometry.pose.pose.orientation.z = quat[2]
+            odometry.pose.pose.orientation.w = quat[3]
+
+            odometry.twist.twist.linear.x = self.speed
+            odometry.twist.twist.linear.y = 0
+            odometry.twist.twist.linear.z = 0
+
+            self.odometry_publisher.publish(odometry)
+
+        if self.world_info_publisher:
+            # extract map name
+            map_name = os.path.basename(data['map_file'])[:-4]
+            if self.current_map_name != map_name:
+                self.current_map_name = map_name
+                world_info = CarlaWorldInfo()
+                world_info.map_name = self.current_map_name
+                world_info.opendrive = data['opendrive']
+                self.world_info_publisher.publish(world_info)
+        if self.map_file_publisher:
+            self.map_file_publisher.publish(data['map_file'])
+
+    def use_stepping_mode(self):  # pylint: disable=no-self-use
+        """
+        Overload this function to use stepping mode!
+        """
+        return False
+
+    def run_step(self, input_data, timestamp):
+        """
+        Execute one step of navigation.
+ """ + self.vehicle_control_event.clear() + self.timestamp = timestamp + self.clock_publisher.publish(Clock(rospy.Time.from_sec(timestamp))) + + # check if stack is still running + if self.stack_process and self.stack_process.poll() is not None: + raise RuntimeError("Stack exited with: {} {}".format( + self.stack_process.returncode, self.stack_process.communicate()[0])) + + # publish global plan to ROS once + if self._global_plan_world_coord and not self.global_plan_published: + self.global_plan_published = True + self.publish_plan() + + new_data_available = False + + # publish data of all sensors + for key, val in input_data.items(): + new_data_available = True + sensor_type = self.id_to_sensor_type_map[key] + if sensor_type == 'sensor.camera.rgb': + self.publish_camera(key, val[1]) + elif sensor_type == 'sensor.lidar.ray_cast': + self.publish_lidar(key, val[1]) + elif sensor_type == 'sensor.other.gnss': + self.publish_gnss(key, val[1]) + elif sensor_type == 'sensor.can_bus': + self.publish_can(key, val[1]) + elif sensor_type == 'sensor.hd_map': + self.publish_hd_map(key, val[1]) + else: + raise TypeError("Invalid sensor type: {}".format(sensor_type)) + + if self.use_stepping_mode(): + if self.step_mode_possible and new_data_available: + self.vehicle_control_event.wait() + # if the stepping mode is not used or active, there is no need to wait here + + return self.current_control diff --git a/scenario_runner/srunner/autoagents/sensor_interface.py b/scenario_runner/srunner/autoagents/sensor_interface.py new file mode 100644 index 0000000..b4806e9 --- /dev/null +++ b/scenario_runner/srunner/autoagents/sensor_interface.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python + +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +This file containts CallBack class and SensorInterface, responsible of +handling the use of sensors for the agents +""" + +import copy +import logging +import numpy as np + +import carla + + +class CallBack(object): + + """ + Class the sensors listen to in order to receive their data each frame + """ + + def __init__(self, tag, sensor, data_provider): + """ + Initializes the call back + """ + self._tag = tag + self._data_provider = data_provider + + self._data_provider.register_sensor(tag, sensor) + + def __call__(self, data): + """ + call function + """ + if isinstance(data, carla.Image): + self._parse_image_cb(data, self._tag) + elif isinstance(data, carla.LidarMeasurement): + self._parse_lidar_cb(data, self._tag) + elif isinstance(data, carla.GnssMeasurement): + self._parse_gnss_cb(data, self._tag) + else: + logging.error('No callback method for this sensor.') + + # Parsing CARLA physical Sensors + def _parse_image_cb(self, image, tag): + """ + parses cameras + """ + array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) + array = copy.deepcopy(array) + array = np.reshape(array, (image.height, image.width, 4)) + self._data_provider.update_sensor(tag, array, image.frame) + + def _parse_lidar_cb(self, lidar_data, tag): + """ + parses lidar sensors + """ + points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4')) + points = copy.deepcopy(points) + points = np.reshape(points, (int(points.shape[0] / 3), 3)) + self._data_provider.update_sensor(tag, points, lidar_data.frame) + + def _parse_gnss_cb(self, gnss_data, tag): + """ + parses gnss sensors + """ + array = np.array([gnss_data.latitude, + gnss_data.longitude, + gnss_data.altitude], dtype=np.float64) + self._data_provider.update_sensor(tag, array, gnss_data.frame) + + +class 
diff --git a/scenario_runner/srunner/autoagents/sensor_interface.py b/scenario_runner/srunner/autoagents/sensor_interface.py
new file mode 100644
index 0000000..b4806e9
--- /dev/null
+++ b/scenario_runner/srunner/autoagents/sensor_interface.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+# This work is licensed under the terms of the MIT license.
+# For a copy, see <https://opensource.org/licenses/MIT>.
+
+"""
+This file contains the CallBack class and SensorInterface, responsible for
+handling the use of sensors for the agents
+"""
+
+import copy
+import logging
+import numpy as np
+
+import carla
+
+
+class CallBack(object):
+
+    """
+    Class the sensors listen to in order to receive their data each frame
+    """
+
+    def __init__(self, tag, sensor, data_provider):
+        """
+        Initializes the call back
+        """
+        self._tag = tag
+        self._data_provider = data_provider
+
+        self._data_provider.register_sensor(tag, sensor)
+
+    def __call__(self, data):
+        """
+        call function
+        """
+        if isinstance(data, carla.Image):
+            self._parse_image_cb(data, self._tag)
+        elif isinstance(data, carla.LidarMeasurement):
+            self._parse_lidar_cb(data, self._tag)
+        elif isinstance(data, carla.GnssMeasurement):
+            self._parse_gnss_cb(data, self._tag)
+        else:
+            logging.error('No callback method for this sensor.')
+
+    # Parsing CARLA physical Sensors
+    def _parse_image_cb(self, image, tag):
+        """
+        parses cameras
+        """
+        array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
+        array = copy.deepcopy(array)
+        array = np.reshape(array, (image.height, image.width, 4))
+        self._data_provider.update_sensor(tag, array, image.frame)
+
+    def _parse_lidar_cb(self, lidar_data, tag):
+        """
+        parses lidar sensors
+        """
+        points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
+        points = copy.deepcopy(points)
+        points = np.reshape(points, (int(points.shape[0] / 3), 3))
+        self._data_provider.update_sensor(tag, points, lidar_data.frame)
+
+    def _parse_gnss_cb(self, gnss_data, tag):
+        """
+        parses gnss sensors
+        """
+        array = np.array([gnss_data.latitude,
+                          gnss_data.longitude,
+                          gnss_data.altitude], dtype=np.float64)
+        self._data_provider.update_sensor(tag, array, gnss_data.frame)
+
+
+class SensorInterface(object):
+
+    """
+    Class that contains all sensor data
+    """
+
+    def __init__(self):
+        """
+        Initializes the class
+        """
+        self._sensors_objects = {}
+        self._data_buffers = {}
+        self._timestamps = {}
+
+    def register_sensor(self, tag, sensor):
+        """
+        Registers the sensors
+        """
+        if tag in self._sensors_objects:
+            raise ValueError("Duplicated sensor tag [{}]".format(tag))
+
+        self._sensors_objects[tag] = sensor
+        self._data_buffers[tag] = None
+        self._timestamps[tag] = -1
+
+    def update_sensor(self, tag, data, timestamp):
+        """
+        Updates the sensor
+        """
+        if tag not in self._sensors_objects:
+            raise ValueError("The sensor with tag [{}] has not been created!".format(tag))
+        self._data_buffers[tag] = data
+        self._timestamps[tag] = timestamp
+
+    def all_sensors_ready(self):
+        """
+        Checks if all the sensors have sent data at least once
+        """
+        for key in self._sensors_objects:
+            if self._data_buffers[key] is None:
+                return False
+        return True
+
+    def get_data(self):
+        """
+        Returns the latest (frame, data) tuple for every registered sensor
+        """
+        data_dict = {}
+        for key in self._sensors_objects:
+            data_dict[key] = (self._timestamps[key], self._data_buffers[key])
+        return data_dict
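To close, a standalone sketch of the SensorInterface data flow, bypassing CARLA entirely. FakeSensor stands in for a carla.Sensor and exists only for this illustration; the frame number is invented.

    # Illustrative round-trip through SensorInterface (FakeSensor is made up).
    import numpy as np

    from srunner.autoagents.sensor_interface import SensorInterface


    class FakeSensor(object):
        pass


    interface = SensorInterface()
    interface.register_sensor('GPS', FakeSensor())
    assert not interface.all_sensors_ready()       # no data received yet

    fix = np.array([0.001, 0.002, 30.0])           # [latitude, longitude, altitude]
    interface.update_sensor('GPS', fix, 1042)      # 1042: invented frame number
    assert interface.all_sensors_ready()

    frame, data = interface.get_data()['GPS']      # (1042, array([...]))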