in CARLA_0.9.8/PythonAPI/carla/agents/navigation/carla_env.py [0:0]
def __init__(self,
             render_display=0,  # 0, 1
             record_display_images=0,  # 0, 1
             record_rl_images=0,  # 0, 1
             changing_weather_speed=0.0,  # [0, +inf)
             display_text=0,  # 0, 1
             rl_image_size=84,
             max_episode_steps=1000,
             frame_skip=1,
             is_other_cars=True,
             fov=60,  # degrees for rl camera
             num_cameras=3,
             port=2000
             ):
    if record_display_images:
        assert render_display
    self.render_display = render_display
    self.save_display_images = record_display_images
    self.save_rl_images = record_rl_images
    self.changing_weather_speed = changing_weather_speed
    self.display_text = display_text
    self.rl_image_size = rl_image_size
    self._max_episode_steps = max_episode_steps  # DMC uses this
    self.frame_skip = frame_skip
    self.is_other_cars = is_other_cars
    self.num_cameras = num_cameras
    self.actor_list = []

    if self.render_display:
        pygame.init()
        self.display = pygame.display.set_mode((800, 600), pygame.HWSURFACE | pygame.DOUBLEBUF)
        self.font = get_font()
        self.clock = pygame.time.Clock()
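    # connect to the CARLA server and load the Town04 map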
    self.client = carla.Client('localhost', port)
    self.client.set_timeout(5.0)
    self.world = self.client.load_world("Town04")
    self.map = self.world.get_map()
    assert self.map.name == "Town04"

    # remove old vehicles and sensors (in case they survived)
    self.world.tick()
    actor_list = self.world.get_actors()
    for vehicle in actor_list.filter("*vehicle*"):
        print("Warning: removing old vehicle")
        vehicle.destroy()
    for sensor in actor_list.filter("*sensor*"):
        print("Warning: removing old sensor")
        sensor.destroy()

    self.vehicle = None
    self.vehicle_start_pose = None
    self.vehicles_list = []  # their ids
    self.vehicles = None
    self.reset_vehicle()  # creates self.vehicle
    self.actor_list.append(self.vehicle)
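    # sensors: an optional chase camera for the pygame display, plus the RL cameras stitched into the observation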
    blueprint_library = self.world.get_blueprint_library()
    if render_display:
        bp = blueprint_library.find('sensor.camera.rgb')
        bp.set_attribute('enable_postprocess_effects', str(True))
        self.camera_rgb = self.world.spawn_actor(
            bp,
            carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
            attach_to=self.vehicle)
        self.actor_list.append(self.camera_rgb)

    # we'll use up to five cameras, which we'll stitch together
    bp = blueprint_library.find('sensor.camera.rgb')
    bp.set_attribute('image_size_x', str(self.rl_image_size))
    bp.set_attribute('image_size_y', str(self.rl_image_size))
    bp.set_attribute('fov', str(fov))
    bp.set_attribute('enable_postprocess_effects', str(True))
    location = carla.Location(x=1.6, z=1.7)
    self.camera_rl = self.world.spawn_actor(bp, carla.Transform(location, carla.Rotation(yaw=0.0)), attach_to=self.vehicle)
    self.camera_rl_left = self.world.spawn_actor(bp, carla.Transform(location, carla.Rotation(yaw=-float(fov))), attach_to=self.vehicle)
    self.camera_rl_lefter = self.world.spawn_actor(bp, carla.Transform(location, carla.Rotation(yaw=-2 * float(fov))), attach_to=self.vehicle)
    self.camera_rl_right = self.world.spawn_actor(bp, carla.Transform(location, carla.Rotation(yaw=float(fov))), attach_to=self.vehicle)
    self.camera_rl_righter = self.world.spawn_actor(bp, carla.Transform(location, carla.Rotation(yaw=2 * float(fov))), attach_to=self.vehicle)
    self.actor_list.append(self.camera_rl)
    self.actor_list.append(self.camera_rl_left)
    self.actor_list.append(self.camera_rl_lefter)
    self.actor_list.append(self.camera_rl_right)
    self.actor_list.append(self.camera_rl_righter)
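    # collision sensor; _on_collision collects collision intensities between environment steps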
    bp = self.world.get_blueprint_library().find('sensor.other.collision')
    self.collision_sensor = self.world.spawn_actor(bp, carla.Transform(), attach_to=self.vehicle)
    self.collision_sensor.listen(lambda event: self._on_collision(event))
    self.actor_list.append(self.collision_sensor)
    self._collision_intensities_during_last_time_step = []

    if self.save_display_images or self.save_rl_images:
        import datetime
        now = datetime.datetime.now()
        image_dir = "images-" + now.strftime("%Y-%m-%d-%H-%M-%S")
        os.mkdir(image_dir)
        self.image_dir = image_dir
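    # tick the world and all attached cameras in lock-step (synchronous mode) at 20 fps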
    if self.render_display:
        self.sync_mode = CarlaSyncMode(self.world, self.camera_rgb, self.camera_rl, self.camera_rl_left,
                                       self.camera_rl_lefter, self.camera_rl_right, self.camera_rl_righter, fps=20)
    else:
        self.sync_mode = CarlaSyncMode(self.world, self.camera_rl, self.camera_rl_left, self.camera_rl_lefter,
                                       self.camera_rl_right, self.camera_rl_righter, fps=20)

    # weather
    self.weather = Weather(self.world, self.changing_weather_speed)
    # dummy gym-style attributes, since the bisim code assumes the DeepMind Control Suite API
    low = -1.0
    high = 1.0
    self.action_space = DotMap()
    self.action_space.low.min = lambda: low
    self.action_space.high.max = lambda: high
    self.action_space.shape = [2]
    self.observation_space = DotMap()
    # num_cameras images of width rl_image_size are stitched side by side, e.g. (3, 84, 252) with the defaults
    self.observation_space.shape = (3, rl_image_size, num_cameras * rl_image_size)
    self.observation_space.dtype = np.dtype(np.uint8)
    self.reward_range = None
    self.metadata = None
    self.action_space.sample = lambda: np.random.uniform(
        low=low, high=high, size=self.action_space.shape[0]).astype(np.float32)
    # roaming carla agent
    self.agent = None
    self.count = 0
    self.dist_s = 0
    self.return_ = 0
    self.collide_count = 0
    self.velocities = []
    self.world.tick()
    self.reset()  # creates self.agent
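
A minimal usage sketch of this constructor follows. The enclosing class name is not shown in this excerpt, so `CarlaEnv` is an assumption, as is the gym-style reset()/step() contract; a CARLA 0.9.8 server must already be listening on the chosen port.

# Hypothetical usage sketch -- class name and step()/reset() return values are assumptions.
env = CarlaEnv(render_display=0, changing_weather_speed=0.1,
               rl_image_size=84, num_cameras=3, frame_skip=1, port=2000)
obs = env.reset()                            # assumed to return a (3, 84, 252) uint8 observation
action = env.action_space.sample()           # 2-dim float32 action drawn uniformly from [-1, 1]
obs, reward, done, info = env.step(action)   # assumed gym-style step, not shown in this excerpt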