当前位置:   article > 正文

carla学习笔记(五)_carla.colorconverter

carla.colorconverter

今天主要学习了example里面的tutorial.py的源码,越发觉得这才应该是最开始学习的代码,大部分代码块都有注释,理解起来十分容易。

  1. #!/usr/bin/env python
  2. # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
  3. # Barcelona (UAB).
  4. #
  5. # This work is licensed under the terms of the MIT license.
  6. # For a copy, see <https://opensource.org/licenses/MIT>.
  7. import glob
  8. import os
  9. import sys
  10. try:
  11. sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
  12. sys.version_info.major,
  13. sys.version_info.minor,
  14. 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
  15. except IndexError:
  16. pass
  17. import carla
  18. import random
  19. import time
  20. def main():
  21. actor_list = []
  22. # In this tutorial script, we are going to add a vehicle to the simulation
  23. # and let it drive in autopilot. We will also create a camera attached to
  24. # that vehicle, and save all the images generated by the camera to disk.
  25. try:
  26. # First of all, we need to create the client that will send the requests
  27. # to the simulator. Here we'll assume the simulator is accepting
  28. # requests in the localhost at port 2000.
  29. client = carla.Client('localhost', 2000)
  30. client.set_timeout(2.0)
  31. # Once we have a client we can retrieve the world that is currently
  32. # running.
  33. world = client.get_world()
  34. # The world contains the list blueprints that we can use for adding new
  35. # actors into the simulation.
  36. blueprint_library = world.get_blueprint_library()
  37. # Now let's filter all the blueprints of type 'vehicle' and choose one
  38. # at random.
  39. bp = random.choice(blueprint_library.filter('vehicle'))
  40. # A blueprint contains the list of attributes that define a vehicle's
  41. # instance, we can read them and modify some of them. For instance,
  42. # let's randomize its color.
  43. if bp.has_attribute('color'):
  44. color = random.choice(bp.get_attribute('color').recommended_values)
  45. bp.set_attribute('color', color)
  46. # Now we need to give an initial transform to the vehicle. We choose a
  47. # random transform from the list of recommended spawn points of the map.
  48. transform = random.choice(world.get_map().get_spawn_points())
  49. # So let's tell the world to spawn the vehicle.
  50. vehicle = world.spawn_actor(bp, transform)
  51. # It is important to note that the actors we create won't be destroyed
  52. # unless we call their "destroy" function. If we fail to call "destroy"
  53. # they will stay in the simulation even after we quit the Python script.
  54. # For that reason, we are storing all the actors we create so we can
  55. # destroy them afterwards.
  56. # 这里是为了帮助销毁vehicles的时候方便,特地设置了一个list。
  57. actor_list.append(vehicle)
  58. print('created %s' % vehicle.type_id)
  59. # Let's put the vehicle to drive around.
  60. vehicle.set_autopilot(True)
  61. # Let's add now a "depth" camera attached to the vehicle. Note that the
  62. # transform we give here is now relative to the vehicle.
  63. camera_bp = blueprint_library.find('sensor.camera.depth')
  64. cam_bp = blueprint_library.find('sensor.camera.rgb')
  65. camera_transform = carla.Transform(carla.Location(z=20))
  66. cam01_transform = carla.Transform(carla.Location(x=10, y=10, z=20))
  67. # camera = world.spawn_actor(camera_bp, camera_transform, attach_to=vehicle)
  68. camera = world.spawn_actor(camera_bp, camera_transform)
  69. cam01 = world.spawn_actor(cam_bp, cam01_transform)
  70. actor_list.append(camera)
  71. actor_list.append(cam01)
  72. print('created %s' % camera.type_id)
  73. print('created %s' % cam01.type_id)
  74. # Now we register the function that will be called each time the sensor
  75. # receives an image. In this example we are saving the image to disk
  76. # converting the pixels to gray-scale.
  77. # cc = carla.ColorConverter.LogarithmicDepth
  78. # camera.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame, cc))
  79. cam01.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame))
  80. # camera.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame))
  81. # Oh wait, I don't like the location we gave to the vehicle, I'm going
  82. # to move it a bit forward.
  83. location = vehicle.get_location()
  84. location.x += 40
  85. vehicle.set_location(location)
  86. print('moved vehicle to %s' % location)
  87. # But the city now is probably quite empty, let's add a few more
  88. # vehicles.
  89. # 创造一列车10辆
  90. transform.location += carla.Location(x=40, y=-3.2)
  91. transform.rotation.yaw = -180.0
  92. # for _ in range(0, 10):
  93. # transform.location.x += 8.0
  94. # bp = random.choice(blueprint_library.filter('vehicle'))
  95. # # This time we are using try_spawn_actor. If the spot is already
  96. # # occupied by another object, the function will return None.
  97. # npc = world.try_spawn_actor(bp, transform)
  98. # if npc is not None:
  99. # actor_list.append(npc)
  100. # npc.set_autopilot(True)
  101. # print('created %s' % npc.type_id)
  102. time.sleep(500)
  103. finally:
  104. print('destroying actors')
  105. camera.destroy()
  106. client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
  107. print('done.')
  108. if __name__ == '__main__':
  109. main()

然后我看到它里面有一部分是存储 sensor 采集到的数据的代码,我就想起了昨天的问题,准备借鉴这部分的内容再验证一下:

  1. """
  2. 学习tutorial.py时,看到了类似功能的代码,然后检验了一下我昨天的设想。
  3. 主要内容是:
  4. 生成一个camera.depth、一个camera.rgb、一个lidar然后分别存储进actor_list[],最后保存传感器采集
  5. 的数据,然后返回matlab显示。采集的数量目前来看是通过time.sleep()函数来控制的。
  6. """
  7. import glob
  8. import os
  9. import sys
  10. try:
  11. sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
  12. sys.version_info.major,
  13. sys.version_info.minor,
  14. 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
  15. except IndexError:
  16. pass
  17. import carla
  18. import random
  19. import time
  20. def main():
  21. actor_list = []
  22. try:
  23. client = carla.Client('localhost', 2000)
  24. client.set_timeout(2.0)
  25. world = client.get_world()
  26. blueprint_library = world.get_blueprint_library()
  27. bp = random.choice(blueprint_library.filter('vehicle'))
  28. if bp.has_attribute('color'):
  29. color = random.choice(bp.get_attribute('color').recommended_values)
  30. bp.set_attribute('color', color)
  31. transform = random.choice(world.get_map().get_spawn_points())
  32. # 在蓝图库中找到三个传感器
  33. camera_bp = blueprint_library.find('sensor.camera.depth')
  34. cam_bp = blueprint_library.find('sensor.camera.rgb')
  35. lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
  36. # 设置三个传感器的生成的位置
  37. camera_transform = carla.Transform(carla.Location(z=20))
  38. cam01_transform = carla.Transform(carla.Location(x=10, y=10, z=20))
  39. lidar01_transform = carla.Transform(carla.Location(x=20, y=10, z=1))
  40. # 这个location的位置选的不太好,居然一个点都没有采到,不知道是什么问题,现在验证一下。利用draw_string函数:
  41. # world.debug.draw_string(lidar01_transform.location, 'O', draw_shadow=False,
  42. # color=carla.Color(r=0, g=255, b=0), life_time=500,
  43. # persistent_lines=True)
  44. # 发现了是z设置的问题,一开始设置的太高了,设置了20
  45. # 生成三个传感器,独立于vehicle
  46. camera = world.spawn_actor(camera_bp, camera_transform)
  47. cam01 = world.spawn_actor(cam_bp, cam01_transform)
  48. lidar01 = world.spawn_actor(lidar_bp, lidar01_transform)
  49. #在actor_list表中添加三个传感器
  50. actor_list.append(camera)
  51. actor_list.append(cam01)
  52. actor_list.append(lidar01)
  53. # 终端输出三个传感器的名字id
  54. print('created %s' % camera.type_id)
  55. print('created %s' % cam01.type_id)
  56. print('created %s' % lidar01.type_id)
  57. # Now we register the function that will be called each time the sensor
  58. # receives an image. In this example we are saving the image to disk
  59. # converting the pixels to gray-scale.
  60. # cc = carla.ColorConverter.LogarithmicDepth
  61. # camera.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame, cc))
  62. # cam01.listen(lambda image: image.save_to_disk('/media/hhh/75c0c2a9-f565-4a05-b2a5-5599a918e2f0/hhh/carlaLearning/PythonAPI/learning_document/%06d.png' % image.frame))
  63. # 解决了昨天的疑问,应该是昨天存储路径的问题,就是os.path的问题,我的设想从一开始就没有问题。
  64. # camera.listen(lambda image: image.save_to_disk('_out/%06d.png' % image.frame))
  65. # lidar01.listen(lambda image: image.save_to_disk('/media/hhh/75c0c2a9-f565-4a05-b2a5-5599a918e2f0/hhh/carlaLearning/PythonAPI/learning_document/%06d.ply' % image.frame))
  66. # But the city now is probably quite empty, let's add a few more
  67. # vehicles.
  68. # 创造一列车,10辆
  69. # 但我发现每次的都是随机生成的,猜测是carla随机生成
  70. # 后来仔细看来代码,transform.location += carla.Location(x=40, y=-3.2),这里的transform应该
  71. # 是用了前面的random选择,这里的carla.Location应该是x=40,y=-3.2,z=0然后加到前面的transform上面去
  72. transform.location += carla.Location(x=40, y=-3.2)
  73. transform.rotation.yaw = -180.0
  74. print(transform)
  75. for _ in range(0, 10):
  76. transform.location.x += 8.0
  77. print(transform)
  78. bp = random.choice(blueprint_library.filter('vehicle'))
  79. # This time we are using try_spawn_actor. If the spot is already
  80. # occupied by another object, the function will return None.
  81. # 这个函数挺有意思的,大概封装了碰撞检测函数?
  82. npc = world.try_spawn_actor(bp, transform)
  83. if npc is not None:
  84. actor_list.append(npc)
  85. npc.set_autopilot(True)
  86. print('created %s' % npc.type_id)
  87. #这个sleep设置了try部分运行的时间。
  88. time.sleep(500)
  89. finally:
  90. print('destroying actors')
  91. camera.destroy()
  92. # 利用指令来删除actor_list里面的vehicles
  93. client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
  94. print('done.')
  95. if __name__ == '__main__':
  96. main()

最后显示是可以采集的,也可以正常存储。说明项目要求的路侧放置lidar基本可以实现。下周周报有东西可以写了。准备这段时间尝试在路侧放置一个lidar,然后控制一辆车从路口经过,然后分析lidar采集的数据,看能不能清晰的实现。

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/知新_RL/article/detail/91320
推荐阅读
相关标签
  

闽ICP备14008679号