
Reading depth images, RGB images, and point clouds directly in Python, with visualization (including extracting images from a D435i-recorded bag)

Taking Intel's D435i camera as an example, the following Python code reads the depth and RGB streams and visualizes the resulting point cloud:

import pyrealsense2 as rs
import numpy as np
import cv2
import pcl
from pcl import pcl_visualization

cloud = pcl.PointCloud_PointXYZRGB()


def visual(visual_viewer, pt, color):
    """Show one frame of colored points in the PCL viewer.

    pt    : structured array of vertices from rs.points.get_vertices()
    color : flattened BGR image, one row per vertex
    """
    length = len(pt)
    points = np.zeros((length, 4), dtype=np.float32)
    for i in range(length):
        points[i][0] = pt[i][0]
        points[i][1] = pt[i][1]
        points[i][2] = pt[i][2]
        # Pack B, G, R into the single float "rgb" field expected by PointXYZRGB
        points[i][3] = (int(color[i][2]) << 16) | (int(color[i][1]) << 8) | int(color[i][0])
    cloud.from_array(points)  # build the point cloud from a NumPy array
    visual_viewer.ShowColorCloud(cloud)


if __name__ == "__main__":
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    pipeline.start(config)
    # Align depth frames to the color stream
    align_to_color = rs.align(rs.stream.color)
    pc = rs.pointcloud()
    points = rs.points()
    visual_viewer = pcl_visualization.CloudViewing()
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            frames = align_to_color.process(frames)
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            # Stack both images horizontally
            images = np.hstack((color_image, depth_colormap))
            # One BGR triple per depth pixel / vertex
            colorful = color_image.reshape(-1, 3)
            pc.map_to(color_frame)
            points = pc.calculate(depth_frame)
            # Get vertex coordinates
            vtx = np.asanyarray(points.get_vertices())
            visual(visual_viewer, vtx, colorful)
            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        # Stop streaming
        pipeline.stop()
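The per-point loop in visual() touches roughly 300k points per 640x480 frame, which is slow in pure Python. Below is a minimal vectorized sketch of the same array construction; the helper name build_xyzrgb is ours (not part of pyrealsense2 or python-pcl), and it assumes vtx is the structured array returned by points.get_vertices() and colorful is the flattened BGR image used above:

import numpy as np

def build_xyzrgb(vtx, colorful):
    # Reinterpret the structured (f0, f1, f2) vertex array as a plain (N, 3) float32 array
    xyz = np.asanyarray(vtx).view(np.float32).reshape(-1, 3)
    pts = np.zeros((len(xyz), 4), dtype=np.float32)
    pts[:, :3] = xyz
    # Pack B, G, R channels into the single float "rgb" column used by PointCloud_PointXYZRGB
    bgr = colorful.astype(np.uint32)
    pts[:, 3] = (bgr[:, 2] << 16) | (bgr[:, 1] << 8) | bgr[:, 0]
    return pts

With this helper, the body of visual() reduces to cloud.from_array(build_xyzrgb(vtx, colorful)) followed by ShowColorCloud(cloud).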

To read and visualize a recorded *.bag file instead, there are two approaches:

Approach 1: use Intel's pyrealsense2 library and simply read the data from the file:

pipeline = rs.pipeline()
# Create a config object
config = rs.config()
# Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, file_path, repeat_playback=False)
pipeline.start(config)

Everything else is identical to the live-camera code above.
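With repeat_playback=False, two playback details are easy to trip over: the file is played back in real time by default (frames get dropped if processing is slower than the recording), and wait_for_frames() raises a RuntimeError once the recording runs out. A minimal sketch of a playback loop that handles both, assuming file_path points at the recorded bag:

import pyrealsense2 as rs

file_path = "./stairs.bag"  # path to the recorded bag
pipeline = rs.pipeline()
config = rs.config()
rs.config.enable_device_from_file(config, file_path, repeat_playback=False)
profile = pipeline.start(config)

# Disable real-time playback so no frames are skipped while we process
playback = profile.get_device().as_playback()
playback.set_real_time(False)

align_to_color = rs.align(rs.stream.color)
try:
    while True:
        try:
            frames = pipeline.wait_for_frames()
        except RuntimeError:
            break  # end of the recording reached
        frames = align_to_color.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # ... same processing as in the live-camera loop above ...
finally:
    pipeline.stop()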

Approach 2: use rosbag to read the *.bag data and cv_bridge to convert the messages:

import cv2
import numpy as np
import rosbag
from cv_bridge import CvBridge
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
import pcl
from pcl import pcl_visualization

# Camera intrinsics of the recorded stream: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
depth_camera_metrix = np.array([[920.523, 0.0, 641.936],
                                [0.0, 919.243, 351.036],
                                [0.0, 0.0, 1.0]])


def depth2xyz(depth_map, depth_cam_matrix, flatten=False, depth_scale=1000):
    """Back-project a depth image (depth_scale units per meter) to XYZ coordinates."""
    fx, fy = depth_cam_matrix[0, 0], depth_cam_matrix[1, 1]
    cx, cy = depth_cam_matrix[0, 2], depth_cam_matrix[1, 2]
    h, w = np.mgrid[0:depth_map.shape[0], 0:depth_map.shape[1]]
    z = depth_map / depth_scale
    x = (w - cx) * z / fx
    y = (h - cy) * z / fy
    xyz = np.dstack((x, y, z)) if not flatten else np.dstack((x, y, z)).reshape(-1, 3)
    return xyz


def visual_pcl(visual, depth, color):
    """Build a colored point cloud from a depth image and a flattened BGR image and show it."""
    xyz = depth2xyz(depth, depth_camera_metrix)
    xyz = xyz.reshape(-1, 3)
    color = color.reshape(-1, 3)
    cloud = pcl.PointCloud_PointXYZRGB()
    length = len(xyz)
    temp = np.zeros((length, 4), np.float32)
    for i in range(length):
        temp[i][0] = xyz[i][0] * 1000.0  # back to millimeters for display
        temp[i][1] = xyz[i][1] * 1000.0
        temp[i][2] = xyz[i][2] * 1000.0
        # Pack B, G, R into the single float "rgb" field expected by PointXYZRGB
        temp[i][3] = (int(color[i][2]) << 16) | (int(color[i][1]) << 8) | int(color[i][0])
    cloud.from_array(temp)
    visual.ShowColorCloud(cloud)


if __name__ == "__main__":
    file_path = "./stairs.bag"
    visual = pcl_visualization.CloudViewing()
    bag = rosbag.Bag(file_path, "r")
    bag_data = bag.read_messages()
    info = bag.get_type_and_topic_info()
    print(info)
    bridge = CvBridge()
    depth = None
    for topic, msg, t in bag_data:
        print(topic)
        # Topic names depend on how the bag was recorded
        if topic == "/cam_1/aligned_depth_to_color/image_raw/compressed":
            depth = bridge.compressed_imgmsg_to_cv2(msg)
            print(depth.shape)
            # depth = pc2.read_points(msg)
            # points = np.asanyarray(depth)  # use this instead if the bag stores PointCloud2 messages
            cv2.imshow("Image window", depth)
            cv2.waitKey(3)
        elif topic == "/cam_1/color/image_raw/compressed":
            cv_img = bridge.compressed_imgmsg_to_cv2(msg)
            colorful = cv_img.reshape(-1, 3)
            if depth is not None:  # wait until at least one depth frame has arrived
                visual_pcl(visual, depth, colorful)
            cv2.imshow("Cam1", cv_img)
            cv2.waitKey(3)
    bag.close()
    print("end")
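The topic names checked above ("/cam_1/aligned_depth_to_color/..." and "/cam_1/color/...") depend entirely on how the bag was recorded, so it is worth listing what the file actually contains before hard-coding them. A small sketch using the same rosbag API (get_type_and_topic_info() returns a per-topic dictionary):

import rosbag

bag = rosbag.Bag("./stairs.bag", "r")
info = bag.get_type_and_topic_info()
# info.topics maps each topic name to its message type, count and frequency
for name, topic_info in info.topics.items():
    print(name, topic_info.msg_type, topic_info.message_count)
bag.close()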

Example images: (original screenshots omitted)
