1. Open-source libraries:
OpenCV:
2D image processing and machine learning
https://github.com/Ewenwan/MVision/tree/master/opencv_app
OpenNI2 + OpenKinect (freenect):
driver and processing libraries for depth sensors (Microsoft Kinect and Asus Xtion Pro)
PCL:
the Point Cloud Library, for processing 3D point cloud data
https://github.com/Ewenwan/MVision/tree/master/PCL_APP
find_object_2d (ROS package): 2D feature extraction and object detection
https://github.com/introlab/find-object
http://wiki.ros.org/find_object_2d
3D object recognition: https://github.com/wg-perception
2D & 3D object detection: algorithms and papers
Install from packages:
# ROS Kinetic:
$ sudo apt-get install ros-kinetic-find-object-2d
# ROS Jade:
$ sudo apt-get install ros-jade-find-object-2d
# ROS Indigo:
$ sudo apt-get install ros-indigo-find-object-2d
# ROS Hydro:
$ sudo apt-get install ros-hydro-find-object-2d
Install from source:
$ cd ~/catkin_ws
$ git clone https://github.com/introlab/find-object.git src/find_object_2d
$ catkin_make
Run:
$ roscore &
# Launch your preferred USB camera driver
$ rosrun uvc_camera uvc_camera_node &
$ rosrun find_object_2d find_object_2d image:=image_raw
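To consume the detections programmatically, a node can subscribe to the objects topic (std_msgs/Float32MultiArray) that find_object_2d publishes. The snippet below is only a minimal sketch: it assumes the 12-floats-per-object layout described on the find_object_2d wiki (id, width, height, then the 3x3 homography) and simply prints each detection.

#!/usr/bin/env python
# Sketch only: print the objects detected by find_object_2d.
# Assumes the /objects layout documented on the find_object_2d wiki:
# [id, width, height, h11, h12, h13, h21, h22, h23, h31, h32, h33, ...]
import rospy
from std_msgs.msg import Float32MultiArray

def objects_callback(msg):
    data = msg.data
    for i in range(0, len(data), 12):
        obj_id = int(data[i])
        width, height = data[i + 1], data[i + 2]
        rospy.loginfo("object %d: %.0f x %.0f px", obj_id, width, height)

if __name__ == '__main__':
    rospy.init_node('objects_listener')
    rospy.Subscriber('objects', Float32MultiArray, objects_callback, queue_size=1)
    rospy.spin()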
2. Common image resolutions:
160x120 (QQVGA), 320x240 (QVGA), 640x480 (VGA), 1280x960 (SXGA).
3. Installing the depth sensor drivers: ROS OpenNI and OpenKinect (freenect) drivers
$ sudo apt-get install ros-indigo-openni-* ros-indigo-openni2-* ros-indigo-freenect-*
$ rospack profile
4. Installing a regular webcam driver (Webcam Driver)
Install from source
usb_cam:
cd ~/catkin_ws/src
git clone https://github.com/bosch-ros-pkg/usb_cam.git
cd ~/catkin_ws
catkin_make
rospack profile
libuvc_ros:
https://github.com/ros-drivers/libuvc_ros
uvc_cam:
cd ~/catkin_ws/src
git clone https://github.com/ericperko/uvc_cam.git
rosmake uvc_cam
5. Testing the image sensors
For an RGB-D sensor, use the image_view package (http://wiki.ros.org/image_view) to view the data published on the /camera/rgb/image_raw topic.
Microsoft Kinect:
$ roslaunch freenect_launch freenect-registered-xyzrgb.launch
Asus Xtion, Xtion Pro, or Primesense 1.08/1.09 cameras:
$ roslaunch openni2_launch openni2.launch depth_registration:=true
View the RGB image:
$ rosrun image_view image_view image:=/camera/rgb/image_raw
View the depth image:
$ rosrun image_view image_view image:=/camera/depth_registered/image_rect

For a webcam driver, publish the images with:
$ roslaunch rbx1_vision usb_cam.launch video_device:=/dev/video0
or
$ roslaunch rbx1_vision usb_cam.launch video_device:=/dev/video1
and view them with:
$ rosrun image_view image_view image:=/camera/rgb/image_raw

The usb_cam.launch file looks like this:
<launch>
   <arg name="video_device" default="/dev/video0" />  <!-- the video device the driver should open -->
   <!-- node definition -->
   <node name="usb_cam" pkg="usb_cam" type="usb_cam_node" clear_params="true" output="screen">
      <!-- topic remapping -->
      <remap from="usb_cam/image_raw" to="/camera/rgb/image_raw" />
      <remap from="usb_cam/camera_info" to="/camera/rgb/camera_info" />
      <param name="video_device" value="$(arg video_device)" />
      <param name="image_width" value="640" />
      <param name="image_height" value="480" />
      <param name="framerate" value="30" />
      <!-- note: must match the image format the camera driver actually outputs; the original value here did not -->
      <param name="pixel_format" value="yuyv" />
      <param name="contrast" value="32" />     <!-- contrast -->
      <param name="brightness" value="32" />   <!-- brightness -->
      <param name="saturation" value="32" />   <!-- saturation -->
      <param name="autofocus" value="true" />
      <param name="camera_frame_id" value="camera_link" />
   </node>
</launch>
rqt can also be used to view the images.
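To confirm in code that images are actually arriving (independently of image_view), a short node can wait for a single message on the RGB topic and print its resolution and encoding. This is just a sanity-check sketch; /camera/rgb/image_raw is the topic used above, adjust it for your driver.

#!/usr/bin/env python
# Sketch only: wait for one image on the RGB topic and report its properties.
import rospy
from sensor_msgs.msg import Image

if __name__ == '__main__':
    rospy.init_node('image_check')
    topic = rospy.get_param('~topic', '/camera/rgb/image_raw')
    msg = rospy.wait_for_message(topic, Image, timeout=10.0)
    rospy.loginfo("%s: %dx%d, encoding=%s", topic, msg.width, msg.height, msg.encoding)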
6. Installing the ROS-supported OpenCV packages
$ sudo apt-get install ros-indigo-vision-opencv libopencv-dev \
python-opencv
$ rospack profile
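A quick way to verify the Python bindings are usable after the install is to import cv2 and print its version (a trivial check, not part of the original steps):

# Confirm the OpenCV Python bindings installed above can be imported
import cv2
print(cv2.__version__)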
7. Installing the cv_bridge package: the bridge between ROS and OpenCV that converts between ROS image messages and OpenCV images
http://wiki.ros.org/cv_bridge
rbx1_vision/nodes/cv_bridge_demo.py shows how to use cv_bridge:
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import rospy
import sys
import cv2
import cv2.cv as cv
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import numpy as np

class cvBridgeDemo():
    def __init__(self):
        self.node_name = "cv_bridge_demo"
        rospy.init_node(self.node_name)

        # Shutdown hook
        rospy.on_shutdown(self.cleanup)

        # Create the RGB image display window
        self.cv_window_name = self.node_name
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # Create the depth image display window
        cv.NamedWindow("Depth Image", cv.CV_WINDOW_NORMAL)
        cv.MoveWindow("Depth Image", 25, 350)

        # Create the ROS-image-to-OpenCV-image converter object
        self.bridge = CvBridge()

        # Subscribe to the depth and RGB image topics and set
        # the appropriate callbacks
        self.image_sub = rospy.Subscriber("input_rgb_image", Image, self.image_callback, queue_size=1)
        self.depth_sub = rospy.Subscriber("input_depth_image", Image, self.depth_callback, queue_size=1)

        # Log status
        rospy.loginfo("Waiting for image topics...")
        rospy.wait_for_message("input_rgb_image", Image)
        rospy.loginfo("Ready.")

    # Callback invoked when an RGB image is received
    def image_callback(self, ros_image):
        # Convert the ROS image to OpenCV format
        try:
            frame = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError, e:
            print e

        # Convert to a numpy image array
        frame = np.array(frame, dtype=np.uint8)

        # Simple processing of the image data: color filtering, edge detection, etc.
        display_image = self.process_image(frame)

        # Display the image
        cv2.imshow(self.node_name, display_image)

        # Check for keyboard events
        self.keystroke = cv2.waitKey(5)
        if self.keystroke != -1:
            cc = chr(self.keystroke & 255).lower()
            if cc == 'q':
                # The user has pressed the q key, so exit
                rospy.signal_shutdown("User hit q key to quit.")

    # Callback invoked when a depth image is received
    def depth_callback(self, ros_image):
        # Convert the ROS image to OpenCV format
        try:
            # Convert the depth image using the default passthrough encoding
            depth_image = self.bridge.imgmsg_to_cv2(ros_image, "passthrough")
        except CvBridgeError, e:
            print e

        # Convert to a numpy image array
        depth_array = np.array(depth_image, dtype=np.float32)

        # Normalize the depth values to the range [0, 1]
        cv2.normalize(depth_array, depth_array, 0, 1, cv2.NORM_MINMAX)

        # Process the depth image
        depth_display_image = self.process_depth_image(depth_array)

        # Display the result
        cv2.imshow("Depth Image", depth_display_image)

    # RGB image processing
    def process_image(self, frame):
        # Convert to grayscale
        grey = cv2.cvtColor(frame, cv.CV_BGR2GRAY)
        # Mean blur
        grey = cv2.blur(grey, (7, 7))
        # Canny edge detection
        edges = cv2.Canny(grey, 15.0, 30.0)
        return edges

    # Depth image processing
    def process_depth_image(self, frame):
        # Just return the raw frame here; other processing could be added
        return frame

    # Shut down the node and destroy all windows
    def cleanup(self):
        print "Shutting down vision node."
        cv2.destroyAllWindows()

# Main function
def main(args):
    try:
        cvBridgeDemo()
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down vision node."
        cv.DestroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
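cv_bridge also works in the opposite direction: cv2_to_imgmsg() turns an OpenCV/numpy image back into a sensor_msgs/Image so a processed result can be republished. The sketch below illustrates this for the Canny edges computed above; the output topic name output_edges is made up for the example.

#!/usr/bin/env python
# Sketch only: convert a processed OpenCV image back to a ROS image and republish it.
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

class EdgeRepublisher(object):
    def __init__(self):
        rospy.init_node("edge_republisher")
        self.bridge = CvBridge()
        # "output_edges" is an arbitrary topic name chosen for this example
        self.pub = rospy.Publisher("output_edges", Image, queue_size=1)
        rospy.Subscriber("input_rgb_image", Image, self.callback, queue_size=1)

    def callback(self, ros_image):
        try:
            frame = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
            grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            edges = cv2.Canny(grey, 15.0, 30.0)
            # Single-channel result, hence the "mono8" encoding
            msg = self.bridge.cv2_to_imgmsg(edges, "mono8")
            msg.header = ros_image.header  # keep the original timestamp and frame_id
            self.pub.publish(msg)
        except CvBridgeError as e:
            rospy.logerr(e)

if __name__ == '__main__':
    EdgeRepublisher()
    rospy.spin()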
Installing the usb_cam camera driver to grab the video stream:
$ cd ~/catkin_ws/src
$ git clone git://github.com/bosch-ros-pkg/usb_cam.git
$ cd ~/catkin_ws
$ catkin_make
$ source ~/catkin_ws/devel/setup.bash

Test the node above with image data from the RGB webcam. Note that the topic the driver publishes on is remapped to the topic the node expects:
<launch>
   <node name="usb_cam" pkg="usb_cam" type="usb_cam_node" output="screen" >
      <param name="video_device" value="/dev/video0" />
      <param name="image_width" value="640" />
      <param name="image_height" value="480" />
      <param name="pixel_format" value="yuyv" />
      <param name="camera_frame_id" value="usb_cam" />
      <param name="io_method" value="mmap"/>
      <remap from="/usb_cam/image_raw" to="camera/image_raw"/>
   </node>
   <node name="cvDemo" pkg="vision_system" type="my_cv_bridge_demo.py" output="screen">
   </node>
</launch>
Selecting a region of interest with the mouse:
#!/usr/bin/env python
# -*- coding:utf-8 -*-

""" Track a region selected by the user with the mouse, using OpenCV """

import rospy                # ROS Python API
import sys                  # system module, e.g. for command-line arguments
import cv2                  # new OpenCV2 API
import cv2.cv as cv         # old OpenCV API
from std_msgs.msg import String                                  # ROS standard message types (std_msgs)
from sensor_msgs.msg import Image, RegionOfInterest, CameraInfo  # ROS sensor message types (sensor_msgs)
from cv_bridge import CvBridge, CvBridgeError                    # ROS <-> OpenCV image conversion and its error type
import time                 # timing
import numpy as np          # numpy arrays

class ROS2OpenCV2(object):
    def __init__(self, node_name):
        self.node_name = node_name

        rospy.init_node(node_name)
        rospy.loginfo("Starting node " + str(node_name))

        # Shutdown hook
        rospy.on_shutdown(self.cleanup)

        # Parameters that can be overridden from a launch file
        self.show_text = rospy.get_param("~show_text", True)
        self.show_features = rospy.get_param("~show_features", True)
        self.show_boxes = rospy.get_param("~show_boxes", True)
        self.flip_image = rospy.get_param("~flip_image", False)
        self.feature_size = rospy.get_param("~feature_size", 10)   # radius of the feature point circles

        # Publish the region of interest as a sensor_msgs/RegionOfInterest message
        self.ROI = RegionOfInterest()
        self.roi_pub = rospy.Publisher("/roi", RegionOfInterest, queue_size=1)

        # Initialize global variables
        self.frame = None
        self.frame_size = None
        self.frame_width = None
        self.frame_height = None
        self.depth_image = None
        self.marker_image = None
        self.display_image = None
        self.grey = None
        self.prev_grey = None
        self.selected_point = None
        self.selection = None
        self.drag_start = None
        self.keystroke = None
        self.detect_box = None      # box around the detected target
        self.track_box = None       # box around the tracked target
        self.display_box = None
        self.keep_marker_history = False
        self.night_mode = False
        self.auto_face_tracking = False
        self.cps = 0                # cycles per second = number of processing loops per second
        self.cps_values = list()
        self.cps_n_values = 20
        self.busy = False
        self.resize_window_width = 0    # window size
        self.resize_window_height = 0
        self.face_tracking = False

        # Create the display window
        self.cv_window_name = self.node_name
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        if self.resize_window_height > 0 and self.resize_window_width > 0:
            cv.ResizeWindow(self.cv_window_name, self.resize_window_width, self.resize_window_height)

        # Create the ROS-image-to-OpenCV-image converter object (bridge)
        self.bridge = CvBridge()

        # Set the mouse click callback for the window
        cv.SetMouseCallback (self.node_name, self.on_mouse_click, None)

        # Subscribe to the depth and RGB image topics and set the callbacks
        self.image_sub = rospy.Subscriber("input_rgb_image", Image, self.image_callback, queue_size=1)
        self.depth_sub = rospy.Subscriber("input_depth_image", Image, self.depth_callback, queue_size=1)

    # Mouse click event callback
    def on_mouse_click(self, event, x, y, flags, param):
        # Allow the user to select a region of interest with the mouse;
        # the global variable selection holds the rectangle <top-left x, top-left y, width, height>
        if self.frame is None:
            return

        # Mouse button pressed
        if event == cv.CV_EVENT_LBUTTONDOWN and not self.drag_start:
            self.features = []              # reset the region of interest
            self.track_box = None           # tracking box
            self.detect_box = None          # detection box
            self.selected_point = (x, y)    # selected point
            self.drag_start = (x, y)        # drag start point; dragging begins

        # Mouse button released
        if event == cv.CV_EVENT_LBUTTONUP:
            self.drag_start = None          # dragging ends
            self.classifier_initialized = False
            self.detect_box = self.selection

        # Mouse dragging
        if self.drag_start:
            xmin = max(0, min(x, self.drag_start[0]))   # left edge: the smaller of the two x coordinates
            ymin = max(0, min(y, self.drag_start[1]))
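Any other node can consume the selection published on /roi (sensor_msgs/RegionOfInterest), for example a tracker or a pan-tilt controller. A minimal consumer sketch that just logs the selected rectangle (the node name roi_listener is arbitrary):

#!/usr/bin/env python
# Sketch only: log the region of interest published on /roi.
import rospy
from sensor_msgs.msg import RegionOfInterest

def roi_callback(msg):
    rospy.loginfo("ROI: x_offset=%d y_offset=%d width=%d height=%d",
                  msg.x_offset, msg.y_offset, msg.width, msg.height)

if __name__ == '__main__':
    rospy.init_node('roi_listener')
    rospy.Subscriber('/roi', RegionOfInterest, roi_callback, queue_size=1)
    rospy.spin()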