【5】OpenCV2.4.9实现图像拼接与融合方法【SURF、SIFT、ORB、FAST、Harris角点 、stitch 】_opencv多源图像融合
当前位置:   article > 正文

【5】OpenCV2.4.9实现图像拼接与融合方法【SURF、SIFT、ORB、FAST、Harris角点 、stitch 】_opencv多源图像融合

opencv多源图像融合

相关文章:

【1】windows下安装OpenCV(4.3)+VS2017安装+opencv_contrib4.3.0配置

【2】Visual Studio 2017同时配置OpenCV2.4 以及OpenCV4.3

【3】opencv_contrib4.3.0库配置+opencv安装

【4】配置和运行Opencv常见的一些问题总结,以及bug解决。

【5】OpenCV2.4.9实现图像拼接与融合方法【SURF、SIFT、ORB、FAST、Harris角点 、stitch 】

【6】opencv采用映射技术实现鱼眼镜头校正和鱼眼镜头还原全景图。


本文出现的数据结果和码源见https://download.csdn.net/download/sinat_39620217/18269470

特别提示:opencv安装配置详情请参考相关文章【1】【2】【3】【4】

OpenCV2.4.9实现图像拼接与融合三种方法【SURF ORB stitch 】

  • 将四副分割图融合为一张完整的图片

  • 特征检测和特征匹配后:

  • 最后效果:

实现图像拼接具体步骤:

  1. 对每幅图进行特征点提取
  2. 对特征点进行匹配
  3. 进行图像配准
  4. 把图像拷贝到另一幅图像的特定位置
  5. 对重叠边界进行特殊处理

特征点提取

全景图像的拼接,主要是特征点的提取、特征匹配和图像融合;现在CV领域有很多特征点的定义,比如sift、surf、harris角点、ORB都是很有名的特征因子。为了提高拼接的速度和质量,本文在特征提取时采用了改进的特征提取的算法,基于可靠性检测的SURF 算法,特征点粗匹配时采用快速匹配法。

特征点定义:一幅图像中总存在着其独特的像素点,这些点我们可以认为就是这幅图像的特征

1. SURF(Speeded Up Robust Feature)

SURF算法是对图像进行不同尺寸空间的高斯卷积,然后进行特征点的提取,但是SURF对图像步骤进行了近似替换和简化,降低了计算量。不仅具有很好的鲁棒性和准确性,实时性也提高了不少。

  • 积分图像的生成

设L(x, y)为原图中的像素点,其积分图像的面积等于该点到原点的所有点的总和,计算公式如下:

由上式可得,任意一块矩形区域(下图:计算积分图像)的积分面积可由式得:                                        

  • 特征点的提取

SURF 算法在积分图像的基础上,利用 Hessian 检测子进行特征点的求取。

(1)计算像素点I(x,y)在尺度s上的Hessian矩阵

然后离散化上面的高斯函数。

(2)SURF 特征向量的生成

首先以特征点为中心确定边长为 20s 的正方形区域,然后再划分为4×4 的小区域,每个小区域又分为5×5个采样点,最后用Harr小波计算每个小区域垂直和水平方向的响应,并统计5×5个采样点的总的响应,推导出下面的矢量

可得4×4×4=64维的SURF 特征的描述符,完成预处理后,再进行特征匹配。

  1. //提取特征点
  2. SurfFeatureDetector Detector(2000);
  3. vector<KeyPoint> keyPoint1, keyPoint2;
  4. Detector.detect(image1, keyPoint1);
  5. Detector.detect(image2, keyPoint2);
  6. //特征点描述,为下边的特征点匹配做准备
  7. SurfDescriptorExtractor Descriptor;
  8. Mat imageDesc1, imageDesc2;
  9. Descriptor.compute(image1, keyPoint1, imageDesc1);
  10. Descriptor.compute(image2, keyPoint2, imageDesc2);
  11. FlannBasedMatcher matcher;
  12. vector<vector<DMatch> > matchePoints;
  13. vector<DMatch> GoodMatchePoints;
  14. vector<Mat> train_desc(1, imageDesc1);
  15. matcher.add(train_desc);
  16. matcher.train();
  17. matcher.knnMatch(imageDesc2, matchePoints, 2);
  18. cout << "total match points: " << matchePoints.size() << endl;
  19. // Lowe's algorithm,获取优秀匹配点
  20. for (int i = 0; i < matchePoints.size(); i++)
  21. {
  22. if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
  23. {
  24. GoodMatchePoints.push_back(matchePoints[i][0]);
  25. }
  26. }
  27. Mat first_match;
  28. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  29. //imshow("first_match ", first_match);
  30. imwrite("H:/opencv2.4/picture/first_match.jpg", first_match);

为了排除因为图像遮挡和背景混乱而产生的无匹配关系的关键点,SIFT的作者Lowe提出了比较最近邻距离与次近邻距离的SIFT匹配方式:取一幅图像中的一个SIFT关键点,并找出其与另一幅图像中欧式距离最近的前两个关键点,在这两个关键点中,如果最近的距离除以次近的距离得到的比率ratio少于某个阈值T,则接受这一对匹配点。因为对于错误匹配,由于特征空间的高维性,相似的距离可能有大量其他的错误匹配,从而它的ratio值比较高。显然降低这个比例阈值T,SIFT匹配点数目会减少,但更加稳定,反之亦然。

Lowe推荐ratio的阈值为0.8,但作者对大量任意存在尺度、旋转和亮度变化的两幅图片进行匹配,结果表明ratio取值在0.4~0.6之间最佳,小于0.4的很少有匹配点,大于0.6的则存在大量错误匹配点,所以建议ratio的取值原则如下:

ratio=0.4:对于准确度要求高的匹配;

ratio=0.6:对于匹配点数目要求比较多的匹配;

ratio=0.5:一般情况下。

最终融合效果如下:

完整代码代码如下:

  1. #include "highgui/highgui.hpp"
  2. #include "opencv2/nonfree/nonfree.hpp"
  3. #include "opencv2/legacy/legacy.hpp"
  4. #include <iostream>
  5. using namespace cv;
  6. using namespace std;
  7. void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
  8. typedef struct
  9. {
  10. Point2f left_top;
  11. Point2f left_bottom;
  12. Point2f right_top;
  13. Point2f right_bottom;
  14. }four_corners_t;
  15. four_corners_t corners;
  16. void CalcCorners(const Mat& H, const Mat& src)
  17. {
  18. double v2[] = { 0, 0, 1 };//左上角
  19. double v1[3];//变换后的坐标值
  20. Mat V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  21. Mat V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  22. V1 = H * V2;
  23. //左上角(0,0,1)
  24. cout << "V2: " << V2 << endl;
  25. cout << "V1: " << V1 << endl;
  26. corners.left_top.x = v1[0] / v1[2];
  27. corners.left_top.y = v1[1] / v1[2];
  28. //左下角(0,src.rows,1)
  29. v2[0] = 0;
  30. v2[1] = src.rows;
  31. v2[2] = 1;
  32. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  33. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  34. V1 = H * V2;
  35. corners.left_bottom.x = v1[0] / v1[2];
  36. corners.left_bottom.y = v1[1] / v1[2];
  37. //右上角(src.cols,0,1)
  38. v2[0] = src.cols;
  39. v2[1] = 0;
  40. v2[2] = 1;
  41. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  42. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  43. V1 = H * V2;
  44. corners.right_top.x = v1[0] / v1[2];
  45. corners.right_top.y = v1[1] / v1[2];
  46. //右下角(src.cols,src.rows,1)
  47. v2[0] = src.cols;
  48. v2[1] = src.rows;
  49. v2[2] = 1;
  50. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  51. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  52. V1 = H * V2;
  53. corners.right_bottom.x = v1[0] / v1[2];
  54. corners.right_bottom.y = v1[1] / v1[2];
  55. }
  56. int main(int argc, char *argv[])
  57. {
  58. Mat image01 = imread("H:/opencv2.4/picture/7.2.jpg", 1); //右图
  59. Mat image02 = imread("H:/opencv2.4/picture/7.1.jpg", 1); //左图
  60. //imshow("右", image01);
  61. //imshow("左", image02);
  62. //灰度图转换
  63. Mat image1, image2;
  64. cvtColor(image01, image1, CV_RGB2GRAY);
  65. cvtColor(image02, image2, CV_RGB2GRAY);
  66. //提取特征点
  67. SurfFeatureDetector Detector(2000);
  68. vector<KeyPoint> keyPoint1, keyPoint2;
  69. Detector.detect(image1, keyPoint1);
  70. Detector.detect(image2, keyPoint2);
  71. //特征点描述,为下边的特征点匹配做准备
  72. SurfDescriptorExtractor Descriptor;
  73. Mat imageDesc1, imageDesc2;
  74. Descriptor.compute(image1, keyPoint1, imageDesc1);
  75. Descriptor.compute(image2, keyPoint2, imageDesc2);
  76. FlannBasedMatcher matcher;
  77. vector<vector<DMatch> > matchePoints;
  78. vector<DMatch> GoodMatchePoints;
  79. vector<Mat> train_desc(1, imageDesc1);
  80. matcher.add(train_desc);
  81. matcher.train();
  82. matcher.knnMatch(imageDesc2, matchePoints, 2);
  83. cout << "total match points: " << matchePoints.size() << endl;
  84. // Lowe's algorithm,获取优秀匹配点
  85. for (int i = 0; i < matchePoints.size(); i++)
  86. {
  87. if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
  88. {
  89. GoodMatchePoints.push_back(matchePoints[i][0]);
  90. }
  91. }
  92. Mat first_match;
  93. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  94. //imshow("first_match ", first_match);
  95. imwrite("H:/opencv2.4/picture/first_match.jpg", first_match);
  96. vector<Point2f> imagePoints1, imagePoints2;
  97. for (int i = 0; i < GoodMatchePoints.size(); i++)
  98. {
  99. imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
  100. imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
  101. }
  102. //获取图像1到图像2的投影映射矩阵 尺寸为3*3
  103. Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
  104. 也可以使用getPerspectiveTransform方法获得透视变换矩阵,不过要求只能有4个点,效果稍差
  105. //Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
  106. cout << "变换矩阵为:\n" << homo << endl << endl; //输出映射矩阵
  107. //计算配准图的四个顶点坐标
  108. CalcCorners(homo, image01);
  109. cout << "left_top:" << corners.left_top << endl;
  110. cout << "left_bottom:" << corners.left_bottom << endl;
  111. cout << "right_top:" << corners.right_top << endl;
  112. cout << "right_bottom:" << corners.right_bottom << endl;
  113. //图像配准
  114. Mat imageTransform1, imageTransform2;
  115. warpPerspective(image01, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
  116. //warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
  117. //imshow("直接经过透视矩阵变换", imageTransform1);
  118. //imwrite("H:/opencv2.4/picture/trans1.jpg", imageTransform1);
  119. //创建拼接后的图,需提前计算图的大小
  120. int dst_width = imageTransform1.cols; //取最右点的长度为拼接图的长度
  121. int dst_height = image02.rows;
  122. Mat dst(dst_height, dst_width, CV_8UC3);
  123. dst.setTo(0);
  124. imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
  125. image02.copyTo(dst(Rect(0, 0, image02.cols, image02.rows)));
  126. imshow("b_dst", dst);
  127. OptimizeSeam(image02, imageTransform1, dst);
  128. imshow("拼接图片", dst);
  129. imwrite("H:/opencv2.4/picture/拼接图片7.jpg", dst);
  130. waitKey();
  131. return 0;
  132. }
  133. //优化两图的连接处,使得拼接自然
  134. void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
  135. {
  136. int start = MIN(corners.left_top.x, corners.left_bottom.x);//开始位置,即重叠区域的左边界
  137. double processWidth = img1.cols - start;//重叠区域的宽度
  138. int rows = dst.rows;
  139. int cols = img1.cols; //注意,是列数*通道数
  140. double alpha = 1;//img1中像素的权重
  141. for (int i = 0; i < rows; i++)
  142. {
  143. uchar* p = img1.ptr<uchar>(i); //获取第i行的首地址
  144. uchar* t = trans.ptr<uchar>(i);
  145. uchar* d = dst.ptr<uchar>(i);
  146. for (int j = start; j < cols; j++)
  147. {
  148. //如果遇到图像trans中无像素的黑点,则完全拷贝img1中的数据
  149. if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
  150. {
  151. alpha = 1;
  152. }
  153. else
  154. {
  155. //img1中像素的权重,与当前处理点距重叠区域左边界的距离成正比,实验证明,这种方法确实好
  156. alpha = (processWidth - (j - start)) / processWidth;
  157. }
  158. d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
  159. d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
  160. d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
  161. }
  162. }
  163. }

2. ORB(Oriented FAST and Rotated BRIEF)

ORB是Oriented FAST and Rotated BRIEF的简称,是BRIEF算法的改进版。ORB算法比SIFT算法快100倍,比SURF算法快10倍。在计算机视觉领域有种说法,ORB算法的综合性能在各种测评里较其他特征提取算法是最好的。

ORB算法是BRIEF算法的改进,那么我们先说一下BRIEF算法有什么缺点。
BRIEF的优点在于其速度,其缺点是:

  • 不具备旋转不变性
  • 对噪声敏感
  • 不具备尺度不变性

而ORB算法就是试图解决上述缺点中1和2提出的一种新概念。值得注意的是,ORB没有解决尺度不变性

  1. #include "highgui/highgui.hpp"
  2. #include "opencv2/nonfree/nonfree.hpp"
  3. #include "opencv2/legacy/legacy.hpp"
  4. #include <iostream>
  5. using namespace cv;
  6. using namespace std;
  7. void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
  8. typedef struct
  9. {
  10. Point2f left_top;
  11. Point2f left_bottom;
  12. Point2f right_top;
  13. Point2f right_bottom;
  14. }four_corners_t;
  15. four_corners_t corners;
// Computes where the four corners of `src` map to under homography H and
// stores them in the global `corners` struct (used later for panorama
// sizing and seam location).
void CalcCorners(const Mat& H, const Mat& src)
{
    double v2[] = { 0, 0, 1 };// top-left corner in homogeneous coordinates
    double v1[3];// receives the transformed coordinates
    Mat V2 = Mat(3, 1, CV_64FC1, v2); // column vector wrapping v2
    Mat V1 = Mat(3, 1, CV_64FC1, v1); // column vector wrapping v1
    V1 = H * V2;
    // NOTE(review): the reads of v1[] below rely on the MatExpr assignment
    // writing into V1's existing buffer (i.e. into v1[]) rather than
    // reallocating — confirm this holds for the OpenCV version in use.
    // top-left (0, 0, 1)
    cout << "V2: " << V2 << endl;
    cout << "V1: " << V1 << endl;
    corners.left_top.x = v1[0] / v1[2];   // de-homogenize: divide by w
    corners.left_top.y = v1[1] / v1[2];
    // bottom-left (0, src.rows, 1)
    v2[0] = 0;
    v2[1] = src.rows;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2); // re-wrap v2 as a column vector
    V1 = Mat(3, 1, CV_64FC1, v1); // re-wrap v1 so the product lands in v1[]
    V1 = H * V2;
    corners.left_bottom.x = v1[0] / v1[2];
    corners.left_bottom.y = v1[1] / v1[2];
    // top-right (src.cols, 0, 1)
    v2[0] = src.cols;
    v2[1] = 0;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2); // column vector
    V1 = Mat(3, 1, CV_64FC1, v1); // column vector
    V1 = H * V2;
    corners.right_top.x = v1[0] / v1[2];
    corners.right_top.y = v1[1] / v1[2];
    // bottom-right (src.cols, src.rows, 1)
    v2[0] = src.cols;
    v2[1] = src.rows;
    v2[2] = 1;
    V2 = Mat(3, 1, CV_64FC1, v2); // column vector
    V1 = Mat(3, 1, CV_64FC1, v1); // column vector
    V1 = H * V2;
    corners.right_bottom.x = v1[0] / v1[2];
    corners.right_bottom.y = v1[1] / v1[2];
}
  56. int main(int argc, char *argv[])
  57. {
  58. Mat image01 = imread("H:/opencv2.4/picture/1.2.jpg", 1); //右图
  59. Mat image02 = imread("H:/opencv2.4/picture/1.1.jpg", 1); //左图
  60. imshow("p2", image01);
  61. imshow("p1", image02);
  62. //灰度图转换
  63. Mat image1, image2;
  64. cvtColor(image01, image1, CV_RGB2GRAY);
  65. cvtColor(image02, image2, CV_RGB2GRAY);
  66. //提取特征点
  67. OrbFeatureDetector surfDetector(3000);
  68. vector<KeyPoint> keyPoint1, keyPoint2;
  69. surfDetector.detect(image1, keyPoint1);
  70. surfDetector.detect(image2, keyPoint2);
  71. //特征点描述,为下边的特征点匹配做准备
  72. OrbDescriptorExtractor SurfDescriptor;
  73. Mat imageDesc1, imageDesc2;
  74. SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
  75. SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
  76. flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
  77. vector<DMatch> GoodMatchePoints;
  78. Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
  79. flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());
  80. // Lowe's algorithm,获取优秀匹配点
  81. for (int i = 0; i < matchDistance.rows; i++)
  82. {
  83. if (matchDistance.at<float>(i, 0) < 0.4 * matchDistance.at<float>(i, 1))
  84. {
  85. DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
  86. GoodMatchePoints.push_back(dmatches);
  87. }
  88. }
  89. Mat first_match;
  90. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  91. imshow("first_match ", first_match);
  92. vector<Point2f> imagePoints1, imagePoints2;
  93. for (int i = 0; i < GoodMatchePoints.size(); i++)
  94. {
  95. imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
  96. imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
  97. }
  98. //获取图像1到图像2的投影映射矩阵 尺寸为3*3
  99. Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
  100. 也可以使用getPerspectiveTransform方法获得透视变换矩阵,不过要求只能有4个点,效果稍差
  101. //Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
  102. cout << "变换矩阵为:\n" << homo << endl << endl; //输出映射矩阵
  103. //计算配准图的四个顶点坐标
  104. CalcCorners(homo, image01);
  105. cout << "left_top:" << corners.left_top << endl;
  106. cout << "left_bottom:" << corners.left_bottom << endl;
  107. cout << "right_top:" << corners.right_top << endl;
  108. cout << "right_bottom:" << corners.right_bottom << endl;
  109. //图像配准
  110. Mat imageTransform1, imageTransform2;
  111. warpPerspective(image01, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
  112. //warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
  113. imshow("直接经过透视矩阵变换", imageTransform1);
  114. imwrite("trans1.jpg", imageTransform1);
  115. //创建拼接后的图,需提前计算图的大小
  116. int dst_width = imageTransform1.cols; //取最右点的长度为拼接图的长度
  117. int dst_height = image02.rows;
  118. Mat dst(dst_height, dst_width, CV_8UC3);
  119. dst.setTo(0);
  120. imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
  121. image02.copyTo(dst(Rect(0, 0, image02.cols, image02.rows)));
  122. imshow("b_dst", dst);
  123. OptimizeSeam(image02, imageTransform1, dst);
  124. imshow("拼接图", dst);
  125. imwrite("H:/opencv2.4/picture/拼接图.jpg", dst);
  126. waitKey();
  127. return 0;
  128. }
  129. //优化两图的连接处,使得拼接自然
  130. void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
  131. {
  132. int start = MIN(corners.left_top.x, corners.left_bottom.x);//开始位置,即重叠区域的左边界
  133. double processWidth = img1.cols - start;//重叠区域的宽度
  134. int rows = dst.rows;
  135. int cols = img1.cols; //注意,是列数*通道数
  136. double alpha = 1;//img1中像素的权重
  137. for (int i = 0; i < rows; i++)
  138. {
  139. uchar* p = img1.ptr<uchar>(i); //获取第i行的首地址
  140. uchar* t = trans.ptr<uchar>(i);
  141. uchar* d = dst.ptr<uchar>(i);
  142. for (int j = start; j < cols; j++)
  143. {
  144. //如果遇到图像trans中无像素的黑点,则完全拷贝img1中的数据
  145. if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
  146. {
  147. alpha = 1;
  148. }
  149. else
  150. {
  151. //img1中像素的权重,与当前处理点距重叠区域左边界的距离成正比,实验证明,这种方法确实好
  152. alpha = (processWidth - (j - start)) / processWidth;
  153. }
  154. d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
  155. d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
  156. d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
  157. }
  158. }
  159. }

效果:

3. stitch

opencv其实自己就有实现图像拼接的算法,opencv stitch算法到底选用了哪个算法作为其特征检测方式:

  1. #ifdef HAVE_OPENCV_NONFREE
  2. stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder());
  3. #else
  4. stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
  5. #endif

在源码createDefault函数中(默认设置),第一选择是SURF,第二选择才是ORB(没有NONFREE模块才选)

效果:

以下是码源:

  1. #include <iostream>
  2. #include <opencv2/core/core.hpp>
  3. #include <opencv2/highgui/highgui.hpp>
  4. #include <opencv2/imgproc/imgproc.hpp>
  5. #include <opencv2/stitching/stitcher.hpp>
  6. using namespace std;
  7. using namespace cv;
  8. bool try_use_gpu = false;
  9. vector<Mat> imgs;
  10. string result_name = "dst1.jpg";
  11. int main(int argc, char * argv[])
  12. {
  13. Mat img4 = imread("H:/opencv2.4/picture/4.4.jpg");//右图
  14. Mat img3 = imread("H:/opencv2.4/picture/4.3.jpg");//右图
  15. Mat img2 = imread("H:/opencv2.4/picture/4.2.jpg");//右图
  16. Mat img1 = imread("H:/opencv2.4/picture/4.1.jpg");//左图
  17. imshow("p4", img4);
  18. imshow("p3", img3);
  19. imshow("p2", img2);
  20. imshow("p1", img1);
  21. if (img1.empty() || img2.empty() || img3.empty() || img4.empty())
  22. {
  23. cout << "Can't read image" << endl;
  24. return -1;
  25. }
  26. imgs.push_back(img4);
  27. imgs.push_back(img3);
  28. imgs.push_back(img2);
  29. imgs.push_back(img1);
  30. Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
  31. // 使用stitch函数进行拼接
  32. Mat pano;
  33. Stitcher::Status status = stitcher.stitch(imgs, pano);
  34. if (status != Stitcher::OK)
  35. {
  36. cout << "Can't stitch images, error code = " << int(status) << endl;
  37. return -1;
  38. }
  39. imwrite(result_name, pano);
  40. Mat pano2 = pano.clone();
  41. // 显示源图像,和结果图像
  42. imshow("全景图像", pano);
  43. imwrite("H:/opencv2.4/picture/拼接图4.jpg", pano);
  44. if (waitKey() == 27)
  45. return 0;
  46. }

结论:

个人推荐stitch方法,集成封装调用简单,关键效果还好。

如有问题请参考文章开头的相关文链接

参考博客:OpenCV探索之路(二十四)图像拼接和图像融合技术

                  OpenCV探索之路(二十三):特征检测和特征匹配方法汇总【SURF、SIFT、ORB、FAST、Harris角点】

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/我家小花儿/article/detail/81242
推荐阅读
相关标签
  

闽ICP备14008679号