
OpenCV: Seamless Image Stitching into a Panorama with SIFT and SURF Features


 

The SIFT and SURF algorithms follow the same procedure for stitching two images, which breaks down into four main steps (the transform used in step 3 is written out right after this list):

1. Extract and describe feature points.
2. Match the feature points to find the positions of corresponding points in the two images.
3. From the matched pairs, estimate a transform matrix and apply it to image 1 to produce its projection into image 2's frame.
4. Stitch image 2 onto the projected image to complete the panorama.
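
The "transform matrix" in step 3 is a 3×3 homography $H$ estimated from the matched point pairs. Written out in homogeneous coordinates, a point $(x, y)$ of image 1 maps into image 2's frame as

$$
\begin{pmatrix} x' \\ y' \\ w' \end{pmatrix} = H \begin{pmatrix} x \\ y \\ 1 \end{pmatrix}, \qquad (x_t,\, y_t) = \left( \frac{x'}{w'},\, \frac{y'}{w'} \right),
$$

which is exactly the perspective divide performed by getTransformPoint() in the listing below.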


Steps 1 through 3 need no special discussion; the interesting part is the stitching in step 4. Here we use a fairly simple stitching scheme:

1. Find the position of the strongest match between image 1 and image 2.
2. Apply the mapping matrix to obtain the coordinates at which image 1's strongest match point lands in the new image.
3. Join the two images at that projected coordinate in the new image: everything to the left of that point comes entirely from image 1, everything to the right entirely from image 2.

Whether the stitch succeeds therefore depends entirely on the chosen feature points: if a mismatched pair is selected, the stitch is guaranteed to fail, so the strongest match, i.e. the first one after sorting, is used as the stitching point. (The cross-fade the code applies around this seam is written out after this passage.)
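
Strictly speaking, the listing below softens the hard cut described above: in the overlap region to the left of the seam, the two images are cross-faded column by column. With overlap width $W$ and column index $j$, every pixel is blended as

$$
I(i,j) = (1 - w)\,I_1(i,j) + w\,I_2(i,j), \qquad w = \frac{j}{W},
$$

so the result fades linearly from pure image 1 at the left edge of the overlap to pure image 2 at the seam, eliminating a visible jump. This is the blend computed in the nested loops of the listing.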

 

// siftandsurf.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <vector>
#include <algorithm>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;

// Compute where a source-image point lands in the target image after the given matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix);

int main(int argc, char *argv[])
{
    // Writing into argv[] as the original post did is undefined behavior when argc < 3,
    // so the input paths are plain constants instead
    const char *file1 = "拼接图像1.jpg";
    const char *file2 = "拼接图像2.jpg";
    Mat image01 = imread(file1);
    Mat image02 = imread(file2);
    imshow("拼接图像1", image01);
    imshow("拼接图像2", image02);

    // Convert to grayscale (imread loads BGR, so use CV_BGR2GRAY rather than CV_RGB2GRAY)
    Mat image1, image2;
    cvtColor(image01, image1, CV_BGR2GRAY);
    cvtColor(image02, image2, CV_BGR2GRAY);

    // Detect keypoints; the parameter is the number of best features to retain
    // (the original comment called it a "Hessian threshold", but that parameter belongs to SURF, not SIFT)
    SiftFeatureDetector siftDetector(800);
    vector<KeyPoint> keyPoint1, keyPoint2;
    siftDetector.detect(image1, keyPoint1);
    siftDetector.detect(image2, keyPoint2);

    // Describe the keypoints in preparation for matching
    SiftDescriptorExtractor siftDescriptor;
    Mat imageDesc1, imageDesc2;
    siftDescriptor.compute(image1, keyPoint1, imageDesc1);
    siftDescriptor.compute(image2, keyPoint2, imageDesc2);

    // Match the descriptors and keep the best pairs
    FlannBasedMatcher matcher;
    vector<DMatch> matchPoints;
    matcher.match(imageDesc1, imageDesc2, matchPoints, Mat());
    sort(matchPoints.begin(), matchPoints.end());  // DMatch sorts by ascending match distance

    // Collect the strongest matches (guarding against having fewer than 10)
    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i < 10 && i < (int)matchPoints.size(); i++)
    {
        imagePoints1.push_back(keyPoint1[matchPoints[i].queryIdx].pt);
        imagePoints2.push_back(keyPoint2[matchPoints[i].trainIdx].pt);
    }

    // Estimate the 3x3 homography that maps image 1 onto image 2
    Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
    // Pre-translate by image01.cols so the warped image stays inside the output canvas
    Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
    Mat adjustHomo = adjustMat * homo;

    // Locate the strongest match in the source image, in the warped image, and in image 2;
    // these positions anchor the seam between the two images
    Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
    originalLinkPoint = keyPoint1[matchPoints[0].queryIdx].pt;
    targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
    basedImagePoint = keyPoint2[matchPoints[0].trainIdx].pt;

    // Image registration (the extra 110 columns are slack for the warped image)
    Mat imageTransform1;
    warpPerspective(image01, imageTransform1, adjustHomo, Size(image02.cols + image01.cols + 110, image02.rows));

    // Cross-fade the overlap region to the left of the strongest match so the seam
    // transitions smoothly instead of jumping abruptly
    Mat image1Overlap, image2Overlap;  // overlapping parts of image 1 and image 2
    image1Overlap = imageTransform1(Rect(Point(cvRound(targetLinkPoint.x - basedImagePoint.x), 0),
                                         Point(cvRound(targetLinkPoint.x), image02.rows)));
    image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
    Mat image1ROICopy = image1Overlap.clone();  // keep a copy of image 1's overlap
    for (int i = 0; i < image1Overlap.rows; i++)
    {
        for (int j = 0; j < image1Overlap.cols; j++)
        {
            // Blend weight grows linearly with the distance across the overlap
            double weight = (double)j / image1Overlap.cols;
            image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[0] + weight * image2Overlap.at<Vec3b>(i, j)[0];
            image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[1] + weight * image2Overlap.at<Vec3b>(i, j)[1];
            image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[2] + weight * image2Overlap.at<Vec3b>(i, j)[2];
        }
    }
    // The non-overlapping part of image 2 is copied straight onto the canvas
    Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));
    ROIMat.copyTo(Mat(imageTransform1, Rect(cvRound(targetLinkPoint.x), 0, ROIMat.cols, image02.rows)));

    namedWindow("拼接结果", 0);
    imshow("拼接结果", imageTransform1);
    imwrite("D:\\拼接结果.jpg", imageTransform1);
    waitKey();
    return 0;
}

// Compute where a source-image point lands in the target image after the given matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix)
{
    Mat originalP, targetP;
    originalP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
    targetP = transformMatrix * originalP;
    // Perspective divide: normalize by the homogeneous coordinate
    float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
    float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
    return Point2f(x, y);
}
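
The listing targets OpenCV 2.4.x, where SIFT sits in the nonfree module and legacy.hpp is still available. As a rough sketch only (this is not from the original post; the variable names are reused from the listing above), the detection and description step on OpenCV 4.4+ would look like this, since SIFT has since moved into the main features2d module:

    // Sketch for OpenCV 4.4+ (assumption; not part of the original post)
    #include <opencv2/features2d.hpp>

    cv::Ptr<cv::SIFT> sift = cv::SIFT::create(800);  // keep the 800 strongest features
    std::vector<cv::KeyPoint> keyPoint1;
    cv::Mat imageDesc1;
    sift->detectAndCompute(image1, cv::noArray(), keyPoint1, imageDesc1);

On those versions the color-conversion flag becomes cv::COLOR_BGR2GRAY and the findHomography flag becomes cv::RANSAC; the rest of the pipeline is unchanged.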

Reposted from: https://blog.csdn.net/dcrmg/article/details/52629856

 

Image Stitching (11): real-time stitching from two cameras + stitching_detailed

https://blog.csdn.net/czl389/article/details/60769026

 

OpenCV study notes (38): image template matching with matchTemplate()

https://blog.csdn.net/keith_bb/article/details/70050080

 
