赞
踩
• 了解OpenCV中实现的SIFT, SURF, ORB等特征检测器的用法,并进行实验。将检测到的特征点用不同大小的圆表示,比较不同方法的效率、效果等。
• 了解OpenCV的特征匹配方法,并进行实验。
SIFT算法的过程实质是在不同尺度空间上查找特征点(关键点),用128维方向向量的方式对特征点进行描述,最后通过对比描述向量实现目标匹配。
概括起来主要有三大步骤:
1、 提取关键点;
2、 对关键点附加详细的信息(局部特征)也就是所谓的描述器;
3、 通过两方特征点(附带上特征向量的关键点)的两两比较找出相互匹配的若干对特征点,建立物体间的对应关系。
运行结果为:
Opencv中Surf算子提取特征,生成特征描述子,匹配特征的流程跟Sift是完全一致的。
这里要着重说一下绘制使用的drawKeypoints方法:
void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, CV_OUT Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
第一个参数image:原始图像,可以是三通道或单通道图像;
第二个参数keypoints:特征点向量,向量内每一个元素是一个
KeyPoint对象,包含了特征点的各种属性信息;
第三个参数outImage:特征点绘制的画布图像,可以是原图像;
第四个参数color:绘制的特征点的颜色信息,默认绘制的是随机彩色;
第五个参数flags:特征点的绘制模式,其实就是设置特征点的哪些信息需要绘制,哪些不需要绘制。
而在进行特征点匹配时,当仅使用筛选出的最优匹配点进行匹配的时候,意味着会有很多非最优的特征点不会被匹配,这时候可以设置flags=DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ,意味着那些不是最优特征点的点不会被绘制出来。
ORB - (Oriented Fast and Rotated BRIEF)算法是基于FAST特征检测与BRIEF特征描述子匹配实现,相比BRIEF算法中依靠随机方式获取二值点对,ORB通过FAST方法,FAST方式寻找候选特征点方式是假设灰度图像像素点A周围的像素存在连续大于或者小于A的灰度值,选择任意一个像素点P,假设半径为3,周围16个像素表示如下:
假设存在连续N个点满足
则像素点P被标记为候选特征点、通常N取值为9、12,上图N=9。 为了简化计算,我们可以只计算1、9、5、13四个点,至少其中三个点满足上述不等式条件,即可将P视为候选点。
ORB比BRIEF方式更加合理,同时具有旋转不变性特征与噪声抑制效果,ORB实现选择不变性特征,是通过对BRIEF描述子的特征点区域的计算得到角度方向参数。
完整的ORB特征描述子算法流程图如下:
运行结果:
此外,需要注意的是在使用这些方法之前,要对opencv进行扩展,使其能够调用nonfree.hpp、legacy.hpp以及features2d.hpp,而扩展方法是使用cmake对从官网上下载的扩展包进行安装,具体教程参见
https://blog.csdn.net/streamchuanxi/article/details/51044929
// CVE9sift.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#include "pch.h"
#include <iostream>
#include "highgui/highgui.hpp"
//#include "opencv2/nonfree/nonfree.hpp"
//#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;
//SIFT算法
int main(int argc, char *argv[])
{
Mat image01 = imread(argv[1]);
Mat image02 = imread(argv[2]);
Mat image1, image2;
GaussianBlur(image01, image1, Size(3, 3), 0.5);
GaussianBlur(image02, image2, Size(3, 3), 0.5);
//提取特征点
SiftFeatureDetector siftDetector(30); //限定提起前15个特征点
vector<KeyPoint> keyPoint1, keyPoint2;
siftDetector.detect(image1, keyPoint1);
siftDetector.detect(image2, keyPoint2);
//绘制特征点
drawKeypoints(image1, keyPoint1, image1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(image2, keyPoint2, image2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
namedWindow("KeyPoints of image1", 0);
namedWindow("KeyPoints of image2", 0);
imshow("KeyPoints of image1", image1);
imshow("KeyPoints of image2", image2);
//特征点描述,为下边的特征点匹配做准备
SiftDescriptorExtractor siftDescriptor;
Mat imageDesc1, imageDesc2;
siftDescriptor.compute(image1, keyPoint1, imageDesc1);
siftDescriptor.compute(image2, keyPoint2, imageDesc2);
//特征点匹配并显示匹配结果
BruteForceMatcher<L2<float>> matcher;
vector<DMatch> matchePoints;
matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
Mat imageOutput;
drawMatches(image01, keyPoint1, image02, keyPoint2, matchePoints, imageOutput);
namedWindow("Mathch Points", 0);
imshow("Mathch Points", imageOutput);
waitKey();
return 0;
}
//SURF算法
int main(int argc, char *argv[])
{
Mat image01 = imread(argv[1]);
Mat image02 = imread(argv[2]);
Mat image1, image2;
image1 = image01.clone();
image2 = image02.clone();
//提取特征点
SurfFeatureDetector surfDetector(4000); //hessianThreshold,海塞矩阵阈值,并不是限定特征点的个数
vector<KeyPoint> keyPoint1, keyPoint2;
surfDetector.detect(image1, keyPoint1);
surfDetector.detect(image2, keyPoint2);
//绘制特征点
drawKeypoints(image1, keyPoint1, image1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
drawKeypoints(image2, keyPoint2, image2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imshow("KeyPoints of image1", image1);
imshow("KeyPoints of image2", image2);
//特征点描述,为下边的特征点匹配做准备
SurfDescriptorExtractor SurfDescriptor;
Mat imageDesc1, imageDesc2;
SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
//特征点匹配并显示匹配结果
//BruteForceMatcher<L2<float>> matcher;
FlannBasedMatcher matcher;
vector<DMatch> matchePoints;
matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
//提取强特征点
double minMatch = 1;
double maxMatch = 0;
for (int i = 0; i < matchePoints.size(); i++)
{
//匹配值最大最小值获取
minMatch = minMatch > matchePoints[i].distance ? matchePoints[i].distance : minMatch;
maxMatch = maxMatch < matchePoints[i].distance ? matchePoints[i].distance : maxMatch;
}
//最大最小值输出
cout << "最佳匹配值是: " << minMatch << endl;
cout << "最差匹配值是: " << maxMatch << endl;
//获取排在前边的几个最优匹配结果
vector<DMatch> goodMatchePoints;
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i].distance < minMatch + (maxMatch - minMatch) / 2)
{
goodMatchePoints.push_back(matchePoints[i]);
}
}
//绘制最优匹配点
Mat imageOutput;
drawMatches(image01, keyPoint1, image02, keyPoint2, goodMatchePoints, imageOutput, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Mathch Points", imageOutput);
waitKey();
return 0;
}
//ORB
#include <iostream>
#include <fstream>
#include <sstream>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts);
bool refineMatchesWithHomography(
const std::vector<cv::KeyPoint>& queryKeypoints,
const std::vector<cv::KeyPoint>& trainKeypoints,
float reprojectionThreshold, std::vector<cv::DMatch>& matches,
cv::Mat& homography);
/** @function main */
/*int main(int argc, char* argv[]) {
/************************************************************************/
/* 特征点检测,特征提取,特征匹配,计算投影变换 */
/************************************************************************/
// 读取图片
Mat img1Ori = imread("1.jpg", 0);
Mat img2Ori = imread("2.jpg", 0);
// 缩小尺度
Mat img1, img2;
resize(img1Ori, img1, Size(img1Ori.cols / 4, img1Ori.cols / 4));
resize(img2Ori, img2, Size(img2Ori.cols / 4, img2Ori.cols / 4));
cv::Ptr<cv::FeatureDetector> detector = new cv::ORB(1000); // 创建orb特征点检测
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK(true, true); // 用Freak特征来描述特征点
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, // 特征匹配,计算Hamming距离
true);
vector<KeyPoint> keypoints1; // 用于保存图中的特征点
vector<KeyPoint> keypoints2;
Mat descriptors1; // 用于保存图中的特征点的特征描述
Mat descriptors2;
detector->detect(img1, keypoints1); // 检测第一张图中的特征点
detector->detect(img2, keypoints2);
extractor->compute(img1, keypoints1, descriptors1); // 计算图中特征点位置的特征描述
extractor->compute(img2, keypoints2, descriptors2);
vector<DMatch> matches;
matcher->match(descriptors1, descriptors2, matches);
Mat imResultOri;
drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri,
CV_RGB(0, 255, 0), CV_RGB(0, 255, 0));
cout << "[Info] # of matches : " << matches.size() << endl;
Mat matHomo;
refineMatchesWithHomography(keypoints1, keypoints2, 3, matches, matHomo);
cout << "[Info] Homography T : " << matHomo << endl;
cout << "[Info] # of matches : " << matches.size() << endl;
Mat imResult;
drawMatches(img1, keypoints1, img2, keypoints2, matches, imResult,
CV_RGB(0, 255, 0), CV_RGB(0, 255, 0));
// 计算光流
vector<uchar> vstatus;
vector<float> verrs;
vector<Point2f> points1;
vector<Point2f> points2;
KeyPointsToPoints(keypoints1, points1);
calcOpticalFlowPyrLK(img1, img2, points1, points2, vstatus, verrs);
Mat imOFKL = img1.clone();
for (int i = 0; i < vstatus.size(); i++) {
if (vstatus[i] && verrs[i] < 15) {
line(imOFKL, points1[i], points2[i], CV_RGB(255, 255, 255), 1, 8, 0);
circle(imOFKL, points2[i], 3, CV_RGB(255, 255, 255), 1, 8, 0);
}
}
imwrite("opt.jpg", imOFKL);
imwrite("re1.jpg", imResultOri);
imwrite("re2.jpg", imResult);
imshow("Optical Flow", imOFKL);
imshow("origin matches", imResultOri);
imshow("refined matches", imResult);
waitKey();
return -1;
}
bool refineMatchesWithHomography(
    const std::vector<cv::KeyPoint>& queryKeypoints,
    const std::vector<cv::KeyPoint>& trainKeypoints,
    float reprojectionThreshold, std::vector<cv::DMatch>& matches,
    cv::Mat& homography) {
    // Estimate a RANSAC homography from the matched keypoints and prune
    // `matches` in place down to the RANSAC inliers.
    // Returns true when at least `minNumberMatchesAllowed` inliers remain;
    // `homography` receives the estimated 3x3 transform.
    const size_t minNumberMatchesAllowed = 8; // size_t avoids signed/unsigned compare
    if (matches.size() < minNumberMatchesAllowed)
        return false;
    // Prepare point arrays for cv::findHomography.
    std::vector<cv::Point2f> srcPoints(matches.size());
    std::vector<cv::Point2f> dstPoints(matches.size());
    for (size_t i = 0; i < matches.size(); i++) {
        srcPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
        dstPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
    }
    // Find the homography and the inlier mask. CV_RANSAC is the correct
    // flag for findHomography; the original passed CV_FM_RANSAC, which
    // belongs to findFundamentalMat and only worked because both equal 8.
    std::vector<unsigned char> inliersMask(srcPoints.size());
    homography = cv::findHomography(srcPoints, dstPoints, CV_RANSAC,
        reprojectionThreshold, inliersMask);
    // Keep only the matches flagged as inliers.
    std::vector<cv::DMatch> inliers;
    for (size_t i = 0; i < inliersMask.size(); i++) {
        if (inliersMask[i])
            inliers.push_back(matches[i]);
    }
    matches.swap(inliers);
    // >= mirrors the entry check above, so exactly `minNumberMatchesAllowed`
    // inliers is accepted (the original's `>` rejected that boundary case).
    return matches.size() >= minNumberMatchesAllowed;
}
void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts) {
    // Append the pixel coordinate of every keypoint to `pts`.
    // `kpts` stays pass-by-value to match the forward declaration earlier
    // in the file, although a const reference would avoid the copy.
    pts.reserve(pts.size() + kpts.size()); // single allocation instead of repeated growth
    for (size_t i = 0; i < kpts.size(); i++) {
        pts.push_back(kpts[i].pt);
    }
}
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。