当前位置:   article > 正文

SLAM十四讲-ch7(2)-位姿估计(包含2d-2d、3d-2d、3d-3d、以及三角化实现代码的注释)_3d-2d位姿估计

3d-2d位姿估计

一、利用2d-2d进行位姿估计

源码如下:

//
//
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace std;
using namespace cv;
//声明特征匹配函数
void find_feature_matches(const Mat &img1,const Mat &img2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches);

//声明位姿估计函数
void pose_estimated_2d2d( std::vector<KeyPoint> keypoints_1,
                          std::vector<KeyPoint> keypoints_2,
                          std::vector<DMatch> matches,
                          Mat &R, Mat &t);

//声明 像素坐标转归一化坐标 p99页 公式5.5 变换一下就好啦!
//这里的函数我改了一下,书上的只是得到二维点,主程序中,还把二维点转换为三维点,我觉得多此一举,一个函数实现不就好了么
//下面是我实现的坐标变换函数 返回的是Mat类型,其实照片也是矩阵
Mat pixel2cam(const Point2d &p,const Mat &K);//p是像素坐标,K是相机内参矩阵

int main(int argc,char** argv)
{
    //运行可执行文件+图片1的路径+图片2的路径
    if(argc!=3)
    {
        cout<<"usage: ./pose_estimated_2d2d img1 img2"<<endl;
        return 1;
    }
    /*argv[1]="/home/nnz/data/slam_pratice/pratice_pose_estimated/1.png";
    argv[2]="/home/nnz/data/slam_pratice/pratice_pose_estimated/2.png";*/
    //读取图片
    Mat img1=imread(argv[1],CV_LOAD_IMAGE_COLOR);//彩色图
    Mat img2=imread(argv[2],CV_LOAD_IMAGE_COLOR);
    //定义要用到的变量啊,比如 keypoints_1,keypoints_2,matches
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    //计算两个keypoints的matches
    find_feature_matches(img1,img2,keypoints_1,keypoints_2,matches);
    //这样就得到matche啦!
    cout << "一共找到了" << matches.size() << "组匹配点" << endl;
    //重点来了
    //估计两组图片之间的运动
    Mat R,t;//定义一下旋转和平移
    pose_estimated_2d2d(keypoints_1,keypoints_2,matches,R,t);
    //这样就算出R ,t啦
    //当然我们还需要验证一下对极约束 准不准了
    //验证 E=t^R*scale;
    //t_x是t的反对称矩阵
    Mat t_x =
            (Mat_<double>(3, 3) << 0, -t.at<double>(2, 0), t.at<double>(1, 0),
                    t.at<double>(2, 0), 0, -t.at<double>(0, 0),
                    -t.at<double>(1, 0), t.at<double>(0, 0), 0);

    cout << "t^R=" << endl << t_x * R << endl;
    //验证对极约束 对应P167 页公式7.10 x2TEx1=0  E=t^*R
    //定义内参矩阵
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1,
                                                 0, 521.0,249.7,
                                                 0, 0, 1);
    for(DMatch m: matches)
    {
        Mat x1=pixel2cam(keypoints_1[m.queryIdx].pt,K);
        Mat x2=pixel2cam(keypoints_2[m.queryIdx].pt,K);
        Mat d=x2.t()*t_x*R*x1;//若d很趋近于零,则说明没啥问题
        cout << "epipolar constraint = " << d << endl;
    }
    return 0;

}
//最复杂的地方来咯
//函数的实现
//匹配函数
void find_feature_matches(const Mat &img1,const Mat &img2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches)
{
    //先初始化,创建咱们要用到的对象
    //定义两个关键点对应的描述子,同时创建检测keypoints的检测器
    Mat descriptors_1, descriptors_2;
    vector<DMatch> match;//暂时存放匹配点,因为后面还要进行筛选
    Ptr<FeatureDetector> detector=ORB::create();//keypoints检测器
    Ptr<DescriptorExtractor> descriptor =ORB::create();//描述子提取器
    Ptr<DescriptorMatcher> matcher =DescriptorMatcher::create(
            "BruteForce-Hamming");//描述子匹配器(方法:暴力匹配)
    //step1:找到角点
    detector->detect(img1,keypoints_1);//得到图1的关键点(keypoints_1)
    detector->detect(img2,keypoints_2);//得到图2的关键点(keypoints_2)
    //step2:计算关键点所对应的描述子
    descriptor->compute(img1,keypoints_1,descriptors_1);//得到descriptors_1
    descriptor->compute(img2,keypoints_2,descriptors_2);//得到descriptors_2
    //step3:进行暴力匹配
    matcher->match(descriptors_1,descriptors_2,match);
    //step4:对match进行筛选,得到好的匹配点,把好的匹配点放在matches中
    //先定义两个变量,一个是最大距离,一个是最小距离
    double min_dist=1000, max_dist=0;
    //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
    for(int i=0;i<descriptors_1.rows;i++) //描述子本质是由 0,1 组成的向量
    {
        double dist =match[i].distance;
        //还记得orb_cv中如何找最大距离和最远距离的吗,那里面的程序是用下面的函数实现的,下面的函数得到的是pair first 里面是最小距离,second里面是最大距离
        // minmax_element(matches.begin(),matched.end,[](const DMatch &m1,const DMatch &m2){return m1.distance<m2.distance;});
        //本程序用下面的if语句得到距离
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);
    //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.
    // 但有时候最小距离会非常小,设置一个经验值30作为下限.
    for(int i=0;i<descriptors_1.rows;i++)
    {
        if(match[i].distance<=max(2*min_dist,30.0))
        {
            matches.push_back(match[i]);
        }
    }
}
//像素到归一化坐标
Mat pixel2cam(const Point2d &p,const Mat &K)//p是像素坐标,K是相机内参矩阵
{
    Mat t;
    t=(Mat_<double>(3,1)<<(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
            (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1),1);
    return t;
}

//实现位姿估计函数
void pose_estimated_2d2d( std::vector<KeyPoint> keypoints_1,
                          std::vector<KeyPoint> keypoints_2,
                          std::vector<DMatch> matches,
                          Mat &R, Mat &t)
{
    // 相机内参来源于 TUM Freiburg2
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1,
                                                 0, 521.0, 249.7,
                                                 0, 0, 1);

    //咱们要把关键点的像素点拿出来 ,定义两个容器接受两个图关键点的像素位置
    vector<Point2d> points1;
    vector<Point2d> points2;
    for(int i=0;i<(int)matches.size();i++)
    {
        //queryIdx是图1中匹配的关键点的对应编号
        //trainIdx是图2中匹配的关键点的对应编号
        //pt可以把关键点的像素位置取出来
        points1.push_back(keypoints_1[matches[i].queryIdx].pt);
        points2.push_back(keypoints_2[matches[i].trainIdx].pt);
    }
    //-- 计算基础矩阵
    Mat fundamental_matrix;
    fundamental_matrix = findFundamentalMat(points1, points2, CV_FM_8POINT);
    cout << "fundamental_matrix is " << endl << fundamental_matrix << endl;


    //计算本质矩阵 E
    //把cx ,cy放进一个向量里面 =相机的光心
    Point2d principal_point(325.1, 249.7);
    double focal_length=521;//相机的焦距
    //之所以取上面的principal_point、focal_length是因为计算本质矩阵的函数要用
    //得到本质矩阵essential_matrix
    Mat essential_matrix=findEssentialMat(points1,points2,focal_length,principal_point);
    cout<<" essential_matrix =\n"<< essential_matrix <<endl;

    //-- 计算单应矩阵 homography_matrix
    //-- 但是本例中场景不是平面,单应矩阵意义不大
    Mat homography_matrix;
    homography_matrix = findHomography(points1, points2, RANSAC, 3);
    cout << "homography_matrix is " << endl << homography_matrix << endl;
    //通过本质矩阵恢复咱们的 R  t
    recoverPose(essential_matrix,points1,points2,R,t,focal_length,principal_point);
    //输出咱们的 R t
    cout<<" 得到图1到图2 的位姿变换:\n "<<endl;
    cout<<"R= \n"<< R <<endl;
    cout<<"t= \n"<< t <<endl;
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183

运行结果如下:

[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000 
-- Min dist : 7.000000 
一共找到了81组匹配点
fundamental_matrix is 
[5.435453065936354e-06, 0.0001366043242989653, -0.02140890086948144;
 -0.0001321142229824715, 2.339475702778067e-05, -0.006332906454396007;
 0.02107630352202796, -0.003666833952953114, 0.9999999999999999]
 essential_matrix =
[0.01724015832721706, 0.328054335794133, 0.0473747783144249;
 -0.3243229585962962, 0.03292958445202408, -0.6262554366073018;
 -0.005885857752320116, 0.6253830041920333, 0.0153167864909267]
homography_matrix is 
[0.9131751791807657, -0.1092435315823255, 29.95860009984688;
 0.02223560352312278, 0.9826008005062553, 6.508910839573075;
 -0.0001001560381022834, 0.000103777943639824, 0.9999999999999999]
 得到图1到图2 的位姿变换:
 
R= 
[0.9985534106102478, -0.05339308467584758, 0.006345444621108698;
 0.05321959721496264, 0.9982715997131746, 0.02492965459802003;
 -0.007665548311697523, -0.02455588961730239, 0.9996690690694516]
t= 
[-0.8829934995085544;
 -0.05539655431450562;
 0.4661048182498402]
t^R=
[-0.02438126572381045, -0.4639388908753606, -0.06699805400667856;
 0.4586619266358499, -0.04656946493536188, 0.8856589319599302;
 0.008323859859529846, -0.8844251262060034, -0.0216612071874423]
epipolar constraint = [0.03476355922338803]
epipolar constraint = [-0.1785274785583587]
epipolar constraint = [-0.1592252171336243]
epipolar constraint = [-0.1005706392865832]
epipolar constraint = [-0.1082908847178009]
epipolar constraint = [0.1850680419367683]
epipolar constraint = [-0.08433455123502798]
epipolar constraint = [-0.1987242522626735]
epipolar constraint = [0.077557129085982]
epipolar constraint = [-0.4432147691759752]
epipolar constraint = [0.03967488105974747]
epipolar constraint = [0.01692177768085537]
epipolar constraint = [-0.08992370621510326]
epipolar constraint = [-0.01854869039504867]
epipolar constraint = [0.04540455021438544]
epipolar constraint = [-0.0535232890689569]
epipolar constraint = [0.004049975808890993]
epipolar constraint = [0.2379528025207592]
epipolar constraint = [-0.1105775448810121]
epipolar constraint = [-0.1915542644648525]
epipolar constraint = [0.1198819084857832]
epipolar constraint = [-0.2078623305807761]
epipolar constraint = [-0.00782819336102007]
epipolar constraint = [0.1738957236709565]
epipolar constraint = [-0.1503510738002913]
epipolar constraint = [-0.03942101168583886]
epipolar constraint = [-0.1189459874887444]
epipolar constraint = [-0.08267346651883575]
epipolar constraint = [-0.07689699672043672]
epipolar constraint = [-0.06522721403249562]
epipolar constraint = [0.2295944244309724]
epipolar constraint = [0.03364594342311077]
epipolar constraint = [-0.1168461424736572]
epipolar constraint = [-0.1534773056580732]
epipolar constraint = [-0.3157541678742523]
epipolar constraint = [-0.159082742412996]
epipolar constraint = [-0.1788954148676432]
epipolar constraint = [-0.1064364695825235]
epipolar constraint = [0.2523848156765162]
epipolar constraint = [-0.2066277709399551]
epipolar constraint = [0.03661172055801167]
epipolar constraint = [-0.1415911736663649]
epipolar constraint = [0.09283991927947977]
epipolar constraint = [-0.2528116966463155]
epipolar constraint = [-0.09716572286347072]
epipolar constraint = [-0.09615485914891105]
epipolar constraint = [-0.3211934222061693]
epipolar constraint = [-0.1864497340313232]
epipolar constraint = [-0.1126460548244069]
epipolar constraint = [-0.04424085979735514]
epipolar constraint = [-0.1769347002087819]
epipolar constraint = [0.0001236409258935089]
epipolar constraint = [-0.1746865303036559]
epipolar constraint = [0.1196164533808175]
epipolar constraint = [-0.1272960963964681]
epipolar constraint = [-0.06012226615704597]
epipolar constraint = [-0.1142387515391697]
epipolar constraint = [-0.1390209376267806]
epipolar constraint = [-0.02539053748601561]
epipolar constraint = [-0.03586341329167451]
epipolar constraint = [-0.04649215151754624]
epipolar constraint = [0.2122475297481123]
epipolar constraint = [-0.247134089433402]
epipolar constraint = [-0.04037646993035322]
epipolar constraint = [0.02692447364606575]
epipolar constraint = [0.02671342319157689]
epipolar constraint = [0.0758736255549073]
epipolar constraint = [-0.2380995942323509]
epipolar constraint = [-0.04705184948187023]
epipolar constraint = [-0.08197359650419991]
epipolar constraint = [-0.2225998320298153]
epipolar constraint = [-0.1544953793737884]
epipolar constraint = [0.01227925071372281]
epipolar constraint = [-0.02305122735209618]
epipolar constraint = [-0.0861848544279937]
epipolar constraint = [0.1542593013301575]
epipolar constraint = [-0.03688829520493091]
epipolar constraint = [-0.2132705615189206]
epipolar constraint = [-0.07502229322147005]
epipolar constraint = [0.1202800678977438]
epipolar constraint = [-0.3493015689499778]

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112

二、在上面的2d2d位姿估计的基础上,利用三角化来获得特征匹配点的深度信息(通过画图,验证三维点与特征点的重投影关系)

源码如下:

//
// Created by wenbo on 2020/11/3.
//
//该程序在pose_estimated_2d2d的基础上加上三角化,以求得匹配的特征点在世界下的三维点
//
//
#include <iostream>
#include <opencv2/opencv.hpp>
// #include "extra.h" // used in opencv2
using namespace std;
using namespace cv;
//声明特征匹配函数
void find_feature_matches(const Mat &img1,const Mat &img2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches);

//声明位姿估计函数
void pose_estimated_2d2d( std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches,
                          Mat &R, Mat &t);

//声明 像素坐标转归一化坐标 p99页 公式5.5 变换一下就好啦!
//这里的函数我改了一下,书上的只是得到二维点,主程序中,还把二维点转换为三维点,我觉得多此一举,一个函数实现不就好了么
//下面是我实现的坐标变换函数 返回的是Mat类型,其实照片也是矩阵
Point2f pixel2cam(const Point2d &p, const Mat &K);//p是像素坐标,K是相机内参矩阵

//声明三角化函数
void triangulation(
        const vector<KeyPoint> &keypoint_1,
        const vector<KeyPoint> &keypoint_2,
        const std::vector<DMatch> &matches,
        const Mat &R, const Mat &t,
        vector<Point3d> &points
);

/// 作图用
inline cv::Scalar get_color(float depth) {
    float up_th = 50, low_th = 10, th_range = up_th - low_th;
    if (depth > up_th) depth = up_th;
    if (depth < low_th) depth = low_th;
    return cv::Scalar(255 * depth / th_range, 0, 255 * (1 - depth / th_range));
}

int main(int argc,char** argv)
{
    //运行可执行文件+图片1的路径+图片2的路径
    if(argc!=3)
    {
        cout<<"usage: ./triangulation img1 img2"<<endl;
        return 1;
    }

    //读取图片
    Mat img1=imread(argv[1],CV_LOAD_IMAGE_COLOR);//彩色图
    Mat img2=imread(argv[2],CV_LOAD_IMAGE_COLOR);
    //定义要用到的变量啊,比如 keypoints_1,keypoints_2,matches
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    //计算两个keypoints的matches
    find_feature_matches(img1,img2,keypoints_1,keypoints_2,matches);
    //这样就得到matches啦!
    cout << "一共找到了" << matches.size() << "组匹配点" << endl;
    //重点来了
    //估计两组图片之间的运动
    Mat R,t;//定义一下旋转和平移
    pose_estimated_2d2d(keypoints_1,keypoints_2,matches,R,t);
    //这样就算出R ,t啦

    //三角化
    //定义一个容器 points 用来存放特征匹配点在世界坐标系下的3d点
    vector<Point3d> points;
    triangulation(keypoints_1,keypoints_2,matches,R,t,points);
    //得到三维点

    //验证三维点与特征点的重投影关系
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1,
                                                 0, 521.0, 249.7,
                                                 0, 0, 1);
    Mat img_plot1=img1.clone();
    Mat img_plot2=img2.clone();
    //利用循环找到图1和图2特征点在图上的位置,并圈出来
    for(int i=0;i<matches.size();i++)
    {
        //先画图1中特征点
        //在这里,为什么从一个世界坐标系下的3d点,就可以得到,图1相机坐标下的深度点呢?
        //我觉得是因为 图1的位姿: R是单位矩阵,t为0(在三角化函数中有写到) 所以可以把图1的相机坐标看成是世界坐标
        float  depth1=points[i].z;//取出图1各个特征点的深度信息
        cout<<"depth: "<<depth1<<endl;
        Point2d pt1_cam=pixel2cam(keypoints_1[matches[i].queryIdx].pt,K);
        cv::circle(img_plot1, keypoints_1[matches[i].queryIdx].pt, 2, get_color(depth1), 2);

        //画图2
        //得到图2坐标系下的3d点,得到图2的深度信息
        Mat pt2_trans=R*(Mat_<double>(3, 1) <<points[i].x,points[i].y,points[i].z)+t;
        float depth2 = pt2_trans.at<double>(2, 0);
        cv::circle(img_plot2, keypoints_2[matches[i].trainIdx].pt, 2, get_color(depth2), 2);
    }
//画图
    cv::imshow("img 1", img_plot1);
    cv::imshow("img 2", img_plot2);
    cv::waitKey();


    return 0;

}
//最复杂的地方来咯
//函数的实现
//匹配函数
void find_feature_matches(const Mat &img1,const Mat &img2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches)
{
    //先初始化,创建咱们要用到的对象
    //定义两个关键点对应的描述子,同时创建检测keypoints的检测器
    Mat descriptors_1, descriptors_2;
    vector<DMatch> match;//暂时存放匹配点,因为后面还要进行筛选
    Ptr<FeatureDetector> detector=ORB::create();//keypoints检测器
    Ptr<DescriptorExtractor> descriptor =ORB::create();//描述子提取器
    Ptr<DescriptorMatcher> matcher =DescriptorMatcher::create(
            "BruteForce-Hamming");//描述子匹配器(方法:暴力匹配)
    //step1:找到角点
    detector->detect(img1,keypoints_1);//得到图1的关键点(keypoints_1)
    detector->detect(img2,keypoints_2);//得到图2的关键点(keypoints_2)
    //step2:计算关键点所对应的描述子
    descriptor->compute(img1,keypoints_1,descriptors_1);//得到descriptors_1
    descriptor->compute(img2,keypoints_2,descriptors_2);//得到descriptors_2
    //step3:进行暴力匹配
    matcher->match(descriptors_1,descriptors_2,match);
    //step4:对match进行筛选,得到好的匹配点,把好的匹配点放在matches中
    //先定义两个变量,一个是最大距离,一个是最小距离
    double min_dist=10000, max_dist=0;
    //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
    for(int i=0;i<descriptors_1.rows;i++) //描述子本质是由 0,1 组成的向量
    {
        double dist =match[i].distance;
        //还记得orb_cv中如何找最大距离和最远距离的吗,那里面的程序是用下面的函数实现的,下面的函数得到的是pair first 里面是最小距离,second里面是最大距离
        // minmax_element(matches.begin(),matched.end,[](const DMatch &m1,const DMatch &m2){return m1.distance<m2.distance;});
        //本程序用下面的if语句得到距离
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);
    //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.
    // 但有时候最小距离会非常小,设置一个经验值30作为下限.
    for(int i=0;i<descriptors_1.rows;i++)
    {
        if(match[i].distance<=max(2*min_dist,30.0))
        {
            matches.push_back(match[i]);
        }
    }
}
//像素到归一化坐标
Point2f pixel2cam(const Point2d &p, const Mat &K) {
    return Point2f
            (
                    (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
                    (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
            );
}

//实现位姿估计函数
void pose_estimated_2d2d( std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches,
                          Mat &R, Mat &t)
{
    // 相机内参来源于 TUM Freiburg2
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1,
            0, 521.0, 249.7,
            0, 0, 1);

    //咱们要把关键点的像素点拿出来 ,定义两个容器接受两个图关键点的像素位置
    vector<Point2d> points1;
    vector<Point2d> points2;
    for(int i=0;i<(int)matches.size();i++)
    {
        //queryIdx是图1中匹配的关键点的对应编号
        //trainIdx是图2中匹配的关键点的对应编号
        //pt可以把关键点的像素位置取出来
        points1.push_back(keypoints_1[matches[i].queryIdx].pt);
        points2.push_back(keypoints_2[matches[i].trainIdx].pt);
    }
    /*//-- 计算基础矩阵
    Mat fundamental_matrix;
    fundamental_matrix = findFundamentalMat(points1, points2, CV_FM_8POINT);
    cout << "fundamental_matrix is " << endl << fundamental_matrix << endl;*/




    //计算本质矩阵 E
    //把cx ,cy放进一个向量里面 =相机的光心
    Point2d principal_point(325.1, 249.7);
    int focal_length=521;//相机的焦距
    //之所以取上面的principal_point、focal_length是因为计算本质矩阵的函数要用
    //得到本质矩阵essential_matrix
    Mat essential_matrix=findEssentialMat(points1,points2,focal_length,principal_point);
    //Mat essential_matrix=K.t()*fundamental_matrix*K;//这个是利用公式 F=K^(-T)*E*K^(-1)得到的E=K^T*F*K  不知道为什么这个不对
    cout<<" essential_matrix =\n"<< essential_matrix <<endl;

    //-- 计算单应矩阵 homography_matrix
    //-- 但是本例中场景不是平面,单应矩阵意义不大
    /*Mat homography_matrix;
    homography_matrix = findHomography(points1, points2, RANSAC, 3);
    cout << "homography_matrix is " << endl << homography_matrix << endl;*/

    //通过本质矩阵恢复咱们的 R  t
    recoverPose(essential_matrix,points1,points2,R,t,focal_length,principal_point);
    //输出咱们的 R t
  /*  cout<<" 得到图1到图2 的位姿变换:\n "<<endl;
    cout<<"R= \n"<< R <<endl;
    cout<<"t= \n"<< t <<endl;*/
}
void triangulation(
        const vector<KeyPoint> &keypoint_1,
        const vector<KeyPoint> &keypoint_2,
        const std::vector<DMatch> &matches,
        const Mat &R, const Mat &t,
        vector<Point3d> &points
)
{
    //定义图1在世界坐标系下的位姿
    Mat T1 = (Mat_<float>(3, 4) <<
                                1, 0, 0, 0,
                                0, 1, 0, 0,
                                0, 0, 1, 0);

    //定义图2在世界坐标系下的位姿
    Mat T2 = (Mat_<float>(3, 4) <<
                                R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
            R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
            R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
    );
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
    //容器 pts_1、pts_2分别存放图1和图2中特征点对应的自己相机归一化坐标中的 x与 y
    vector<Point2f> pts_1,pts_2;
    for(DMatch m:matches)//这样的遍历写起来比较快
    {
        //将像素坐标变为相机下的归一化坐标
        pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt,K));
        pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt,K));
    }
    Mat pts_4d;
    cv::triangulatePoints(T1,T2,pts_1,pts_2,pts_4d);
    //转换为非齐次坐标
    for(int i=0;i<pts_4d.cols;i++)
    {
        //定义x来接收每一个三维点
        Mat x=pts_4d.col(i);
        x/=x.at<float>(3,0);
        Point3d p(x.at<float>(0, 0),
                  x.at<float>(1, 0),
                  x.at<float>(2, 0));
        points.push_back(p);
    }

}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186
  • 187
  • 188
  • 189
  • 190
  • 191
  • 192
  • 193
  • 194
  • 195
  • 196
  • 197
  • 198
  • 199
  • 200
  • 201
  • 202
  • 203
  • 204
  • 205
  • 206
  • 207
  • 208
  • 209
  • 210
  • 211
  • 212
  • 213
  • 214
  • 215
  • 216
  • 217
  • 218
  • 219
  • 220
  • 221
  • 222
  • 223
  • 224
  • 225
  • 226
  • 227
  • 228
  • 229
  • 230
  • 231
  • 232
  • 233
  • 234
  • 235
  • 236
  • 237
  • 238
  • 239
  • 240
  • 241
  • 242
  • 243
  • 244
  • 245
  • 246
  • 247
  • 248
  • 249
  • 250
  • 251
  • 252
  • 253
  • 254
  • 255
  • 256
  • 257
  • 258
  • 259
  • 260
  • 261
  • 262
  • 263

运行结果如下:

[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000 
-- Min dist : 7.000000 
一共找到了81组匹配点
 essential_matrix =
[0.01724015832721706, 0.328054335794133, 0.0473747783144249;
 -0.3243229585962962, 0.03292958445202408, -0.6262554366073018;
 -0.005885857752320116, 0.6253830041920333, 0.0153167864909267]
depth: 66.0186
depth: 21.0728
depth: 20.3952
depth: 16.9029
depth: 19.8927
depth: 37.7193
depth: 22.5936
depth: 20.9212
depth: 16.1058
depth: 16.9235
depth: 21.188
depth: 22.2664
depth: 17.1165
depth: 17.4836
depth: 21.982
depth: 34.889
depth: 77.3512
depth: 80.3103
depth: 20.7005
depth: 17.1341
depth: 17.7618
depth: 20.41
depth: 17.1729
depth: 37.5745
depth: 20.7774
depth: 17.3433
depth: 21.9547
depth: 14.74
depth: 16.6306
depth: 34.7793
depth: 34.5093
depth: 17.7108
depth: 19.9396
depth: 17.077
depth: 20.9776
depth: 16.3867
depth: 16.5827
depth: 16.2495
depth: 65.2504
depth: 17.1249
depth: 35.5666
depth: 35.8194
depth: 68.6612
depth: 21.0837
depth: 22.3647
depth: 21.2923
depth: 17.3458
depth: 20.1207
depth: 27.1816
depth: 19.8897
depth: 24.2379
depth: 37.1623
depth: 20.4894
depth: 18.3041
depth: 25.206
depth: 18.6171
depth: 23.9337
depth: 17.8096
depth: 18.5897
depth: 20.0973
depth: 24.9094
depth: 22.4146
depth: 12.884
depth: 22.8616
depth: 16.1684
depth: 20.0982
depth: 30.8074
depth: 11.9117
depth: 19.4316
depth: 11.6402
depth: 11.3385
depth: 20.5574
depth: 17.5733
depth: 23.1775
depth: 23.7922
depth: 21.7831
depth: 24.9361
depth: 20.0201
depth: 17.1896
depth: 20.7021
depth: 13.3843
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89

在这里插入图片描述
在这里插入图片描述
从上面的图片,大致可以看出,通过世界三维点,重投影到图1与图2上去,图1与图2的特征点基本都是匹配的!!!

三、利用3d-2d进行位姿估计

源码如下:

//
// Created by nnz on 2020/11/5.
//
//
// Created by nnz on 2020/11/4.
//
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>
//该程序用了三种方法实现位姿估计
//第一种,调用cv的函数pnp求解 R ,t
//第二种,手写高斯牛顿进行位姿优化
//第三种,利用g2o进行位姿优化
using namespace std;
using namespace cv;

typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;//VecVector2d可以定义存放二维向量的容器
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;//VecVector3d可以定义存放三维向量的容器

void find_feature_matches(
        const Mat &img_1, const Mat &img_2,
        std::vector<KeyPoint> &keypoints_1,
        std::vector<KeyPoint> &keypoints_2,
        std::vector<DMatch> &matches);

// 像素坐标转相机归一化坐标
Point2d pixel2cam(const Point2d &p, const Mat &K);

// BA by gauss-newton 手写高斯牛顿进行位姿优化
void bundleAdjustmentGaussNewton(
        const VecVector3d &points_3d,
        const VecVector2d &points_2d,
        const Mat &K,
        Sophus::SE3d &pose
);

//利用g2o优化pose
void bundleAdjustmentG2O(
        const VecVector3d &points_3d,
        const VecVector2d &points_2d,
        const Mat &K,
        Sophus::SE3d &pose);

int main(int argc ,char** argv)
{


    //读取图片
    if (argc != 5) {
        cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl;
        return 1;
    }
    //读取图片
    Mat img_1=imread(argv[1],CV_LOAD_IMAGE_COLOR);//读取彩色图
    Mat img_2=imread(argv[2],CV_LOAD_IMAGE_COLOR);
    assert(img_1.data && img_2.data && "Can Not load images!");//若读取的图片没有内容,就终止程序
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);//得到两个图片的特征匹配点
    cout << "一共找到了" << matches.size() << "组匹配点" << endl;



    //建立3d点,把深度图信息读进来,构造三维点
    Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // 深度图为16位无符号数,单通道图像
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
    vector<Point3d> pts_3d;//创建容器pts_3d存放3d点(图1对应的特征点的相机坐标下的3d点)
    vector<Point2d> pts_2d;//创建容器pts_2d存放图2的特征点
    for(DMatch m:matches)
    {
        //把对应的图1的特征点的深度信息拿出来
        ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
        if(d==0) //深度有问题
            continue;
        float dd=d/5000.0;//用dd存放换算过尺度的深度信息
        Point2d p1=pixel2cam(keypoints_1[m.queryIdx].pt,K);//p1里面放的是图1特征点在相机坐标下的归一化坐标(只包含 x,y)
        pts_3d.push_back(Point3d(p1.x*dd,p1.y*dd,dd));//得到图1特征点在相机坐标下的3d坐标
        pts_2d.push_back(keypoints_2[m.trainIdx].pt);//得到图2特张点的像素坐标
    }

    cout<<"3d-2d pairs:"<< pts_3d.size() <<endl;//3d-2d配对个数得用pts_3d的size
    cout<<"使用cv求解 位姿"<<endl;
    cout<<"***********************************opencv***********************************"<<endl;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    Mat r, t;
    //Mat()这个参数指的是畸变系数向量?
    solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false); // 调用OpenCV 的 PnP 求解,可选择EPNP,DLS等方法
    Mat R;
    cv::Rodrigues(r,R);//r是旋转向量,利用cv的Rodrigues()函数将旋转向量转换为旋转矩阵

    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

    cout << "R=" << endl << R << endl;
    cout << "t=" << endl << t << endl;
    cout<<"***********************************opencv***********************************"<<endl;
    cout<<"手写高斯牛顿优化位姿"<<endl;
    cout<<"***********************************手写高斯牛顿***********************************"<<endl;
    VecVector3d pts_3d_eigen;//存放3d点(图1对应的特征点的相机坐标下的3d点)
    VecVector2d pts_2d_eigen;//存放图2的特征点
    for(size_t i=0;i<pts_3d.size();i++)//size_t
    {
        pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x,pts_3d[i].y,pts_3d[i].z));
        pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x,pts_2d[i].y));
    }
    Sophus::SE3d pose_gn;//位姿(李群)
    t1 = chrono::steady_clock::now();
    bundleAdjustmentGaussNewton(pts_3d_eigen, pts_2d_eigen, K, pose_gn);
    t2 = chrono::steady_clock::now();
    time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl;
    cout<<"R = \n"<<pose_gn.rotationMatrix()<<endl;
    cout<<"t = "<<pose_gn.translation().transpose()<<endl;
    cout<<"***********************************手写高斯牛顿***********************************"<<endl;

    cout<<"g2o优化位姿"<<endl;
    cout << "***********************************g2o***********************************" << endl;
    Sophus::SE3d pose_g2o;
    t1 = chrono::steady_clock::now();
    bundleAdjustmentG2O(pts_3d_eigen, pts_2d_eigen, K, pose_g2o);
    t2 = chrono::steady_clock::now();
    time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl;
    cout << "***********************************g2o***********************************" << endl;
    return 0;

}
//实现特征匹配
void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
    //-- 初始化
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    //-- 第一步:检测 Oriented FAST 角点位置
    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);

    //-- 第二步:根据角点位置计算 BRIEF 描述子
    descriptor->compute(img_1, keypoints_1, descriptors_1);
    descriptor->compute(img_2, keypoints_2, descriptors_2);

    //-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
    vector<DMatch> match;
    // BFMatcher matcher ( NORM_HAMMING );
    matcher->match(descriptors_1, descriptors_2, match);

    //-- 第四步:匹配点对筛选
    double min_dist = 10000, max_dist = 0;

    //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
    for (int i = 0; i < descriptors_1.rows; i++) {
        double dist = match[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
    for (int i = 0; i < descriptors_1.rows; i++) {
        if (match[i].distance <= max(2 * min_dist, 30.0)) {
            matches.push_back(match[i]);
        }
    }
}

//实现像素坐标到相机坐标的转换(求出来的只是包含相机坐标下的x,y的二维点)
Point2d pixel2cam(const Point2d &p, const Mat &K) {
    return Point2d
            (
                    (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
                    (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
            );
}

//手写高斯牛顿
void bundleAdjustmentGaussNewton(
        const VecVector3d &points_3d,
        const VecVector2d &points_2d,
        const Mat &K,
        Sophus::SE3d &pose
)
{
    typedef Eigen::Matrix<double,6,1> Vector6d;
    const int iters=10;//迭代次数
    double cost=0,lastcost=0;//代价函数(目标函数)
    //拿出内参
    double fx = K.at<double>(0, 0);
    double fy = K.at<double>(1, 1);
    double cx = K.at<double>(0, 2);
    double cy = K.at<double>(1, 2);
    //进入迭代
    for (int iter = 0; iter <iters ; iter++)
    {
        Eigen::Matrix<double,6,6> H = Eigen::Matrix<double,6,6>::Zero();//初始化H矩阵
        Vector6d b = Vector6d::Zero();//对b矩阵初始化

        cost = 0;
        // 遍历所有的特征点  计算cost
        for(int i=0;i<points_3d.size();i++)
        {
            Eigen::Vector3d pc=pose*points_3d[i];//利用待优化的pose得到图2的相机坐标下的3d点
            double inv_z=1.0/pc[2];//得到图2的相机坐标下的3d点的z的倒数,也就是1/z
            double inv_z2 = inv_z * inv_z;//(1/z)^2
            //定义投影
            Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy);
            //定义误差
            Eigen::Vector2d e=points_2d[i]-proj;
            cost += e.squaredNorm();//cost=e*e
            //定义雅克比矩阵J
            Eigen::Matrix<double, 2, 6> J;
            J << -fx * inv_z,
                    0,
                    fx * pc[0] * inv_z2,
                    fx * pc[0] * pc[1] * inv_z2,
                    -fx - fx * pc[0] * pc[0] * inv_z2,
                    fx * pc[1] * inv_z,
                    0,
                    -fy * inv_z,
                    fy * pc[1] * inv_z2,
                    fy + fy * pc[1] * pc[1] * inv_z2,
                    -fy * pc[0] * pc[1] * inv_z2,
                    -fy * pc[0] * inv_z;

            H += J.transpose() * J;
            b += -J.transpose() * e;
        }
        //出了这个内循环,表述结束一次迭代的计算,接下来,要求pose了
        Vector6d dx;//P129页 公式6.33 计算增量方程 Hdx=b
        dx = H.ldlt().solve(b);//算出增量dx
        //判断dx这个数是否有效
        if (isnan(dx[0]))
        {
            cout << "result is nan!" << endl;
            break;
        }
        //如果我们进行了迭代,且最后的cost>=lastcost的话,那就表明满足要求了,可以停止迭代了
        if (iter > 0 && cost >= lastcost)
        {
            // cost increase, update is not good
            cout << "cost: " << cost << ", last cost: " << lastcost << endl;
            break;
        }
        //优化pose 也就是用dx更新pose
        pose=Sophus::SE3d::exp(dx) * pose;//dx是李代数,要转换为李群
        lastcost=cost;
        cout << "iteration " << iter << " cost="<< std::setprecision(12) << cost << endl;
        //std::setprecision(12)浮点数控制位数为12位
        //如果误差特别小了,也结束迭代
        if (dx.norm() < 1e-6)
        {
            // converge
            break;
        }
    }
    cout<<"pose by g-n \n"<<pose.matrix()<<endl;
}

//对于用g2o来进行优化的话,首先要定义顶点和边的模板
//顶点,也就是咱们要优化的pose 用李代数表示它 6维
class Vertexpose: public g2o::BaseVertex<6,Sophus::SE3d>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;//必须写,我也不知道为什么
    //重载setToOriginImpl函数 这个应该就是把刚开的待优化的pose放进去
    virtual void setToOriginImpl() override
    {
        _estimate = Sophus::SE3d();
    }
    //重载oplusImpl函数,用来更新pose(待优化的系数)
    virtual void oplusImpl(const double *update) override
    {
        Eigen::Matrix<double,6,1> update_eigen;//更新的量,就是增量呗,dx
        update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
        _estimate=Sophus::SE3d::exp(update_eigen)* _estimate;//更新pose 李代数要转换为李群,这样才可以左乘
    }
    //存盘 读盘 :留空
    virtual bool read(istream &in) override {}

    virtual bool write(ostream &out) const override {}
};
//定义边模板 边也就是误差,二维 并且把顶点也放进去
class EdgeProjection : public g2o::BaseUnaryEdge<2,Eigen::Vector2d,Vertexpose>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;//必须写,我也不知道为什么
    //有参构造,初始化 图1中的3d点 以及相机内参K
    EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos),_K(K) {}
    //计算误差
    virtual void computeError() override
    {
        const Vertexpose *v=static_cast<const Vertexpose *>(_vertices[0]);
        Sophus::SE3d T=v->estimate();
        Eigen::Vector3d pos_pixel = _K * (T * _pos3d);//T * _pos3d是图2的相机坐标下的3d点
        pos_pixel /= pos_pixel[2];//得到了像素坐标的齐次形式
        _error = _measurement - pos_pixel.head<2>();
    }
    //计算雅克比矩阵
    virtual void linearizeOplus() override
    {
        const Vertexpose *v = static_cast<Vertexpose *> (_vertices[0]);
        Sophus::SE3d T = v->estimate();
        Eigen::Vector3d pos_cam=T*_pos3d;//图2的相机坐标下的3d点
        double fx = _K(0, 0);
        double fy = _K(1, 1);
        double cx = _K(0, 2);
        double cy = _K(1, 2);
        double X = pos_cam[0];
        double Y = pos_cam[1];
        double Z = pos_cam[2];
        double Z2 = Z * Z;
        //雅克比矩阵见 书 p187 公式7.46
        _jacobianOplusXi
                << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
                0, -fy / Z, fy * Y / (Z * Z), fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;


    }
    //存盘 读盘 :留空
    virtual bool read(istream &in) override {}

    virtual bool write(ostream &out) const override {}
private:
    Eigen::Vector3d _pos3d;
    Eigen::Matrix3d _K;
};


//利用g2o优化pose
void bundleAdjustmentG2O(
        const VecVector3d &points_3d,
        const VecVector2d &points_2d,
        const Mat &K,
        Sophus::SE3d &pose)
{
    // 构建图优化,先设定g2o
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  //  优化系数pose is 6, 数据点 landmark is 3
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // 线性求解器类型
    // 梯度下降方法,可以从GN, LM, DogLeg 中选
    auto solver = new g2o::OptimizationAlgorithmGaussNewton(
            g2o::make_unique<BlockSolverType>
                    (g2o::make_unique<LinearSolverType>()));//把设定的类型都放进求解器

    g2o::SparseOptimizer optimizer;     // 图模型
    optimizer.setAlgorithm(solver);   // 设置求解器 算法g-n
    optimizer.setVerbose(true);       // 打开调试输出
    //加入顶点
    Vertexpose *v=new Vertexpose();
    v->setEstimate(Sophus::SE3d());
    v->setId(0);
    optimizer.addVertex(v);
    //K
    // K
    Eigen::Matrix3d K_eigen;
    K_eigen <<
            K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
            K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
            K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);

    //加入边
    int index=1;
    for(size_t i=0;i<points_2d.size();++i)
    {
        //遍历 把3d点和像素点拿出来
        auto p2d = points_2d[i];
        auto p3d = points_3d[i];
        EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);//有参构造
        edge->setId(index);
        edge->setVertex(0,v);
        edge->setMeasurement(p2d);//设置观测值,其实就是图2 里的匹配特征点的像素位置
        edge->setInformation(Eigen::Matrix2d::Identity());//信息矩阵是二维方阵,因为误差是二维
        optimizer.addEdge(edge);//加入边
        index++;//边的编号++
    }
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    optimizer.setVerbose(true);
    optimizer.initializeOptimization();//开始  初始化
    optimizer.optimize(10);//迭代次数
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
    cout << "pose estimated by g2o =\n" << v->estimate().matrix() << endl;
    pose = v->estimate();

}

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186
  • 187
  • 188
  • 189
  • 190
  • 191
  • 192
  • 193
  • 194
  • 195
  • 196
  • 197
  • 198
  • 199
  • 200
  • 201
  • 202
  • 203
  • 204
  • 205
  • 206
  • 207
  • 208
  • 209
  • 210
  • 211
  • 212
  • 213
  • 214
  • 215
  • 216
  • 217
  • 218
  • 219
  • 220
  • 221
  • 222
  • 223
  • 224
  • 225
  • 226
  • 227
  • 228
  • 229
  • 230
  • 231
  • 232
  • 233
  • 234
  • 235
  • 236
  • 237
  • 238
  • 239
  • 240
  • 241
  • 242
  • 243
  • 244
  • 245
  • 246
  • 247
  • 248
  • 249
  • 250
  • 251
  • 252
  • 253
  • 254
  • 255
  • 256
  • 257
  • 258
  • 259
  • 260
  • 261
  • 262
  • 263
  • 264
  • 265
  • 266
  • 267
  • 268
  • 269
  • 270
  • 271
  • 272
  • 273
  • 274
  • 275
  • 276
  • 277
  • 278
  • 279
  • 280
  • 281
  • 282
  • 283
  • 284
  • 285
  • 286
  • 287
  • 288
  • 289
  • 290
  • 291
  • 292
  • 293
  • 294
  • 295
  • 296
  • 297
  • 298
  • 299
  • 300
  • 301
  • 302
  • 303
  • 304
  • 305
  • 306
  • 307
  • 308
  • 309
  • 310
  • 311
  • 312
  • 313
  • 314
  • 315
  • 316
  • 317
  • 318
  • 319
  • 320
  • 321
  • 322
  • 323
  • 324
  • 325
  • 326
  • 327
  • 328
  • 329
  • 330
  • 331
  • 332
  • 333
  • 334
  • 335
  • 336
  • 337
  • 338
  • 339
  • 340
  • 341
  • 342
  • 343
  • 344
  • 345
  • 346
  • 347
  • 348
  • 349
  • 350
  • 351
  • 352
  • 353
  • 354
  • 355
  • 356
  • 357
  • 358
  • 359
  • 360
  • 361
  • 362
  • 363
  • 364
  • 365
  • 366
  • 367
  • 368
  • 369
  • 370
  • 371
  • 372
  • 373
  • 374
  • 375
  • 376
  • 377
  • 378
  • 379
  • 380
  • 381
  • 382
  • 383
  • 384
  • 385
  • 386
  • 387
  • 388
  • 389
  • 390
  • 391
  • 392
  • 393
  • 394
  • 395
  • 396
  • 397
  • 398
  • 399
  • 400
  • 401
  • 402
  • 403
  • 404
  • 405
  • 406
  • 407
  • 408

运行结果如下:

-- Max dist : 95.000000 
-- Min dist : 7.000000 
一共找到了81组匹配点
3d-2d pairs:77
使用cv求解 位姿
***********************************opencv***********************************
solve pnp in opencv cost time: 0.000684295 seconds.
R=
[0.9979193252724496, -0.05138618848139433, 0.03894200663979159;
 0.05033852863383521, 0.9983556575256342, 0.02742286676369363;
 -0.04028712924445606, -0.02540552538156528, 0.9988651092601679]
t=
[-0.1255867083463836;
 -0.007363520669256008;
 0.06099926576331584]
***********************************opencv***********************************
手写高斯牛顿优化位姿
***********************************手写高斯牛顿***********************************
iteration 0 cost=44765.3530351
iteration 1 cost=431.695510883
iteration 2 cost=319.560183817
iteration 3 cost=319.559014215
pose by g-n 
   0.997919325271   -0.051386188447   0.0389420067273   -0.125586708494
   0.050338528597    0.998355657527   0.0274228667703 -0.00736352068189
 -0.0402871293313  -0.0254055253851    0.998865109257   0.0609992657984
                0                 0                 0                 1
solve pnp by gauss newton cost time: 0.000185698 seconds.
R = 
  0.997919325271  -0.051386188447  0.0389420067273
  0.050338528597   0.998355657527  0.0274228667703
-0.0402871293313 -0.0254055253851   0.998865109257
t =   -0.125586708494 -0.00736352068189   0.0609992657984
***********************************手写高斯牛顿***********************************
g2o优化位姿
***********************************g2o***********************************
iteration= 0     chi2= 431.695511        time= 5.1883e-05        cumTime= 5.1883e-05     edges= 77       schur= 0
iteration= 1     chi2= 319.560184        time= 5.2597e-05        cumTime= 0.00010448     edges= 77       schur= 0
iteration= 2     chi2= 319.559014        time= 5.5606e-05        cumTime= 0.000160086    edges= 77       schur= 0
iteration= 3     chi2= 319.559014        time= 8.3903e-05        cumTime= 0.000243989    edges= 77       schur= 0
iteration= 4     chi2= 319.559014        time= 1.6639e-05        cumTime= 0.000260628    edges= 77       schur= 0
iteration= 5     chi2= 319.559014        time= 1.3466e-05        cumTime= 0.000274094    edges= 77       schur= 0
iteration= 6     chi2= 319.559014        time= 1.2935e-05        cumTime= 0.000287029    edges= 77       schur= 0
iteration= 7     chi2= 319.559014        time= 1.3457e-05        cumTime= 0.000300486    edges= 77       schur= 0
iteration= 8     chi2= 319.559014        time= 1.289e-05         cumTime= 0.000313376    edges= 77       schur= 0
iteration= 9     chi2= 319.559014        time= 1.2828e-05        cumTime= 0.000326204    edges= 77       schur= 0
optimization costs time: 0.001154916 seconds.
pose estimated by g2o =
   0.997919325272   -0.051386188482   0.0389420066391   -0.125586708345
  0.0503385286344    0.998355657526   0.0274228667651 -0.00736352067148
 -0.0402871292439   -0.025405525383     0.99886510926   0.0609992657627
                0                 0                 0                 1
solve pnp by g2o cost time: 0.001376277 seconds.
***********************************g2o***********************************
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54

四、利用3d-3d进行位姿估计

源码如下:

//
// Created by nnz on 2020/11/5.
//
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>
using namespace std;
using namespace cv;

void find_feature_matches(
        const Mat &img_1, const Mat &img_2,
        std::vector<KeyPoint> &keypoints_1,
        std::vector<KeyPoint> &keypoints_2,
        std::vector<DMatch> &matches);

// 像素坐标转相机归一化坐标
Point2d pixel2cam(const Point2d &p, const Mat &K);

void pose_estimation_3d3d(
        const vector<Point3f> &pts1,
        const vector<Point3f> &pts2,
        Mat &R, Mat &t
);

//
void bundleAdjustment(
        const vector<Point3f> &pts1,
        const vector<Point3f> &pts2,
        Mat &R, Mat &t);
int main(int argc,char** argv)
{
    if(argc!=5)
    {
        cout<<" usage: pose_estimation_3d3d img1 img2 depth1 depth2 "<<endl;
        return 1;
    }
    //读取图片
    Mat img_1=imread(argv[1],CV_LOAD_IMAGE_COLOR);//读取彩色图
    Mat img_2=imread(argv[2],CV_LOAD_IMAGE_COLOR);
    vector<KeyPoint> keypoints_1, keypoints_2;//容器keypoints_1, keypoints_2分别存放图1和图2的特征点
    vector<DMatch> matches;
    find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);//得到图1与图2的特征匹配点
    cout << "一共找到了" << matches.size() << "组匹配点" << endl;
    //接下来的是建立3d点 利用深度图可以获取深度信息
    //depth1是图1对应的深度图 depth2是图2对应的深度图
    Mat depth1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // 深度图为16位无符号数,单通道图像
    Mat depth2 = imread(argv[4], CV_LOAD_IMAGE_UNCHANGED);

    //内参矩阵
    Mat K =(Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
    vector<Point3f> pts1, pts2;

    for(DMatch m:matches)
    {
        //先把两图特征匹配点对应的深度拿出来
        ushort d1=depth1.ptr<unsigned short >(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
        ushort d2=depth2.ptr<unsigned short >(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
        if(d1==0 || d2==0)//深度无效
            continue;
        Point2d p1=pixel2cam(keypoints_1[m.queryIdx].pt,K);//得到图1的特征匹配点在其相机坐标下的x ,y
        Point2d p2=pixel2cam(keypoints_2[m.trainIdx].pt,K);//得到图2的特征匹配点在其相机坐标下的x ,y
        //对深度进行尺度变化得到真正的深度
        float dd1 = float(d1) / 5000.0;
        float dd2 = float(d2) / 5000.0;
        //容器 pts_1与pts_2分别存放 图1中的特征匹配点其相机坐标下的3d点 和 图2中的特征匹配点其相机坐标下的3d点
        pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
        pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
    }
    //这样就可以得到 3d-3d的匹配点
    cout << "3d-3d pairs: " << pts1.size() << endl;
    Mat R, t;
    cout<<" ************************************ICP 3d-3d by SVD**************************************** "<<endl;
    pose_estimation_3d3d(pts1, pts2, R, t);
    cout << "ICP via SVD results: " << endl;
    cout << "R = " << R << endl;
    cout << "t = " << t << endl;
    cout << "R^T = " << R.t() << endl;
    cout << "t^T = " << -R.t() * t << endl;
    cout<<" ************************************ICP 3d-3d by SVD**************************************** "<<endl;
    cout<<endl;
    cout<<" ************************************ICP 3d-3d by g2o**************************************** "<<endl;
    bundleAdjustment(pts1, pts2, R, t);
    cout<<"R= \n"<<R<<endl;
    cout<<"t = "<< t.t() <<endl;
    cout<<"验证 p2 = R*P1 +t "<<endl;
    for (int i = 0; i < 5; i++) {
        cout << "p1 = " << pts1[i] << endl;
        cout << "p2 = " << pts2[i] << endl;
        cout << "(R*p1+t) = " <<
             R * (Mat_<double>(3, 1) << pts1[i].x, pts1[i].y, pts1[i].z) + t
             << endl;
        cout << endl;
    }
    cout<<" ************************************ICP 3d-3d by g2o**************************************** "<<endl;

    return 0;
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
    //-- 初始化
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    //-- 第一步:检测 Oriented FAST 角点位置
    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);

    //-- 第二步:根据角点位置计算 BRIEF 描述子
    descriptor->compute(img_1, keypoints_1, descriptors_1);
    descriptor->compute(img_2, keypoints_2, descriptors_2);

    //-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
    vector<DMatch> match;
    // BFMatcher matcher ( NORM_HAMMING );
    matcher->match(descriptors_1, descriptors_2, match);

    //-- 第四步:匹配点对筛选
    double min_dist = 10000, max_dist = 0;

    //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
    for (int i = 0; i < descriptors_1.rows; i++) {
        double dist = match[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
    for (int i = 0; i < descriptors_1.rows; i++) {
        if (match[i].distance <= max(2 * min_dist, 30.0)) {
            matches.push_back(match[i]);
        }
    }
}

//实现像素坐标到相机坐标的转换(求出来的只是包含相机坐标下的x,y的二维点)
Point2d pixel2cam(const Point2d &p, const Mat &K) {
    return Point2d
            (
                    (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
                    (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
            );
}

//参考书上的p197页
void pose_estimation_3d3d(
        const vector<Point3f> &pts1,
        const vector<Point3f> &pts2,
        Mat &R, Mat &t
)
{
    int N=pts1.size();//匹配的3d点个数
    Point3f p1,p2;//质心
    for(int i=0;i<N;i++)
    {
        p1+=pts1[i];
        p2+=pts2[i];
    }
    p1 = Point3f(Vec3f(p1)/N);//得到质心
    p2 = Point3f(Vec3f(p2) / N);
    vector<Point3f> q1(N),q2(N);
    for(int i=0;i<N;i++)
    {
        //去质心
        q1[i]=pts1[i]-p1;
        q2[i]=pts2[i]-p2;
    }
    //计算 W+=q1*q2^T(求和)
    Eigen::Matrix3d W=Eigen::Matrix3d::Zero();//初始化
    for(int i=0;i<N;i++)
    {
        W+= Eigen::Vector3d (q1[i].x,q1[i].y,q1[i].z)*(Eigen::Vector3d (q2[i].x,q2[i].y,q2[i].z).transpose());
    }
    cout<<"W = "<<endl<<W<<endl;
    //利用svd分解 W=U*sigema*V
    Eigen::JacobiSVD<Eigen::Matrix3d> svd(W,Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::Matrix3d U=svd.matrixU();//得到U矩阵
    Eigen::Matrix3d V=svd.matrixV();//得到V矩阵
    cout << "U=" << U << endl;
    cout << "V=" << V << endl;
    Eigen::Matrix3d R_=U*(V.transpose());
    if (R_.determinant() < 0)//若旋转矩阵R_的行列式<0 则取负号
    {
        R_ = -R_;
    }
    Eigen::Vector3d t_=Eigen::Vector3d (p1.x,p1.y,p1.z)-R_*Eigen::Vector3d (p2.x,p2.y,p2.z);//得到平移向量
    //把 Eigen形式的 r 和 t_ 转换为CV 中的Mat格式
    R = (Mat_<double>(3, 3) <<
                            R_(0, 0), R_(0, 1), R_(0, 2),
            R_(1, 0), R_(1, 1), R_(1, 2),
            R_(2, 0), R_(2, 1), R_(2, 2)
    );
    t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}

//对于用g2o来进行优化的话,首先要定义顶点和边的模板
//顶点,也就是咱们要优化的pose 用李代数表示它 6维
class Vertexpose: public g2o::BaseVertex<6,Sophus::SE3d>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;//必须写,我也不知道为什么
    //重载setToOriginImpl函数 这个应该就是把刚开的待优化的pose放进去
    virtual void setToOriginImpl() override
    {
        _estimate = Sophus::SE3d();
    }
    //重载oplusImpl函数,用来更新pose(待优化的系数)
    virtual void oplusImpl(const double *update) override
    {
        Eigen::Matrix<double,6,1> update_eigen;//更新的量,就是增量呗,dx
        update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
        _estimate=Sophus::SE3d::exp(update_eigen)* _estimate;//更新pose 李代数要转换为李群,这样才可以左乘
    }
    //存盘 读盘 :留空
    virtual bool read(istream &in) override {}

    virtual bool write(ostream &out) const override {}
};
//定义边
class EdgeProjectXYZRGBD: public g2o::BaseUnaryEdge<3,Eigen::Vector3d,Vertexpose>
{
public:
    EdgeProjectXYZRGBD(const Eigen::Vector3d &point) : _point(point) {}//赋值这个是图1坐标下的3d点
    //计算误差
    virtual void computeError() override
    {
        const Vertexpose *v=static_cast<const Vertexpose *>(_vertices[0]);//顶点v
        _error = _measurement - v->estimate() * _point;


    }
    //计算雅克比
    virtual void linearizeOplus() override
    {
        const Vertexpose *v=static_cast<const Vertexpose *>(_vertices[0]);//顶点v
        Sophus::SE3d T=v->estimate();//把顶点的待优化系数拿出来
        Eigen::Vector3d xyz_trans=T*_point;//变换到图2下的坐标点
        //下面的雅克比没看懂
        _jacobianOplusXi.block<3, 3>(0, 0) = -Eigen::Matrix3d::Identity();
        _jacobianOplusXi.block<3, 3>(0, 3) = Sophus::SO3d::hat(xyz_trans);

    }

    bool read(istream &in) {}

    bool write(ostream &out) const {}
protected:
    Eigen::Vector3d _point;
};
//利用g2o
void bundleAdjustment(const vector<Point3f> &pts1,
                      const vector<Point3f> &pts2,
                      Mat &R, Mat &t)
{
// 构建图优化,先设定g2o
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  //  优化系数pose is 6, 数据点 landmark is 3
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // 线性求解器类型
    // 梯度下降方法,可以从GN, LM, DogLeg 中选
    auto solver = new g2o::OptimizationAlgorithmGaussNewton(
            g2o::make_unique<BlockSolverType>
                    (g2o::make_unique<LinearSolverType>()));//把设定的类型都放进求解器
    g2o::SparseOptimizer optimizer;     // 图模型
    optimizer.setAlgorithm(solver);   // 设置求解器 算法g-n
    optimizer.setVerbose(true);       // 打开调试输出
    //加入顶点
    Vertexpose *v=new Vertexpose();
    v->setEstimate(Sophus::SE3d());
    v->setId(0);
    optimizer.addVertex(v);

    //加入边
    int index=1;
    for(size_t i=0;i<pts1.size();i++)
    {
        EdgeProjectXYZRGBD *edge = new EdgeProjectXYZRGBD(Eigen::Vector3d(pts1[i].x,pts1[i].y,pts1[i].z));
        edge->setId(index);//边的编号
        edge->setVertex(0,v);//设置顶点  顶点编号
        edge->setMeasurement(Eigen::Vector3d(pts2[i].x,pts2[i].y,pts2[i].z));
        edge->setInformation(Eigen::Matrix3d::Identity());//set信息矩阵为单位矩阵
        optimizer.addEdge(edge);//加入边
        index++;
    }
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    optimizer.initializeOptimization();//开始
    optimizer.optimize(10);//迭代次数
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optimization costs time: " << time_used.count() << " seconds." << endl;

    cout << endl << "after optimization:" << endl;
    cout << "T=\n" << v->estimate().matrix() << endl;

    // 把位姿转换为Mat类型
    Eigen::Matrix3d R_ = v->estimate().rotationMatrix();
    Eigen::Vector3d t_ = v->estimate().translation();
    R = (Mat_<double>(3, 3) <<
                            R_(0, 0), R_(0, 1), R_(0, 2),
            R_(1, 0), R_(1, 1), R_(1, 2),
            R_(2, 0), R_(2, 1), R_(2, 2)
    );
    t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186
  • 187
  • 188
  • 189
  • 190
  • 191
  • 192
  • 193
  • 194
  • 195
  • 196
  • 197
  • 198
  • 199
  • 200
  • 201
  • 202
  • 203
  • 204
  • 205
  • 206
  • 207
  • 208
  • 209
  • 210
  • 211
  • 212
  • 213
  • 214
  • 215
  • 216
  • 217
  • 218
  • 219
  • 220
  • 221
  • 222
  • 223
  • 224
  • 225
  • 226
  • 227
  • 228
  • 229
  • 230
  • 231
  • 232
  • 233
  • 234
  • 235
  • 236
  • 237
  • 238
  • 239
  • 240
  • 241
  • 242
  • 243
  • 244
  • 245
  • 246
  • 247
  • 248
  • 249
  • 250
  • 251
  • 252
  • 253
  • 254
  • 255
  • 256
  • 257
  • 258
  • 259
  • 260
  • 261
  • 262
  • 263
  • 264
  • 265
  • 266
  • 267
  • 268
  • 269
  • 270
  • 271
  • 272
  • 273
  • 274
  • 275
  • 276
  • 277
  • 278
  • 279
  • 280
  • 281
  • 282
  • 283
  • 284
  • 285
  • 286
  • 287
  • 288
  • 289
  • 290
  • 291
  • 292
  • 293
  • 294
  • 295
  • 296
  • 297
  • 298
  • 299
  • 300
  • 301
  • 302
  • 303
  • 304
  • 305
  • 306
  • 307
  • 308
  • 309
  • 310
  • 311
  • 312
  • 313
  • 314
  • 315
  • 316
  • 317
  • 318
  • 319
  • 320
  • 321
  • 322
  • 323
  • 324

运行结果如下:

-- Max dist : 95.000000 
-- Min dist : 7.000000 
一共找到了81组匹配点
3d-3d pairs: 75
 ************************************ICP 3d-3d by SVD**************************************** 
W = 
  11.8688 -0.717698   1.89486
 -1.88065   3.83391  -5.78219
  3.25846  -5.82734   9.65267
U=  0.592295  -0.805658 -0.0101195
 -0.418171  -0.318113   0.850845
  0.688709   0.499719   0.525319
V=   0.64736  -0.761401 -0.0345329
 -0.388765  -0.368829   0.844291
  0.655581   0.533135   0.534772
ICP via SVD results: 
R = [0.9972065648712515, 0.05834273264213721, -0.04663895931006065;
 -0.05787745374349923, 0.9982601222918895, 0.01126625891543587;
 0.04721511755620855, -0.008535443141899024, 0.9988482762084163]
t = [0.1379879639364653;
 -0.06551699902642329;
 -0.02981649372255712]
R^T = [0.9972065648712515, -0.05787745374349923, 0.04721511755620855;
 0.05834273264213721, 0.9982601222918895, -0.008535443141899024;
 -0.04663895931006065, 0.01126625891543587, 0.9988482762084163]
t^T = [-0.1399866713350009;
 0.05709791558567718;
 0.03695589986706022]
 ************************************ICP 3d-3d by SVD**************************************** 

 ************************************ICP 3d-3d by g2o**************************************** 
iteration= 0     chi2= 1.816312  time= 5.5857e-05        cumTime= 5.5857e-05     edges= 75       schur= 0
iteration= 1     chi2= 1.815198  time= 3.915e-05         cumTime= 9.5007e-05     edges= 75       schur= 0
iteration= 2     chi2= 1.815193  time= 3.6083e-05        cumTime= 0.00013109     edges= 75       schur= 0
iteration= 3     chi2= 1.815193  time= 3.6725e-05        cumTime= 0.000167815    edges= 75       schur= 0
iteration= 4     chi2= 1.815193  time= 3.6591e-05        cumTime= 0.000204406    edges= 75       schur= 0
iteration= 5     chi2= 1.815193  time= 3.4875e-05        cumTime= 0.000239281    edges= 75       schur= 0
iteration= 6     chi2= 1.815193  time= 3.8182e-05        cumTime= 0.000277463    edges= 75       schur= 0
iteration= 7     chi2= 1.815193  time= 3.624e-05         cumTime= 0.000313703    edges= 75       schur= 0
iteration= 8     chi2= 1.815193  time= 3.6928e-05        cumTime= 0.000350631    edges= 75       schur= 0
iteration= 9     chi2= 1.815193  time= 3.559e-05         cumTime= 0.000386221    edges= 75       schur= 0
optimization costs time: 0.00139961 seconds.

after optimization:
T=
   0.997207  -0.0578775   0.0472151   -0.139987
  0.0583427     0.99826 -0.00853546    0.057098
  -0.046639   0.0112663    0.998848   0.0369563
          0           0           0           1
R= 
[0.9972065649604284, -0.05787745737903488, 0.04721511121623029;
 0.05834273685912923, 0.9982601219265494, -0.008535457045579595;
 -0.04663895212812353, 0.01126627261025276, 0.9988482763892933]
t = [-0.1399866667461035, 0.05709796036198918, 0.03695625733240535]
验证 p2 = R*P1 +t 
p1 = [-0.0374123, -0.830816, 2.7448]
p2 = [-0.0111479, -0.746763, 2.7652]
(R*p1+t) = [0.0003870556250864243;
 -0.7978830553505724;
 2.770979772909325]

p1 = [-0.243698, -0.117719, 1.5848]
p2 = [-0.299118, -0.0975683, 1.6558]
(R*p1+t) = [-0.3013644819130167;
 -0.08816155238043549;
 1.629970589669722]

p1 = [-0.627753, 0.160186, 1.3396]
p2 = [-0.709645, 0.159033, 1.4212]
(R*p1+t) = [-0.7120075585448206;
 0.1689466847826468;
 1.406095804685492]

p1 = [-0.323443, 0.104873, 1.4266]
p2 = [-0.399079, 0.12047, 1.4838]
(R*p1+t) = [-0.4012388452506169;
 0.1307411591512773;
 1.478179756889323]

p1 = [-0.627221, 0.101454, 1.3116]
p2 = [-0.709709, 0.100216, 1.3998]
(R*p1+t) = [-0.7094003465992538;
 0.1105864276598082;
 1.377441574523275]

 ************************************ICP 3d-3d by g2o****************************************
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/很楠不爱3/article/detail/292827
推荐阅读
相关标签
  

闽ICP备14008679号