// Find correspondences
ORBmatcher matcher(0.9,true);
int nmatches = matcher.SearchForInitialization(mInitialFrame,mCurrentFrame,mvbPrevMatched,mvIniMatches,100);
// Check if there are enough correspondences
if(nmatches<100)
{
    delete mpInitializer;
    mpInitializer = static_cast<Initializer*>(NULL);
    return;
}
cv::Mat Rcw; // Current Camera Rotation
cv::Mat tcw; // Current Camera Translation
vector<bool> vbTriangulated; // Triangulated Correspondences (mvIniMatches)
float RH = SH/(SH+SF);

// Try to reconstruct from homography or fundamental depending on the ratio (0.40-0.45)
if(RH>0.40)
    return ReconstructH(vbMatchesInliersH,H,mK,R21,t21,vP3D,vbTriangulated,1.0,50);
else //if(pF_HF>0.6)
    return ReconstructF(vbMatchesInliersF,F,mK,R21,t21,vP3D,vbTriangulated,1.0,50);
// Store frame pose information to retrieve the complete camera trajectory afterwards.
if(!mCurrentFrame.mTcw.empty())
{
    cv::Mat Tcr = mCurrentFrame.mTcw*mCurrentFrame.mpReferenceKF->GetPoseInverse();
    mlRelativeFramePoses.push_back(Tcr);
    mlpReferences.push_back(mpReferenceKF);
    mlFrameTimes.push_back(mCurrentFrame.mTimeStamp);
    mlbLost.push_back(mState==LOST);
}
else
{
    // This can happen if tracking is lost
    mlRelativeFramePoses.push_back(mlRelativeFramePoses.back());
    mlpReferences.push_back(mlpReferences.back());
    mlFrameTimes.push_back(mlFrameTimes.back());
    mlbLost.push_back(mState==LOST);
}
bOK = TrackReferenceKeyFrame(); //用最近的一个KeyFrame做为referenceFrame做追踪
bOK = TrackWithMotionModel(); //用之前的结果继续做追踪
bOK = Relocalization(); //重定位模式 一般是追踪丢失了或者是用已经建立好的地图做重定位
这里简单提一下,不想看理论的直接跳过这一段。(SLAM,特别是单目的SLAM会因为每一帧的计算误差形成累积误差,所以KeyFrame based SLAM大家会用取KeyFrame的方法来计算关键帧之间的运动和点云,然后再优化关键帧与关键帧之间的误差,从而降低每帧都计算的误差。算的次数少了,累积误差当然就降低了,速度也提高了,所以就产生了前两个模式。第三个模式很容易理解,在前面都失败了的情况下,Tracking丢失了,只能去通过回到之前看到过的KeyFrame场景下,从这个KeyFrame开始重新定位追踪。详细的大家可以去稍微看一下SLAM原理,很多讲义上的前几页就会提到。)
int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);

这里的matching()方式就是通过词包的匹配去匹配储存好的KeyFrame,需要先对整个空间场景进行扫描,然后再走匹配和Tracking流程。特别的,在这个函数中包含:
PnPsolver* pSolver = new PnPsolver(mCurrentFrame,vvpMapPointMatches[i]); pSolver->SetRansacParameters(0.99,10,300,4,0.5,5.991);
这一步。如果匹配到KeyFrame,则直接计算 PnPProblem (这个大家可以查Google,Perspective N Points)。
else { // Localization Mode: Local Mapping is deactivated
if(mState==LOST) { bOK = Relocalization(); } else { if(!mbVO) { // In last frame we tracked enough MapPoints in the map
if(!mVelocity.empty()) { bOK = TrackWithMotionModel(); } else { bOK = TrackReferenceKeyFrame(); } } else { // In last frame we tracked mainly "visual odometry" points.
// We compute two camera poses, one from motion model and one doing relocalization.
// If relocalization is successful we choose that solution, otherwise we retain
// the "visual odometry" solution.