
Fixing the Chapter 8 (Visual Odometry: Part 2) Code Errors in Gao Xiang's *14 Lectures on Visual SLAM* Without Installing OpenCV 4


1. The terminal error

When building and running the Chapter 8 programs from the second edition of the book, an error is reported at the call to `cv::parallel_for_`.

The cause is the call to `parallel_for_`: OpenCV 3 does not accept the `std::bind` expression that the book's code passes to it. The author overlooked this when writing the book and later added it to the errata. You can either install OpenCV 4 or rewrite the code around `parallel_for_` yourself (Gao Xiang's reply: https://github.com/gaoxiang12/slambook2/issues/57).
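To see the incompatibility in isolation, here is a minimal, self-contained sketch (the SquareBody class and its data are illustrative, not part of the book's code). On OpenCV 3 the loop body must be wrapped in a class derived from cv::ParallelLoopBody, which is exactly the rewrite applied to the two files below; the callable-based call used in the book is shown commented out:

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

// OpenCV 3-compatible style: wrap the loop body in a ParallelLoopBody subclass.
class SquareBody : public cv::ParallelLoopBody {
public:
    explicit SquareBody(std::vector<int> &data) : data_(data) {}
    // parallel_for_ invokes operator() on sub-ranges, possibly from several threads
    virtual void operator()(const cv::Range &range) const {
        for (int i = range.start; i < range.end; i++)
            data_[i] = data_[i] * data_[i];
    }
private:
    std::vector<int> &data_;
};

int main() {
    std::vector<int> data = {1, 2, 3, 4, 5};

    // works on both OpenCV 3 and OpenCV 4
    cv::parallel_for_(cv::Range(0, (int)data.size()), SquareBody(data));

    // the book's style: only compiles on OpenCV versions that provide the
    // std::function overload of parallel_for_
    // cv::parallel_for_(cv::Range(0, (int)data.size()),
    //                   [&](const cv::Range &r) { /* loop body */ });

    for (int v : data) std::cout << v << " ";
    std::cout << std::endl;
    return 0;
}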

2. The fix

There are two ways to fix this:

1. Install OpenCV 4.

2. Modify optical_flow.cpp and direct_method.cpp (the approach taken here).

Digging further, I found that someone has already shared a direct_method.cpp that runs on OpenCV 3 (https://github.com/gaoxiang12/slambook2/issues/32), but no fixed optical_flow.cpp. I modified optical_flow.cpp myself; both fixed files are shared below.

direct_method.cpp (adapted from https://github.com/rancheng/slambook2/blob/opencv3_2_0/ch8/direct_method.cpp):

#include <opencv2/opencv.hpp>
#include <sophus/se3.hpp>
#include <boost/format.hpp>
#include <pangolin/pangolin.h>
#include <mutex>   // for std::mutex / std::unique_lock (added)
#include <chrono>  // for timing (added)

using namespace std;

typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;

// Camera intrinsics
double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
// baseline
double baseline = 0.573;
// paths
string left_file = "./left.png";
string disparity_file = "./disparity.png";
boost::format fmt_others("./%06d.png");  // other files

// useful typedefs
typedef Eigen::Matrix<double, 6, 6> Matrix6d;
typedef Eigen::Matrix<double, 2, 6> Matrix26d;
typedef Eigen::Matrix<double, 6, 1> Vector6d;

/**
 * pose estimation using direct method
 * @param img1
 * @param img2
 * @param px_ref
 * @param depth_ref
 * @param T21
 */
void DirectPoseEstimationMultiLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21
);

/**
 * pose estimation using direct method
 * @param img1
 * @param img2
 * @param px_ref
 * @param depth_ref
 * @param T21
 */
void DirectPoseEstimationSingleLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21
);

// bilinear interpolation
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    // boundary check
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols) x = img.cols - 1;
    if (y >= img.rows) y = img.rows - 1;
    uchar *data = &img.data[int(y) * img.step + int(x)];
    float xx = x - floor(x);
    float yy = y - floor(y);
    return float(
        (1 - xx) * (1 - yy) * data[0] +
        xx * (1 - yy) * data[1] +
        (1 - xx) * yy * data[img.step] +
        xx * yy * data[img.step + 1]
    );
}

/// class for accumulating jacobians in parallel;
/// derives from cv::ParallelLoopBody so the object itself can be passed to
/// cv::parallel_for_ on OpenCV 3 (instead of the std::bind expression used in the book)
class JacobianAccumulator : public cv::ParallelLoopBody {
private:
    const cv::Mat &img1;
    const cv::Mat &img2;
    const VecVector2d &px_ref;
    const vector<double> depth_ref;
    Sophus::SE3d &T21;
    // mutable: operator() is const but must update these accumulators
    mutable VecVector2d projection;  // projected points
    mutable std::mutex hessian_mutex;
    mutable Matrix6d H = Matrix6d::Zero();
    mutable Vector6d b = Vector6d::Zero();
    mutable double cost = 0;

public:
    JacobianAccumulator(
        const cv::Mat &img1_,
        const cv::Mat &img2_,
        const VecVector2d &px_ref_,
        const vector<double> depth_ref_,
        Sophus::SE3d &T21_) :
        img1(img1_), img2(img2_), px_ref(px_ref_), depth_ref(depth_ref_), T21(T21_) {
        projection = VecVector2d(px_ref.size(), Eigen::Vector2d(0, 0));
    }

    /// the original accumulate_jacobian(range) is replaced by operator() below
    // void accumulate_jacobian(const cv::Range &range);

    /// get hessian matrix
    Matrix6d hessian() const { return H; }

    /// get bias
    Vector6d bias() const { return b; }

    /// get total cost
    double cost_func() const { return cost; }

    /// get projected points
    VecVector2d projected_points() const { return projection; }

    /// reset h, b, cost to zero
    void reset() {
        H = Matrix6d::Zero();
        b = Vector6d::Zero();
        cost = 0;
    }

    /// accumulate jacobians in a range; called by cv::parallel_for_
    virtual void operator()(const cv::Range &range) const {
        // parameters
        const int half_patch_size = 1;
        int cnt_good = 0;
        Matrix6d hessian = Matrix6d::Zero();
        Vector6d bias = Vector6d::Zero();
        double cost_tmp = 0;

        for (int i = range.start; i < range.end; i++) {
            // compute the projection in the second image
            Eigen::Vector3d point_ref =
                depth_ref[i] * Eigen::Vector3d((px_ref[i][0] - cx) / fx, (px_ref[i][1] - cy) / fy, 1);
            Eigen::Vector3d point_cur = T21 * point_ref;
            if (point_cur[2] < 0)  // depth invalid
                continue;

            float u = fx * point_cur[0] / point_cur[2] + cx, v = fy * point_cur[1] / point_cur[2] + cy;
            if (u < half_patch_size || u > img2.cols - half_patch_size || v < half_patch_size ||
                v > img2.rows - half_patch_size)
                continue;

            projection[i] = Eigen::Vector2d(u, v);
            double X = point_cur[0], Y = point_cur[1], Z = point_cur[2],
                Z2 = Z * Z, Z_inv = 1.0 / Z, Z2_inv = Z_inv * Z_inv;
            cnt_good++;

            // and compute error and jacobian
            for (int x = -half_patch_size; x <= half_patch_size; x++)
                for (int y = -half_patch_size; y <= half_patch_size; y++) {
                    double error = GetPixelValue(img1, px_ref[i][0] + x, px_ref[i][1] + y) -
                                   GetPixelValue(img2, u + x, v + y);
                    Matrix26d J_pixel_xi;
                    Eigen::Vector2d J_img_pixel;

                    J_pixel_xi(0, 0) = fx * Z_inv;
                    J_pixel_xi(0, 1) = 0;
                    J_pixel_xi(0, 2) = -fx * X * Z2_inv;
                    J_pixel_xi(0, 3) = -fx * X * Y * Z2_inv;
                    J_pixel_xi(0, 4) = fx + fx * X * X * Z2_inv;
                    J_pixel_xi(0, 5) = -fx * Y * Z_inv;

                    J_pixel_xi(1, 0) = 0;
                    J_pixel_xi(1, 1) = fy * Z_inv;
                    J_pixel_xi(1, 2) = -fy * Y * Z2_inv;
                    J_pixel_xi(1, 3) = -fy - fy * Y * Y * Z2_inv;
                    J_pixel_xi(1, 4) = fy * X * Y * Z2_inv;
                    J_pixel_xi(1, 5) = fy * X * Z_inv;

                    J_img_pixel = Eigen::Vector2d(
                        0.5 * (GetPixelValue(img2, u + 1 + x, v + y) - GetPixelValue(img2, u - 1 + x, v + y)),
                        0.5 * (GetPixelValue(img2, u + x, v + 1 + y) - GetPixelValue(img2, u + x, v - 1 + y))
                    );

                    // total jacobian
                    Vector6d J = -1.0 * (J_img_pixel.transpose() * J_pixel_xi).transpose();

                    hessian += J * J.transpose();
                    bias += -error * J;
                    cost_tmp += error * error;
                }
        }

        if (cnt_good) {
            // set hessian, bias and cost under the mutex
            unique_lock<mutex> lck(hessian_mutex);
            H += hessian;
            b += bias;
            cost += cost_tmp / cnt_good;
        }
    }
};

int main(int argc, char **argv) {
    cv::Mat left_img = cv::imread(left_file, 0);
    cv::Mat disparity_img = cv::imread(disparity_file, 0);
    if (left_img.empty() || disparity_img.empty()) {
        std::cout << "!!! Failed imread(): image not found" << std::endl;
        return 1;
    }

    // let's randomly pick pixels in the first image and generate some 3d points in the first image's frame
    cv::RNG rng;
    int nPoints = 2000;
    int boarder = 20;
    VecVector2d pixels_ref;
    vector<double> depth_ref;
    cout << "left_img.cols: " << left_img.cols << endl;

    // generate pixels in ref and load depth data
    for (int i = 0; i < nPoints; i++) {
        int x = rng.uniform(boarder, left_img.cols - boarder);  // don't pick pixels close to boarder
        int y = rng.uniform(boarder, left_img.rows - boarder);  // don't pick pixels close to boarder
        int disparity = disparity_img.at<uchar>(y, x);
        double depth = fx * baseline / disparity;  // you know this is disparity to depth
        depth_ref.push_back(depth);
        pixels_ref.push_back(Eigen::Vector2d(x, y));
    }

    // estimate the pose for 000001.png ~ 000005.png using this information
    Sophus::SE3d T_cur_ref;
    for (int i = 1; i < 6; i++) {
        cv::Mat img = cv::imread((fmt_others % i).str(), 0);
        // try single layer by uncommenting this line
        // DirectPoseEstimationSingleLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
        DirectPoseEstimationMultiLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
    }
    return 0;
}

void DirectPoseEstimationSingleLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {
    const int iterations = 10;
    double cost = 0, lastCost = 0;
    auto t1 = chrono::steady_clock::now();
    JacobianAccumulator jaco_accu(img1, img2, px_ref, depth_ref, T21);

    for (int iter = 0; iter < iterations; iter++) {
        jaco_accu.reset();
        // pass the accumulator object itself; OpenCV 3's parallel_for_
        // expects a cv::ParallelLoopBody, not a std::function
        cv::parallel_for_(cv::Range(0, px_ref.size()), jaco_accu);
        Matrix6d H = jaco_accu.hessian();
        Vector6d b = jaco_accu.bias();

        // solve update and put it into estimation
        Vector6d update = H.ldlt().solve(b);
        T21 = Sophus::SE3d::exp(update) * T21;
        cost = jaco_accu.cost_func();

        if (std::isnan(update[0])) {
            // sometimes occurs when we have a black or white patch and H is irreversible
            cout << "update is nan" << endl;
            break;
        }
        if (iter > 0 && cost > lastCost) {
            cout << "cost increased: " << cost << ", " << lastCost << endl;
            break;
        }
        if (update.norm() < 1e-3) {
            // converged
            break;
        }

        lastCost = cost;
        cout << "iteration: " << iter << ", cost: " << cost << endl;
    }

    cout << "T21 = \n" << T21.matrix() << endl;
    auto t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "direct method for single layer: " << time_used.count() << endl;

    // plot the projected pixels here
    cv::Mat img2_show;
    cv::cvtColor(img2, img2_show, CV_GRAY2BGR);
    VecVector2d projection = jaco_accu.projected_points();
    for (size_t i = 0; i < px_ref.size(); ++i) {
        auto p_ref = px_ref[i];
        auto p_cur = projection[i];
        if (p_cur[0] > 0 && p_cur[1] > 0) {
            cv::circle(img2_show, cv::Point2f(p_cur[0], p_cur[1]), 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_show, cv::Point2f(p_ref[0], p_ref[1]), cv::Point2f(p_cur[0], p_cur[1]),
                     cv::Scalar(0, 250, 0));
        }
    }
    cv::imshow("current", img2_show);
    cv::waitKey();
}

void DirectPoseEstimationMultiLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {
    // parameters
    int pyramids = 4;
    double pyramid_scale = 0.5;
    double scales[] = {1.0, 0.5, 0.25, 0.125};

    // create pyramids
    vector<cv::Mat> pyr1, pyr2;  // image pyramids
    for (int i = 0; i < pyramids; i++) {
        if (i == 0) {
            pyr1.push_back(img1);
            pyr2.push_back(img2);
        } else {
            cv::Mat img1_pyr, img2_pyr;
            cv::resize(pyr1[i - 1], img1_pyr,
                       cv::Size(pyr1[i - 1].cols * pyramid_scale, pyr1[i - 1].rows * pyramid_scale));
            cv::resize(pyr2[i - 1], img2_pyr,
                       cv::Size(pyr2[i - 1].cols * pyramid_scale, pyr2[i - 1].rows * pyramid_scale));
            pyr1.push_back(img1_pyr);
            pyr2.push_back(img2_pyr);
        }
    }

    double fxG = fx, fyG = fy, cxG = cx, cyG = cy;  // backup the old values
    for (int level = pyramids - 1; level >= 0; level--) {
        VecVector2d px_ref_pyr;  // set the keypoints in this pyramid level
        for (auto &px : px_ref) {
            px_ref_pyr.push_back(scales[level] * px);
        }

        // scale fx, fy, cx, cy in different pyramid levels
        fx = fxG * scales[level];
        fy = fyG * scales[level];
        cx = cxG * scales[level];
        cy = cyG * scales[level];
        DirectPoseEstimationSingleLayer(pyr1[level], pyr2[level], px_ref_pyr, depth_ref, T21);
    }
}
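The key change relative to the book's version: JacobianAccumulator now derives from cv::ParallelLoopBody and implements the const operator()(const cv::Range &) that OpenCV 3's parallel_for_ invokes; H, b, cost, and projection are declared mutable so the const method can update them under hessian_mutex. DirectPoseEstimationSingleLayer then passes the accumulator object itself to cv::parallel_for_ instead of a std::bind expression.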

optical_flow.cpp (after my modifications):

#include <opencv2/opencv.hpp>
#include <string>
#include <chrono>
#include <Eigen/Core>
#include <Eigen/Dense>

using namespace std;
using namespace cv;

string file_1 = "./LK1.png";  // first image
string file_2 = "./LK2.png";  // second image

/**
 * single level optical flow
 * @param [in] img1 the first image
 * @param [in] img2 the second image
 * @param [in] kp1 keypoints in img1
 * @param [in|out] kp2 keypoints in img2, if empty, use initial guess in kp1
 * @param [out] success true if a keypoint is tracked successfully
 * @param [in] inverse use inverse formulation?
 */
void OpticalFlowSingleLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse = false,
    bool has_initial_guess = false
);

/**
 * multi level optical flow, scale of pyramid is set to 2 by default
 * the image pyramid will be created inside the function
 * @param [in] img1 the first pyramid
 * @param [in] img2 the second pyramid
 * @param [in] kp1 keypoints in img1
 * @param [out] kp2 keypoints in img2
 * @param [out] success true if a keypoint is tracked successfully
 * @param [in] inverse set true to enable inverse formulation
 */
void OpticalFlowMultiLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse = false
);

/**
 * get a gray scale value from reference image (bi-linear interpolated)
 * @param img
 * @param x
 * @param y
 * @return the interpolated value of this pixel
 */
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    // boundary check
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols) x = img.cols - 1;
    if (y >= img.rows) y = img.rows - 1;
    uchar *data = &img.data[int(y) * img.step + int(x)];
    float xx = x - floor(x);
    float yy = y - floor(y);
    return float(
        (1 - xx) * (1 - yy) * data[0] +
        xx * (1 - yy) * data[1] +
        (1 - xx) * yy * data[img.step] +
        xx * yy * data[img.step + 1]
    );
}

/// Optical flow tracker and interface;
/// derives from cv::ParallelLoopBody so the object itself can be passed to
/// cv::parallel_for_ on OpenCV 3 (instead of the std::bind expression used in the book)
class OpticalFlowTracker : public cv::ParallelLoopBody {
private:
    const Mat &img1;
    const Mat &img2;
    const vector<KeyPoint> &kp1;
    vector<KeyPoint> &kp2;
    vector<bool> &success;
    bool inverse = true;
    bool has_initial = false;

public:
    OpticalFlowTracker(
        const Mat &img1_,
        const Mat &img2_,
        const vector<KeyPoint> &kp1_,
        vector<KeyPoint> &kp2_,
        vector<bool> &success_,
        bool inverse_ = true, bool has_initial_ = false) :
        img1(img1_), img2(img2_), kp1(kp1_), kp2(kp2_), success(success_), inverse(inverse_),
        has_initial(has_initial_) {}

    /// the original calculateOpticalFlow(range) is replaced by operator() below
    // void calculateOpticalFlow(const Range &range);

    /// track keypoints in a range; called by cv::parallel_for_
    virtual void operator()(const Range &range) const {
        // parameters
        int half_patch_size = 4;
        int iterations = 10;
        for (int i = range.start; i < range.end; i++) {
            auto kp = kp1[i];
            double dx = 0, dy = 0;  // dx, dy need to be estimated
            if (has_initial) {
                dx = kp2[i].pt.x - kp.pt.x;
                dy = kp2[i].pt.y - kp.pt.y;
            }

            double cost = 0, lastCost = 0;
            bool succ = true;  // indicate if this point succeeded

            // Gauss-Newton iterations
            Eigen::Matrix2d H = Eigen::Matrix2d::Zero();  // hessian
            Eigen::Vector2d b = Eigen::Vector2d::Zero();  // bias
            Eigen::Vector2d J;  // jacobian

            for (int iter = 0; iter < iterations; iter++) {
                if (inverse == false) {
                    H = Eigen::Matrix2d::Zero();
                    b = Eigen::Vector2d::Zero();
                } else {
                    // only reset b
                    b = Eigen::Vector2d::Zero();
                }
                cost = 0;

                // compute cost and jacobian
                for (int x = -half_patch_size; x < half_patch_size; x++)
                    for (int y = -half_patch_size; y < half_patch_size; y++) {
                        double error = GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y) -
                                       GetPixelValue(img2, kp.pt.x + x + dx, kp.pt.y + y + dy);
                        if (inverse == false) {
                            J = -1.0 * Eigen::Vector2d(
                                0.5 * (GetPixelValue(img2, kp.pt.x + dx + x + 1, kp.pt.y + dy + y) -
                                       GetPixelValue(img2, kp.pt.x + dx + x - 1, kp.pt.y + dy + y)),
                                0.5 * (GetPixelValue(img2, kp.pt.x + dx + x, kp.pt.y + dy + y + 1) -
                                       GetPixelValue(img2, kp.pt.x + dx + x, kp.pt.y + dy + y - 1))
                            );
                        } else if (iter == 0) {
                            // in inverse mode, J keeps the same for all iterations
                            // NOTE this J does not change when dx, dy is updated, so we can store it and only compute error
                            J = -1.0 * Eigen::Vector2d(
                                0.5 * (GetPixelValue(img1, kp.pt.x + x + 1, kp.pt.y + y) -
                                       GetPixelValue(img1, kp.pt.x + x - 1, kp.pt.y + y)),
                                0.5 * (GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y + 1) -
                                       GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y - 1))
                            );
                        }
                        // compute H, b and set cost
                        b += -error * J;
                        cost += error * error;
                        if (inverse == false || iter == 0) {
                            // also update H
                            H += J * J.transpose();
                        }
                    }

                // compute update
                Eigen::Vector2d update = H.ldlt().solve(b);
                if (std::isnan(update[0])) {
                    // sometimes occurs when we have a black or white patch and H is irreversible
                    cout << "update is nan" << endl;
                    succ = false;
                    break;
                }
                if (iter > 0 && cost > lastCost) {
                    break;
                }

                // update dx, dy
                dx += update[0];
                dy += update[1];
                lastCost = cost;
                succ = true;

                if (update.norm() < 1e-2) {
                    // converged
                    break;
                }
            }

            success[i] = succ;

            // set kp2
            kp2[i].pt = kp.pt + Point2f(dx, dy);
        }
    }
};

int main(int argc, char **argv) {
    // images, note they are CV_8UC1, not CV_8UC3
    Mat img1 = imread(file_1, 0);
    Mat img2 = imread(file_2, 0);

    // key points, using GFTT here
    vector<KeyPoint> kp1;
    Ptr<GFTTDetector> detector = GFTTDetector::create(500, 0.01, 20);  // maximum 500 keypoints
    detector->detect(img1, kp1);

    // now let's track these key points in the second image
    // first use single level LK in the validation picture
    vector<KeyPoint> kp2_single;
    vector<bool> success_single;
    OpticalFlowSingleLevel(img1, img2, kp1, kp2_single, success_single);

    // then test multi-level LK
    vector<KeyPoint> kp2_multi;
    vector<bool> success_multi;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    OpticalFlowMultiLevel(img1, img2, kp1, kp2_multi, success_multi, true);
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optical flow by gauss-newton: " << time_used.count() << endl;

    // use opencv's flow for validation
    vector<Point2f> pt1, pt2;
    for (auto &kp : kp1) pt1.push_back(kp.pt);
    vector<uchar> status;
    vector<float> error;
    t1 = chrono::steady_clock::now();
    cv::calcOpticalFlowPyrLK(img1, img2, pt1, pt2, status, error);
    t2 = chrono::steady_clock::now();
    time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optical flow by opencv: " << time_used.count() << endl;

    // plot the differences of those functions
    Mat img2_single;
    cv::cvtColor(img2, img2_single, CV_GRAY2BGR);
    for (size_t i = 0; i < kp2_single.size(); i++) {
        if (success_single[i]) {
            cv::circle(img2_single, kp2_single[i].pt, 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_single, kp1[i].pt, kp2_single[i].pt, cv::Scalar(0, 250, 0));
        }
    }

    Mat img2_multi;
    cv::cvtColor(img2, img2_multi, CV_GRAY2BGR);
    for (size_t i = 0; i < kp2_multi.size(); i++) {
        if (success_multi[i]) {
            cv::circle(img2_multi, kp2_multi[i].pt, 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_multi, kp1[i].pt, kp2_multi[i].pt, cv::Scalar(0, 250, 0));
        }
    }

    Mat img2_CV;
    cv::cvtColor(img2, img2_CV, CV_GRAY2BGR);
    for (size_t i = 0; i < pt2.size(); i++) {
        if (status[i]) {
            cv::circle(img2_CV, pt2[i], 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_CV, pt1[i], pt2[i], cv::Scalar(0, 250, 0));
        }
    }

    cv::imshow("tracked single level", img2_single);
    cv::imshow("tracked multi level", img2_multi);
    cv::imshow("tracked by opencv", img2_CV);
    cv::waitKey(0);

    return 0;
}

void OpticalFlowSingleLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse, bool has_initial) {
    kp2.resize(kp1.size());
    success.resize(kp1.size());
    OpticalFlowTracker tracker(img1, img2, kp1, kp2, success, inverse, has_initial);
    // pass the tracker object itself; OpenCV 3's parallel_for_
    // expects a cv::ParallelLoopBody, not a std::function
    parallel_for_(Range(0, kp1.size()), tracker);
}

void OpticalFlowMultiLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse) {
    // parameters
    int pyramids = 4;
    double pyramid_scale = 0.5;
    double scales[] = {1.0, 0.5, 0.25, 0.125};

    // create pyramids
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    vector<Mat> pyr1, pyr2;  // image pyramids
    for (int i = 0; i < pyramids; i++) {
        if (i == 0) {
            pyr1.push_back(img1);
            pyr2.push_back(img2);
        } else {
            Mat img1_pyr, img2_pyr;
            cv::resize(pyr1[i - 1], img1_pyr,
                       cv::Size(pyr1[i - 1].cols * pyramid_scale, pyr1[i - 1].rows * pyramid_scale));
            cv::resize(pyr2[i - 1], img2_pyr,
                       cv::Size(pyr2[i - 1].cols * pyramid_scale, pyr2[i - 1].rows * pyramid_scale));
            pyr1.push_back(img1_pyr);
            pyr2.push_back(img2_pyr);
        }
    }
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "build pyramid time: " << time_used.count() << endl;

    // coarse-to-fine LK tracking in pyramids
    vector<KeyPoint> kp1_pyr, kp2_pyr;
    for (auto &kp : kp1) {
        auto kp_top = kp;
        kp_top.pt *= scales[pyramids - 1];
        kp1_pyr.push_back(kp_top);
        kp2_pyr.push_back(kp_top);
    }

    for (int level = pyramids - 1; level >= 0; level--) {
        // from coarse to fine
        success.clear();
        t1 = chrono::steady_clock::now();
        OpticalFlowSingleLevel(pyr1[level], pyr2[level], kp1_pyr, kp2_pyr, success, inverse, true);
        t2 = chrono::steady_clock::now();
        auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
        cout << "track pyr " << level << " cost time: " << time_used.count() << endl;
        if (level > 0) {
            for (auto &kp : kp1_pyr)
                kp.pt /= pyramid_scale;
            for (auto &kp : kp2_pyr)
                kp.pt /= pyramid_scale;
        }
    }

    for (auto &kp : kp2_pyr)
        kp2.push_back(kp);
}
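The change here mirrors direct_method.cpp: OpticalFlowTracker derives from cv::ParallelLoopBody, the body of the original calculateOpticalFlow(range) moves into the const operator(), and OpticalFlowSingleLevel passes the tracker object directly to parallel_for_ instead of a std::bind expression.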

With these two files modified, the code compiles successfully without installing OpenCV 4.

3. Results

Build:
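Assuming the stock slambook2 ch8 CMakeLists.txt, with OpenCV 3, Eigen, Sophus, Pangolin, and Boost installed, the usual CMake workflow applies:

cd ch8
mkdir build
cd build
cmake ..
make
cd ..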

Run optical_flow:

./build/optical_flow 
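optical_flow reads ./LK1.png and ./LK2.png (the hard-coded file_1 and file_2 paths), so run it from the directory containing those images. It prints the timing of each method and opens three windows comparing single-level LK, multi-level LK, and OpenCV's calcOpticalFlowPyrLK.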

Run direct_method:

./build/direct_method
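direct_method reads ./left.png, ./disparity.png, and ./000001.png through ./000005.png (the hard-coded left_file, disparity_file, and fmt_others paths), so it must likewise be run from the directory containing the images. For each frame it prints the cost per iteration and the estimated pose T21 at every pyramid level, and displays the projected points.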
