当前位置:   article > 正文

OpenCV3历程(5)——裂缝的检测与测量_opencv识别裂缝时,一条裂缝断开重新连接

opencv识别裂缝时,一条裂缝断开重新连接

一、开始先介绍几个即将用到的函数及知识点

1、LUT函数

函数简介:

  1. void LUT(
  2. InputArray src, //原始图像的地址;
  3. InputArray lut, //查找表的地址,对于多通道图像的查找,它可以有一个通道,也可以与原始图像有相同的通道;
  4. OutputArray dst //输出图像的地址。
  5. )

函数介绍(单通道为例):

对于8位单通道图片,其像素灰度范围为0-255。假如我们想将图像中某一灰度值换成其他灰度值,用查找表就很方便。

  例如:我们想将一张图片灰度为0-100的像素的灰度变成0,101-200的变成100,201-255的变成255。我们就可以建立如下的一张表格:

 

当把此表格应用到图片时,图片0-100灰度的像素灰度就变成0,101-200的变成100,201-255的就变成255。映射表差不多就是这个意思。

典型用法(借助图像取反示例说明)是:

虽然手动遍历可以达到同样效果,但尽量使用 OpenCV 内置函数。调用LUT 函数可以获得最快的速度,这是因为OpenCV库可以通过英特尔线程架构启用多线程。

  1. //建立查找表
  2. Mat lookUpTable(1, 256, CV_8U);
  3. uchar *p = lookUpTable.data;
  4. for(int i=0; i<256; i++)
  5. p[i]=255-i;
  6. //通过LUT函数实现图像取反
  7. LUT(img1,lookUpTable,img1);

2、saturate_cast防止数据溢出

在OpenCV学习中经常看见saturate_cast的使用。为什么会用到saturate_cast呢?因为无论是加、减、乘、除,运算结果都可能超出一个像素灰度值的范围(0~255)。所以当运算完之后,若结果为负,则转为0;若结果超出255,则取255。另外在梯度锐化的函数里,也会涉及到saturate_cast。示例如下:

代码来自:https://blog.csdn.net/mjlsuccess/article/details/12401839

  1. //使用图像混合例子中的C语言版本演示
  2. for (int i=0; i<src1.rows; i++)
  3. {
  4. const uchar* src1_ptr = src1.ptr<uchar>(i);
  5. const uchar* src2_ptr = src2.ptr<uchar>(i);
  6. uchar* dst_ptr = dst.ptr<uchar>(i);
  7. for (int j=0; j<src1.cols*nChannels; j++)
  8. {
  9. //加溢出保护
  10. dst_ptr[j] = saturate_cast<uchar>(src1_ptr[j]*alpha + src2_ptr[j]*beta + gama);//gama = -100, alpha = beta = 0.5
  11. //不加溢出保护
  12. // dst_ptr[j] = (src1_ptr[j]*alpha + src2_ptr[j]*beta + gama);
  13. }
  14. }
  15. imshow("output",dst);

 

加了溢出保护
没加溢出保护
没加溢出保护

 

 大致的原理应该如下:

  1. if(data<0)
  2. data=0;
  3. else if(data>255)
  4. data=255;

3、std::stack 基本操作

C++ Stack(堆栈)是一个容器适配器(container adapter),为程序员提供了堆栈的全部功能——也就是说实现了一个先进后出(FILO)的数据结构。

c++ stl栈stack的头文件为

#include <stack> 

c++ stl栈stack的成员函数介绍

  1. //操作 比较和分配堆栈
  2. empty() //堆栈为空则返回真
  3. pop() //移除栈顶元素
  4. push() //在栈顶增加元素
  5. size() //返回栈中元素数目
  6. top() //返回栈顶元素

4、C++中在一个类中定义另一个只有带参数构造函数的类的对象

此处参考网址:https://www.cnblogs.com/rednodel/p/5148156.html

  1. #include<iostream>
  2. using namespace std;
  3. class A
  4. {
  5. public:
  6.   A( int i ){}
  7. };
  8. class B {
  9. public:
  10.   B():a(1){}
  11. //或:B( int i ):a( i ){ }。对a提供参数一定要按这种形式,在冒号后,不能在花括号里面!
  12. private:
  13.   A a;
  14. };
  15. void main()
  16. {
  17.   B b;
  18. }

 5、为什么要定义Mat_类

在读取矩阵元素时,或获取矩阵某行的地址时,需要指定数据类型。这样首先需要不停地写“<uchar>”,让人感觉很繁琐,在繁琐和烦躁中容易犯错,如下面代码中的错误,用at()获取矩阵元素时错误地使用了double类型。这种错误不是语法错误,因此在编译时编译器不会提醒。在程序运行时,at()函数获取到的不是期望的(i,j)位置处的元素,数据已经越界,但是运行时也未必会报错。这样的错误使得你的程序忽而看上去正常,忽而弹出“段错误”,特别是在代码规模很大时,难以查错。

如果使用Mat_类,那么就可以在变量声明时确定元素的类型,访问元素时不再需要指定元素类型,既使得代码简洁,又减少了出错的可能性。上面代码可以用Mat_实现,实现代码如下面例程里的第二个双重for循环。

  1. #include <iostream>
  2. #include "opencv2/opencv.hpp"
  3. #include<stdio.h>
  4. using namespace std;
  5. using namespace cv;
  6. int main(int argc,char* argv[])
  7. {
  8. Mat M(600,800,CV_8UC1);
  9. for(int i=0;i<M.rows;++i){
  10. //获取指针时需要指定类型
  11. uchar *p=M.ptr<uchar>(i);
  12. for(int j=0;j<M.cols;++j){
  13. double d1=(double)((i+j)%255);
  14. //用at读像素时,需要指定类型
  15. M.at<uchar>(i,j)=d1;
  16. double d2=M.at<uchar>(i,j);
  17. }
  18. }
  19. //在变量声明时,指定矩阵元素类型
  20. Mat_<uchar> M1=(Mat_<uchar>&)M;
  21. for(int i=0;i<M1.rows;++i)
  22. {
  23. //不需要指定元素类型,语言简洁
  24. uchar *p=M1.ptr(i);
  25. for(int j=0;j<M1.cols;++j){
  26. double d1=(double)((i+j)%255);
  27. //直接使用matlab风格的矩阵元素读写,简洁
  28. M1(i,j)=d1;
  29. double d2=M1(i,j);
  30. }
  31. }
  32. return 0;
  33. }

二、实例裂缝检测源码

这里的代码来自(表示感谢):https://blog.csdn.net/FunnyWhiteCat/article/details/81387561

首先看原图:

 处理流程思路:

  1. 图像灰度化
  2. 增加对比度
  3. Canny边缘检测
  4. 用形态学连接临近裂缝
  5. 找出所有连通域,删除非裂缝噪点区域
  6. 对每个连通域提取骨架,测量长度和宽度

源码:

  1. #include <opencv2\opencv.hpp>
  2. #include <iostream>
  3. #include <stack>
  4. using namespace cv;
  5. using namespace std;
  6. //c++中在一个类中定义另一个只有带参数构造函数的类的对象
  7. class CrackInfo
  8. {
  9. public:
  10. CrackInfo(Point& position, long length, float width) {};
  11. };
  12. /* 增加对比度 */
  13. void addContrast(Mat & srcImg);
  14. /* 交换两个Mat */
  15. void swapMat(Mat & srcImg, Mat & dstImg);
  16. /* 二值化图像。0->0,非0->255 */
  17. void binaryzation(Mat & srcImg);
  18. /* 检测连通域,并删除不符合条件的连通域 */
  19. void findConnectedDomain(Mat & srcImg, vector<vector<Point>>& connectedDomains, int area, int WHRatio);
  20. /* 提取连通域的骨架 */
  21. void thinImage(Mat & srcImg);
  22. /* 获取图像中白点的数量 */
  23. void getWhitePoints(Mat &srcImg, vector<Point>& domain);
  24. /* 计算宽高信息的放置位置 */
  25. Point calInfoPosition(int imgRows, int imgCols, int padding, const std::vector<cv::Point>& domain);
  26. int main(int argc, char** argv) {
  27. Mat srcImg = imread("./image/20180803215201452.jpg");
  28. Mat dstImg, dstImg2;
  29. //灰度化
  30. cvtColor(srcImg, dstImg, CV_BGR2GRAY, 1);
  31. //增加对比度
  32. addContrast(dstImg);
  33. //图像交换
  34. swapMat(srcImg, dstImg);
  35. //边缘检测
  36. Canny(srcImg, dstImg, 50, 150);
  37. //形态学变换
  38. Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
  39. dilate(dstImg, dstImg, kernel);//膨胀
  40. morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel, Point(-1, -1), 3);
  41. morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel);
  42. //寻找连通域
  43. vector<vector<Point>> connectedDomains;
  44. findConnectedDomain(dstImg, connectedDomains, 20, 3);
  45. kernel = getStructuringElement(MORPH_ELLIPSE, Size(7, 7));
  46. morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel, Point(-1, -1), 5);
  47. connectedDomains.clear();
  48. findConnectedDomain(dstImg, connectedDomains, 20, 3);
  49. kernel = getStructuringElement(MORPH_CROSS, Size(3, 3));
  50. morphologyEx(dstImg, dstImg, CV_MOP_OPEN, kernel);
  51. kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
  52. erode(dstImg, dstImg, kernel);
  53. connectedDomains.clear();
  54. findConnectedDomain(dstImg, connectedDomains, 20, 3);
  55. cout << "开始测量" << endl;
  56. cout << "连通域数量:" << connectedDomains.size() << endl;
  57. Mat lookUpTable(1, 256, CV_8U, Scalar(0));
  58. vector<CrackInfo> crackInfos;
  59. for (auto domain_it = connectedDomains.begin(); domain_it != connectedDomains.end(); ++domain_it) {
  60. LUT(dstImg, lookUpTable, dstImg);
  61. for (auto point_it = domain_it->cbegin(); point_it != domain_it->cend(); ++point_it) {
  62. dstImg.ptr<uchar>(point_it->y)[point_it->x] = 255;
  63. }
  64. double area = (double)domain_it->size();
  65. thinImage(dstImg);
  66. getWhitePoints(dstImg, *domain_it);
  67. long length = (long)domain_it->size();
  68. Point position = calInfoPosition(dstImg.rows, dstImg.cols, 50, *domain_it);
  69. crackInfos.push_back(CrackInfo(position, length, (float)(area / length)));
  70. }
  71. cout << "开始绘制信息" << endl;
  72. cout << "信息数量:" << crackInfos.size() << endl;
  73. LUT(dstImg, lookUpTable, dstImg);
  74. for (auto domain_it = connectedDomains.cbegin(); domain_it != connectedDomains.cend(); ++domain_it) {
  75. for (auto point_it = domain_it->cbegin(); point_it != domain_it->cend(); ++point_it) {
  76. dstImg.ptr<uchar>(point_it->y)[point_it->x] = 255;
  77. }
  78. }
  79. //ostringstream info;
  80. //for (auto it = crackInfos.cbegin(); it != crackInfos.cend(); ++it) {
  81. // info.str("");
  82. // info << *it;
  83. // putText(dstImg, info.str(), it->Position, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255));
  84. //}
  85. imwrite("result1.png", dstImg);
  86. cout << "保存图像完成" << endl;
  87. return 0;
  88. }
  89. /*利用查找表(Look-up table)增加图像对比度*/
  90. void addContrast(Mat & srcImg) {
  91. Mat lookUpTable(1, 256, CV_8U);
  92. double temp = pow(1.1, 5);
  93. uchar* p = lookUpTable.data;
  94. for (int i = 0; i < 256; ++i)
  95. p[i] = saturate_cast<uchar>(i * temp);
  96. LUT(srcImg, lookUpTable, srcImg);
  97. }
  98. /*图像交换*/
  99. void swapMat(Mat & srcImg, Mat & dstImg) {
  100. Mat tempImg = srcImg;
  101. srcImg = dstImg;
  102. dstImg = tempImg;
  103. }
  104. /* 检测连通域,并删除不符合条件的连通域 */
  105. void findConnectedDomain(Mat & srcImg, vector<vector<Point>>& connectedDomains, int area, int WHRatio) {
  106. Mat_<uchar> tempImg = (Mat_<uchar> &)srcImg;
  107. for (int i = 0; i < tempImg.rows; ++i) {
  108. uchar* row = tempImg.ptr(i); 调取存储图像内存的第i行的指针
  109. for (int j = 0; j < tempImg.cols; ++j) {
  110. if (row[j] == 255) {
  111. stack<Point> connectedPoints;
  112. vector<Point> domain;
  113. connectedPoints.push(Point(j, i));
  114. while (!connectedPoints.empty()) {
  115. Point currentPoint = connectedPoints.top();
  116. domain.push_back(currentPoint);
  117. int colNum = currentPoint.x;
  118. int rowNum = currentPoint.y;
  119. tempImg.ptr(rowNum)[colNum] = 0;
  120. connectedPoints.pop();
  121. if (rowNum - 1 >= 0 && colNum - 1 >= 0 && tempImg.ptr(rowNum - 1)[colNum - 1] == 255) {
  122. tempImg.ptr(rowNum - 1)[colNum - 1] = 0;
  123. connectedPoints.push(Point(colNum - 1, rowNum - 1));
  124. }
  125. if (rowNum - 1 >= 0 && tempImg.ptr(rowNum - 1)[colNum] == 255) {
  126. tempImg.ptr(rowNum - 1)[colNum] = 0;
  127. connectedPoints.push(Point(colNum, rowNum - 1));
  128. }
  129. if (rowNum - 1 >= 0 && colNum + 1 < tempImg.cols && tempImg.ptr(rowNum - 1)[colNum + 1] == 255) {
  130. tempImg.ptr(rowNum - 1)[colNum + 1] = 0;
  131. connectedPoints.push(Point(colNum + 1, rowNum - 1));
  132. }
  133. if (colNum - 1 >= 0 && tempImg.ptr(rowNum)[colNum - 1] == 255) {
  134. tempImg.ptr(rowNum)[colNum - 1] = 0;
  135. connectedPoints.push(Point(colNum - 1, rowNum));
  136. }
  137. if (colNum + 1 < tempImg.cols && tempImg.ptr(rowNum)[colNum + 1] == 255) {
  138. tempImg.ptr(rowNum)[colNum + 1] = 0;
  139. connectedPoints.push(Point(colNum + 1, rowNum));
  140. }
  141. if (rowNum + 1 < tempImg.rows && colNum - 1 > 0 && tempImg.ptr(rowNum + 1)[colNum - 1] == 255) {
  142. tempImg.ptr(rowNum + 1)[colNum - 1] = 0;
  143. connectedPoints.push(Point(colNum - 1, rowNum + 1));
  144. }
  145. if (rowNum + 1 < tempImg.rows && tempImg.ptr(rowNum + 1)[colNum] == 255) {
  146. tempImg.ptr(rowNum + 1)[colNum] = 0;
  147. connectedPoints.push(Point(colNum, rowNum + 1));
  148. }
  149. if (rowNum + 1 < tempImg.rows && colNum + 1 < tempImg.cols && tempImg.ptr(rowNum + 1)[colNum + 1] == 255) {
  150. tempImg.ptr(rowNum + 1)[colNum + 1] = 0;
  151. connectedPoints.push(Point(colNum + 1, rowNum + 1));
  152. }
  153. }
  154. if (domain.size() > area) {
  155. RotatedRect rect = minAreaRect(domain);
  156. float width = rect.size.width;
  157. float height = rect.size.height;
  158. if (width < height) {
  159. float temp = width;
  160. width = height;
  161. height = temp;
  162. }
  163. if (width > height * WHRatio && width > 50) {
  164. for (auto cit = domain.begin(); cit != domain.end(); ++cit) {
  165. tempImg.ptr(cit->y)[cit->x] = 250;
  166. }
  167. connectedDomains.push_back(domain);
  168. }
  169. }
  170. }
  171. }
  172. }
  173. binaryzation(srcImg);
  174. }
  175. /* 二值化图像。0->0,非0->255 */
  176. void binaryzation(Mat & srcImg) {
  177. Mat lookUpTable(1, 256, CV_8U, Scalar(255));
  178. lookUpTable.data[0] = 0;
  179. LUT(srcImg, lookUpTable, srcImg);
  180. }
  181. /* 提取连通域的骨架 */
  182. void thinImage(Mat & srcImg) {
  183. vector<Point> deleteList;
  184. int neighbourhood[9];
  185. int nl = srcImg.rows;
  186. int nc = srcImg.cols;
  187. bool inOddIterations = true;
  188. while (true) {
  189. for (int j = 1; j < (nl - 1); j++) {
  190. uchar* data_last = srcImg.ptr<uchar>(j - 1);
  191. uchar* data = srcImg.ptr<uchar>(j);
  192. uchar* data_next = srcImg.ptr<uchar>(j + 1);
  193. for (int i = 1; i < (nc - 1); i++) {
  194. if (data[i] == 255) {
  195. int whitePointCount = 0;
  196. neighbourhood[0] = 1;
  197. if (data_last[i] == 255) neighbourhood[1] = 1;
  198. else neighbourhood[1] = 0;
  199. if (data_last[i + 1] == 255) neighbourhood[2] = 1;
  200. else neighbourhood[2] = 0;
  201. if (data[i + 1] == 255) neighbourhood[3] = 1;
  202. else neighbourhood[3] = 0;
  203. if (data_next[i + 1] == 255) neighbourhood[4] = 1;
  204. else neighbourhood[4] = 0;
  205. if (data_next[i] == 255) neighbourhood[5] = 1;
  206. else neighbourhood[5] = 0;
  207. if (data_next[i - 1] == 255) neighbourhood[6] = 1;
  208. else neighbourhood[6] = 0;
  209. if (data[i - 1] == 255) neighbourhood[7] = 1;
  210. else neighbourhood[7] = 0;
  211. if (data_last[i - 1] == 255) neighbourhood[8] = 1;
  212. else neighbourhood[8] = 0;
  213. for (int k = 1; k < 9; k++) {
  214. whitePointCount = whitePointCount + neighbourhood[k];
  215. }
  216. if ((whitePointCount >= 2) && (whitePointCount <= 6)) {
  217. int ap = 0;
  218. if ((neighbourhood[1] == 0) && (neighbourhood[2] == 1)) ap++;
  219. if ((neighbourhood[2] == 0) && (neighbourhood[3] == 1)) ap++;
  220. if ((neighbourhood[3] == 0) && (neighbourhood[4] == 1)) ap++;
  221. if ((neighbourhood[4] == 0) && (neighbourhood[5] == 1)) ap++;
  222. if ((neighbourhood[5] == 0) && (neighbourhood[6] == 1)) ap++;
  223. if ((neighbourhood[6] == 0) && (neighbourhood[7] == 1)) ap++;
  224. if ((neighbourhood[7] == 0) && (neighbourhood[8] == 1)) ap++;
  225. if ((neighbourhood[8] == 0) && (neighbourhood[1] == 1)) ap++;
  226. if (ap == 1) {
  227. if (inOddIterations && (neighbourhood[3] * neighbourhood[5] * neighbourhood[7] == 0)
  228. && (neighbourhood[1] * neighbourhood[3] * neighbourhood[5] == 0)) {
  229. deleteList.push_back(Point(i, j));
  230. }
  231. else if (!inOddIterations && (neighbourhood[1] * neighbourhood[5] * neighbourhood[7] == 0)
  232. && (neighbourhood[1] * neighbourhood[3] * neighbourhood[7] == 0)) {
  233. deleteList.push_back(Point(i, j));
  234. }
  235. }
  236. }
  237. }
  238. }
  239. }
  240. if (deleteList.size() == 0)
  241. break;
  242. for (size_t i = 0; i < deleteList.size(); i++) {
  243. Point tem;
  244. tem = deleteList[i];
  245. uchar* data = srcImg.ptr<uchar>(tem.y);
  246. data[tem.x] = 0;
  247. }
  248. deleteList.clear();
  249. inOddIterations = !inOddIterations;
  250. }
  251. }
  252. /* 获取图像中白点的数量 */
  253. void getWhitePoints(Mat &srcImg, vector<Point>& domain) {
  254. domain.clear();
  255. Mat_<uchar> tempImg = (Mat_<uchar> &)srcImg;
  256. for (int i = 0; i < tempImg.rows; i++) {
  257. uchar * row = tempImg.ptr<uchar>(i);
  258. for (int j = 0; j < tempImg.cols; ++j) {
  259. if (row[j] != 0)
  260. domain.push_back(Point(j, i));
  261. }
  262. }
  263. }
  264. /* 计算宽高信息的放置位置 */
  265. Point calInfoPosition(int imgRows, int imgCols, int padding, const std::vector<cv::Point>& domain) {
  266. long xSum = 0;
  267. long ySum = 0;
  268. for (auto it = domain.cbegin(); it != domain.cend(); ++it) {
  269. xSum += it->x;
  270. ySum += it->y;
  271. }
  272. int x = 0;
  273. int y = 0;
  274. x = (int)(xSum / domain.size());
  275. y = (int)(ySum / domain.size());
  276. if (x < padding)
  277. x = padding;
  278. if (x > imgCols - padding)
  279. x = imgCols - padding;
  280. if (y < padding)
  281. y = padding;
  282. if (y > imgRows - padding)
  283. y = imgRows - padding;
  284. return cv::Point(x, y);
  285. }

处理结果:

重叠结果

待优化......

 

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/Li_阴宅/article/detail/825592
推荐阅读
相关标签
  

闽ICP备14008679号