C++写法:
// Load an image, print its bit depth id and channel count, and display
// it in a fixed-size window until a key is pressed.
void test2()
{
Mat img = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
// depth() is the per-element depth id (0 = CV_8U), channels() the channel count.
cout<<img.depth()<<" "<<img.channels()<<endl;
namedWindow("new",cv::WINDOW_AUTOSIZE);
imshow("new",img);
waitKey(0);
destroyAllWindows();
}
python写法:
def test():
    """Open a resizable window, display an image, close on 'q'.

    Bug fix: the original passed the integer 0 to cv2.imshow(), which
    expects an image (numpy array) and raises an error; load an image
    instead, consistent with the other examples in this file.
    """
    cv2.namedWindow('new', cv2.WINDOW_NORMAL)
    img = cv2.imread('./1.png')
    cv2.imshow('new', img)
    key = cv2.waitKey(0)
    if key & 0xFF == ord('q'):
        cv2.destroyAllWindows()
c++写法,操作图像,将图像转换为HSV后调整亮度值,再转换回BGR格式显示
// Show the image, its grayscale version, and its HSV version; then raise
// the brightness (V channel) in HSV space and convert back to BGR to display.
void test2()
{
Mat img = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
cout<<img.depth()<<" "<<img.channels()<<endl;
namedWindow("new",cv::WINDOW_AUTOSIZE);
imshow("new",img);
waitKey(0);
Mat gray,hsv;
cvtColor(img,gray,cv::COLOR_BGR2GRAY);
imshow("new",gray);
waitKey(0);
cvtColor(img,hsv,cv::COLOR_BGR2HSV);
imshow("new",hsv);
waitKey(0);
Mat channels[3];
// Split the HSV image into its three single-channel planes (H, S, V).
split(hsv,channels);
int bright = 50;
// For CV_8U Mats, operator+= already saturates at 255.
channels[2] += bright;
// Clamp the brightness channel so values above 255 are truncated to 255.
// NOTE(review): redundant for CV_8U data, which was saturated above already.
threshold(channels[2],channels[2],255,255,cv::THRESH_TRUNC);
// Merge the modified planes back into a single HSV image.
cv::merge(channels, 3, hsv);
Mat outputImage;
cv::cvtColor(hsv, outputImage, cv::COLOR_HSV2BGR);
imshow("new",outputImage);
waitKey(0);
destroyAllWindows();
}
@param filename Name of file to be loaded.
@param flags Flag that can take values of cv::ImreadModes
CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
flags标识符介绍:
IMREAD_GRAYSCALE = 0, 灰度图
IMREAD_COLOR = 1,原本是什么颜色就显示什么颜色
def test2():
    """Display an image in a fixed-size window, then save a PNG copy."""
    cv2.namedWindow('new', cv2.WINDOW_AUTOSIZE)
    picture = cv2.imread(img_name, 1)
    cv2.imshow('new', picture)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Writing the file still works after the window is closed.
    cv2.imwrite('1.png', picture)
def test3():
    """Stream frames from the default camera until 'q' is pressed."""
    # cv2.namedWindow('new', cv2.WINDOW_AUTOSIZE)
    # Open the default capture device.
    capture = cv2.VideoCapture(0)
    while True:
        ret, frame = capture.read()
        cv2.imshow('video', frame)
        if (cv2.waitKey(10) & 0xFF) == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
录制视频
def test4():
    """Record webcam video to ./out.mp4 until 'q' is pressed or capture fails.

    Bug fix: the original called cv2.release(), which does not exist
    (AttributeError); the VideoWriter must be released via vw.release().
    """
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    vw = cv2.VideoWriter('./out.mp4', fourcc, 25, (1920, 1080))
    cap = cv2.VideoCapture(0)
    # Loop only while the camera is actually open.
    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow('video', frame)
            # Append the frame to the media file.
            vw.write(frame)
            key = cv2.waitKey(10)
            if (key & 0xFF) == ord('q'):
                break
        else:
            break
    cap.release()
    vw.release()
    cv2.destroyAllWindows()
import cv2
# 色彩空间
# hsv:色相、饱和度、明亮度
# yuv:4:2:0 4:2:2 4:4:4
def callback(value=None):
    """No-op trackbar callback.

    Bug fix: cv2.createTrackbar invokes the callback with the current
    slider position; the original zero-argument signature would raise a
    TypeError on every slider move. The default value keeps plain
    callback() calls working too (backward compatible).
    """
    pass
def test7():
    """Cycle an image through several color-space conversions via a trackbar.

    Bug fixes vs. the original:
    - cv2.COLOR_BGR2BRGA and cv2.COLOR_BGR2HSV_FULLL are misspelled
      constants (AttributeError); use COLOR_BGR2BGRA / COLOR_BGR2HSV_FULL.
    - The trackbar maximum must be len(colorspaces) - 1, otherwise the
      last position indexes past the end of the list.
    - Show the converted image (cvt_img), not the untouched original.
    """
    cv2.namedWindow('color', cv2.WINDOW_NORMAL)
    img = cv2.imread('./1.png')
    colorspaces = [cv2.COLOR_BGR2RGBA, cv2.COLOR_BGR2BGRA,
                   cv2.COLOR_BGR2GRAY, cv2.COLOR_BGR2HSV_FULL,
                   cv2.COLOR_BGR2YUV]
    cv2.createTrackbar('curcolor', 'color', 0, len(colorspaces) - 1, callback)
    while True:
        index = cv2.getTrackbarPos('curcolor', 'color')
        cvt_img = cv2.cvtColor(img, colorspaces[index])
        cv2.imshow('color', cvt_img)
        key = cv2.waitKey(10)
        if key & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
# Run the trackbar demo when executed as a script.
if __name__ == '__main__':
    test7()
深拷贝和浅拷贝
// Demonstrates deep copies of a Mat and ways to create/fill blank images.
void test3()
{
Mat img,m1,m2;
img = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
// clone(): deep copy.
m1 = img.clone();
// copyTo(): also a deep copy.
img.copyTo(m2);
// Create a blank (all-zero) single-channel image.
Mat m3 = Mat::zeros(Size(8,8),CV_8UC1);
// Mat::ones with 3 channels sets only the FIRST channel of each pixel to 1.
Mat m4 = Mat::ones(Size(8,8),CV_8UC3);
cout<<m3.cols<<m3.rows<<m3.channels()<<endl;
// Assigning a plain int: with 3 channels, only the first channel is set.
m4 = 127;
// Assigning a Scalar sets all three channels.
m4 = Scalar(127,127,127);
}
像素值操作:
1、根据下标操作
2、根据指针操作
void test4()
{
// 像素遍历操作
namedWindow("new",WINDOW_NORMAL);
Mat img,gray;
img = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
Vec3b bgr = img.at<Vec3b>(i,j);
img.at<Vec3b>(i,j)[0] = 255 - bgr[0];
img.at<Vec3b>(i,j)[1] = 255 - bgr[1];
img.at<Vec3b>(i,j)[2] = 255 - bgr[2];
}
}
imshow("new",img);
waitKey(0);
cvtColor(img,gray,cv::COLOR_BGR2GRAY);
for(int i=0;i<gray.rows;i++){
for(int j=0;j<gray.cols;j++){
int pt = img.at<uchar>(i,j);
img.at<uchar>(i,j) = 255 - pt;
}
}
imshow("new",gray);
waitKey(0);
// 基于指针访问 速度更快
for(int i=0;i<img.rows;i++){
uchar *current_row = img.ptr<uchar>(i);
for(int j=0;j<img.cols;j++){
if(img.channels() == 1){
int pt = *current_row;
*current_row++ = 255 - pt;
}else if(img.channels() == 3){
*current_row++ = 255 - *current_row;
*current_row++ = 255 - *current_row;
*current_row++ = 255 - *current_row;
}
}
}
destroyAllWindows();
}
像素值的加减乘除
// Per-pixel arithmetic demo: multiply/add/subtract/divide the image by a
// constant matrix. Each call overwrites `dst`, so only the divide result
// is actually displayed.
void test5()
{
// Pixel arithmetic.
namedWindow("new",WINDOW_NORMAL);
Mat img,gray;
img = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
Mat m = Mat::zeros(img.size(),img.type());
m = Scalar(2,2,2);
Mat dst;
// Multiplication saturates above 255 for CV_8U.
multiply(img,m,dst);
// Addition.
add(img,m,dst);
// Subtraction.
subtract(img,m,dst);
// Division.
divide(img,m,dst);
// saturate_cast clamps a value into the uchar range.
// saturate_cast<uchar>(p1,p2);
imshow("new",dst);
waitKey(0);
destroyAllWindows();
}
GUI操作
// Trackbar callback: adds `val` to every pixel of the source image
// (passed through userdata) and shows the brightened result.
void on_trac(int val,void *userdata)
{
Mat *data = (Mat*)userdata;
Mat m = Mat::zeros(data->size(),data->type());
m = Scalar(val,val,val);
Mat dst;
// add() saturates, so bright pixels clamp at 255.
add(*data,m,dst);
imshow("new",dst);
}
void test6()
{
namedWindow("new",WINDOW_AUTOSIZE);
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
int maxValue = 100;
int light = 50;
cout<<src.type()<<endl;
createTrackbar("Value Bar","new",&light,maxValue,on_trac,(void *)(&src));
on_trac(50,&src);
}
addWeighted函数解释
dst = src1 * alpha + src2 * beta + gamma
// Bitwise pixel operations demonstrated on two overlapping filled rectangles.
void test7()
{
Mat m1 = Mat::zeros(Size(255,255), CV_8UC3);
Mat m2 = Mat::zeros(Size(255,255), CV_8UC3);
// Thickness -1 draws filled rectangles.
rectangle(m1,Rect(100,100,80,80),Scalar(255,255,0),-1,LINE_8,0);
rectangle(m2,Rect(130,130,80,80),Scalar(255,0,255),-1,LINE_8,0);
imshow("new",m1);
waitKey(0);
imshow("new",m2);
waitKey(0);
Mat dst;
// Bitwise AND (set only where both inputs are set).
bitwise_and(m1,m2,dst);
imshow("new",dst);
waitKey(0);
// Bitwise OR (set where either input is set).
bitwise_or(m1,m2,dst);
imshow("new",dst);
waitKey(0);
// Bitwise NOT (invert).
bitwise_not(m1,dst);
imshow("new",dst);
waitKey(0);
// Bitwise XOR (set where the inputs differ).
bitwise_xor(m1,m2,dst);
imshow("new",dst);
waitKey(0);
destroyAllWindows();
}
// Channel split / merge and channel remapping with mixChannels.
void test8()
{
namedWindow("new",WINDOW_AUTOSIZE);
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
imshow("new",src);
waitKey(0);
vector<Mat> mv;
split(src,mv);
// mv[0]/mv[1]/mv[2] are the B/G/R planes, shown here as grayscale images.
imshow("new",mv[0]);
waitKey(0);
imshow("new",mv[1]);
waitKey(0);
imshow("new",mv[2]);
waitKey(0);
// Zero the blue and green channels, keep only red, then merge back.
mv[0] = 0;
mv[1] = 0;
Mat dst;
merge(mv,dst);
imshow("new",dst);
waitKey(0);
// Channel remapping.
// Pairs in from_to: src ch0 -> dst ch2, ch1 -> ch1, ch2 -> ch0 (B/R swap).
int from_to[] = {0,2,1,1,2,0};
mixChannels(&src,1,&dst,1,from_to,3);
imshow("new",dst);
waitKey(0);
}
// Color-space based background replacement (green screen -> red background).
void test9()
{
namedWindow("new",WINDOW_AUTOSIZE);
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/green.jpg");
Mat dst,mask;
cvtColor(src,dst,COLOR_BGR2HSV);
// Produces a binary image: the green background (HSV range below)
// becomes white, the person black.
inRange(dst,Scalar(35,43,46),Scalar(77,255,255),mask);
Mat redback = Mat::zeros(src.size(),src.type());
redback = Scalar(40,40,200);
// After inversion the person is white and the background black.
bitwise_not(mask,mask);
// copyTo with a mask copies only where the mask is non-zero.
src.copyTo(redback,mask);
imshow("new",redback);
waitKey(0);
}
// 最大值、最小值
// 均值、标准差
// 标准差衡量数据集中值的离散程度:数据差异较小时标准差较小,差异较大时标准差较大(携带的有效信息就越多)
// 1、计算出均值
// 2、计算每个元素与均值之差的平方
// 3、求这些平方差的平均值(平方差相加后除以数据点总数),得到方差
// 4、对方差取平方根,得到标准差
// Min/max values and their locations, plus mean and standard deviation,
// of a grayscale image.
void test10()
{
namedWindow("new",WINDOW_AUTOSIZE);
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/green.jpg",cv::IMREAD_GRAYSCALE);
double minv,maxv;
Point minLoc,maxLoc;
minMaxLoc(src,&minv,&maxv,&minLoc,&maxLoc,Mat());
cout<<"minv:"<<minv<<" maxv:"<<maxv<<" minLoc:"<<minLoc<<" maxLoc:"<<maxLoc<<endl;
Mat mean,stddev;
meanStdDev(src,mean,stddev);
cout<<mean<<" "<<stddev<<endl;
}
// Basic drawing primitives plus image blending with addWeighted.
void test11()
{
namedWindow("new",WINDOW_AUTOSIZE);
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/green.jpg",cv::IMREAD_COLOR);
// Draw a rectangle on a black canvas.
Mat bg = Mat::zeros(src.size(),src.type());
Mat dst;
Rect rect(100,100,200,200);
rectangle(bg,rect,Scalar(20,20,255),2,LINE_8,0);
// Draw a circle: center Point(350,400), radius 150.
circle(src,Point(350,400),150,Scalar(255,0,0),2,LINE_8,0);
// Blend: dst = 0.7*src + 0.3*bg.
addWeighted(src,0.7,bg,0.3,0,dst);
// NOTE(review): the line and ellipse below are drawn AFTER the blend,
// on bg and src respectively, so they never appear in the displayed dst.
line(bg,Point(100,100),Point(350,400),Scalar(0,255,0),2,LINE_8,0);
// Draw an ellipse from a rotated rect.
RotatedRect rrt;
rrt.center = Point(200,200);
rrt.size = Size(100,200);
rrt.angle = 0;
ellipse(src,rrt,Scalar(0,255,255),2,8);
imshow("new",dst);
waitKey(0);
}
// Draw a closed five-point polygon outline on a black canvas.
void test13()
{
Mat bg = Mat::zeros(Size(512,512),CV_8UC3);
// Vertices in drawing order.
vector<Point> pts;
pts.push_back(Point(100,200));
pts.push_back(Point(350,100));
pts.push_back(Point(450,280));
pts.push_back(Point(320,450));
pts.push_back(Point(80,400));
// Closed red polyline, 2 px thick.
polylines(bg,pts, true,Scalar(0,0,255),2,LINE_8,0);
// Filled variant:
// fillPoly()
imshow("new",bg);
waitKey(0);
}
// Draw one random colored line per frame until ESC (27) is pressed.
void test12()
{
Mat bg = Mat::zeros(Size(512,512),CV_8UC3);
int w = bg.cols;
int h = bg.rows;
// Random number generator with a fixed seed.
RNG rng(12345);
while (true){
int c = waitKey(10);
if(c == 27) break;
int x1 = rng.uniform(0,w);
int y1 = rng.uniform(0,h);
int x2 = rng.uniform(0,w);
int y2 = rng.uniform(0,h);
// Clear the canvas before each draw.
bg = Scalar(0,0,0);
line(bg,Point(x1,y1),Point(x2,y2),
Scalar(rng.uniform(0,255)
,rng.uniform(0,255),rng.uniform(0,255)),1,LINE_AA,0);
imshow("new",bg);
}
}
// Rubber-band rectangle selection state: drag start/end points and a
// pristine copy of the image used to erase the previous preview.
Point sp(-1, -1);
Point ed(-1, -1);
Mat temp;
// Mouse callback: drag with the left button to select a ROI; on release
// the ROI is shown in its own window and outlined on the image.
static void on_draw(int event, int x, int y, int flags, void* userdata)
{
Mat img = *((Mat*)userdata);
if (event == EVENT_LBUTTONDOWN) {
// Record the drag start point.
sp.x = x;
sp.y = y;
cout << "sp.x:" << sp.x << " sp.y:" << sp.y << endl;
}
else if (event == EVENT_LBUTTONUP) {
ed.x = x;
ed.y = y;
cout << "ed.x:" << ed.x << " ed.y:" << ed.y << endl;
int x1 = x - sp.x;
int y1 = y - sp.y;
// Only handle top-left -> bottom-right drags.
if (x1 > 0 && y1 > 0) {
Rect rect(sp.x, sp.y, x1, y1);
// Restore the clean image, show the selected ROI, then outline it.
temp.copyTo(img);
imshow("ROI区域", img(rect));
rectangle(img,rect,Scalar(0,0,255),2,8,0);
imshow("new", img);
// Reset so mouse-move no longer draws a preview.
sp.x = -1;
sp.y = -1;
}
}
else if (event == EVENT_MOUSEMOVE) {
// While dragging, redraw the preview rectangle on a clean copy.
if (sp.x > 0 && sp.y > 0) {
ed.x = x;
ed.y = y;
int dx = ed.x - sp.x;
int dy = ed.y - sp.y;
if (dx > 0 && dy > 0) {
Rect box(sp.x, sp.y, dx, dy);
temp.copyTo(img);
rectangle(img, box, Scalar(0, 0, 255), 2, 8, 0);
imshow("new", img);
}
}
}
}
// Interactive ROI selection demo: registers on_draw on the "new" window.
void test14()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
setMouseCallback("new", on_draw, (void*)&src);
// Keep an untouched copy for on_draw to restore from.
src.copyTo(temp);
imshow("new", src);
waitKey(0);
destroyAllWindows();
}
13、normalize归一化原理解释
函数原型,归一化类型,常见的有NORM_L1, NORM_L2, NORM_INF, NORM_MINMAX,不同的norm计算方式不一样
注意:
alpha:如果norm_type为NORM_MINMAX ,则alpha为最小值或最大值;如果norm_type为其他类型,则为归一化要乘的系数
beta:如果norm_type为NORM_MINMAX ,则beta为最小值或最大值;如果norm_type为其他类型,beta被忽略,此处不会被用到,一般传入0【重点】
void cv::normalize (
InputArray src,
InputOutputArray dst,
double alpha = 1,
double beta = 0,
int norm_type = NORM_L2,
int dtype = -1,
InputArray mask = noArray()
)
13.1、NORM_L1
输入值:
[2, 3, 4;
3, 4, 5;
4, 5, 6]
输出结果:
[0.055555556, 0.083333336, 0.11111111;
0.083333336, 0.11111111, 0.1388889;
0.11111111, 0.1388889, 0.16666667]
1、先求出norm
公式:L1_norm = ∥src∥L1 = ∑I|src(I)|,即各元素的绝对值之和
2+3+4+3+4+5+4+5+6 = 36
2、再求dst
公式:dst = alpha * src / L1_norm
dst[0] = 1 * 2 / 36 = 0.055555556
应用场景:稀疏编码、特征选择、压缩感知。
用途:L1规范化倾向于产生稀疏的解,即大部分元素为零,少数元素非零。这在特征选择中特别有用,可以自动地忽略不重要的特征。
优势:促进稀疏性,有助于提高模型的可解释性,减少模型复杂度。
13.2、NORM_L2
输出结果:
[0.16012815, 0.24019222, 0.32025629;
0.24019222, 0.32025629, 0.40032035;
0.32025629, 0.40032035, 0.48038444]
1、先求出norm
公式:L2_norm = 矩阵中各元素的平方和的开方(欧几里德)(L2-范数)
4+9+16+9+16+25+16+25+36 = 开平方之后 = 12.4899959967968
2、再求dst
公式:dst = alpha * src / L2_norm
dst[0] = 1 * 2 / 12.4899959967968 = 0.16012815
应用场景:机器学习、信号处理、图像去噪。
用途:L2规范化倾向于使得所有元素的大小都差不多,这在避免过拟合(如机器学习中的权重衰减)、信号去噪(通过最小化能量)等场景中非常有用。
优势:防止数据中的个别点对总体结果产生过大影响,提高模型的泛化能力。
13.3、NORM_INF
输出结果:
[0.33333334, 0.5, 0.66666669;
0.5, 0.66666669, 0.83333337;
0.66666669, 0.83333337, 1]
1、先求出norm
INF_norm = 矩阵中所有元素的最大值max
2、再求dst
公式:dst = alpha * src / INF_norm
dst[0] = 1 * 2 / 6 = 0.33333334
应用场景:优化问题、数值计算。
用途:限制数组或向量的无穷范数(即最大绝对值)。在某些优化问题中,可能需要将解的范围限制在一定界限内,使用NORM_INF规范化可以达到这一目的。
优势:保证了数据中没有极端的大值,有利于数值稳定性
13.4、NORM_MINMAX
1、 alpha、beta分别为归一化后的最小值、最大值
函数会自动判断哪个为最大值,最小值
输出结果: [0, 0.25, 0.5;
0.25, 0.5, 0.75;
0.5, 0.75, 1]
min=2,max=6
(1-0) * (2-2)/(6-2) + 0 = 0
(1-0) * (3-2)/(6-2) + 0 = 0.25
(1-0) * (4-2)/(6-2) + 0 = 0.5
应用场景:图像处理、数据预处理。
用途:将数据规范化到一个特定的范围内,常用于调整图像亮度和对比度,或在输入机器学习模型前将特征缩放到同一范围。这有助于提高算法的收敛速度和精度。
优势:简单直观,易于实现,可以使数据分布更均匀。
总结:
NORM_MINMAX适用于需要将数据规范化到相同尺度的场景。
NORM_INF适用于需要控制数据的最大值不超过特定阈值的场景。
NORM_L1适用于需要稀疏解的场景,如特征选择或构建稀疏模型。
NORM_L2适用于需要平滑解、防止过拟合的场景,如机器学习中的正则化处理。
// NORM_MINMAX normalization demo: rescale a float image into [0,1] for
// display, then into [0,255] for conversion back to 8-bit.
void test15()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
Mat img,img2;
cout<<src.type()<<endl;
src.convertTo(src,CV_32FC3);
cout<<src.type()<<endl;
normalize(src,img,1.0,0,NORM_MINMAX);
// imshow can only display float data scaled into [0, 1].
imshow("new",img);
normalize(img,img2,255.0,0,NORM_MINMAX);
img2.convertTo(img2,CV_8UC3);
imshow("new1",img2);
waitKey(0);
}
// Resize demo: shrink to half size and enlarge to 1.5x with linear interpolation.
void test16()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
Mat zoomin,zoomout;
int w = src.cols;
int h = src.rows;
resize(src,zoomin,Size(w/2,h/2),0,0,INTER_LINEAR);
// Note: w*1.5 / h*1.5 are truncated to int by the Size constructor.
resize(src,zoomout,Size(w*1.5,h*1.5),0,0,INTER_LINEAR);
imshow("1",src);
imshow("2",zoomin);
imshow("3",zoomout);
waitKey(0);
}
// Flips (vertical, horizontal, both axes) and a 45-degree rotation with
// the canvas enlarged so the whole rotated image stays visible.
void test17()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
// namedWindow("new", WINDOW_AUTOSIZE);
Mat dst,left,m1,m3;
flip(src,dst,0);
flip(src,left,1);
flip(src,m1,-1);
imshow("上下翻转",dst);
imshow("左右翻转",left);
imshow("180度旋转",m1);
waitKey(0);
// Image rotation.
Mat d,M;
int w = src.cols;
int h = src.rows;
M = getRotationMatrix2D(Point2f(w/2,h/2),45,1.0);
// |cos| and |sin| from the 2x3 rotation matrix give the bounding box
// of the rotated image: nw x nh.
double cos = abs(M.at<double>(0,0));
double sin = abs(M.at<double>(0,1));
int nw = cos * w + sin * h;
int nh = sin * w + cos * h;
// Shift the translation part so the image is centered in the new canvas.
M.at<double>(0,2) += (nw/2-w/2);
M.at<double>(1,2) += (nh/2-h/2);
warpAffine(src,d,M,Size(nw,nh),INTER_LINEAR,0,Scalar(0,0,255));
imshow("new",d);
waitKey(0);
}
void test18()
{
VideoCapture cap(0);
Mat fram;
while(true){
cap.read(fram);
if(fram.empty()){
break;
}
flip(fram,fram,1);
imshow("new",fram);
int n = waitKey(10);
if(n == 27){
break;
}
}
cap.release();
}
// Box-filter (mean) blur demo.
void test19()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
Mat dst;
// 3x3 kernel; anchor (-1,-1) means the kernel center.
blur(src,dst,Size(3,3),Point(-1,-1));
imshow("new",dst);
waitKey(0);
}
// Bilateral filter: smooths flat areas while preserving edges.
void test21()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
Mat dst;
// d=0 lets OpenCV derive the neighborhood from sigmaSpace (10);
// sigmaColor=100 controls how dissimilar colors may be mixed.
bilateralFilter(src,dst,0,100,10);
imshow("new",dst);
waitKey(0);
}
// Gaussian blur applied to a rectangular ROI only.
void test20()
{
Mat src = imread("/Users/zhulei/CLionProjects/opencv_test/1.jpg");
namedWindow("new", WINDOW_AUTOSIZE);
Rect rect(100,100,500,500);
// src(rect) is a view sharing src's pixel data.
Mat roi = src(rect);
// Size(0,0): kernel size is derived from sigma (15).
GaussianBlur(roi,roi,Size(0,0),15);
// NOTE(review): blurring in place already modified src through the
// shared view, so this copy-back is redundant (but harmless).
roi.copyTo(src(rect));
imshow("new",src);
waitKey(0);
}
作用:在图像四周填充指定像素形成边框。
利用此函数,对图片进行padding操作
// Letterbox an image into width x height: scale by the smaller ratio so it
// fits, then pad the borders with copyMakeBorder using various border types.
void test5()
{
int width = 384,height = 226;
Mat img = imread("H:\\face\\_00067cfb-e535423e.jpg");
float rw = (float)width/img.cols;
float rh = (float)height/img.rows;
cout << "rw:" << rw << " rh:" << rh << endl;
float min_ratoi = min(rw, rh);
// e.g. 384 x 216 (Size truncates the float products to int).
resize(img, img, Size(img.cols *min_ratoi, img.rows *min_ratoi));
//resize(img, img, Size(),min_ratoi,min_ratoi);
imshow("new", img);
// Border padding amounts, split as evenly as possible per side.
int delta_width = width - img.cols;
int delta_height = height - img.rows;
int top = delta_height/2;
int bottom = delta_height -top;
int left = delta_width/2;
int right = delta_width-left;
cout << "del_width:" << delta_width << "del_height:" << delta_height << endl;
cout <<"top:"<< top <<"bottom:" << bottom<<"left:" << left<<"right:" << right << endl;
RNG rng;
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
Mat m1, m2, m3, m4, m5, m6,m7,m8;
// Constant-color fill.
copyMakeBorder(img, m1, top, bottom, left, right, BORDER_CONSTANT, color);
// Replicate the outermost pixels.
copyMakeBorder(img, m2, top, bottom, left, right, BORDER_REPLICATE, color);
// Reflection including the edge pixel (fedcba|abcdefgh).
copyMakeBorder(img, m3, top, bottom, left, right, BORDER_REFLECT, color);
// Wrap-around fill.
copyMakeBorder(img, m4, top, bottom, left, right, BORDER_WRAP, color);
// Reflect-101: mirror around (excluding) the edge pixel.
copyMakeBorder(img, m5, top, bottom, left, right, cv::BORDER_REFLECT_101, color);
// NOTE(review): BORDER_REFLECT101 is the same constant as
// BORDER_REFLECT_101, so m6 is identical to m5.
copyMakeBorder(img, m6, top, bottom, left, right, cv::BORDER_REFLECT101, color);
// Default border mode (an alias of BORDER_REFLECT_101).
copyMakeBorder(img, m7, top, bottom, left, right, cv::BORDER_DEFAULT, color);
// BORDER_ISOLATED: do not look at pixels outside the ROI when padding.
copyMakeBorder(img, m8, top, bottom, left, right, cv::BORDER_ISOLATED, color);
imshow("new1", m1);
imshow("new2", m2);
imshow("new3", m3);
imshow("new4", m4);
imshow("new5", m5);
imshow("new6", m6);
imshow("new7", m7);
imshow("new8", m8);
waitKey(0);
}
#include <opencv2/imgproc.hpp>
函数说明:void cv::adaptiveThreshold( InputArray src, OutputArray dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C );
输入参数:
src 8位单通道图像。
dst 与src大小和类型相同的目标图像。
maxValue 指定给满足条件的像素的非零值
adaptiveMethod 自适应阈值算法。BORDER_REPLICATE|BORDER_ISOLATED用于处理边界。
cv::ADAPTIVE_THRESH_MEAN_C = 0 窗口均值阈值法。计算出邻域的平均值再减去参数double C的值
cv::ADAPTIVE_THRESH_GAUSSIAN_C = 1 高斯分布阈值法。计算出邻域的高斯加权均值再减去参数double C的值
thresholdType 阈值化类型(只有两个取值)。
cv::THRESH_BINARY = 0 若大于thresh,则设置为maxval,否则设置为0。(常用)
cv::THRESH_BINARY_INV = 1 若大于thresh,则设置为0,否则设置为maxval(反操作)。
blockSize 像素邻域大小(单位):3、5、7,依此类推。自适应阈值算法的阈值计算时使用。
C 偏移值。自适应阈值算法的阈值计算时使用。
// Compare adaptive thresholding variants on a grayscale image: mean vs.
// Gaussian neighborhood methods, each with THRESH_BINARY (...C0) and
// THRESH_BINARY_INV (...C1).
void test6()
{
Mat img = imread("H:\\face\\_00067cfb-e535423e.jpg");
resize(img, img, Size(384, 226));
Mat srcGray;
cvtColor(img, srcGray, COLOR_BGR2GRAY);
cv::Mat ADAPTIVE_THRESH_MEAN_C0, ADAPTIVE_THRESH_MEAN_C1, ADAPTIVE_THRESH_GAUSSIAN_C0, ADAPTIVE_THRESH_GAUSSIAN_C1;
int blockSize = 5;       // neighborhood size used to compute each pixel's threshold
int constValue = 10;     // constant subtracted from the neighborhood mean
const int maxVal = 255;  // value assigned to pixels that pass the threshold
// Bug fix: the original ran MEAN twice with THRESH_BINARY and GAUSSIAN
// twice with THRESH_BINARY_INV, so the four windows showed only two
// distinct images; the ...C0/...C1 names denote the two threshold types
// (0 = THRESH_BINARY, 1 = THRESH_BINARY_INV).
cv::adaptiveThreshold(srcGray, ADAPTIVE_THRESH_MEAN_C0, maxVal, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, blockSize, constValue);
cv::adaptiveThreshold(srcGray, ADAPTIVE_THRESH_MEAN_C1, maxVal, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY_INV, blockSize, constValue);
cv::adaptiveThreshold(srcGray, ADAPTIVE_THRESH_GAUSSIAN_C0, maxVal, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, blockSize, constValue);
cv::adaptiveThreshold(srcGray, ADAPTIVE_THRESH_GAUSSIAN_C1, maxVal, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, blockSize, constValue);
imshow("new1", ADAPTIVE_THRESH_MEAN_C0);
imshow("new2", ADAPTIVE_THRESH_MEAN_C1);
imshow("new3", ADAPTIVE_THRESH_GAUSSIAN_C0);
imshow("new4", ADAPTIVE_THRESH_GAUSSIAN_C1);
waitKey(0);
}
第一行的(1,0,tx)和(x,y,1)一一对应相乘
# 仿射变化之平移
def test3():
    """Affine transform: shift the image 200 px to the right."""
    img = cv2.imread("./1.png")
    h, w, ch = img.shape
    # Translation matrix; must be at least float32.
    M = np.float32([[1, 0, 200], [0, 1, 0]])
    img = cv2.warpAffine(img, M, dsize=(w, h))
    cv2.imshow("new", img)
    cv2.waitKey(0)
# 仿射变化之获取变换矩阵,旋转矩阵
# opencv提供获取变换矩阵的api
def test4():
    """Rotate the image 15° counter-clockwise about (100, 100)."""
    img = cv2.imread("./1.png")
    h, w, ch = img.shape
    # Rotation matrix: pivot (100, 100), 15 degrees, scale factor 1 (no scaling).
    M = cv2.getRotationMatrix2D((100, 100), 15, 1)
    print(M)
    img = cv2.warpAffine(img, M, dsize=(w, h))
    cv2.imshow("new", img)
    cv2.waitKey(0)
# 通过三个点来确定变换矩阵
def test5():
    """Derive an affine matrix from three point correspondences."""
    img = cv2.imread("./1.png")
    h, w, ch = img.shape
    # Three source points and the positions they should map to.
    src = np.float32([[200, 100], [300, 100], [200, 300]])
    dst = np.float32([[100, 150], [360, 200], [280, 120]])
    M = cv2.getAffineTransform(src, dst)
    print(M)
    img = cv2.warpAffine(img, M, dsize=(w, h))
    cv2.imshow("new", img)
    cv2.waitKey(0)
透视变换就是一种坐标系变换成另一种坐标系,简单来说可以把一张斜的图变正
# 透视变换
# 透视变换就是一种坐标系变换成另一种坐标系,简单来说可以把一张斜的图变正
def test6():
    """Perspective-correct a quadrilateral region to a 400x300 rectangle."""
    img = cv2.imread("./1.png")
    h, w, ch = img.shape
    # Four source corners and the rectangle corners they map onto.
    src = np.float32([[100, 120], [300, 120], [100, 300], [300, 300]])
    dst = np.float32([[0, 0], [400, 0], [0, 300], [400, 300]])
    M = cv2.getPerspectiveTransform(src, dst)
    img = cv2.warpPerspective(img, M, dsize=(400, 300))
    cv2.imshow("new", img)
    cv2.waitKey(0)
(1 * 1+1 * 0+1 * 1) + (0 * 0+1 * 1+1*0) + (0 * 1+0 * 0+1 * 1) = 4
计算padding填充多少
h2是卷积之后的高度,h1是原图像高度,f卷积核高度,p是填充多少,s是卷积核步长
h2 = (h1 - F + 2p)/s + 1
计算一下
// 在不进行填充的情况下,5*5的图像,在经过3*3卷积之后的结果就变成3*3了
(5-3+0)/1 + 1 = 3
// 如果想保证原图像不变就需要进行padding操作
5 = (5-3+2p)/1 + 1
p = 1
// 所以需要在原图像周围补充一圈,具体补充什么值可以通过borderType进行设置
使用常量进行填充,卷积核里面
borderType 描述要添加哪种边框。
(1)BORDER_REPLICATE :复制法,即用复制最边缘像素。
(2)BORDER_REFLECT :反射法,对感兴趣的图像中的像素在两边进行复制。例如:fedcba | abcdefgh | hgfedcb
(3)BORDER_REFLECT_101 :反射法,即用以最边缘像素为轴,对称。gfedcb | abcdefgh | gfedcba
(4)BORDER_WRAP :外包装法。abcdefgh | abcdefgh | abcdefgh
(5)BORDER_CONSTANT :常量法,即用常数值填充
// 5x5 test matrix of 8-bit values.
Mat img = (Mat_<uint8_t>(5, 5) << 3, 5, 7, 9, 2
, 13, 15, 71, 19, 12
, 23, 25, 27, 29, 22
, 1, 4, 2, 5, 6
, 31, 51, 21, 12, 7);
Mat dst, dst2;
// 3x3 box blur; the border is padded with a constant before averaging.
blur(img, dst, Size(3, 3),Point(-1,-1),BORDER_CONSTANT);
输出结果:
dst:[ 4, 13, 14, 13, 5;
9, 21, 23, 22, 10;
9, 20, 22, 21, 10;
15, 21, 20, 15, 9;
10, 12, 11, 6, 3]
默认使用的是BORDER_REFLECT_101 ,即用以最边缘像素为轴,进行复制,所以结果就是一下结果
// Same 5x5 matrix; blur/boxFilter with the default border mode
// (BORDER_REFLECT_101: mirror around the edge pixel).
Mat img = (Mat_<uint8_t>(5, 5)<<3,5,7,9,2, 13
, 15, 71, 19, 12
,23, 25, 27, 29, 22
,1, 4, 2, 5, 6
,31, 51,21, 12, 7);
Mat dst,dst2;
blur(img, dst, Size(3, 3));
// boxFilter with ddepth=-1 and default normalization is equivalent to blur().
boxFilter(img, dst2, -1, Size(3, 3));
cout << "dst:" << dst << endl;
cout << "dst2:" << dst2 << endl;
原理:取卷积核的中位数,作为卷积核的结果,当卷积核在图像上进行移动,获取到的数之后进行排序,取中位数作为卷积核的结果。
注意:默认中值滤波默认填充方式是,使用边界像素的复制来进行填充
// 5x5 test matrix for the median filter.
Mat img = (Mat_<uint8_t>(5, 5) << 3, 5, 7, 9, 2
, 13, 15, 71, 19, 12
, 23, 25, 27, 29, 22
, 1, 4, 2, 5, 6
, 31, 51, 21, 12, 7);
Mat dst, dst2;
// 3x3 median filter: each pixel becomes the median of its sorted neighborhood.
medianBlur(img, dst, 3);
cout << "dst:" << dst << endl;
输出结果:
dst:[ 5, 7, 9, 9, 9;
13, 15, 19, 19, 12;
13, 15, 19, 19, 12;
23, 23, 21, 12, 7;
31, 21, 12, 7, 7]
边缘处的二阶导数为0,可以利用这一特征去寻找图像的边缘。注意:二阶导数为0的位置也可能是无意义的位置(噪声)
膨胀操作是与腐蚀相反的操作,基本原理:只要卷积核覆盖区域内存在非0像素,锚点位置就会变成非0
Mat img = imread("E:\\chrome_down\\yolov5-master\\data\\images\\out0.jpg",IMREAD_GRAYSCALE);
Mat dst;
// Morphological structuring element: 5x5 rectangle.
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(5, 5));
// Erosion strips edge pixels and "thins" bright regions (3x3 kernel here).
Mat kernel = Mat::ones(Size(3, 3), CV_8UC1);
erode(img, dst, kernel);
// Dilation "fattens" bright regions (5x5 kernel here).
Mat dst2;
dilate(img, dst2, kernel1);
开运算:腐蚀之后在膨胀(提供另一种,去除图像外噪声的思路)
闭运算:膨胀之后腐蚀 去除图像内部噪声
Mat img = imread("E:\\chrome_down\\yolov5-master\\data\\images\\out0.jpg", IMREAD_GRAYSCALE);
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(5, 5));
Mat dst,dst1;
// Opening (erode then dilate): removes noise outside shapes.
morphologyEx(img, dst, MORPH_OPEN, kernel1);
// Closing (dilate then erode): removes noise inside shapes.
morphologyEx(img, dst1, MORPH_CLOSE, kernel1);
// NOTE(review): both imshow calls use the same window name, so the first
// result is immediately replaced by the second.
imshow("new", dst);
imshow("new", dst1);
waitKey(0);
梯度 = 膨胀之后的图 - 腐蚀之后的图(OpenCV的MORPH_GRADIENT)
膨胀后的图变大、腐蚀后的图变小,两者相减就得到被腐蚀/膨胀影响的边缘信息
Mat img = imread("E:\\chrome_down\\yolov5-master\\data\\images\\out0.jpg", IMREAD_GRAYSCALE);
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(5, 5));
Mat dst;
// Morphological gradient (dilation minus erosion): highlights edges.
morphologyEx(img, dst, MORPH_GRADIENT, kernel1);
imshow("new", dst);
waitKey(0);
顶帽 = 原图 - 开运算(腐蚀+膨胀)
开运算是去除外图形外的噪音,原图减去开运算就得到,去除的外噪声
Mat img = imread("E:\\chrome_down\\yolov5-master\\data\\images\\out0.jpg", IMREAD_GRAYSCALE);
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(5, 5));
Mat dst;
// Top-hat = source - opening: keeps the small bright detail that opening removed.
morphologyEx(img, dst, MORPH_TOPHAT, kernel1);
imshow("new", dst);
waitKey(0);
黑帽操作 = 原图 - 闭运算(膨胀 + 腐蚀)
闭运算是去除图形内部的噪声,原图减去闭运算就得到,去除的内噪声
Mat img = imread("E:\\chrome_down\\yolov5-master\\data\\images\\out0.jpg", IMREAD_GRAYSCALE);
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(5, 5));
Mat dst;
// Black-hat = closing - source: keeps the small dark detail that closing filled in.
morphologyEx(img, dst, MORPH_BLACKHAT, kernel1);
imshow("new", dst);
waitKey(0);
图像轮廓是具有相同颜色灰度连续的轮廓点的曲线
从右到左,从里到外
// One contour = one vector of points; all detected contours are collected here.
vector<vector<Point>> threshCnts;
// RETR_EXTERNAL keeps only outermost contours; CHAIN_APPROX_SIMPLE compresses
// straight segments to their end points. clone() protects the source image,
// since older OpenCV versions modify the input.
findContours(image_2_CLOSE.clone(), threshCnts, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。