The features at each level of a pyramid can be obtained in different ways, so every level carries different features.
An image pyramid is a collection of images, all of which are produced from a single source image by repeated downsampling.
cv2.pyrUp: upsampling (width and height are doubled)
cv2.pyrDown: downsampling (width and height are halved)
There are two classic pyramids: the Gaussian pyramid and the Laplacian pyramid. The former is built by repeated downsampling; the latter stores the information that is lost when a level is upsampled again.
Downsampling (building the Gaussian pyramid), going from large to small:
1. Convolve the image with a Gaussian kernel
2. Remove every even-numbered row and column
Recovering the information lost by upsampling (building the Laplacian pyramid), going from small to large:
1. First double the image dimensions
2. Fill the newly added positions with zeros
3. Convolve the new image with a Gaussian kernel
4. Subtract the image from step 3 from the corresponding level of the Gaussian pyramid, i.e. L_i = G_i - PyrUp(G_{i+1})
import cv2

def cv_show(img, name):   # small display helper used throughout this article (redefined in later cells as well)
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

img = cv2.imread("D:/WeChat.picture/AM.png")
cv_show(img, "img")
print(img.shape)
up = cv2.pyrUp(img)       # upsampling: width and height are doubled
cv_show(up, "up")
print(up.shape)
down = cv2.pyrDown(up)    # downsampling the upsampled image brings it back to the original size
cv_show(down, "down")
print(down.shape)
import numpy as np
import cv2
img = cv2.imread("D:/WeChat.picture/lufei.jpg")
up_down = cv2.pyrUp(img)           # up then down: some detail is lost along the way
up_down = cv2.pyrDown(up_down)
# res = np.hstack((img, up_down))
cv_show(np.hstack((img, up_down)), "res")
import numpy as np                 # pyrUp / pyrDown lose some information (fine detail, brightness, ...)
import cv2
img = cv2.imread("D:/WeChat.picture/AM.png", cv2.IMREAD_GRAYSCALE)
up_down = cv2.pyrUp(img)
up_down = cv2.pyrDown(up_down)
# res = np.hstack((img, up_down))
# cv_show(np.hstack((img, up_down)), "res")

l_1 = img - up_down                # one Laplacian level: original minus the down- then up-sampled image
cv_show(l_1, 'l_1')
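The cell above computes a single Laplacian level. A full Laplacian pyramid simply repeats this at every Gaussian level; the sketch below is a minimal version of that idea, reusing the same AM.png path from above (the number of levels is arbitrary). If a level has odd width or height, pyrUp of the next level comes back one pixel larger, so it is resized back before subtracting.

import cv2
import numpy as np

img = cv2.imread("D:/WeChat.picture/AM.png", cv2.IMREAD_GRAYSCALE)

levels = 3
gaussian = [img]
for i in range(levels):                       # Gaussian pyramid: repeatedly downsample
    gaussian.append(cv2.pyrDown(gaussian[-1]))

laplacian = []
for i in range(levels):                       # Laplacian level i = G_i - pyrUp(G_{i+1})
    up = cv2.pyrUp(gaussian[i + 1])
    up = cv2.resize(up, (gaussian[i].shape[1], gaussian[i].shape[0]))  # guard against odd sizes
    laplacian.append(cv2.subtract(gaussian[i], up))                    # saturating subtraction, no uint8 wrap-around

for i, lap in enumerate(laplacian):
    print(i, lap.shape)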
cv2.findContours(img, mode, method)
1 RETR_EXTERNAL: retrieves only the outermost contours;
2 RETR_LIST: retrieves all contours and stores them in a flat list;
3 RETR_CCOMP: retrieves all contours and organizes them into two levels: the top level holds the outer boundaries of each part, the second level holds the boundaries of the holes;
4 RETR_TREE: retrieves all contours and reconstructs the full hierarchy of nested contours (this is the usual choice)
The method argument controls how contour points are stored: cv2.CHAIN_APPROX_NONE keeps every boundary point, while cv2.CHAIN_APPROX_SIMPLE compresses straight segments down to their endpoints.
Return values: contours (a list of point arrays, one per contour) and hierarchy (the nesting relationships); OpenCV 3.x additionally returns the image as the first value.
For better accuracy, work on a binary (thresholded) image.
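As a quick check of the retrieval modes, the sketch below (reusing the contours.png path from this article's examples) compares how many contours RETR_EXTERNAL and RETR_TREE return on the same binary image.

import cv2

img = cv2.imread("D:/WeChat.picture/contours.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

for mode in (cv2.RETR_EXTERNAL, cv2.RETR_TREE):
    contours, hierarchy = cv2.findContours(thresh, mode, cv2.CHAIN_APPROX_NONE)
    print(mode, len(contours))    # RETR_EXTERNAL counts only the outermost contours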
def cv_show(img, name):            # threshold to a binary image first: it makes contour detection much cleaner
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
img = cv2.imread("D:/WeChat.picture/contours.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cv_show(thresh, "thresh")
# hierarchy: the nesting relationships between the contours
# contours: the contour data itself, stored as a list; np.array(contours).shape can be used to inspect it
import numpy as np
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
np.array(contours).shape  # NumPy warns that creating an ndarray from ragged nested sequences requires dtype=object
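Because each contour has a different number of points, packing them all into one ndarray triggers the warning above. Continuing from the cell above, a simpler sanity check is:

print(len(contours))          # number of contours found
print(contours[0].shape)      # points of the first contour: (num_points, 1, 2)
print(hierarchy.shape)        # (1, num_contours, 4): [next, previous, first_child, parent]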
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
img = cv2.imread("D:/WeChat.picture/contours.png")
cv_show(img, "img")
draw_img = img.copy()              # copy first: drawContours draws in place and would overwrite the original
draw = cv2.drawContours(draw_img, contours, -1, (0, 0, 255), 2)   # -1 draws all contours
# (0, 0, 255) is the colour in (B, G, R) order, 2 is the line width
cv_show(draw, "draw")
cnt = contours[1]
# area
cv2.contourArea(cnt)
# perimeter; True means the contour is treated as closed
cv2.arcLength(cnt, True)
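These two calls just return numbers; continuing with the contours from the cells above, a small loop makes it easy to compare the shapes:

for i, c in enumerate(contours):
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    print(i, "area:", area, "perimeter:", perimeter)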
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
import numpy as np
img1 = cv2.imread("D:/WeChat.picture/contours2.png")
cv_show(img1, "img")
Contour approximation: the degree of approximation is controlled by a parameter, the epsilon passed to cv2.approxPolyDP, usually given as a fraction of the contour's perimeter.
img = cv2.imread("D:/WeChat.picture/contours2.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # convert with COLOR_BGR2GRAY, not by re-reading with cv2.IMREAD_GRAYSCALE
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)   # binarize


contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]

draw_img = img.copy()
res = cv2.drawContours(draw_img, [cnt], -1, (0, 0, 255), 2)      # the original, un-approximated contour

cv_show(res, "res")

# contour approximation
epsilon = 0.01 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
draw_img = img.copy()   # note the parentheses: writing img.copy without () makes drawContours fail with
                        # "overload resolution failed: img is not a numpy array, nor a scalar; expected Ptr<cv::UMat> for argument 'image'"
res = cv2.drawContours(draw_img, [approx], -1, (0, 0, 255), 2)
cv_show(res, "res")

img = cv2.imread("D:/WeChat.picture/contours2.png")             # read the image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)                    # convert to grayscale
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)  # binarize

# contour detection
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]
draw_img = img.copy()            # .copy() makes an independent copy to draw on

res = cv2.drawContours(draw_img, [cnt], -1, (0, 0, 255), 2)
# -1 draws all the given contours; (0, 0, 255) is the colour in B, G, R order (red here)

cv_show(res, 'res')


# contour approximation
epsilon = 0.1 * cv2.arcLength(cnt, True)   # 0.1 times the perimeter; the smaller epsilon is, the closer the result stays to the original contour
# cv2.approxPolyDP(): approximation function; cnt: the contour, epsilon: the tolerance, here a fraction of the perimeter

approx = cv2.approxPolyDP(cnt, epsilon, True)
draw_img = img.copy()

res = cv2.drawContours(draw_img, [approx], -1, (0, 0, 255), 2)
# -1 draws all the given contours; (0, 0, 255) is the colour in B, G, R order (red here)
cv_show(res, 'res')
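With 0.01 times the perimeter the approximation still hugs the original outline, while 0.1 collapses it to a few straight edges. Continuing with cnt from the cell above, printing the number of surviving vertices for a few epsilon values makes the effect obvious:

for factor in (0.001, 0.01, 0.05, 0.1):
    epsilon = factor * cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    print(factor, "->", len(approx), "vertices")   # larger epsilon => fewer vertices, coarser shape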
img = cv2.imread("D:/WeChat.picture/contours.png")

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[4]

x, y, w, h = cv2.boundingRect(cnt)                 # upright bounding rectangle
img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv_show(img, "img")
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area
print("extent (contour area / bounding-rectangle area):", extent)
extent (contour area / bounding-rectangle area): 0.520365296803653
Minimum enclosing circle
(x, y), radius = cv2.minEnclosingCircle(cnt)   # there is a minEnclosingCircle, but no maxEnclosingCircle
center = (int(x), int(y))
radius = int(radius)
img = cv2.circle(img, center, radius, (0, 255, 0), 2)
cv_show(img, "img")
Template matching works much like convolution: the template slides over the source image starting from the origin, and at each position a similarity (or difference) score between the template and the patch of the image it covers is computed. OpenCV offers six ways of computing this score. The scores are collected into a result matrix: if the source image is A x B and the template is a x b, the result matrix has size (A - a + 1) x (B - b + 1); for example, a 100 x 100 image with a 20 x 20 template gives an 81 x 81 result.
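To make the sliding-window idea concrete, this small sketch (using toy random arrays rather than the article's images) computes the squared-difference score by hand and checks that its shape and values agree with cv2.matchTemplate using cv2.TM_SQDIFF:

import cv2
import numpy as np

img = np.random.randint(0, 256, (8, 10), dtype=np.uint8)     # toy "image", 8 x 10
tpl = np.random.randint(0, 256, (3, 4), dtype=np.uint8)      # toy "template", 3 x 4

H, W = img.shape
h, w = tpl.shape
manual = np.zeros((H - h + 1, W - w + 1), dtype=np.float32)   # (A-a+1) x (B-b+1)
for y in range(H - h + 1):
    for x in range(W - w + 1):
        patch = img[y:y + h, x:x + w].astype(np.float32)
        manual[y, x] = np.sum((patch - tpl.astype(np.float32)) ** 2)   # sum of squared differences

res = cv2.matchTemplate(img, tpl, cv2.TM_SQDIFF)
print(res.shape)                          # (6, 7) here: (8-3+1, 10-4+1)
print(np.allclose(manual, res))           # True: both compute the same SSD map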
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
img_1 = cv2.imread("D:/WeChat.picture/lena.png", 0)
template = cv2.imread("D:/WeChat.picture/face.jpg", 0)
cv_show(img_1, "img_1")
cv_show(template, "img_2")
img_1.shape, template.shape   # put both on one line so the notebook displays both shapes
methods = ["cv2.TM_SQDIFF" , "cv2.TM_CCORR" , "cv2.TM_CCOEFF" , "cv2.TM_SQDIFF_NORMED" , "cv2.TM_CCORR_NORMED" , "cv2.TM_CCOEFF_NORMED"]
res = cv2.matchTemplate(img_1, template, 1)   # 1 is the flag value of cv2.TM_SQDIFF_NORMED; passing the named constant is clearer
res.shape
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
import matplotlib.pyplot as plt
img_1 = cv2.imread("D:/WeChat.picture/lena.png", 0)
template = cv2.imread("D:/WeChat.picture/face.jpg", 0)
h, w = template.shape[:2]   # needed below to compute the bottom-right corner of the match

methods = ["cv2.TM_SQDIFF", "cv2.TM_CCORR", "cv2.TM_CCOEFF", "cv2.TM_SQDIFF_NORMED", "cv2.TM_CCORR_NORMED", "cv2.TM_CCOEFF_NORMED"]
for meth in methods:
    img_2 = img_1.copy()

    # matching method
    method = eval(meth)
    print(method)
    res = cv2.matchTemplate(img_1, template, 1)   # note: this cell always passes 1 (cv2.TM_SQDIFF_NORMED) instead of method,
                                                  # so every iteration uses the same normalized method and the results differ from the next cell
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)   # for squared-difference methods the best match is the smallest value

    # for TM_SQDIFF or TM_SQDIFF_NORMED the best match is the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc   # (x, y) of the top-left corner
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)   # top_left[0] is the x coordinate of the top-left corner


    # draw the rectangle
    cv2.rectangle(img_2, top_left, bottom_right, 255, 2)

    plt.subplot(121), plt.imshow(res, cmap="gray")
    plt.xticks([]), plt.yticks([])   # hide the axes
    plt.subplot(122), plt.imshow(img_2, cmap="gray")
    plt.xticks([]), plt.yticks([])   # hide the axes
    plt.suptitle(meth)
    plt.show()
Output: 0 2 4 1 3 5 (the integer values of the six matching-method constants, printed once per loop iteration)
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
import matplotlib.pyplot as plt
img_1 = cv2.imread("D:/WeChat.picture/lena.png", 0)
template1 = cv2.imread("D:/WeChat.picture/face.jpg", 0)
h, w = template1.shape[:2]   # needed below to compute the bottom-right corner of the match

methods = ["cv2.TM_SQDIFF", "cv2.TM_CCORR", "cv2.TM_CCOEFF", "cv2.TM_SQDIFF_NORMED", "cv2.TM_CCORR_NORMED", "cv2.TM_CCOEFF_NORMED"]


for meth in methods:
    img_2 = img_1.copy()

    # the actual flag value of the matching method
    method = eval(meth)
    print(method)
    res = cv2.matchTemplate(img_1, template1, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # for TM_SQDIFF or TM_SQDIFF_NORMED the best match is the minimum value
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # draw the rectangle
    cv2.rectangle(img_2, top_left, bottom_right, 255, 2)

    plt.subplot(121), plt.imshow(res, cmap='gray')
    plt.xticks([]), plt.yticks([])   # hide the axes
    plt.subplot(122), plt.imshow(img_2, cmap='gray')
    plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
Output: 0 2 4 1 3 5 (the same method constants, one per iteration; this time each iteration actually uses its own method)
Matching multiple objects
def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

import cv2
import numpy as np
img_1 = cv2.imread("D:/WeChat.picture/mario.jpg")
img_gray = cv2.cvtColor(img_1, cv2.COLOR_BGR2GRAY)
template = cv2.imread("D:/WeChat.picture/mario_coin.jpg", 0)
h, w = template.shape[:2]   # writing template[:2] by mistake leads to "Can't parse 'pt2'. Sequence item with index 0 has a wrong type"
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
# keep every location whose match score is at least 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):   # loc is (rows, cols); reversing and unpacking with * yields (x, y) points
    bottom_right = (pt[0] + w, pt[1] + h)
    cv2.rectangle(img_1, pt, bottom_right, (0, 0, 255), 2)

cv_show(img_1, "img")
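The zip(*loc[::-1]) idiom is easy to misread, so here is a toy example (independent of the images above) showing how np.where's (rows, cols) output becomes a list of (x, y) points:

import numpy as np

scores = np.array([[0.1, 0.9, 0.2],
                   [0.95, 0.3, 0.85]])
loc = np.where(scores >= 0.8)       # (array of row indices, array of column indices)
print(loc)                          # (array([0, 1, 1]), array([1, 0, 2]))
print(list(zip(*loc[::-1])))        # three (x, y) = (col, row) pairs: (1, 0), (0, 1), (2, 1)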
import matplotlib.pyplot as plt
img_rgb = cv2.imread("D:/WeChat.picture/mario.jpg")
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread("D:/WeChat.picture/mario_coin.jpg", 0)
h, w = template.shape[:2]

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
# keep every location whose match score is at least 80%
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):   # unpack the reversed (rows, cols) arrays into (x, y) points
    bottom_right = (pt[0] + w, pt[1] + h)
    cv2.rectangle(img_rgb, pt, bottom_right, (0, 0, 255), 2)

cv2.imshow('img_rgb', img_rgb)
cv2.waitKey(0)
cv2.destroyAllWindows()

plt.imshow(img_rgb)
plt.show()
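One caveat about the last two lines: cv2.imread returns channels in BGR order while matplotlib assumes RGB, so the plt.imshow figure displays swapped colours. If the true colours matter, convert before plotting:

plt.imshow(cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB))   # convert BGR -> RGB so matplotlib shows the true colours
plt.show()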