Reference: OpenCV基础(23)特征检测与匹配 (OpenCV Basics (23): Feature Detection and Matching)
In image processing, feature detection and matching are two crucial steps, playing key roles in fields such as SLAM (Simultaneous Localization and Mapping), SfM (Structure from Motion), AR (Augmented Reality), and VR (Virtual Reality). The following is a detailed breakdown of feature detection and matching:
1. Feature Detection
Feature detection uses computer algorithms to extract distinctive feature points from an image. These points represent the image's key information and are typically invariant to rotation, scale, and illumination changes, so they can be detected reliably across different positions and viewing angles.
2. Feature Matching
Feature matching establishes correspondences between the feature points of two or more images, linking the images together. With matched features, tasks such as object tracking, image registration, and 3D reconstruction become possible.
3. Summary
In image processing, feature detection and matching are the foundation of image recognition, object tracking, image registration, and similar tasks. Designing better detection algorithms, optimizing feature descriptors, adopting multi-scale and multi-modal detection and matching strategies, and applying deep learning can all further improve accuracy and robustness. These techniques not only advance computer vision itself but also support fields such as autonomous driving, intelligent surveillance, and virtual reality.
Eigenvalues and eigenvectors (reference: the article 花了10分钟,终于弄懂了特征值和特征向量到底有什么意义, roughly "ten minutes to finally understand what eigenvalues and eigenvectors actually mean")
Matrix multiplication is a linear transformation: it rotates and stretches a vector, much like applying a function;
An eigenvector points in a direction that is only scaled, never rotated, by the transformation;
The eigenvalue is the corresponding scaling factor.
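A quick numpy check of these three statements (a minimal sketch; the matrix A is an arbitrary example):

import numpy as np

# A symmetric 2x2 example with eigenvalues 3 and 1
A = np.array([[2.0, 1.0],
              [1.0, 2.0]])

# w holds the eigenvalues, the columns of V the unit eigenvectors
w, V = np.linalg.eig(A)

for i in range(len(w)):
    v = V[:, i]
    # Multiplying an eigenvector by A only scales it by its eigenvalue
    print(np.allclose(A @ v, w[i] * v))  # prints True twice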
Harris corner detection is a corner detection algorithm widely used in computer vision that effectively locates corner features in an image. A detailed introduction follows:
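At its core (this is the standard Harris formulation), the algorithm measures how the local intensity changes under small window shifts. From the image gradients $I_x, I_y$ it builds, for a window $w(x,y)$ around each pixel, the structure matrix

$$M = \sum_{x,y} w(x,y) \begin{bmatrix} I_x^2 & I_x I_y \\ I_x I_y & I_y^2 \end{bmatrix}$$

and scores the pixel with the corner response

$$R = \det(M) - k\,(\operatorname{trace} M)^2, \qquad k \approx 0.04\text{-}0.06$$

When both eigenvalues of $M$ are large (a corner), $R$ is large and positive; one large eigenvalue (an edge) gives a negative $R$; two small eigenvalues (a flat region) give $|R| \approx 0$. The k=0.04 argument in the demo below is exactly this constant.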
To address some of the shortcomings of the Harris algorithm, researchers have proposed improved methods such as the Scale-Invariant Feature Transform (SIFT) and Speeded-Up Robust Features (SURF), which improve the accuracy and robustness of corner detection.
The Harris corner detection algorithm is widely used across computer vision.
In short, Harris corner detection is a simple, fast, and effective corner detection method with broad application prospects in computer vision.
For an implementation, see: 【python】OpenCV—Edge, Corner, Face Detection(3)
import cv2
import numpy as np

# Read the source image
cv_image = cv2.imread("1.jpg")
cv2.namedWindow("src", 0)
cv2.imshow("src", cv_image)

# Convert to grayscale
gray_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

# Convert the pixel data to float32 to avoid type incompatibilities later
gray_image = np.float32(gray_image)

# syntax: cv2.cornerHarris(input_image, neighborhood block size, Sobel aperture size, Harris k constant)
result_image = cv2.cornerHarris(gray_image, blockSize=2, ksize=3, k=0.04)
cv2.imshow("result_image", result_image)
cv2.imwrite("result_image.jpg", result_image)

# Dilate to make the corner responses more visible
result_image = cv2.dilate(result_image, None)
cv2.imshow("dilate", result_image)
cv2.imwrite("dilate.jpg", result_image)

# Mark corners on the original image by thresholding the response
cv_image[result_image > 0.01 * result_image.max()] = [0, 0, 255]
cv2.namedWindow("haris", 0)
cv2.imwrite("haris.jpg", cv_image)
cv2.imshow("haris", cv_image)
cv2.waitKey()
Input image
result_image.jpg
dilate.jpg
Shi-Tomasi Corner Detection
1. Algorithm Overview
2. Algorithm Principle
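In brief (this is the standard formulation; the cv2.goodFeaturesToTrack demo below implements it): Shi-Tomasi keeps the same structure matrix $M$ as Harris but scores each pixel with the smaller eigenvalue, $R = \min(\lambda_1, \lambda_2)$, accepting a corner only when both eigenvalues are large, i.e. when $R$ exceeds a quality threshold.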
3. Demo
For an implementation, see: 【python】OpenCV—Edge, Corner, Face Detection(3)
import numpy as np
import cv2

# Input image
cv_img = cv2.imread('1.jpg')
cv_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)

# syntax: cv2.goodFeaturesToTrack(input_image, max_corners_to_detect, qualityLevel, minDistance)
# qualityLevel is the minimum accepted corner quality as a fraction of the best corner's score;
# minDistance is the minimum Euclidean distance (in pixels) between returned corners
corners = cv2.goodFeaturesToTrack(cv_gray, maxCorners=25, qualityLevel=0.01, minDistance=10)
corners = np.float32(corners)

# Draw each detected corner as a filled red circle
for item in corners:
    x, y = item[0].astype("int")
    cv2.circle(cv_img, (x, y), 3, (0, 0, 255), -1)

cv2.imshow("image", cv_img)
cv2.imwrite("shi_result.jpg", cv_img)
cv2.waitKey()
Input image
Output
Only scattered red corner points are visible; the result is mediocre (raising maxCorners or lowering qualityLevel would return more corners).
FAST (Features from Accelerated Segment Test) is a fast and efficient corner detection algorithm, widely used in computer vision and image processing.
1. Algorithm Principle
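In brief (this is the standard segment test): a candidate pixel p with intensity I_p is compared against the 16 pixels on a Bresenham circle of radius 3 around it; p is declared a corner if at least n contiguous circle pixels (typically n = 9 or 12) are all brighter than I_p + t or all darker than I_p - t, for some threshold t. Below is a minimal illustrative sketch of this test (bounds checking omitted; OpenCV's implementation additionally uses a high-speed pretest and a machine-learned pixel ordering):

import numpy as np

# Offsets (dx, dy) of the 16-pixel Bresenham circle of radius 3
CIRCLE = [(0, 3), (1, 3), (2, 2), (3, 1), (3, 0), (3, -1), (2, -2), (1, -3),
          (0, -3), (-1, -3), (-2, -2), (-3, -1), (-3, 0), (-3, 1), (-2, 2), (-1, 3)]

def is_fast_corner(gray, r, c, t=20, n=9):
    """Toy segment test: True if n contiguous circle pixels are all
    brighter than I_p + t or all darker than I_p - t."""
    p = int(gray[r, c])
    ring = [int(gray[r + dy, c + dx]) for dx, dy in CIRCLE]
    # Label each circle pixel: +1 brighter, -1 darker, 0 similar;
    # the list is doubled so contiguous runs may wrap around the circle
    labels = [1 if v > p + t else (-1 if v < p - t else 0) for v in ring] * 2
    run = best = 0
    prev = 0
    for lab in labels:
        run = run + 1 if (lab != 0 and lab == prev) else (1 if lab != 0 else 0)
        best = max(best, run)
        prev = lab
    return best >= n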
2. Algorithm Steps
3. Advantages
4. Demo
import cv2
# Input image
cv_img = cv2.imread('../1.jpg')
cv_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
fast = cv2.FastFeatureDetector_create()
fast.setNonmaxSuppression(False)  # disable non-maximum suppression (NMS)
keypoint = fast.detect(cv_gray, None)
keypoint_image = cv2.drawKeypoints(cv_img, keypoint, None, color=(0, 0, 255))
cv2.imshow("FAST", keypoint_image)
cv2.imwrite("fast.jpg", keypoint_image)
cv2.waitKey()
Output image, with NMS disabled
Below is a comparison of the results with NMS enabled and disabled. NMS keeps only local maxima of the corner response, suppressing clusters of adjacent detections, so far fewer keypoints survive (17,295 vs. 89,925 in this run).
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
# FAST
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image
image = cv2.imread('../1.jpg')

# Convert the image to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

# Display the original and grayscale images
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Original Image")
plots[0].imshow(image)
plots[0].axis("off")
plots[1].set_title("Gray Image")
plots[1].imshow(gray, cmap="gray")
plots[1].axis("off")
plt.savefig("gray.jpg")
plt.show()

fast = cv2.FastFeatureDetector_create()

# Detect keypoints with non-max suppression
keypoints_with_nonmax = fast.detect(gray, None)

# Disable non-max suppression
fast.setNonmaxSuppression(False)

# Detect keypoints without non-max suppression
keypoints_without_nonmax = fast.detect(gray, None)

image_with_nonmax = np.copy(image)
image_without_nonmax = np.copy(image)

# Draw the keypoints on the input image
cv2.drawKeypoints(image, keypoints_with_nonmax, image_with_nonmax, color=(255, 0, 0),
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.drawKeypoints(image, keypoints_without_nonmax, image_without_nonmax, color=(255, 0, 0),
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Display the images with and without non-max suppression
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("With non max suppression")
plots[0].imshow(image_with_nonmax)
plots[0].axis("off")
plt.imsave("nms.jpg", image_with_nonmax)
plots[1].set_title("Without non max suppression")
plots[1].imshow(image_without_nonmax)
plots[1].axis("off")
plt.imsave("nonms.jpg", image_without_nonmax)

# Print the number of keypoints detected with NMS
print("Number of Keypoints Detected In The Image With Non Max Suppression: ", len(keypoints_with_nonmax))

# Print the number of keypoints detected without NMS
print("Number of Keypoints Detected In The Image Without Non Max Suppression: ", len(keypoints_without_nonmax))

plt.show()

# Number of Keypoints Detected In The Image With Non Max Suppression:  17295
# Number of Keypoints Detected In The Image Without Non Max Suppression:  89925
Output: gray.jpg
Output: nonms.jpg
Output: nms.jpg
In the image domain, feature descriptors are mathematical representations that describe specific properties or texture information in an image; they play a key role in tasks such as image matching, object recognition, and image retrieval. Examples: SIFT (Scale-Invariant Feature Transform), SURF (Speeded-Up Robust Features), ORB (Oriented FAST and Rotated BRIEF), HOG (Histogram of Oriented Gradients), LBP (Local Binary Patterns).
BRIEF (Binary Robust Independent Elementary Features) is an efficient feature descriptor algorithm. It describes keypoints (such as corners) in an image for feature matching, image recognition, and related tasks. Its core idea: compare the pixel intensities of randomly sampled point pairs around each keypoint and pack the results into a compact binary string that serves as the keypoint's descriptor, as sketched below.
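A minimal sketch of this idea (illustrative only: the pair count, patch size, and sampling distribution here are assumptions, and real BRIEF smooths the patch before sampling):

import numpy as np

rng = np.random.default_rng(0)
# One fixed sampling pattern shared by all keypoints:
# 256 random point pairs inside a 31x31 patch around the keypoint
PAIRS = rng.integers(-15, 16, size=(256, 2, 2))

def brief_descriptor(gray, r, c):
    """Toy BRIEF: one bit per pair -- 1 if the first sample point
    is brighter than the second (keypoint at row r, column c)."""
    bits = [
        1 if gray[r + r1, c + c1] > gray[r + r2, c + c2] else 0
        for (r1, c1), (r2, c2) in PAIRS
    ]
    return np.packbits(bits)  # 256 bits -> a 32-byte binary descriptor

# Two descriptors are then compared by Hamming distance (the count of
# differing bits), which is why the matchers below use cv2.NORM_HAMMING.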
Characteristics
Demo
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
@File    : BRIEF.py
@Time    : 2021/10/13 11:07
@Author  : David
@Software: PyCharm
"""
# BRIEF (Binary Robust Independent Elementary Features)

# Import the libraries and display the images
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image
image1 = cv2.imread('./2.png')

# Convert the training image to RGB
training_image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

# Convert the training image to grayscale
training_gray = cv2.cvtColor(training_image, cv2.COLOR_RGB2GRAY)

# Create the test image by adding scale and rotation changes
test_image = cv2.pyrDown(training_image)
test_image = cv2.pyrDown(test_image)
num_rows, num_cols = test_image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), 30, 1)
test_image = cv2.warpAffine(test_image, rotation_matrix, (num_cols, num_rows))
test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)

# Display the training and test images
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Training Image")
plots[0].imshow(training_image)
plots[0].axis("off")
plots[1].set_title("Testing Image")
plots[1].imshow(test_image)
plots[1].axis("off")
plt.savefig("tran-test.jpg")

# Detect keypoints and compute descriptors
fast = cv2.FastFeatureDetector_create()
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

train_keypoints = fast.detect(training_gray, None)
test_keypoints = fast.detect(test_gray, None)

train_keypoints, train_descriptor = brief.compute(training_gray, train_keypoints)
test_keypoints, test_descriptor = brief.compute(test_gray, test_keypoints)

keypoints_without_size = np.copy(training_image)
keypoints_with_size = np.copy(training_image)

cv2.drawKeypoints(training_image, train_keypoints, keypoints_without_size, color=(0, 255, 0))
cv2.drawKeypoints(training_image, train_keypoints, keypoints_with_size,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Display the keypoints with and without their size
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Train keypoints With Size")
plots[0].imshow(keypoints_with_size, cmap='gray')
plots[0].axis("off")
plots[1].set_title("Train keypoints Without Size")
plots[1].imshow(keypoints_without_size, cmap='gray')
plots[1].axis("off")
plt.savefig("keypoints.jpg")

# Print the number of keypoints detected in the training image
print("Number of Keypoints Detected In The Training Image: ", len(train_keypoints))

# Print the number of keypoints detected in the query image
print("Number of Keypoints Detected In The Query Image: ", len(test_keypoints))

# Match the keypoints
# Create a Brute Force Matcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match the BRIEF descriptors of the training and test images
matches = bf.match(train_descriptor, test_descriptor)

# Matches with shorter distance are the ones we want
matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(training_image, train_keypoints, test_gray, test_keypoints, matches, test_gray, flags=2)

# Display the best matching points
plt.rcParams['figure.figsize'] = [14.0, 7.0]
plt.title('Best Matching Points')
plt.imsave("matching.jpg", result)
plt.imshow(result)
plt.show()

# Print the total number of matching points between the training and query images
print("\nNumber of Matching Keypoints Between The Training and Query Images: ", len(matches))

# Number of Keypoints Detected In The Training Image:  7975
# Number of Keypoints Detected In The Query Image:  525
# Number of Matching Keypoints Between The Training and Query Images:  194
tran-test.jpg
Note: the test image was downsampled twice, to 1/4 of the original width and height; the two images are merely displayed at the same size here.
keypoints.jpg
matching.jpg——BRIEF+Brute Force Matcher
Let's look at the matching results on another image. The demo below switches to ORB (Oriented FAST and Rotated BRIEF), which combines a FAST detector with a rotation-aware BRIEF descriptor.
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image
image1 = cv2.imread('../2.png')

# Convert the training image to RGB
training_image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

# Convert the training image to grayscale
training_gray = cv2.cvtColor(training_image, cv2.COLOR_RGB2GRAY)

# Create the test image by adding a rotation change (no scaling this time)
test_image = training_image
num_rows, num_cols = test_image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), 30, 1)
test_image = cv2.warpAffine(test_image, rotation_matrix, (num_cols, num_rows))
test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)

# Display the training and test images
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Training Image")
plots[0].imshow(training_image)
plots[0].axis("off")
plt.imsave("training_image.jpg", training_image)
plots[1].set_title("Testing Image")
plots[1].imshow(test_image)
plots[1].axis("off")
plt.imsave("test_image.jpg", test_image)

orb = cv2.ORB_create()
train_keypoints, train_descriptor = orb.detectAndCompute(training_gray, None)
test_keypoints, test_descriptor = orb.detectAndCompute(test_gray, None)

keypoints_without_size = np.copy(training_image)
keypoints_with_size = np.copy(training_image)

cv2.drawKeypoints(training_image, train_keypoints, keypoints_without_size, color=(0, 255, 0))
cv2.drawKeypoints(training_image, train_keypoints, keypoints_with_size,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Display the keypoints with and without their size
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Train keypoints With Size")
plots[0].imshow(keypoints_with_size, cmap='gray')
plots[0].axis("off")
plt.imsave("keypoints_with_size.jpg", keypoints_with_size)
plots[1].set_title("Train keypoints Without Size")
plots[1].imshow(keypoints_without_size, cmap='gray')
plots[1].axis("off")
plt.imsave("keypoints_without_size.jpg", keypoints_without_size)

# Print the number of keypoints detected in the training image
print("Number of Keypoints Detected In The Training Image: ", len(train_keypoints))

# Print the number of keypoints detected in the query image
print("Number of Keypoints Detected In The Query Image: ", len(test_keypoints))

# Create a Brute Force Matcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match the ORB descriptors of the training and test images
matches = bf.match(train_descriptor, test_descriptor)

# Matches with shorter distance are the ones we want
matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(training_image, train_keypoints, test_gray, test_keypoints, matches, test_gray, flags=2)

# Display the best matching points
plt.rcParams['figure.figsize'] = [14.0, 7.0]
plt.title('Best Matching Points')
plt.imsave("result.jpg", result)
plt.imshow(result)
plt.show()

# Print the total number of matching points between the training and query images
print("\nNumber of Matching Keypoints Between The Training and Query Images: ", len(matches))

# Number of Keypoints Detected In The Training Image:  500
# Number of Keypoints Detected In The Query Image:  500
#
# Number of Matching Keypoints Between The Training and Query Images:  325
Input image
Output
test_image.jpg, this time the test image keeps the same size as the original image
keypoints_with_size.jpg
keypoints_without_size.jpg
result.jpg——ORB+Brute Force Matcher
SIFT (Scale-Invariant Feature Transform); this time the test image is again 4x downsampled
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
@File    : sift.py
@Time    : 2021/10/13 13:01
@Author  : David
@Software: PyCharm
"""
# SIFT (Scale-Invariant Feature Transform)

# Import the libraries and display the images
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image
image1 = cv2.imread('../2.png')

# Convert the training image to RGB
training_image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

# Convert the training image to grayscale
training_gray = cv2.cvtColor(training_image, cv2.COLOR_RGB2GRAY)

# Create the test image by adding scale and rotation changes
test_image = cv2.pyrDown(training_image)
test_image = cv2.pyrDown(test_image)
num_rows, num_cols = test_image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), 30, 1)
test_image = cv2.warpAffine(test_image, rotation_matrix, (num_cols, num_rows))
test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)

# Display the training and test images
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Training Image")
plots[0].imshow(training_image)
plots[0].axis("off")
plt.imsave("training_image.jpg", training_image)
plots[1].set_title("Testing Image")
plots[1].imshow(test_image)
plots[1].axis("off")
plt.imsave("test_image.jpg", test_image)

# Detect keypoints and compute descriptors
# (on OpenCV >= 4.4, SIFT lives in the main module: cv2.SIFT_create())
sift = cv2.xfeatures2d.SIFT_create()

train_keypoints, train_descriptor = sift.detectAndCompute(training_gray, None)
test_keypoints, test_descriptor = sift.detectAndCompute(test_gray, None)

keypoints_without_size = np.copy(training_image)
keypoints_with_size = np.copy(training_image)

cv2.drawKeypoints(training_image, train_keypoints, keypoints_without_size, color=(0, 255, 0))
cv2.drawKeypoints(training_image, train_keypoints, keypoints_with_size,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Display the keypoints with and without their size
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Train keypoints With Size")
plots[0].imshow(keypoints_with_size, cmap='gray')
plots[0].axis("off")
plt.imsave("keypoints_with_size.jpg", keypoints_with_size)
plots[1].set_title("Train keypoints Without Size")
plots[1].imshow(keypoints_without_size, cmap='gray')
plots[1].axis("off")
plt.imsave("keypoints_without_size.jpg", keypoints_without_size)

# Print the number of keypoints detected in the training image
print("Number of Keypoints Detected In The Training Image: ", len(train_keypoints))

# Print the number of keypoints detected in the query image
print("Number of Keypoints Detected In The Query Image: ", len(test_keypoints))

# Keypoint matching
# Create a Brute Force Matcher object
bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)

# Match the SIFT descriptors of the training and test images
matches = bf.match(train_descriptor, test_descriptor)

# Matches with shorter distance are the ones we want
matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(training_image, train_keypoints, test_gray, test_keypoints, matches, test_gray, flags=2)

# Display the best matching points
plt.rcParams['figure.figsize'] = [14.0, 7.0]
plt.title('Best Matching Points')
plt.imsave("result.jpg", result)
plt.imshow(result)
plt.show()

# Print the total number of matching points between the training and query images
print("\nNumber of Matching Keypoints Between The Training and Query Images: ", len(matches))

# Number of Keypoints Detected In The Training Image:  1080
# Number of Keypoints Detected In The Query Image:  245
#
# Number of Matching Keypoints Between The Training and Query Images:  1080
Output
keypoints_with_size.jpg
keypoints_without_size.jpg
result.jpg——SIFT+Brute Force Matcher
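Note the matcher here uses cv2.NORM_L1 rather than Hamming distance: SIFT descriptors are 128-dimensional float vectors, so they are compared with L1/L2 distance, while binary descriptors such as BRIEF and ORB use Hamming distance. Also, with crossCheck=False, bf.match() returns a nearest neighbour for every query descriptor, which is why all 1080 training keypoints report a match.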
Newer OpenCV releases do not support SURF (it lives in the non-free xfeatures2d module), so only the code is shown here.
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
@File    : surf.py
# SURF (Speeded-Up Robust Features)
# SURF cannot be used in newer OpenCV; this was run with opencv-contrib-python==3.4.11.45
"""

# Import the libraries and display the images
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image
image1 = cv2.imread('./1.jpg')

# Convert the training image to RGB
training_image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

# Convert the training image to grayscale
training_gray = cv2.cvtColor(training_image, cv2.COLOR_RGB2GRAY)

# Create the test image by adding scale and rotation changes
test_image = cv2.pyrDown(training_image)
test_image = cv2.pyrDown(test_image)
num_rows, num_cols = test_image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols / 2, num_rows / 2), 30, 1)
test_image = cv2.warpAffine(test_image, rotation_matrix, (num_cols, num_rows))
test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)

# Display the training and test images
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Training Image")
plots[0].imshow(training_image)
plots[0].axis("off")
plt.imsave("training_image.jpg", training_image)
plots[1].set_title("Testing Image")
plots[1].imshow(test_image)
plots[1].axis("off")
plt.imsave("test_image.jpg", test_image)

# Detect keypoints and compute descriptors
# (the argument is the Hessian threshold; higher values keep fewer, stronger keypoints)
surf = cv2.xfeatures2d.SURF_create(800)

train_keypoints, train_descriptor = surf.detectAndCompute(training_gray, None)
test_keypoints, test_descriptor = surf.detectAndCompute(test_gray, None)

keypoints_without_size = np.copy(training_image)
keypoints_with_size = np.copy(training_image)

cv2.drawKeypoints(training_image, train_keypoints, keypoints_without_size, color=(0, 255, 0))
cv2.drawKeypoints(training_image, train_keypoints, keypoints_with_size,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# Display the keypoints with and without their size
fx, plots = plt.subplots(1, 2, figsize=(20, 10))
plots[0].set_title("Train keypoints With Size")
plots[0].imshow(keypoints_with_size, cmap='gray')
plots[0].axis("off")
plt.imsave("keypoints_with_size.jpg", keypoints_with_size)
plots[1].set_title("Train keypoints Without Size")
plots[1].imshow(keypoints_without_size, cmap='gray')
plots[1].axis("off")
plt.imsave("keypoints_without_size.jpg", keypoints_without_size)

# Print the number of keypoints detected in the training image
print("Number of Keypoints Detected In The Training Image: ", len(train_keypoints))

# Print the number of keypoints detected in the query image
print("Number of Keypoints Detected In The Query Image: ", len(test_keypoints))

# Keypoint matching
# Create a Brute Force Matcher object
bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)

# Match the SURF descriptors of the training and test images
matches = bf.match(train_descriptor, test_descriptor)

# Matches with shorter distance are the ones we want
matches = sorted(matches, key=lambda x: x.distance)

result = cv2.drawMatches(training_image, train_keypoints, test_gray, test_keypoints, matches, test_gray, flags=2)

# Display the best matching points
plt.rcParams['figure.figsize'] = [14.0, 7.0]
plt.title('Best Matching Points')
plt.imsave("result.jpg", result)
plt.imshow(result)
plt.show()

# Print the total number of matching points between the training and query images
print("\nNumber of Matching Keypoints Between The Training and Query Images: ", len(matches))

# Number of Keypoints Detected In The Training Image:  242
# Number of Keypoints Detected In The Query Image:  29
#
# Number of Matching Keypoints Between The Training and Query Images:  21
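The next demo performs brute-force matching with ORB between two different input images (1.jpg and 2.jpg):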
import cv2
cv_img1 = cv2.imread('1.jpg', 1)
cv_img2 = cv2.imread('2.jpg', 1)
orb = cv2.ORB_create(nfeatures=500)
kp1, des1 = orb.detectAndCompute(cv_img1, None)
kp2, des2 = orb.detectAndCompute(cv_img2, None)
# The matcher takes a normType: cv2.NORM_L2 is used for SIFT and SURF,
# cv2.NORM_HAMMING for binary descriptors such as ORB, FAST, and BRIEF;
# crossCheck=True keeps a match only when the two descriptors are each other's best match
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)  # sort by distance; the 50 best are drawn below
match_img = cv2.drawMatches(cv_img1, kp1, cv_img2, kp2, matches[:50], None)
cv2.imshow('Matches', match_img)
cv2.imwrite("Match.jpg", match_img)
cv2.waitKey()
Input 1.jpg
Input 2.jpg
Output
The final demo uses the ORB detector with a FLANN-based matcher to match the two images, then rectifies the distorted image via a homography.
import cv2
import numpy as np


def get_corrected_img(cv_img1, cv_img2):
    MIN_MATCHES = 10

    orb = cv2.ORB_create(nfeatures=500)
    kp1, des1 = orb.detectAndCompute(cv_img1, None)
    kp2, des2 = orb.detectAndCompute(cv_img2, None)

    # algorithm=6 selects FLANN's LSH index, suitable for binary descriptors like ORB
    index_params = dict(algorithm=6,
                        table_number=6,
                        key_size=12,
                        multi_probe_level=2)
    search_params = {}
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Filter good matches with Lowe's ratio test
    good_matches = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good_matches.append(m)

    if len(good_matches) > MIN_MATCHES:
        src_points = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_points = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        print(src_points.shape)

        good_matches = sorted(good_matches, key=lambda x: x.distance)  # the 50 best matches are drawn
        match_img = cv2.drawMatches(cv_img1, kp1, cv_img2, kp2, good_matches[:50], None)
        cv2.imshow('flannMatches', match_img)
        cv2.imwrite("flannMatch.jpg", match_img)
        cv2.waitKey()

        # Estimate the homography with RANSAC and warp the first image onto the second
        m, mask = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
        corrected_img = cv2.warpPerspective(cv_img1, m, (cv_img2.shape[1], cv_img2.shape[0]))
        return corrected_img
    return None


if __name__ == "__main__":
    cv_im1 = cv2.imread('1.jpg')
    cv_im2 = cv2.imread('2.jpg')

    img = get_corrected_img(cv_im2, cv_im1)
    if img is not None:
        cv2.imshow('Corrected image', img)
        cv2.imwrite("corrected_image.jpg", img)
        cv2.waitKey()
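Two design choices are worth noting in this demo. Lowe's ratio test keeps a match only when its best distance is clearly smaller than the second best (factor 0.8 here), discarding ambiguous correspondences; cv2.findHomography with cv2.RANSAC then estimates the homography while rejecting the outliers that survive the ratio test, and cv2.warpPerspective applies the homography to rectify the image.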
Input images
corrected_image.jpg
flannMatch.jpg