Homography-based feature matching plays a key role in image stitching: it locates and matches feature points across the two images to be stitched, which is what makes alignment and blending possible. The matcher can be implemented as follows:
import math
import cv2 as cv
import numpy as np


class FeatureMatcher:
    def __init__(self, matcher_type="homography", range_width=-1, **kwargs):
        if matcher_type == "homography":
            if range_width == -1:
                self.matcher = cv.detail_BestOf2NearestMatcher(**kwargs)
            else:
                self.matcher = cv.detail_BestOf2NearestRangeMatcher(range_width, **kwargs)
        else:
            raise ValueError("Unknown matcher type")

    def match_features(self, features, *args, **kwargs):
        pairwise_matches = self.matcher.apply2(features, *args, **kwargs)
        self.matcher.collectGarbage()
        return pairwise_matches

    @staticmethod
    def draw_matches_matrix(imgs, features, matches, conf_thresh=1,
                            inliers=False, **kwargs):
        matches_matrix = FeatureMatcher.get_matches_matrix(matches)
        for idx1, idx2 in FeatureMatcher.get_all_img_combinations(len(imgs)):
            match = matches_matrix[idx1, idx2]
            if match.confidence < conf_thresh:
                continue
            if inliers:
                kwargs["matchesMask"] = match.getInliers()
            yield idx1, idx2, FeatureMatcher.draw_matches(
                imgs[idx1], features[idx1], imgs[idx2], features[idx2], match, **kwargs
            )

    @staticmethod
    def get_confidence_matrix(pairwise_matches):
        matches_matrix = FeatureMatcher.get_matches_matrix(pairwise_matches)
        match_confs = [[m.confidence for m in row] for row in matches_matrix]
        match_conf_matrix = np.array(match_confs)
        return match_conf_matrix
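The class above calls three helpers that are not shown in the listing: get_matches_matrix, get_all_img_combinations, and draw_matches. Below is a minimal sketch of what they might look like, modeled on the stitching package's FeatureMatcher; treat the exact implementations as assumptions. They are indented so that they sit inside the class body.

    @staticmethod
    def get_matches_matrix(pairwise_matches):
        # apply2 returns a flat list of N*N MatchesInfo objects; reshape it into an N x N matrix
        n = int(math.sqrt(len(pairwise_matches)))
        return np.array(np.array_split(pairwise_matches, n))

    @staticmethod
    def get_all_img_combinations(number_imgs):
        # yield every unordered image pair (i, j) with i < j
        ii, jj = np.triu_indices(number_imgs, k=1)
        yield from zip(ii, jj)

    @staticmethod
    def draw_matches(img1, features1, img2, features2, matches1to2, **kwargs):
        # thin wrapper around cv.drawMatches for two cv.detail.ImageFeatures objects
        return cv.drawMatches(img1, features1.getKeypoints(),
                              img2, features2.getKeypoints(),
                              matches1to2.getMatches(), None, **kwargs)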
To demonstrate the end-to-end stitching pipeline, we use a pair of overlapping photos I took on the Yuquan campus, showing two cats and their surroundings (the "cat" set); the two originals are shown below. The white cat at the bottom stays almost still, while the white cat with orange patches at the top moves between the two shots.
from stitching.images import Images
import cv2 as cv
import matplotlib.pyplot as plt

# 1. Load images at three resolutions
images, low_imgs, medium_imgs, final_imgs = load_images(img_path)
images_to_match = medium_imgs

# 2. Plot original images
plot_images(images_to_match, (20, 20), save=f'{save_path}/1-original.png')

# 3. Print image size
print(f'Original image size: {images_to_match[0].shape}')

################ Load images ####################
def load_images(img_path):
    images = Images.of(img_path)
    medium_imgs = list(images.resize(Images.Resolution.MEDIUM))
    low_imgs = list(images.resize(Images.Resolution.LOW))
    final_imgs = list(images.resize(Images.Resolution.FINAL))
    return images, low_imgs, medium_imgs, final_imgs

################ Plot functions ####################
def plot_image(img, figsize_in_inches=(10, 10), save=None):
    """N_image = 1. Body not in the original listing; a minimal matplotlib version."""
    fig, ax = plt.subplots(figsize=figsize_in_inches)
    ax.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    if save:
        fig.savefig(save)
    plt.show()

def plot_images(imgs, figsize_in_inches=(10, 10), save=None):
    """N_images > 1. Body not in the original listing; a minimal matplotlib version."""
    fig, axs = plt.subplots(1, len(imgs), figsize=figsize_in_inches)
    for ax, img in zip(axs, imgs):
        ax.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    if save:
        fig.savefig(save)
    plt.show()
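As a usage note, the script is driven by a list of input image paths (img_path) and an output directory (save_path); the values below are placeholders, not from the original. Loading at three resolutions is what the rest of the walkthrough relies on: the medium images drive feature detection and matching, the low images drive seam and crop estimation, and the final (full-resolution) images are used for blending.

img_path = ["cat_left.jpg", "cat_right.jpg"]  # placeholder paths to the two overlapping photos
save_path = "./results"                       # placeholder output directory for the saved plots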
Next, we visualize the feature points extracted by the feature detector. Taking the ORB detector as an example, the small green circles in the figure below show how the detected feature points are distributed over the two input images.
from stitching.feature_detector import FeatureDetector
# 4. Feature detection: ORB (used here), SIFT is also supported
detector = "orb"  # name of the feature detector passed to FeatureDetector
finder = FeatureDetector(detector=detector)
features = [finder.detect_features(img) for img in images_to_match]
key_points_img = []
for i in range(len(images_to_match)):
    key_points_img.append(finder.draw_keypoints(images_to_match[i], features[i]))
plot_images(key_points_img, (20, 20), save=f'{save_path}/2-key_points.png')
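For intuition, FeatureDetector wraps OpenCV's feature detectors, so roughly the same keypoints can be obtained with the raw OpenCV ORB API. A minimal sketch (file name and parameters are illustrative, not from the original):

import cv2 as cv

img = cv.imread("cat_left.jpg")     # placeholder file name
orb = cv.ORB_create(nfeatures=500)  # ORB detector, comparable to detector="orb" above
keypoints, descriptors = orb.detectAndCompute(cv.cvtColor(img, cv.COLOR_BGR2GRAY), None)
vis = cv.drawKeypoints(img, keypoints, None, color=(0, 255, 0))  # green circles as in the figure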
Using the homography-based feature matcher (see Section 3 for the code), the matched feature points of the two input images are connected to produce a match visualization. The green line segments in the figure below show the correspondences between feature points.
from Feature_matcher import FeatureMatcher  # the FeatureMatcher class from Section 3

# 5. Feature matching: homography
matcher = FeatureMatcher()
matches = matcher.match_features(features)
print(matcher.get_confidence_matrix(matches))
# 6. plot matching
all_relevant_matches = matcher.draw_matches_matrix(images_to_match, features, matches, conf_thresh=1,
inliers=True, matchColor=(0, 255, 0))
for idx1, idx2, img in all_relevant_matches:
    print(f"Matches Image {idx1 + 1} to Image {idx2 + 1}")
    plot_image(img, (20, 10), save=f'{save_path}/3-matching.png')
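The confidence matrix printed above is N x N, with entry (i, j) giving the matching confidence between image i and image j. A hedged sketch of how it might be used to decide which pairs overlap enough to stitch (the 1.0 threshold mirrors conf_thresh=1 above and is an assumption, not a fixed rule):

import numpy as np

conf_matrix = matcher.get_confidence_matrix(matches)
# consider only the upper triangle (each unordered pair once) and threshold it
for i, j in np.argwhere(np.triu(conf_matrix, k=1) > 1.0):
    print(f"Image {i + 1} and Image {j + 1} overlap (confidence {conf_matrix[i, j]:.2f})")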
Based on the estimated homography, the two images are perspective-warped so that they align, producing the registration results. In the figure below, subfigure (a) shows the masks produced during warping and subfigure (b) shows the warped images to be stitched; the final panorama has the same size as these warped-image canvases.
from stitching.camera_estimator import CameraEstimator
from stitching.camera_adjuster import CameraAdjuster
from stitching.camera_wave_corrector import WaveCorrector
from stitching.warper import Warper
from stitching.timelapser import Timelapser

# 7. Camera estimation, adjustment and wave correction
cameras = camera_correction(features, matches)

# 8. Warp images
(warped_low_imgs, warped_low_masks, low_corners, low_sizes,
 warped_final_imgs, warped_final_masks, final_corners, final_sizes, frame) \
    = warp_image(images, cameras, low_imgs, final_imgs)
plot_images(warped_low_imgs, (10, 10), save=f'{save_path}/4-warped_low_imgs.png')
plot_images(warped_low_masks, (10, 10), save=f'{save_path}/4-warped_low_masks.png')
plot_images(frame, (20, 10), save=f'{save_path}/4-warped_final_imgs.png')

################ Camera estimation ####################
def camera_correction(features, matches):
    camera_estimator = CameraEstimator()
    camera_adjuster = CameraAdjuster()
    wave_corrector = WaveCorrector()
    cameras = camera_estimator.estimate(features, matches)
    cameras = camera_adjuster.adjust(features, matches, cameras)
    cameras = wave_corrector.correct(cameras)
    return cameras

################ Warp images ####################
def warp_image(images, cameras, low_imgs, final_imgs):
    warper = Warper()
    warper.set_scale(cameras)

    low_sizes = images.get_scaled_img_sizes(Images.Resolution.LOW)
    # cameras were estimated on the medium-resolution images
    camera_aspect = images.get_ratio(Images.Resolution.MEDIUM, Images.Resolution.LOW)
    warped_low_imgs = list(warper.warp_images(low_imgs, cameras, camera_aspect))
    warped_low_masks = list(warper.create_and_warp_masks(low_sizes, cameras, camera_aspect))
    low_corners, low_sizes = warper.warp_rois(low_sizes, cameras, camera_aspect)

    final_sizes = images.get_scaled_img_sizes(Images.Resolution.FINAL)
    camera_aspect = images.get_ratio(Images.Resolution.MEDIUM, Images.Resolution.FINAL)
    warped_final_imgs = list(warper.warp_images(final_imgs, cameras, camera_aspect))
    warped_final_masks = list(warper.create_and_warp_masks(final_sizes, cameras, camera_aspect))
    final_corners, final_sizes = warper.warp_rois(final_sizes, cameras, camera_aspect)

    # Timelapser: place each warped image on the final panorama canvas
    timelapser = Timelapser('as_is')
    timelapser.initialize(final_corners, final_sizes)
    frame = []
    for img, corner in zip(warped_final_imgs, final_corners):
        timelapser.process_frame(img, corner)
        frame.append(timelapser.get_frame())

    return (warped_low_imgs, warped_low_masks, low_corners, low_sizes,
            warped_final_imgs, warped_final_masks, final_corners, final_sizes, frame)
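The Warper above works from the estimated camera parameters rather than from a raw homography, but the underlying idea of homography-based alignment can be illustrated with plain OpenCV. A minimal sketch, assuming img1 and img2 are the two overlapping photos already loaded as BGR arrays (all variable names are illustrative):

import cv2 as cv
import numpy as np

# Detect and match ORB features between the two photos
orb = cv.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
matches = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True).match(des2, des1)

# Matched coordinates: src in img2, dst in img1
src_pts = np.float32([kp2[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp1[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

# Homography mapping img2 onto img1, with RANSAC outlier rejection
H, inlier_mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, ransacReprojThreshold=5.0)

# Warp img2 into img1's frame on a widened canvas and overlay img1
h, w = img1.shape[:2]
aligned = cv.warpPerspective(img2, H, (w * 2, h))
aligned[:h, :w] = img1  # naive overlay; the library instead blends along an estimated seam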
Finally, the two warped images are blended to produce the stitched result. Depending on the user's choice, the warped images can optionally be cropped before blending (stitching(crop=True), default False). Figure 1 shows the warped images without and with cropping (5a & c) and the corresponding seams used for stitching (5b & d). The final panoramas are shown in Figure 2: the top three images skip the cropping step, while the bottom three include it. As can be seen, cropping to a regular rectangle before stitching has a noticeable influence on how the seam line is chosen, and can cause the final panorama to deviate from what is expected.
from stitching.cropper import Cropper
from stitching.seam_finder import SeamFinder
from stitching.blender import Blender  # missing in the original listing

# 9. Crop images (optional)
crop = False  # user option (see text): crop the warped images to the largest interior rectangle
if crop:
    (cropped_low_imgs, cropped_low_masks, cropped_final_imgs,
     cropped_final_masks, final_corners, final_sizes, frame) = (
        crop_image(images, warped_low_imgs, warped_low_masks, low_corners, low_sizes,
                   warped_final_imgs, warped_final_masks, final_corners, final_sizes))
    plot_images(frame, (20, 10), save=f'{save_path}/5-cropped_final_imgs.png')
else:
    cropped_low_imgs = warped_low_imgs
    cropped_low_masks = warped_low_masks
    cropped_final_imgs = warped_final_imgs
    cropped_final_masks = warped_final_masks

# 10. Seam masks
seam_finder, seam_masks_plots, compensated_imgs, seam_masks = (
    seam(cropped_low_imgs, low_corners, cropped_low_masks,
         cropped_final_masks, cropped_final_imgs, final_corners))
plot_images(seam_masks_plots, (15, 10), save=f'{save_path}/6-seam_masks.png')

# 11. Blend the images into the final panorama
blender = Blender()
blender.prepare(final_corners, final_sizes)
for img, mask, corner in zip(compensated_imgs, seam_masks, final_corners):
    blender.feed(img, mask, corner)
panorama, _ = blender.blend()
blended_seam_masks = seam_finder.blend_seam_masks(seam_masks, final_corners, final_sizes)
plot_image(panorama, (20, 20), save=f'{save_path}/7-matched_result.png')
plot_image(seam_finder.draw_seam_lines(panorama, blended_seam_masks, linesize=3),
           (15, 10), save=f'{save_path}/8-seam_lines.png')
plot_image(seam_finder.draw_seam_polygons(panorama, blended_seam_masks),
           (15, 10), save=f'{save_path}/9-seam_polygons.png')

# 12. Done
print('Done!')

################ Crop images ####################
def crop_image(images, warped_low_imgs, warped_low_masks, low_corners, low_sizes,
               warped_final_imgs, warped_final_masks, final_corners, final_sizes):
    cropper = Cropper()
    mask = cropper.estimate_panorama_mask(warped_low_imgs, warped_low_masks,
                                          low_corners, low_sizes)
    lir = cropper.estimate_largest_interior_rectangle(mask)
    low_corners = cropper.get_zero_center_corners(low_corners)
    rectangles = cropper.get_rectangles(low_corners, low_sizes)
    overlap = cropper.get_overlap(rectangles[1], lir)
    intersection = cropper.get_intersection(rectangles[1], overlap)

    cropper.prepare(warped_low_imgs, warped_low_masks, low_corners, low_sizes)
    cropped_low_masks = list(cropper.crop_images(warped_low_masks))
    cropped_low_imgs = list(cropper.crop_images(warped_low_imgs))
    low_corners, low_sizes = cropper.crop_rois(low_corners, low_sizes)

    # lir (largest interior rectangle) was estimated on the low-resolution images
    lir_aspect = images.get_ratio(Images.Resolution.LOW, Images.Resolution.FINAL)
    cropped_final_masks = list(cropper.crop_images(warped_final_masks, lir_aspect))
    cropped_final_imgs = list(cropper.crop_images(warped_final_imgs, lir_aspect))
    final_corners, final_sizes = cropper.crop_rois(final_corners, final_sizes, lir_aspect)

    # Redo the timelapse with the cropped images
    timelapser = Timelapser('as_is')
    timelapser.initialize(final_corners, final_sizes)
    frame = []
    for img, corner in zip(cropped_final_imgs, final_corners):
        timelapser.process_frame(img, corner)
        frame.append(timelapser.get_frame())

    return (cropped_low_imgs, cropped_low_masks, cropped_final_imgs,
            cropped_final_masks, final_corners, final_sizes, frame)
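The seam() helper called in step 10 is not defined in the listing above. Below is a minimal sketch of what it might look like, modeled on the stitching package's SeamFinder and ExposureErrorCompensator; the exact calls and the ExposureErrorCompensator import are assumptions.

from stitching.seam_finder import SeamFinder
from stitching.exposure_error_compensator import ExposureErrorCompensator  # assumed import path

################ Seam masks and exposure compensation ####################
def seam(cropped_low_imgs, low_corners, cropped_low_masks,
         cropped_final_masks, cropped_final_imgs, final_corners):
    # Find seam masks on the low-resolution images, then resize them to the final resolution
    seam_finder = SeamFinder()
    seam_masks = seam_finder.find(cropped_low_imgs, low_corners, cropped_low_masks)
    seam_masks = [seam_finder.resize(seam_mask, mask)
                  for seam_mask, mask in zip(seam_masks, cropped_final_masks)]
    seam_masks_plots = [SeamFinder.draw_seam_mask(img, seam_mask)
                        for img, seam_mask in zip(cropped_final_imgs, seam_masks)]

    # Compensate exposure differences between the images before blending
    compensator = ExposureErrorCompensator()
    compensator.feed(low_corners, cropped_low_imgs, cropped_low_masks)
    compensated_imgs = [compensator.apply(idx, corner, img, mask)
                        for idx, (img, mask, corner)
                        in enumerate(zip(cropped_final_imgs, cropped_final_masks, final_corners))]

    return seam_finder, seam_masks_plots, compensated_imgs, seam_masks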