赞
踩
本文是结合 iOS 的 Camera 和 Core Image（CIDetector）中的矩形检测器做的一个识别矩形的 demo，对于简单场景效果还不错。复杂场景下检测效果还是差了点，后面会更新结合自己写的 OpenCV 算法去检测。
1.首先封装一个camera
/*
 * Camera.h
 *
 * Thin wrapper around an AVCaptureSession: owns the capture device, a live
 * preview layer, and a video-data output. Call the three add... methods to
 * assemble the pipeline. NOTE(review): nothing in this file calls
 * -startRunning; presumably the owning controller starts the session.
 */
#import <AVFoundation/AVFoundation.h>
@interface Camera : NSObject
// Capture device feeding the session (set by -addVideoInputFromCamera).
@property (strong, nonatomic) AVCaptureDevice *device;
// Layer rendering the live camera feed; caller adds it to a view's layer tree.
@property (strong, nonatomic) AVCaptureVideoPreviewLayer *previewLayer;
// Underlying session; created in -init with the 1920x1080 preset.
@property (strong, nonatomic) AVCaptureSession *captureSession;
// 32BGRA video-data output; attach a sample-buffer delegate to receive frames.
@property (strong, nonatomic) AVCaptureVideoDataOutput *output;
// Creates previewLayer backed by captureSession (aspect-fill gravity).
- (void)addVideoPreviewLayer;
// Attaches the camera as the session's video input.
- (void)addVideoInputFromCamera;
// Adds the frame-dropping 32BGRA data output to the session.
- (void)addVideoOutput;
@end
/*
* Camera.m
*/
#import <UIKit/UIKit.h>
#import "Camera.h"
@implementation Camera
// Designated initializer: creates the session at 1920x1080.
- (id)init
{
    if ((self = [super init]))
    {
        [self setCaptureSession:[[AVCaptureSession alloc] init]];
        _captureSession.sessionPreset = AVCaptureSessionPreset1920x1080;
    }
    return self;
}

// Creates the preview layer backed by the session; the caller is responsible
// for sizing it and inserting it into a view's layer tree.
- (void)addVideoPreviewLayer
{
    [self setPreviewLayer:[[AVCaptureVideoPreviewLayer alloc] initWithSession:[self captureSession]]];
    [_previewLayer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
}

// Attaches the default video camera as the session's input.
- (void)addVideoInputFromCamera
{
    // BUG FIX: the original took firstObject of +devicesWithMediaType:
    // (deprecated and order-dependent); ask for the default video device.
    self.device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (!self.device)
    {
        return; // No camera available (e.g. the Simulator).
    }
    NSError *error = nil;
    AVCaptureDeviceInput *cameraInput = [AVCaptureDeviceInput deviceInputWithDevice:self.device error:&error];
    // BUG FIX: check the returned input, not the error out-parameter —
    // Cocoa convention is that the error is only meaningful when the call
    // fails (returns nil); it may be untouched on success.
    if (cameraInput && [_captureSession canAddInput:cameraInput])
    {
        [_captureSession addInput:cameraInput];
    }
}

// Adds a 32BGRA video-data output that drops late frames (keeps the preview
// responsive if the detector cannot keep up with the frame rate).
- (void)addVideoOutput{
    [self setOutput:[[AVCaptureVideoDataOutput alloc] init]];
    [_output setAlwaysDiscardsLateVideoFrames:YES];
    [_output setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey:@(kCVPixelFormatType_32BGRA)}];
    if ([_captureSession canAddOutput:_output]) {
        [_captureSession addOutput:_output];
    }
}

// Non-memory cleanup only: stop the running session before the pieces go away.
- (void)dealloc {
    [[self captureSession] stopRunning];
    _previewLayer = nil;
    _captureSession = nil;
    _output = nil;
}
@end
2.封装一个实时更新矩形的view
/*
 * RectView.h
 *
 * Transparent overlay view that strokes the quadrilateral defined by four
 * corner points (used to outline the rectangle found by the detector).
 */
#import <UIKit/UIKit.h>
@interface RectView : UIView
// Quadrilateral corners in view coordinates. Atomic because they are written
// from the video capture queue and read by -drawRect: on the main thread.
@property (atomic) CGPoint point1;
@property (atomic) CGPoint point2;
@property (atomic) CGPoint point3;
@property (atomic) CGPoint point4;
// Stores the four corners; does NOT trigger a redraw — the caller must invoke
// -setNeedsDisplay on the main thread afterwards. ("thrid"/"forth" selector
// typos are kept: renaming would break existing callers.)
- (void)drawWithPointsfirst:(CGPoint)point1 second:(CGPoint)point2 thrid:(CGPoint)point3 forth:(CGPoint)point4;
@end
/*
* RectView.m
*/
#import "RectView.h"
@implementation RectView
- (instancetype)init {
    if (self = [super init]) {
        // Transparent background so the camera preview underneath stays visible.
        self.backgroundColor = [UIColor clearColor];
    }
    return self;
}

// Strokes the quad point1 -> point2 -> point3 -> point4 -> (closed) in green.
- (void)drawRect:(CGRect)rect
{
    [super drawRect:rect];
    CGContextRef context = UIGraphicsGetCurrentContext();
    if (context)
    {
        CGContextMoveToPoint(context, _point1.x, _point1.y);
        CGContextAddLineToPoint(context, _point2.x, _point2.y);
        CGContextAddLineToPoint(context, _point3.x, _point3.y);
        CGContextAddLineToPoint(context, _point4.x, _point4.y);
        // BUG FIX: close the subpath instead of drawing a line back to
        // point1, so the starting corner gets the same round line join as
        // the other three corners.
        CGContextClosePath(context);
        CGContextSetRGBStrokeColor(context, 83/255.0, 239/255.0, 111/255.0, 1); // green
        CGContextSetLineJoin(context, kCGLineJoinRound);
        CGContextSetLineWidth(context, 3.0f);
        CGContextStrokePath(context);
    }
}

// Stores the corners (atomic properties — safe to call off the main thread);
// the caller dispatches -setNeedsDisplay to the main thread to redraw.
- (void)drawWithPointsfirst:(CGPoint)point1 second:(CGPoint)point2 thrid:(CGPoint)point3 forth:(CGPoint)point4 {
    _point1 = point1;
    _point2 = point2;
    _point3 = point3;
    _point4 = point4;
}
@end
3.在检测view中添加cameraview和rectview
/*
 * Add the camera preview view.
 */
self.cameraView = [[UIView alloc] init];
self.cameraView.backgroundColor = [UIColor blackColor];
self.cameraView.frame = self.view.bounds;
[self.view addSubview:self.cameraView];
[self setCaptureManager:[[Camera alloc] init]];
[_captureManager addVideoInputFromCamera];
[_captureManager addVideoOutput];
// Deliver sample buffers on a private serial queue so the main thread stays free.
dispatch_queue_t queue = dispatch_queue_create("VideoQueue", DISPATCH_QUEUE_SERIAL);
[_captureManager.output setSampleBufferDelegate:self queue:queue];
[_captureManager addVideoPreviewLayer];
_captureManager.previewLayer.frame = _cameraView.bounds;
[_cameraView.layer addSublayer:_captureManager.previewLayer];
/*
 * Add the overlay view that draws the detected rectangle in real time.
 */
self.rectView = [[RectView alloc] init];
self.rectView.frame = self.cameraView.frame;
self.rectView.hidden = NO;
// BUG FIX: the original read `[self.view addSubview:selfw]` — `selfw` is an
// undeclared identifier; it is the freshly created overlay that must be added.
[self.view addSubview:self.rectView];
4.获取检测器
// Lazily builds and returns a single shared high-accuracy rectangle detector.
// CIDetector creation is expensive, so one instance is reused for every frame.
- (CIDetector *)highAccuracyRectangleDetector
{
    static CIDetector *sharedDetector;
    static dispatch_once_t once;
    dispatch_once(&once, ^{
        NSDictionary *options = @{CIDetectorAccuracy: CIDetectorAccuracyHigh};
        sharedDetector = [CIDetector detectorOfType:CIDetectorTypeRectangle
                                            context:nil
                                            options:options];
    });
    return sharedDetector;
}
5.在每帧中检测
/*
 * Convert the sample buffer into a CIImage.
 */
CVPixelBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage *image = [CIImage imageWithCVPixelBuffer:imageBuffer];
/*
 * The raw camera frame arrives rotated 90°; rotate it back upright.
 */
// IMPROVED: apply the transform directly via -imageByApplyingTransform:
// (documented as equivalent to the CIAffineTransform filter) instead of
// building the filter by name with a stringly-typed @"inputTransform" key.
CGAffineTransform rotation = CGAffineTransformMakeRotation(-90 * (M_PI / 180));
image = [image imageByApplyingTransform:rotation];
/*
 * Run the high-accuracy rectangle detector and keep the largest candidate.
 */
CIRectangleFeature *rectangleFeature = [self biggestRectangleInRectangles:[[self highAccuracyRectangleDetector] featuresInImage:image]];
6.坐标转换和实时更新view
/*
 * Map detector (image) coordinates into the preview view's coordinate space
 * and hand the corners to the overlay.
 */
// BUG FIX: guard on rectangleFeature BEFORE the coordinate math — the original
// transformed the corners of a possibly-nil feature (all CGPointZero, since
// messaging nil returns zero) and only checked for nil afterwards.
if (rectangleFeature) {
    CGRect previewRect = _cameraView.frame;
    CGRect imageRect = image.extent;
    CGFloat deltaX = CGRectGetWidth(previewRect) / CGRectGetWidth(imageRect);
    CGFloat deltaY = CGRectGetHeight(previewRect) / CGRectGetHeight(imageRect);
    // Core Image's origin is bottom-left; flip vertically, then scale image -> view.
    CGAffineTransform transform2 = CGAffineTransformMakeTranslation(0.f, CGRectGetHeight(previewRect));
    transform2 = CGAffineTransformScale(transform2, 1, -1);
    transform2 = CGAffineTransformScale(transform2, deltaX, deltaY);
    CGPoint points[4];
    points[0] = CGPointApplyAffineTransform(rectangleFeature.topLeft, transform2);
    points[1] = CGPointApplyAffineTransform(rectangleFeature.topRight, transform2);
    points[2] = CGPointApplyAffineTransform(rectangleFeature.bottomRight, transform2);
    points[3] = CGPointApplyAffineTransform(rectangleFeature.bottomLeft, transform2);
    /*
     * Store the corners on the overlay view.
     */
    [_rectView drawWithPointsfirst:points[0]
                            second:points[1]
                             thrid:points[2]
                             forth:points[3]];
    /*
     * We are on the video queue; the redraw must happen on the main thread.
     */
    dispatch_async(dispatch_get_main_queue(), ^{
        [self.rectView setNeedsDisplay];
    });
}
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。