First, initialize the AVCaptureSession:
// Initialize the AVCaptureSession
_session = [[AVCaptureSession alloc] init];
Next, configure the video and audio capture formats. The two are set up separately, which means you can also choose to capture video only.
// Configure the capture input (the camera)
NSError *error = nil;
// Get a capture device, e.g. the front or back camera
AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
// Use the device to create a capture input object
AVCaptureDeviceInput *videoInput = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
if (error) {
    NSLog(@"Error getting video input device: %@", error.description);
}
if ([_session canAddInput:videoInput]) {
    [_session addInput:videoInput]; // Add it to the session
}

// Configure the capture output, i.e. the interface through which we receive video frames
_videoQueue = dispatch_queue_create("Video Capture Queue", DISPATCH_QUEUE_SERIAL);
_videoOutput = [[AVCaptureVideoDataOutput alloc] init];
[_videoOutput setSampleBufferDelegate:self queue:_videoQueue];
// Configure the output pixel format
NSDictionary *captureSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)};
_videoOutput.videoSettings = captureSettings;
_videoOutput.alwaysDiscardsLateVideoFrames = YES;
if ([_session canAddOutput:_videoOutput]) {
    [_session addOutput:_videoOutput]; // Add it to the session
}

// Keep the connection so the sample buffer delegate can tell whether the data is video or audio
_videoConnection = [_videoOutput connectionWithMediaType:AVMediaTypeVideo];
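The audio side can be wired up analogously. Here is a minimal sketch, assuming additional _audioQueue, _audioOutput, and _audioConnection ivars (mirroring the video ones) and that the delegate class also adopts AVCaptureAudioDataOutputSampleBufferDelegate:

// Audio input (microphone), sketch only, mirroring the video setup above
AVCaptureDevice *audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioDevice error:&error];
if ([_session canAddInput:audioInput]) {
    [_session addInput:audioInput];
}

// Audio output: delivers CMSampleBuffers containing PCM audio
_audioQueue = dispatch_queue_create("Audio Capture Queue", DISPATCH_QUEUE_SERIAL);
_audioOutput = [[AVCaptureAudioDataOutput alloc] init];
[_audioOutput setSampleBufferDelegate:self queue:_audioQueue];
if ([_session canAddOutput:_audioOutput]) {
    [_session addOutput:_audioOutput];
}

// Keep the audio connection so the delegate can distinguish it from the video connection
_audioConnection = [_audioOutput connectionWithMediaType:AVMediaTypeAudio];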
Then implement the sample buffer delegate callback (AVCaptureVideoDataOutputSampleBufferDelegate):
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    // sampleBuffer is the captured data; whether it is video or audio is determined by the connection
    if (connection == _videoConnection) { // Video
        /*
        // Get the current video dimensions
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        int width = (int)CVPixelBufferGetWidth(pixelBuffer);
        int height = (int)CVPixelBufferGetHeight(pixelBuffer);
        NSLog(@"video width: %d height: %d", width, height);
        */
        NSLog(@"Got a video sampleBuffer here; process it further (encode to H.264)");
    } else if (connection == _audioConnection) { // Audio
        NSLog(@"Got an audio sampleBuffer here; process it further (encode to AAC)");
    }
}
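For the "encode to H.264" step, one common approach is a VideoToolbox VTCompressionSession (iOS 8+). The following is only a minimal sketch: the VTCompressionSessionRef _compressionSession ivar, the didCompressH264 callback name, and the fixed 1280x720 frame size are assumptions for illustration.

#import <VideoToolbox/VideoToolbox.h>

// Assumed instance variable: VTCompressionSessionRef _compressionSession;

// Called by VideoToolbox with each encoded H.264 frame
static void didCompressH264(void *refCon, void *sourceFrameRefCon, OSStatus status,
                            VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer) {
    if (status != noErr || sampleBuffer == NULL) {
        return;
    }
    // Extract SPS/PPS and NAL units here and hand them to the packaging/network layer
    NSLog(@"Encoded one H.264 frame");
}

- (void)setupCompressionSession {
    // Assumed 1280x720; in practice match the session's actual output size
    VTCompressionSessionCreate(kCFAllocatorDefault, 1280, 720, kCMVideoCodecType_H264,
                               NULL, NULL, NULL, didCompressH264,
                               (__bridge void *)self, &_compressionSession);
    VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
    VTCompressionSessionPrepareToEncodeFrames(_compressionSession);
}

// Call this from the delegate (in place of the NSLog) for the video connection
- (void)encodeVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    VTCompressionSessionEncodeFrame(_compressionSession, pixelBuffer, pts,
                                    kCMTimeInvalid, NULL, NULL, NULL);
}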
With configuration done, start the session:
// Start the session
[_session startRunning];
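Note that -startRunning blocks until the session is actually running, and on iOS 7+ the user must grant camera access. A minimal sketch that handles both, assuming the same _session as above:

[AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo completionHandler:^(BOOL granted) {
    if (!granted) {
        NSLog(@"Camera access denied");
        return;
    }
    // -startRunning is a blocking call, so keep it off the main thread
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [_session startRunning];
    });
}];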
Displaying the preview is simple: on the capture (sending) side you can directly use AVFoundation's own AVCaptureVideoPreviewLayer.
_previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:_session];
_previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill; // How the video is scaled in the preview
[[_previewLayer connection] setVideoOrientation:AVCaptureVideoOrientationPortrait]; // Video orientation
_previewLayer.frame = self.view.layer.bounds;
[self.view.layer addSublayer:_previewLayer];
Then add this layer to the view hierarchy and the preview will show up.
Full implementation code:
#import "MyAVController.h" #import <AVFoundation/AVFoundation.h> #import <CoreGraphics/CoreGraphics.h> #import <CoreVideo/CoreVideo.h> #import <CoreMedia/CoreMedia.h> @interface MyAVController()<AVCaptureVideoDataOutputSampleBufferDelegate> @property (nonatomic, retain) AVCaptureSession *captureSession; @property (nonatomic, retain) UIImageView *imageView; @property (nonatomic, retain) CALayer *customLayer; @property (nonatomic, retain) AVCaptureVideoPreviewLayer *prevLayer; - (void)initCapture; @end #import "MyAVController.h" @implementation MyAVController { AVCaptureSession *_captureSession; UIImageView *_imageView; CALayer *_customLayer; AVCaptureVideoPreviewLayer *_prevLayer; AVCaptureConnection *_videoConnection; AVCaptureConnection *_audioConnection; } #pragma mark - #pragma mark Initialization - (id)init { self = [super init]; if (self) { self.imageView = nil; self.prevLayer = nil; self.customLayer = nil; } return self; } - (void)viewDidLoad { [self initCapture]; } - (void)initCapture { //配置采集输入源(摄像头) AVCaptureDevice*videoDevice=[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]; //用设备初始化一个采集的输入对象 AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:nil]; AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc]init]; captureOutput.alwaysDiscardsLateVideoFrames = YES; //captureOutput.minFrameDuration = CMTimeMake(1, 10); //配置采集输出,即咱们取得视频图像的接口 dispatch_queue_t queue; queue = dispatch_queue_create("cameraQueue", NULL); [captureOutput setSampleBufferDelegate:self queue:queue]; dispatch_release(queue); NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey; //配置输出视频图像格式 NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]; NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key]; [captureOutput setVideoSettings:videoSettings]; self.captureSession = [[AVCaptureSession alloc] init]; [self.captureSession addInput:captureInput]; [self.captureSession addOutput:captureOutput]; [self.captureSession startRunning]; //保存Connection,用于在SampleBufferDelegate中判断数据来源(是Video/Audio?) 
_videoConnection=[captureOutput connectionWithMediaType:AVMediaTypeVideo]; //view self.customLayer = [CALayer layer]; self.customLayer.frame = self.view.bounds; self.customLayer.transform = CATransform3DRotate( CATransform3DIdentity, M_PI/2.0f, 0, 0, 1); self.customLayer.contentsGravity = kCAGravityResizeAspectFill; [self.view.layer addSublayer:self.customLayer]; self.imageView = [[UIImageView alloc] init]; self.imageView.frame = CGRectMake(0, 0, 100, 100); [self.view addSubview:self.imageView]; self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession: self.captureSession]; self.prevLayer.frame = CGRectMake(100, 0, 100, 100); self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill; [self.view.layer addSublayer: self.prevLayer]; } #pragma mark - #pragma mark AVCaptureSession delegate - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection { // 这里的sampleBuffer就是采集到的数据了,但它是Video仍是Audio的数据,得根据connection来判断 if (connection == _videoConnection) { // Video /* // 取得当前视频尺寸信息 CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); int width = CVPixelBufferGetWidth(pixelBuffer); int height = CVPixelBufferGetHeight(pixelBuffer); NSLog(@"video width: %d height: %d", width, height); */ NSLog(@"在这里得到video sampleBuffer,作进一步处理(编码H.264)"); } else if (connection == _audioConnection) { // Audio NSLog(@"这里得到audio sampleBuffer,作进一步处理(编码AAC)"); } NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; // 这里的sampleBuffer就是采集到的数据了,但它是Video仍是Audio的数据,得根据connection来判断 CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); CVPixelBufferLockBaseAddress(imageBuffer,0); uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer); size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); size_t width = CVPixelBufferGetWidth(imageBuffer); size_t height = CVPixelBufferGetHeight(imageBuffer); CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); CGImageRef newImage = CGBitmapContextCreateImage(newContext); CGContextRelease(newContext); CGColorSpaceRelease(colorSpace); [self.customLayer performSelectorOnMainThread:@selector(setContents:) withObject: (__bridge id) newImage waitUntilDone:YES]; UIImage *image= [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight]; CGImageRelease(newImage); [self.imageView performSelectorOnMainThread:@selector(setImage:) withObject:image waitUntilDone:YES]; CVPixelBufferUnlockBaseAddress(imageBuffer,0); [pool drain]; } #pragma mark - #pragma mark Memory management - (void)viewDidUnload { self.imageView = nil; self.customLayer = nil; self.prevLayer = nil; } - (void)dealloc { [self.captureSession release]; } @end