2012-06-27 49 views
1

在我的應用程序,我使用此代碼打開視頻預覽層:無法捕捉AVCaptureVideoPreviewLayer

/* Build the capture pipeline: camera input -> BGRA sample-buffer output -> on-screen preview.
   NOTE(review): this fragment runs inside a view-controller setup method whose
   signature is not visible here; it relies on self.captureSession, self.customLayer,
   self.customLayer1, self.prevLayer and the viewNo1/2/3 placeholder views. */
NSError *inputError = nil;
AVCaptureDeviceInput *captureInput =
    [AVCaptureDeviceInput deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
                                          error:&inputError];
if (!captureInput) {
    // No camera (e.g. the simulator) or the device could not be opened.
    // The original passed error:nil and would have crashed in -addInput: with nil.
    NSLog(@"Could not create camera input: %@", inputError);
}

/* We set up the sample-buffer output. */
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];

// Drop frames we cannot process in time rather than letting them queue up.
captureOutput.alwaysDiscardsLateVideoFrames = YES;

// Deliver frames on a dedicated serial queue so the main thread stays responsive.
dispatch_queue_t queue = dispatch_queue_create("cameraQueue", NULL);
[captureOutput setSampleBufferDelegate:self queue:queue];
#if !OS_OBJECT_USE_OBJC
// dispatch_release is only legal when GCD objects are NOT managed by ARC
// (pre-iOS 6 / MRC); under ARC it is a compile error.
dispatch_release(queue);
#endif

// Request 32-bit BGRA frames, which map directly onto CGBitmapContext/CGImage.
NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
NSNumber *value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];

self.captureSession = [[AVCaptureSession alloc] init];
/* We use medium quality: on the iPhone 4 this demo would lag too much at 720p —
   the conversion to UIImage/CGImage demands too many resources. Configure the
   preset before wiring inputs/outputs so they negotiate the right format. */
[self.captureSession setSessionPreset:AVCaptureSessionPresetMedium];
if (captureInput && [self.captureSession canAddInput:captureInput]) {
    [self.captureSession addInput:captureInput];
}
if ([self.captureSession canAddOutput:captureOutput]) {
    [self.captureSession addOutput:captureOutput];
}

/* First custom layer: frames are pushed into it manually from the
   sample-buffer delegate (orientation is adjusted there). */
self.customLayer = [CALayer layer];
self.customLayer.frame = self.viewNo2.frame;
self.customLayer.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer];

/* Second custom layer, mirroring the first. */
self.customLayer1 = [CALayer layer];
self.customLayer1.frame = self.viewNo3.frame;
self.customLayer1.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer1];

/* Live preview layer. NOTE: AVCaptureVideoPreviewLayer is composited by the
   capture hardware, so it is NOT rendered by -renderInContext: screenshots —
   capture the frame from the sample-buffer delegate instead. */
self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
self.prevLayer.frame = self.viewNo1.frame;
self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer:self.prevLayer];

/* We start the capture. */
[self.captureSession startRunning];

當我試圖捕捉畫面用這種方法:

/* Saves a snapshot of the current view hierarchy to the photo album.
   NOTE: AVCaptureVideoPreviewLayer (self.prevLayer) is composited directly by
   the capture hardware, so -renderInContext: leaves that region blank. To
   include the live camera image, grab the pixel buffer in
   -captureOutput:didOutputSampleBuffer:fromConnection: and composite it in. */
- (IBAction)Photo {
    CGRect rect = [self.view bounds];
    // Opaque context at the device's main-screen scale (scale argument 0.0f).
    UIGraphicsBeginImageContextWithOptions(rect.size, YES, 0.0f);
    CGContextRef context = UIGraphicsGetCurrentContext();
    if (context) {
        // Renders only Core Animation content; hardware-composited layers
        // (video preview, OpenGL) are not captured.
        [self.view.layer renderInContext:context];
    }
    UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    if (viewImage) {
        // nil callback/context: failures to save are intentionally ignored.
        UIImageWriteToSavedPhotosAlbum(viewImage, nil, nil, nil);
    }
}

的prevLayer沒有被捕獲,我錯過了什麼?

回答

2

AVCaptureVideoPreviewLayer不響應捕獲屏幕UIGraphicsGetImageFromCurrentImageContext()。這只是蘋果所做的一個奇怪的規則。獲取當前屏幕圖像的唯一方法是從AVCaptureInput中點擊圖像數據緩衝區。然後可以手動將其添加到屏幕截圖中。

+2

如果我在我的'AVCaptureVideoPreviewLayer'中有覆蓋層,那麼我怎樣才能得到它在緩衝區? –

+0

@iTroyd23 http://stackoverflow.com/a/17010373/3055415 –

+0

@JustinMoser謝謝你讓我試試... –