
This is the view when displayed on an iPad Mini. On an iPhone the camera image looks correct and fills the screen. How can I tell the camera to fill the whole area? My code follows the image. How do I set the correct camera image size on the preview layer?

In fact, the project works correctly on the smaller devices; only when I move to the iPad does it show this border. Perhaps it happens on the iPhone 7 as well, but I don't have one. Running on the simulator makes no difference, since it is the camera view that is being constrained. I've included all of the code because it is fairly tightly interconnected.

[Screenshot: the camera preview on the iPad Mini, letterboxed instead of filling the screen]

import UIKit 
import AVFoundation 
import Foundation 

class ViewController: UIViewController { 

@IBOutlet weak var navigationBar: UINavigationBar! 
@IBOutlet weak var imgOverlay: UIImageView! 
@IBOutlet weak var btnCapture: UIButton! 

@IBOutlet weak var shapeLayer: UIView! 

let captureSession = AVCaptureSession() 
let stillImageOutput = AVCaptureStillImageOutput() 
var previewLayer : AVCaptureVideoPreviewLayer? 

//===================== 
let screenWidth = UIScreen.main.bounds.size.width 
let screenHeight = UIScreen.main.bounds.size.height 
var aspectRatio: CGFloat = 1.0 


var viewFinderHeight: CGFloat = 0.0 
var viewFinderWidth: CGFloat = 0.0 
var viewFinderMarginLeft: CGFloat = 0.0 
var viewFinderMarginTop: CGFloat = 0.0 
//====================== 


// If we find a device we'll store it here for later use 
var captureDevice : AVCaptureDevice? 

override func viewDidLoad() { 
    super.viewDidLoad() 
    // Do any additional setup after loading the view, typically from a nib. 

    //On iPad Mini this returns 760 x 1024 = correct 
    //On the iPhone SE, this returns 320x568 = correct 
    print("Width: \(screenWidth)") 
    print("Height: \(screenHeight)") 

    //======================= 

    captureSession.sessionPreset = AVCaptureSessionPresetHigh 

    if #available(iOS 10.0, *) {
        if let devices = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back) {
            print("Device name: \(devices.localizedName)")
        }
    } else {
        // Fallback on earlier versions
    }

    if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
        // Loop through all the capture devices on this phone
        for device in devices {

            print("Device name: \(device.localizedName)")

            // Make sure this particular device supports video
            if (device.hasMediaType(AVMediaTypeVideo)) {
                // Finally check the position and confirm we've got the back camera
                if (device.position == AVCaptureDevicePosition.back) {
                    captureDevice = device
                    if captureDevice != nil {
                        print("Capture device found")
                        beginSession()
                    }
                }
            }
        }
    }
} 


@IBAction func actionCameraCapture(_ sender: AnyObject) { 

    print("Camera button pressed") 
    saveToCamera() 
} 

func beginSession() { 

    do { 
     try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice)) 
     stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG] 

     if captureSession.canAddOutput(stillImageOutput) { 
      captureSession.addOutput(stillImageOutput) 
     } 

    } 
    catch { 
     print("error: \(error.localizedDescription)") 
    } 

    guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else { 
     print("no preview layer") 
     return 
    } 
    // this is what displays the camera view. But - it's on TOP of the drawn view, and under the overlay. ??
    //======================================= 
    self.view.layer.addSublayer(previewLayer) 
    previewLayer.frame = self.view.layer.frame 
    //self.previewLayer?.frame = self.view.bounds 
    //======================================= 


    imgOverlay.frame = self.view.frame 
    imgOverlay.image = self.drawCirclesOnImage(fromImage: nil, targetSize: imgOverlay.bounds.size) 

    self.view.bringSubview(toFront: navigationBar) 
    self.view.bringSubview(toFront: imgOverlay) 
    self.view.bringSubview(toFront: btnCapture) 
    // don't use shapeLayer anymore... 
    //  self.view.bringSubview(toFront: shapeLayer) 


    captureSession.startRunning() 
    print("Capture session running") 

} 


func getImageWithColor(color: UIColor, size: CGSize) -> UIImage { 
    let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: size.width, height: size.height)) 
    UIGraphicsBeginImageContextWithOptions(size, false, 0) 
    color.setFill() 
    UIRectFill(rect) 
    let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()! 
    UIGraphicsEndImageContext() 
    return image 
} 

func drawCirclesOnImage(fromImage: UIImage? = nil, targetSize: CGSize? = CGSize.zero) -> UIImage? { 

    if fromImage == nil && targetSize == CGSize.zero { 
     return nil 
    } 

    var tmpimg: UIImage? 

    if targetSize == CGSize.zero { 

     tmpimg = fromImage 

    } else { 

     tmpimg = getImageWithColor(color: UIColor.clear, size: targetSize!) 

    } 

    guard let img = tmpimg else { 
     return nil 
    } 

    let imageSize = img.size 
    let scale: CGFloat = 0 
    UIGraphicsBeginImageContextWithOptions(imageSize, false, scale) 

    img.draw(at: CGPoint.zero) 

    let w = imageSize.width 

    //print("Width: \(w)") 

    let midX = imageSize.width/2 
    let midY = imageSize.height/2 

    // red circles - radius in % 
    let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ] 

    // center "dot" - radius is 1.5% 
    var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.015), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true) 

    UIColor.red.setFill() 
    circlePath.stroke() 
    circlePath.fill() 

    // blue circle is between first and second red circles 
    circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: w * CGFloat((circleRads[0] + circleRads[1])/2.0), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true) 

    UIColor.blue.setStroke() 
    circlePath.lineWidth = 2.5 
    circlePath.stroke() 

    UIColor.red.setStroke() 

    for pct in circleRads { 

     let rad = w * CGFloat(pct) 

     circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true) 

     circlePath.lineWidth = 2.5 
     circlePath.stroke() 

    } 

    let newImage = UIGraphicsGetImageFromCurrentImageContext() 

    UIGraphicsEndImageContext() 

    return newImage 
} 

func saveToCamera() { 

    if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) in

            if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer) {
                if let cameraImage = UIImage(data: imageData) {

                    if let nImage = self.drawCirclesOnImage(fromImage: cameraImage, targetSize: CGSize.zero) {
                        UIImageWriteToSavedPhotosAlbum(nImage, nil, nil, nil)
                    }

                }
            }
        })
    }
} 



override func didReceiveMemoryWarning() { 
    super.didReceiveMemoryWarning() 
    // Dispose of any resources that can be recreated. 
} 

}

Answers


Try this, it works fine in this application of mine. :)

override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
    super.viewWillTransition(to: size, with: coordinator)
    // Update camera orientation
    let videoOrientation: AVCaptureVideoOrientation
    switch UIDevice.current.orientation {
    case .portrait:
        videoOrientation = .portrait
    case .portraitUpsideDown:
        videoOrientation = .portraitUpsideDown
    case .landscapeLeft:
        videoOrientation = .landscapeRight
    case .landscapeRight:
        videoOrientation = .landscapeLeft
    default:
        videoOrientation = .portrait
    }
    cameraView.layer.connection.videoOrientation = videoOrientation
}

This seems to control orientation, but how would I call it from my code? And it doesn't appear to solve the problem of the camera view being displayed at the wrong size. – Postmaster


Unless I'm missing something, the sample application on github is empty? – Postmaster


Hey @user7653427, thanks for telling me the project was empty; I've updated it now. :) –


I found the answer. The video preview image is now the proper size on both the iPhone SE and the iPad Mini. It should now work on other devices as well.

Replace the code in the original ViewController.swift, lines 107 through 127, with the code below. For clarity, I have left the commented-out parts in place.

The key line is previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill.

Thanks for the help.

    //==============================
    /*
    guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
        print("no preview layer")
        return
    }
    */

    // this is what displays the camera view.
    //=======================================
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.frame = self.view.layer.bounds
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
    self.view.layer.addSublayer(previewLayer!)
    //self.previewLayer?.frame = self.view.bounds
    //=======================================
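
For context, AVCaptureVideoPreviewLayer supports three videoGravity values; the short summary below is my own addition, not part of the answer, and just maps the constants onto the behavior seen in this thread:

    // Reference only (not from the original answer): the three gravity modes.
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect      // keeps the aspect ratio, letterboxes (the default, and the iPad behavior in the question)
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill  // keeps the aspect ratio, fills the layer and crops the overflow (the fix)
    previewLayer?.videoGravity = AVLayerVideoGravityResize            // stretches to fill the layer, distorting the image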

There is still one more thing to do. It still saves the cropped image, not the new full-screen image. That will be in the save-to-camera part. – Postmaster
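
On that last point, one option is to center-crop the captured photo to the preview's aspect ratio before drawing the circles, so the saved image matches what the aspect-filled preview shows. The following is only a minimal sketch, assuming a portrait layout; the helper name cropToPreviewAspect is mine, not from the thread:

// Hypothetical helper (not from the thread): center-crops a captured photo so its
// aspect ratio matches the preview layer, mirroring what .resizeAspectFill displays.
func cropToPreviewAspect(_ image: UIImage, previewSize: CGSize) -> UIImage {
    // image.size already accounts for imageOrientation, so the math stays in
    // display coordinates rather than raw sensor coordinates.
    let imageRatio = image.size.width / image.size.height
    let targetRatio = previewSize.width / previewSize.height

    var cropSize = image.size
    if imageRatio > targetRatio {
        // Photo is wider than the preview: trim the left and right edges.
        cropSize.width = image.size.height * targetRatio
    } else {
        // Photo is taller than the preview: trim the top and bottom edges.
        cropSize.height = image.size.width / targetRatio
    }

    // Redraw the photo centered in a context of the cropped size; the negative
    // offsets push the trimmed edges outside the canvas.
    let origin = CGPoint(x: (cropSize.width - image.size.width) / 2,
                         y: (cropSize.height - image.size.height) / 2)

    UIGraphicsBeginImageContextWithOptions(cropSize, false, image.scale)
    image.draw(at: origin)
    let cropped = UIGraphicsGetImageFromCurrentImageContext() ?? image
    UIGraphicsEndImageContext()
    return cropped
}

In saveToCamera it could then be called as self.drawCirclesOnImage(fromImage: self.cropToPreviewAspect(cameraImage, previewSize: self.view.bounds.size), targetSize: CGSize.zero), though whether the overlay circles then line up exactly with what the preview showed would still need to be verified on a device.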