Capturing Live Photos requires an internal reconfiguration of the capture pipeline, which takes time and interrupts any in-progress captures. Before taking your first Live Photo, enable Live Photo capture on the AVCapturePhotoOutput object so the pipeline is configured appropriately.
let photoOutput = AVCapturePhotoOutput()

// Attempt to add the photo output to the session.
if captureSession.canAddOutput(photoOutput) {
    captureSession.sessionPreset = .photo
    captureSession.addOutput(photoOutput)
} else {
    throw CameraError.configurationFailed
}

// Configure the photo output's behavior.
photoOutput.isHighResolutionCaptureEnabled = true
photoOutput.isLivePhotoCaptureEnabled = photoOutput.isLivePhotoCaptureSupported

// Start the capture session.
captureSession.startRunning()

let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
photoSettings.livePhotoMovieFileURL = // output url

// Shoot the Live Photo, using a custom class to handle capture delegate callbacks.
let captureProcessor = LivePhotoCaptureProcessor()
photoOutput.capturePhoto(with: photoSettings, delegate: captureProcessor)
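The movie file URL above is left as a placeholder. One common approach, following Apple's AVCam sample, is to write the paired movie to a uniquely named file in the temporary directory; a minimal sketch:

// A sketch of one way to supply the paired movie URL: a unique .mov file in the
// temporary directory. The exact location and lifetime are up to the app.
let livePhotoMovieFileName = UUID().uuidString
let livePhotoMovieURL = URL(fileURLWithPath: NSTemporaryDirectory())
    .appendingPathComponent(livePhotoMovieFileName)
    .appendingPathExtension("mov")
photoSettings.livePhotoMovieFileURL = livePhotoMovieURL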
func saveLivePhotoToPhotosLibrary(stillImageData: Data, livePhotoMovieURL: URL) {
    PHPhotoLibrary.requestAuthorization { status in
        guard status == .authorized else { return }
        PHPhotoLibrary.shared().performChanges({
            // Add the captured photo's file data as the main resource for the Photos asset.
            let creationRequest = PHAssetCreationRequest.forAsset()
            creationRequest.addResource(with: .photo, data: stillImageData, options: nil)

            // Add the movie file URL as the Live Photo's paired video resource.
            let options = PHAssetResourceCreationOptions()
            options.shouldMoveFile = true
            creationRequest.addResource(with: .pairedVideo, fileURL: livePhotoMovieURL, options: options)
        }) { success, error in
            // Handle completion.
        }
    }
}
Capturing Live Photos adds two steps to the process shown in "Tracking Photo Capture Progress." After delivery of the still image result (step 4), the photo output reports the movie capture status (step 5) and then delivers the movie result (step 6). (Final cleanup becomes step 7.)
class LivePhotoCaptureProcessor: NSObject, AVCapturePhotoCaptureDelegate {
    // ... other PhotoCaptureDelegate methods and supporting properties ...

    // A handler to call when Live Photo capture begins and ends.
    var livePhotoStatusHandler: (Bool) -> () = { _ in }

    // A property for tracking in-progress captures and updating UI accordingly.
    var livePhotosInProgress = 0 {
        didSet {
            // Update the UI accordingly based on the value of this property.
        }
    }

    // Call the handler when PhotoCaptureDelegate methods indicate Live Photo capture is in progress.
    func photoOutput(_ output: AVCapturePhotoOutput,
                     willBeginCaptureFor resolvedSettings: AVCaptureResolvedPhotoSettings) {
        let capturingLivePhoto = (resolvedSettings.livePhotoMovieDimensions.width > 0 &&
                                  resolvedSettings.livePhotoMovieDimensions.height > 0)
        livePhotoStatusHandler(capturingLivePhoto)
    }

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishRecordingLivePhotoMovieForEventualFileAt outputFileURL: URL,
                     resolvedSettings: AVCaptureResolvedPhotoSettings) {
        livePhotoStatusHandler(false)
    }
}
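The class above handles the movie capture status notifications (step 5). The movie result itself (step 6) arrives through a separate delegate callback, sketched below; the stillImageData stored property is hypothetical, and the sketch assumes the saveLivePhotoToPhotosLibrary function shown earlier:

func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingLivePhotoToMovieFileAt outputFileURL: URL,
                 duration: CMTime,
                 photoDisplayTime: CMTime,
                 resolvedSettings: AVCaptureResolvedPhotoSettings,
                 error: Error?) {
    if let error = error {
        print("Error processing Live Photo movie: \(error)")
        return
    }
    // Pair the movie with the still image data captured earlier (for example, in
    // didFinishProcessingPhoto), then save both together as one Photos asset.
    if let stillImageData = self.stillImageData { // hypothetical stored property
        saveLivePhotoToPhotosLibrary(stillImageData: stillImageData, livePhotoMovieURL: outputFileURL)
    }
}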
PHPhotoLibrary.requestAuthorization { status in
    guard status == .authorized else { return }
    // Use PHPhotoLibrary.shared().performChanges(...) to add assets.
}
class PhotoCaptureProcessor: NSObject, AVCapturePhotoCaptureDelegate {
    var completionHandler: () -> () = {}

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishCaptureFor resolvedSettings: AVCaptureResolvedPhotoSettings,
                     error: Error?) {
        completionHandler()
    }

    // ... other delegate methods to handle capture results ...
}
// Keep a set of in-progress capture delegates.
var capturesInProgress = Set<PhotoCaptureProcessor>()

func shootPhoto() {
    // Make a new capture delegate for each capture and add it to the set.
    let captureProcessor = PhotoCaptureProcessor()
    capturesInProgress.insert(captureProcessor)

    // Schedule the capture delegate to be removed from the set after capture.
    captureProcessor.completionHandler = { [weak self] in
        self?.capturesInProgress.remove(captureProcessor); return
    }

    self.photoOutput.capturePhoto(with: self.settingsForNextPhoto(), delegate: captureProcessor)
}
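The settingsForNextPhoto() helper referenced above is not defined in this excerpt; the important point is that an AVCapturePhotoSettings instance can't be reused across captures, so each call should build a fresh one. A minimal sketch, with illustrative choices for codec and flash:

// Hypothetical helper: build fresh settings for every capture, because an
// AVCapturePhotoSettings instance must not be reused for multiple captures.
func settingsForNextPhoto() -> AVCapturePhotoSettings {
    let settings: AVCapturePhotoSettings
    if photoOutput.availablePhotoCodecTypes.contains(.hevc) {
        settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    } else {
        settings = AVCapturePhotoSettings()
    }
    settings.flashMode = .auto
    return settings
}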
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized: // The user has previously granted access to the camera.
    self.setupCaptureSession()
case .notDetermined: // The user has not yet been asked for camera access.
    AVCaptureDevice.requestAccess(for: .video) { granted in
        if granted {
            self.setupCaptureSession()
        }
    }
case .denied: // The user has previously denied access.
    return
case .restricted: // The user can't grant access due to restrictions.
    return
}
class PreviewView: UIView {
    override class var layerClass: AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    /// Convenience wrapper to get the layer as its statically known type.
    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }
}
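With a PreviewView in place, point its layer at the session so the view displays the camera feed; for example (the videoGravity setting is optional and only controls how the video fills the view):

// Attach the capture session to the preview layer and choose a fill mode.
previewView.videoPreviewLayer.session = captureSession
previewView.videoPreviewLayer.videoGravity = .resizeAspectFill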
// Remove the existing device input first, because AVCaptureSession doesn't support
// simultaneous use of the rear and front cameras.
self.session.removeInput(self.videoDeviceInput)

if self.session.canAddInput(videoDeviceInput) {
    NotificationCenter.default.removeObserver(self, name: .AVCaptureDeviceSubjectAreaDidChange, object: currentVideoDevice)
    NotificationCenter.default.addObserver(self, selector: #selector(self.subjectAreaDidChange), name: .AVCaptureDeviceSubjectAreaDidChange, object: videoDeviceInput.device)

    self.session.addInput(videoDeviceInput)
    self.videoDeviceInput = videoDeviceInput
} else {
    self.session.addInput(self.videoDeviceInput)
}
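A swap like this is normally bracketed in a configuration transaction so the session applies the input change atomically; a sketch:

// Bracket input and output changes in a configuration transaction so they take effect atomically.
self.session.beginConfiguration()
// ... remove the old input and add the new one, as shown above ...
self.session.commitConfiguration()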
if reason == .audioDeviceInUseByAnotherClient || reason == .videoDeviceInUseByAnotherClient {
    showResumeButton = true
} else if reason == .videoDeviceNotAvailableWithMultipleForegroundApps {
    // Fade in a label to inform the user that the camera is unavailable.
    cameraUnavailableLabel.alpha = 0
    cameraUnavailableLabel.isHidden = false
    UIView.animate(withDuration: 0.25) {
        self.cameraUnavailableLabel.alpha = 1
    }
} else if reason == .videoDeviceNotAvailableDueToSystemPressure {
    print("Session stopped running due to shutdown system pressure level.")
}
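The reason value examined above comes from the userInfo dictionary of the session-interruption notification; a sketch of extracting it in the notification handler, following the pattern in Apple's AVCam sample:

@objc func sessionWasInterrupted(notification: NSNotification) {
    // Pull the interruption reason out of the notification's userInfo dictionary.
    if let userInfoValue = notification.userInfo?[AVCaptureSessionInterruptionReasonKey] as AnyObject?,
       let reasonIntegerValue = userInfoValue.integerValue,
       let reason = AVCaptureSession.InterruptionReason(rawValue: reasonIntegerValue) {
        print("Capture session was interrupted with reason \(reason)")
        // Handle the reason as shown above.
    }
}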
// Flash the screen to signal that AVCam took a photo.
DispatchQueue.main.async {
    self.previewView.videoPreviewLayer.opacity = 0
    UIView.animate(withDuration: 0.25) {
        self.previewView.videoPreviewLayer.opacity = 1
    }
}
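In AVCam this flash runs from the delegate callback that fires at the moment of capture; a sketch, where flashScreen() is a hypothetical helper wrapping the animation above:

func photoOutput(_ output: AVCapturePhotoOutput,
                 willCapturePhotoFor resolvedSettings: AVCaptureResolvedPhotoSettings) {
    // Run the screen-flash animation shown above when the capture begins.
    flashScreen() // hypothetical helper wrapping the DispatchQueue.main.async block above
}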
// A portrait effects matte gets generated only if AVFoundation detects a face.
if var portraitEffectsMatte = photo.portraitEffectsMatte {
    if let orientation = photo.metadata[String(kCGImagePropertyOrientation)] as? UInt32 {
        portraitEffectsMatte = portraitEffectsMatte.applyingExifOrientation(CGImagePropertyOrientation(rawValue: orientation)!)
    }
    let portraitEffectsMattePixelBuffer = portraitEffectsMatte.mattingImage
    let portraitEffectsMatteImage = CIImage(cvImageBuffer: portraitEffectsMattePixelBuffer,
                                            options: [.auxiliaryPortraitEffectsMatte: true])
}
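A portrait effects matte is only delivered if it was requested before capture; a sketch of opting in on both the photo output and the per-photo settings (delivery generally goes hand in hand with depth data delivery):

// Opt in to portrait effects matte delivery before capture.
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
if photoOutput.isPortraitEffectsMatteDeliveryEnabled {
    photoSettings.isDepthDataDeliveryEnabled = true
    photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
}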
// Find the semantic segmentation matte image for the specified type.
guard var segmentationMatte = photo.semanticSegmentationMatte(for: ssmType) else { return }

// Retrieve the photo orientation and apply it to the matte image.
if let orientation = photo.metadata[String(kCGImagePropertyOrientation)] as? UInt32,
   let exifOrientation = CGImagePropertyOrientation(rawValue: orientation) {
    // Apply the Exif orientation to the matte image.
    segmentationMatte = segmentationMatte.applyingExifOrientation(exifOrientation)
}

var imageOption: CIImageOption!

// Switch on the AVSemanticSegmentationMatteType value.
switch ssmType {
case .hair:
    imageOption = .auxiliarySemanticSegmentationHairMatte
case .skin:
    imageOption = .auxiliarySemanticSegmentationSkinMatte
case .teeth:
    imageOption = .auxiliarySemanticSegmentationTeethMatte
default:
    print("This semantic segmentation type is not supported!")
    return
}

guard let perceptualColorSpace = CGColorSpace(name: CGColorSpace.sRGB) else { return }

// Create a new CIImage from the matte's underlying CVPixelBuffer.
let ciImage = CIImage(cvImageBuffer: segmentationMatte.mattingImage,
                      options: [imageOption: true,
                                .colorSpace: perceptualColorSpace])

// Get the HEIF representation of this image.
guard let imageData = context.heifRepresentation(of: ciImage,
                                                 format: .RGBA8,
                                                 colorSpace: perceptualColorSpace,
                                                 options: [.depthImage: ciImage]) else { return }

// Add the image data to the SSM data array for writing to the photo library.
semanticSegmentationMatteDataArray.append(imageData)
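Semantic segmentation mattes also have to be requested before capture, on both the photo output and the per-photo settings; a sketch that simply enables every type the output supports:

// Request semantic segmentation mattes: enable the desired types on the photo
// output, then on the settings for an individual capture.
photoOutput.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes
photoSettings.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes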
if let dualCameraDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back) {
    defaultVideoDevice = dualCameraDevice
} else if let backCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) {
    // If a rear dual camera is not available, default to the rear wide angle camera.
    defaultVideoDevice = backCameraDevice
} else if let frontCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) {
    // If the rear wide angle camera isn't available, default to the front wide angle camera.
    defaultVideoDevice = frontCameraDevice
}
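Whichever device is selected, it still has to be wrapped in an AVCaptureDeviceInput and added to the session; a minimal sketch:

// Wrap the chosen device in an input and attach it to the session.
guard let videoDevice = defaultVideoDevice else { return }
do {
    let videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
    if captureSession.canAddInput(videoDeviceInput) {
        captureSession.addInput(videoDeviceInput)
    }
} catch {
    print("Couldn't create video device input: \(error)")
}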
PHPhotoLibrary.shared().performChanges({
    let options = PHAssetResourceCreationOptions()
    options.shouldMoveFile = true
    let creationRequest = PHAssetCreationRequest.forAsset()
    creationRequest.addResource(with: .video, fileURL: outputFileURL, options: options)
}, completionHandler: { success, error in
    if !success {
        print("AVCam couldn't save the movie to your photo library: \(String(describing: error))")
    }
    cleanup()
})
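The cleanup() call above is a helper from the surrounding sample; a plausible implementation, assuming it has access to the recording's outputFileURL, simply removes the temporary movie file once it's no longer needed:

// Hypothetical cleanup helper: delete the temporary movie file if it still exists.
func cleanup() {
    let path = outputFileURL.path
    if FileManager.default.fileExists(atPath: path) {
        do {
            try FileManager.default.removeItem(atPath: path)
        } catch {
            print("Could not remove file at url: \(outputFileURL)")
        }
    }
}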