//
//  VideoConvertor.swift
//  tdvideo
//
//  Created by aaa on 2024/1/24.
//

import Foundation
import AVKit
import CoreImage
import UIKit
import VideoToolbox

enum VideoReaderError: Error {
    case invalidVideo
    case notSpatialVideo
    case cannotCreateWriter
}

class VideoConvertor {

    func convertVideo(inputFile: URL, outputFile: URL, progress: ((Float) -> Void)? = nil) async throws {

        // Remove any previous output file; failing here is expected if none exists.
        do {
            try FileManager.default.removeItem(at: outputFile)
            print("Removed existing output file")
        } catch {
            print("Could not remove output file (it may not exist): \(error)")
        }

        // Load the asset and create a reader for it
        let asset = AVURLAsset(url: inputFile)
        let assetReader = try AVAssetReader(asset: asset)

        // Check whether this is a spatial (MV-HEVC) video by looking for the
        // QuickTime spatial-format metadata key
        let userDataItems = try await asset.loadMetadata(for: .quickTimeMetadata)
        let spatialCharacteristics = userDataItems.filter { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
        if spatialCharacteristics.isEmpty {
            print("This video is not a spatial video")
            throw VideoReaderError.notSpatialVideo
        }

        // Get the input video's orientation and size (used to configure the output)
        let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)

        // Load the video track and request both MV-HEVC video layers
        // (IDs 0 and 1, the usual left/right-eye pair)
        guard let videoTrack = try await asset.loadTracks(withMediaType: .video).first else {
            throw VideoReaderError.invalidVideo
        }
        let output = AVAssetReaderTrackOutput(
            track: videoTrack,
            outputSettings: [
                AVVideoDecompressionPropertiesKey: [
                    kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
                ],
            ]
        )
        assetReader.add(output)
        guard assetReader.startReading() else {
            throw assetReader.error ?? VideoReaderError.invalidVideo
        }

        // Each eye is drawn at half size and placed side by side, so the output
        // keeps the source width and half the height (preserving the aspect ratio)
        guard let vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height / 2), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30), isRealTime: false, queue: .main) else {
            throw VideoReaderError.cannotCreateWriter
        }

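        // Note: VideoWriter is defined elsewhere in this project; its exact API is
        // not shown in this file. From the call sites here, the assumed (inferred,
        // not confirmed) shape is a failable initializer plus something like:
        //     func add(image: CIImage, presentationTime: CMTime) -> Bool
        //     func finish() async throws -> SomeResult   // result discarded below
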
        let duration = try await asset.load(.duration)

        // Based on code from https://www.finnvoorhees.com/words/reading-and-writing-spatial-video-with-avfoundation
        while let nextSampleBuffer = output.copyNextSampleBuffer() {
            // Skip samples without tagged buffers rather than aborting the conversion
            guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { continue }

            // Pick the left- and right-eye buffers out of the tagged buffer group
            let leftEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
            })?.buffer
            let rightEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
            })?.buffer

            if let leftEyeBuffer,
               let rightEyeBuffer,
               case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
               case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {

                let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
                let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)

                let newpb = joinImages(leftImage: lciImage, rightImage: rciImage)

                let time = nextSampleBuffer.outputPresentationTimeStamp

                _ = vw.add(image: newpb, presentationTime: time)
                print("Added frame at \(time)")

                // Report progress as a fraction of the total duration. Compare in
                // seconds, since the two CMTimes may use different timescales.
                progress?(Float(time.seconds / duration.seconds))

                // This sleep is needed to stop memory ballooning - it keeps usage
                // around 280 MB rather than spiraling up to 8+ GB!
                try await Task.sleep(nanoseconds: 3_000_000)
            }
        }

        // copyNextSampleBuffer() returns nil on completion or failure;
        // the reader's status and error below show which one it was
        _ = try await vw.finish()

        print("status - \(assetReader.status)")
        print("error - \(assetReader.error?.localizedDescription ?? "None")")
        print("Finished")
    }

    // Returns the track's preferred transform and its display size after applying it
    func getOrientationAndResolutionSizeForVideo(asset: AVAsset) async throws -> (CGAffineTransform, CGSize) {
        guard let track = try await asset.loadTracks(withMediaType: AVMediaType.video).first else {
            throw VideoReaderError.invalidVideo
        }
        let naturalSize = try await track.load(.naturalSize)
        let naturalTransform = try await track.load(.preferredTransform)
        let size = naturalSize.applying(naturalTransform)
        return (naturalTransform, CGSize(width: abs(size.width), height: abs(size.height)))
    }

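    // Worked example for getOrientationAndResolutionSizeForVideo (illustrative,
    // not from the original source): a 1920x1080 track recorded in portrait
    // carries a 90-degree preferredTransform, so naturalSize.applying(...)
    // yields (-1080.0, 1920.0); the abs() in the return recovers 1080x1920.
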
    // Compose the two eye images into a single side-by-side image. Each eye is
    // drawn at half its original size, so the result has the source width and
    // half the source height.
    func joinImages(leftImage: CIImage, rightImage: CIImage) -> CIImage {
        let left = UIImage(ciImage: leftImage)
        let right = UIImage(ciImage: rightImage)

        let imageWidth = left.size.width / 2 + right.size.width / 2
        let imageHeight = left.size.height / 2

        let newImageSize = CGSize(width: imageWidth, height: imageHeight)
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1)
        left.draw(in: CGRect(x: 0, y: 0, width: imageWidth / 2, height: imageHeight))
        right.draw(in: CGRect(x: imageWidth / 2, y: 0, width: imageWidth / 2, height: imageHeight))
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()

        return CIImage(cgImage: image.cgImage!)
    }
}
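
// Usage sketch (illustrative, not part of the original file): driving the
// conversion from a Task with a progress callback. The function name and file
// paths below are hypothetical placeholders.
func exampleConvertUsage() {
    Task {
        let convertor = VideoConvertor()
        let input = URL(fileURLWithPath: "/path/to/spatial-input.mov")
        let output = URL(fileURLWithPath: "/path/to/side-by-side-output.mov")
        do {
            try await convertor.convertVideo(inputFile: input, outputFile: output) { fraction in
                print("Progress: \(Int(fraction * 100))%")
            }
        } catch VideoReaderError.notSpatialVideo {
            print("Input is not an MV-HEVC spatial video")
        } catch {
            print("Conversion failed: \(error)")
        }
    }
}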