//
//  VideoConvertor2.swift
//  tdvideo
//
//  Created by aaa on 2024/1/24.
//
//  Video transcoding
//  Red-blue (anaglyph) stereo
//  com.nsk.tdvideo

/*
let rr: AVAsynchronousCIImageFilteringRequest?

// Create a video compositor for cross-eyed video using
// AVAsynchronousCIImageFilteringRequest.
let videoComposition = AVMutableVideoComposition(asset: asset) { request in
    // Timestamp of the current frame.
    let time = request.compositionTime
    // Callback with progress.
    progress?(Float(time.value) / Float(duration.value))
}
videoComposition.renderSize = CGSize(width: Int(videoSize.width), height: Int(videoSize.height / 2))
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
playerItem?.videoComposition = videoComposition

// Start playback.
let nc: NotificationCenter = NotificationCenter.default
let noti: NSNotification = NSNotification(name: NSNotification.Name(rawValue: "upvideo"), object: nil)
nc.post(noti as Notification)

// Based on code from https://www.finnvoorhees.com/words/reading-and-writing-spatial-video-with-avfoundation

// CIScreenBlendMode:       screen blend - inverts both images' channel values, multiplies them, then inverts the result.
// CIHardLightBlendMode:    uses the source image's luminance to decide how the two images mix; brighter pixels contribute more.
// CILightenBlendMode:      compares the two images pixel by pixel and keeps the lighter pixel.
// CIColorDodgeBlendMode:   uses the source image's color to brighten the destination image.
// CIColorBurnBlendMode:    uses the source image's color to darken the destination image.
// CIDarkenBlendMode:       compares the two images pixel by pixel and keeps the darker pixel.
// CILinearDodgeBlendMode:  adds the two images linearly, a brightness-accumulation effect.
// CIMultiplyBlendMode:     multiplies the two images' pixel values.
// CISourceOverCompositing: places the source image over the destination image.
*/

import Foundation
import AVKit
import UIKit
import VideoToolbox
import CoreImage
import ImageIO

class VideoConvertor2 {

    /// Left-eye view of the frame currently being processed in a stereo video.
    var leftEyeImage: CVPixelBuffer?
    /// Right-eye view of the frame currently being processed in a stereo video.
    var rightEyeImage: CVPixelBuffer?

    /// Output mode: spatial video / cross-eyed (2) / red-blue anaglyph (3) / Gaussian blur (4).
    var type = 0

    func convertVideo(
        asset: AVAsset,
        outputFile: URL,
        progress: ((Float) -> ())? = nil
    ) async throws {
        // Remove any previous output at the same path.
        do {
            try FileManager.default.removeItem(atPath: outputFile.path)
            print("Deleted existing output file")
        } catch {
            print("Error deleting output file: \(error)")
        }

        let assetReader = try AVAssetReader(asset: asset)

        // Check whether this is a spatial video.
        let userDataItems = try await asset.loadMetadata(for: .quickTimeMetadata)
        let spatialCharacteristics = userDataItems.filter {
            $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version"
        }
        if spatialCharacteristics.isEmpty {
            print("Not a spatial video")
        }

        // Read the input video's orientation and size (used to configure the output).
        let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)

        // For side-by-side output the height is halved: the two eye views sit
        // next to each other and the aspect ratio is preserved.
        let vw: VideoWriter?
        if type == 3 {
            // Spatial video + red-blue anaglyph: full-size output.
            vw = VideoWriter(url: outputFile,
                             width: Int(videoSize.width),
                             height: Int(videoSize.height),
                             orientation: orientation,
                             sessionStartTime: CMTime(value: 1, timescale: 30),
                             isRealTime: false,
                             queue: .main)
        } else {
            // Cross-eyed / parallel-eyed: side-by-side at half height.
            vw = VideoWriter(url: outputFile,
                             width: Int(videoSize.width),
                             height: Int(videoSize.height / 2),
                             orientation: orientation,
                             sessionStartTime: CMTime(value: 1, timescale: 30),
                             isRealTime: false,
                             queue: .main)
        }

        // Load the video track, requesting both MV-HEVC layers (left and right eye).
        let output = try await AVAssetReaderTrackOutput(
            track: asset.loadTracks(withMediaType: .video).first!,
            outputSettings: [
                AVVideoDecompressionPropertiesKey: [
                    kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
                ],
            ]
        )
        assetReader.add(output)
        assetReader.startReading()

        let duration = try await asset.load(.duration)

        while let nextSampleBuffer = output.copyNextSampleBuffer() {
            guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { return }
            let leftEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
            })?.buffer
            let rightEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
            })?.buffer

            if let leftEyeBuffer,
               let rightEyeBuffer,
               case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
               case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {
                leftEyeImage = leftEyePixelBuffer
                rightEyeImage = rightEyePixelBuffer

                let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
                let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)

                // Cross-eyed: join the two eye views side by side.
                if type == 2 {
                    let newpb = joinImages(leftImage: lciImage, rightImage: rciImage)
                    let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                    _ = vw!.add(image: newpb, presentationTime: time)
                    print("Added frame at \(time)")
                    // Callback with progress.
                    progress?(Float(time.value) / Float(duration.value))
                    // This sleep is needed to stop memory blooming - keeps around 280Mb
                    // rather than spiraling up to 8+Gig!
                    try await Task.sleep(nanoseconds: 3_000_000)
                }

                // Red-blue anaglyph: the left eye supplies the red channel,
                // the right eye the blue channel.
                if type == 3 {
                    // CIColorMatrix takes one 4-component CIVector per output
                    // channel. Keep only the red channel of the left image...
                    let redFilter = CIFilter(name: "CIColorMatrix")!
                    redFilter.setValue(lciImage, forKey: kCIInputImageKey)
                    redFilter.setValue(CIVector(x: 1, y: 0, z: 0, w: 0), forKey: "inputRVector")
                    redFilter.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputGVector")
                    redFilter.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputBVector")

                    // ...and only the blue channel of the right image.
                    let blueFilter = CIFilter(name: "CIColorMatrix")!
                    blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
                    blueFilter.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputRVector")
                    blueFilter.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputGVector")
                    blueFilter.setValue(CIVector(x: 0, y: 0, z: 1, w: 0), forKey: "inputBVector")

                    // Fetch the filtered images and composite them.
                    if let redOutputImage = redFilter.outputImage,
                       let blueOutputImage = blueFilter.outputImage {
                        let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
                        compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
                        compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)

                        let sharpenedFilter = CIFilter(name: "CISharpenLuminance")!
                        sharpenedFilter.setValue(compositeFilter.outputImage, forKey: kCIInputImageKey)
                        sharpenedFilter.setValue(2, forKey: kCIInputSharpnessKey)

                        // let colorControlsFilter = CIFilter(name: "CIColorControls")!
                        // colorControlsFilter.setValue(sharpenedFilter.outputImage, forKey: kCIInputImageKey)
                        // colorControlsFilter.setValue(0.7, forKey: kCIInputSaturationKey)

                        let lastImg = sharpenedFilter.outputImage!
                        let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                        _ = vw!.add(image: lastImg, presentationTime: time)
                        print("Added frame at \(time)")
                        // Callback with progress.
                        progress?(Float(time.value) / Float(duration.value))
                        // This sleep is needed to stop memory blooming - keeps around 280Mb
                        // rather than spiraling up to 8+Gig!
                        try await Task.sleep(nanoseconds: 3_000_000)
                    }
                }

                // Gaussian blur: blur both eye views, then join them side by side.
                if type == 4 {
                    let filter1 = CIFilter(name: "CIGaussianBlur")!
                    filter1.setValue(lciImage, forKey: kCIInputImageKey)
                    let filter2 = CIFilter(name: "CIGaussianBlur")!
                    filter2.setValue(rciImage, forKey: kCIInputImageKey)
                    let newpb = joinImages(leftImage: filter1.outputImage!, rightImage: filter2.outputImage!)
                    let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                    _ = vw!.add(image: newpb, presentationTime: time)
                    print("Added frame at \(time)")
                    // Callback with progress.
                    progress?(Float(time.value) / Float(duration.value))
                    // This sleep is needed to stop memory blooming - keeps around 280Mb
                    // rather than spiraling up to 8+Gig!
                    try await Task.sleep(nanoseconds: 3_000_000)
                }
            }
        }

        print("status - \(assetReader.status)")
        print("status - \(assetReader.error?.localizedDescription ?? "None")")
        print("Finished")
        _ = try await vw!.finish()
    }

    /// Inspect the data behind a CIImage (prints the frame count).
    func isSpatialImage2(from ciImage: CIImage) {
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return }
        let dataProvider = CGDataProvider(data: cgImage.dataProvider!.data! as CFData)
        let imageSource = CGImageSourceCreateWithDataProvider(dataProvider!, nil)
        let frameCount = CGImageSourceGetCount(imageSource!)
        print(frameCount)
        for index in 0..<frameCount {
            // Reconstructed body (assumption; the original was lost): log each
            // frame's properties.
            let properties = CGImageSourceCopyPropertiesAtIndex(imageSource!, index, nil)
            print(properties ?? "no properties at index \(index)")
        }
    }

    /// Read the input video's orientation and size (used to set the output orientation).
    func getOrientationAndResolutionSizeForVideo(asset: AVAsset) async throws -> (CGAffineTransform, CGSize) {
        guard let track = try await asset.loadTracks(withMediaType: AVMediaType.video).first else {
            throw VideoReaderError.invalidVideo
        }
        let naturalSize = try await track.load(.naturalSize)
        let naturalTransform = try await track.load(.preferredTransform)
        let size = naturalSize.applying(naturalTransform)
        return (naturalTransform, CGSize(width: abs(size.width), height: abs(size.height)))
    }

    /// Join two images into one side-by-side image.
    func joinImages(leftImage: CIImage, rightImage: CIImage) -> CIImage {
        let left = UIImage(ciImage: leftImage)
        let right = UIImage(ciImage: rightImage)
        // Half-size canvas: each eye is squeezed to half width and the result is
        // half height, matching the VideoWriter dimensions chosen above.
        let imageWidth = left.size.width / 2 + right.size.width / 2
        let imageHeight = left.size.height / 2
        let newImageSize = CGSize(width: imageWidth, height: imageHeight)
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1)
        left.draw(in: CGRect(x: 0, y: 0, width: imageWidth / 2, height: imageHeight))
        right.draw(in: CGRect(x: imageWidth / 2, y: 0, width: imageWidth / 2, height: imageHeight))
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        let ci = CIImage(cgImage: image.cgImage!)
        return ci
    }
}
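
// A small helper sketch, assuming the same spatial-video metadata test that
// convertVideo(asset:outputFile:progress:) performs inline is wanted as a
// reusable check; this function is not part of the original file.
func isSpatialVideo(_ asset: AVAsset) async throws -> Bool {
    // MV-HEVC spatial videos carry this QuickTime metadata key.
    let items = try await asset.loadMetadata(for: .quickTimeMetadata)
    return items.contains { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
}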
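
// Usage sketch, assuming VideoWriter behaves as the calls above suggest; the
// function name and URLs are hypothetical and not part of the original file.
func exampleConvert(inputURL: URL, outputURL: URL) async throws {
    let convertor = VideoConvertor2()
    convertor.type = 2 // cross-eyed side-by-side output
    let asset = AVURLAsset(url: inputURL)
    try await convertor.convertVideo(asset: asset, outputFile: outputURL) { fraction in
        print("progress: \(Int(fraction * 100))%")
    }
}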