//
// VideoConvertor2.swift
// tdvideo
//
// Created by aaa on 2024/1/24.
//
//
// com.nsk.tdvideo
/*
 Reference: an alternative conversion path using AVMutableVideoComposition,
 filtering each frame through an AVAsynchronousCIImageFilteringRequest:

 let videoComposition = AVMutableVideoComposition(asset: asset) { (request: AVAsynchronousCIImageFilteringRequest) in
     let time = request.compositionTime
     // Callback with progress.
     progress?(Float(time.value) / Float(duration.value))
 }
 videoComposition.renderSize = CGSize(width: Int(videoSize.width), height: Int(videoSize.height / 2))
 videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
 playerItem?.videoComposition = videoComposition

 // Posting a notification when the video has been updated:
 let nc: NotificationCenter = NotificationCenter.default
 nc.post(Notification(name: Notification.Name("upvideo")))

 // Based on code from https://www.finnvoorhees.com/words/reading-and-writing-spatial-video-with-avfoundation

 // Core Image blend modes considered for compositing the two eye images:
 // CIScreenBlendMode:       inverts, multiplies, then re-inverts; the result is never darker
 // CIHardLightBlendMode:    multiplies or screens depending on the source color
 // CILightenBlendMode:      keeps the lighter of the source and background pixels
 // CIColorDodgeBlendMode:   brightens the background to reflect the source
 // CIColorBurnBlendMode:    darkens the background to reflect the source
 // CIDarkenBlendMode:       keeps the darker of the source and background pixels
 // CILinearDodgeBlendMode:  adds the source and background color values
 // CIMultiplyBlendMode:     multiplies source and background; the result is never lighter
 // CISourceOverCompositing: places the source image over the background
 */
import Foundation
import AVKit
import VideoToolbox
import CoreImage
import ImageIO
import UIKit // UIImage and UIGraphics are used by joinImages(leftImage:rightImage:)
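/// Converts an Apple spatial (MV-HEVC) video into a flat video: side-by-side,
/// red/blue anaglyph, or a blurred side-by-side preview, selected via `type`.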
class VideoConvertor2 {
    /// Most recently decoded left-eye pixel buffer.
    var leftEyeImage: CVPixelBuffer?
    /// Most recently decoded right-eye pixel buffer.
    var rightEyeImage: CVPixelBuffer?
    // Output mode: 2 = side-by-side, 3 = red/blue anaglyph, 4 = blurred side-by-side.
    var type = 0
    func convertVideo(asset: AVAsset, outputFile: URL, progress: ((Float) -> Void)? = nil) async throws {
        // Remove any previous output file so the writer can create a fresh one.
        do {
            try FileManager.default.removeItem(atPath: outputFile.path)
            print("Video file deleted successfully")
        } catch {
            print("Error deleting video file: \(error)")
        }
        let assetReader = try AVAssetReader(asset: asset)
        // Check the QuickTime metadata for the spatial-video format marker.
        let userDataItems = try await asset.loadMetadata(for: .quickTimeMetadata)
        let spatialCharacteristics = userDataItems.filter { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
        if spatialCharacteristics.isEmpty {
            print("Not a spatial video")
        }
        // Get the track's orientation transform and its rotated display size.
        let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)
        // Choose the output frame size for the selected mode.
        let vw: VideoWriter?
        if type == 3 {
            // Anaglyph (red/blue): the two eyes are blended into one full-size frame.
            vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30), isRealTime: false, queue: .main)
        } else {
            // Side-by-side: two half-width eyes share one frame at half the original height.
            vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height / 2), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30), isRealTime: false, queue: .main)
        }
        // Configure a track output that decodes both MV-HEVC video layers.
        let output = try await AVAssetReaderTrackOutput(
            track: asset.loadTracks(withMediaType: .video).first!,
            outputSettings: [
                AVVideoDecompressionPropertiesKey: [
                    kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
                ],
            ]
        )
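        // Requesting both layer IDs makes copyNextSampleBuffer() vend groups of
        // tagged buffers; the .stereoView tags below identify each buffer's eye.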
        assetReader.add(output)
        assetReader.startReading()
        let duration = try await asset.load(.duration)
        while let nextSampleBuffer = output.copyNextSampleBuffer() {
            // A spatial frame arrives as a group of tagged buffers, one per eye.
            // Stop (rather than abort the whole function) if a sample has none,
            // so the writer below is still finished properly.
            guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { break }
            let leftEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
            })?.buffer
            let rightEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
            })?.buffer
            if let leftEyeBuffer,
               let rightEyeBuffer,
               case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
               case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {
                leftEyeImage = leftEyePixelBuffer
                rightEyeImage = rightEyePixelBuffer
                let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
                let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)
                // Mode 2: plain side-by-side output.
                if type == 2 {
                    let newpb = joinImages(leftImage: lciImage, rightImage: rciImage)
                    let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                    _ = vw!.add(image: newpb, presentationTime: time)
                    print("Added frame at \(time)")
                    // Callback with progress.
                    progress?(Float(time.value) / Float(duration.value))
                    // This sleep is needed to stop memory ballooning - keeps around 280 MB rather than spiraling up to 8+ GB!
                    try await Task.sleep(nanoseconds: 3_000_000)
                }
                // Mode 3: red/blue anaglyph.
                if type == 3 {
                    // CIColorMatrix takes one 4-component vector per output channel,
                    // so the 4x5 color matrices are expressed as per-channel vectors.
                    let zero = CIVector(x: 0, y: 0, z: 0, w: 0)
                    // Keep only the red channel of the left-eye image.
                    let redFilter = CIFilter(name: "CIColorMatrix")!
                    redFilter.setValue(lciImage, forKey: kCIInputImageKey)
                    redFilter.setValue(CIVector(x: 1, y: 0, z: 0, w: 0), forKey: "inputRVector") // red
                    redFilter.setValue(zero, forKey: "inputGVector") // green
                    redFilter.setValue(zero, forKey: "inputBVector") // blue
                    // Keep only the blue channel of the right-eye image.
                    let blueFilter = CIFilter(name: "CIColorMatrix")!
                    blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
                    blueFilter.setValue(zero, forKey: "inputRVector") // red
                    blueFilter.setValue(zero, forKey: "inputGVector") // green
                    blueFilter.setValue(CIVector(x: 0, y: 0, z: 1, w: 0), forKey: "inputBVector") // blue
                    // Screen-blend the two single-channel images, then sharpen.
                    if let redOutputImage = redFilter.outputImage,
                       let blueOutputImage = blueFilter.outputImage {
                        let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
                        compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
                        compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)
                        let sharpenedFilter = CIFilter(name: "CISharpenLuminance")!
                        sharpenedFilter.setValue(compositeFilter.outputImage, forKey: kCIInputImageKey)
                        sharpenedFilter.setValue(2, forKey: kCIInputSharpnessKey)
                        // let colorControlsFilter = CIFilter(name: "CIColorControls")!
                        // colorControlsFilter.setValue(sharpenedFilter.outputImage, forKey: kCIInputImageKey)
                        // colorControlsFilter.setValue(0.7, forKey: kCIInputSaturationKey)
                        let lastImg = sharpenedFilter.outputImage!
                        let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                        _ = vw!.add(image: lastImg, presentationTime: time)
                        print("Added frame at \(time)")
                        // Callback with progress.
                        progress?(Float(time.value) / Float(duration.value))
                        // This sleep is needed to stop memory ballooning - keeps around 280 MB rather than spiraling up to 8+ GB!
                        try await Task.sleep(nanoseconds: 3_000_000)
                    }
                }
                // Mode 4: blurred side-by-side preview.
                if type == 4 {
                    let filter1 = CIFilter(name: "CIGaussianBlur")!
                    filter1.setValue(lciImage, forKey: kCIInputImageKey)
                    let filter2 = CIFilter(name: "CIGaussianBlur")!
                    filter2.setValue(rciImage, forKey: kCIInputImageKey)
                    let newpb = joinImages(leftImage: filter1.outputImage!, rightImage: filter2.outputImage!)
                    let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                    _ = vw!.add(image: newpb, presentationTime: time)
                    print("Added frame at \(time)")
                    // Callback with progress.
                    progress?(Float(time.value) / Float(duration.value))
                    // This sleep is needed to stop memory ballooning - keeps around 280 MB rather than spiraling up to 8+ GB!
                    try await Task.sleep(nanoseconds: 3_000_000)
                }
            }
        }
        print("status - \(assetReader.status)")
        print("status - \(assetReader.error?.localizedDescription ?? "None")")
        print("Finished")
        _ = try await vw!.finish()
    }
    /// Debug helper: tries to reopen a CIImage's backing data as a CGImageSource
    /// and prints the per-frame properties it finds.
    func isSpatialImage2(from ciImage: CIImage) {
        let context = CIContext()
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else {
            return
        }
        let dataProvider = CGDataProvider(data: cgImage.dataProvider!.data! as CFData)
        let imageSource = CGImageSourceCreateWithDataProvider(dataProvider!, nil)
        let frameCount = CGImageSourceGetCount(imageSource!)
        print(frameCount)
        for index in 0..<frameCount {
            let properties = CGImageSourceCopyPropertiesAtIndex(imageSource!, index, nil) as? [CFString: Any]
            print(properties as Any)
            guard let frameImage = CGImageSourceCreateImageAtIndex(imageSource!, index, nil) else {
                continue
            }
            print(frameImage)
        }
    }
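    /// Returns the video track's preferred transform together with the display
    /// size: the natural size with that transform applied, made positive.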
    func getOrientationAndResolutionSizeForVideo(asset: AVAsset) async throws -> (CGAffineTransform, CGSize) {
        guard let track = try await asset.loadTracks(withMediaType: AVMediaType.video).first
        else { throw VideoReaderError.invalidVideo }
        let naturalSize = try await track.load(.naturalSize)
        let naturalTransform = try await track.load(.preferredTransform)
        let size = naturalSize.applying(naturalTransform)
        return (naturalTransform, CGSize(width: abs(size.width), height: abs(size.height)))
    }
    /// Joins the two eye images side by side at half resolution: each eye is drawn
    /// at half width and half height, giving a full-width, half-height frame.
    func joinImages(leftImage: CIImage, rightImage: CIImage) -> CIImage {
        let left = UIImage(ciImage: leftImage)
        let right = UIImage(ciImage: rightImage)
        let imageWidth = left.size.width / 2 + right.size.width / 2
        let imageHeight = left.size.height / 2
        let newImageSize = CGSize(width: imageWidth, height: imageHeight)
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1)
        left.draw(in: CGRect(x: 0, y: 0, width: imageWidth / 2, height: imageHeight))
        right.draw(in: CGRect(x: imageWidth / 2, y: 0, width: imageWidth / 2, height: imageHeight))
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return CIImage(cgImage: image.cgImage!)
    }
}
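
/*
 A minimal usage sketch (not part of the original file): it assumes a bundled
 "spatial.mov" resource as input; the file name, output path, and mode are
 placeholders to adapt to the host app.

 let url = Bundle.main.url(forResource: "spatial", withExtension: "mov")!
 let asset = AVURLAsset(url: url)
 let output = FileManager.default.temporaryDirectory.appendingPathComponent("converted.mov")
 let convertor = VideoConvertor2()
 convertor.type = 2 // 2 = side-by-side, 3 = anaglyph, 4 = blurred side-by-side
 Task {
     try await convertor.convertVideo(asset: asset, outputFile: output) { fraction in
         print("progress: \(Int(fraction * 100))%")
     }
 }
 */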