Complete the audio merging step that runs after in-app video capture finishes

This commit is contained in:
bluesea 2024-03-14 10:07:32 +08:00
parent 40b5d1d7c5
commit dc2fb63193


@@ -30,9 +30,14 @@ class SpatialVideoWriter {
         let leftEyeAsset = AVURLAsset(url: leftEyeVideoURL)
         let rightEyeAsset = AVURLAsset(url: rightEyeVideoURL)
-        let assetWriter = try AVAssetWriter(outputURL: outputVideoURL, fileType: .mov)
         let leftVideoTrack = leftEyeAsset.tracks(withMediaType: .video).first!
+        let rightVideoTrack = rightEyeAsset.tracks(withMediaType: .video).first!
+        let letAudioTrack = leftEyeAsset.tracks(withMediaType: .audio).first!
+        // Create the writer once the source tracks have been resolved
+        let assetWriter = try AVAssetWriter(outputURL: outputVideoURL, fileType: .mov)
         let videoSettings: [String: Any] = [
             AVVideoWidthKey: leftVideoTrack.naturalSize.width,
             AVVideoHeightKey: leftVideoTrack.naturalSize.height,
@@ -43,13 +48,40 @@ class SpatialVideoWriter {
                 kVTCompressionPropertyKey_HorizontalDisparityAdjustment: 200, // asset-specific
             ]
         ]
-        let input = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
-        assetWriter.add(input)
-        let adaptor = AVAssetWriterInputTaggedPixelBufferGroupAdaptor(assetWriterInput: input)
+        let input_video = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
+        input_video.expectsMediaDataInRealTime = true
+        let adaptor_inputVideo = AVAssetWriterInputTaggedPixelBufferGroupAdaptor(assetWriterInput: input_video)
+        assetWriter.add(input_video)
+        // Audio writer input: uncompressed stereo PCM
+        let inputSettings_Audio = [
+            AVFormatIDKey: kAudioFormatLinearPCM,
+            AVSampleRateKey: 44100,
+            AVNumberOfChannelsKey: 2,
+            AVLinearPCMIsBigEndianKey: true,
+            AVLinearPCMIsFloatKey: true,
+            AVLinearPCMBitDepthKey: 32,
+            AVLinearPCMIsNonInterleaved: false,
+        ] as [String: Any]
+        let writerInput_Audio_left = AVAssetWriterInput(mediaType: .audio, outputSettings: inputSettings_Audio)
+        writerInput_Audio_left.expectsMediaDataInRealTime = false
+        if assetWriter.canAdd(writerInput_Audio_left) {
+            assetWriter.add(writerInput_Audio_left)
+            print("assetWriter added writerInput_Audio_left")
+        } else {
+            print("assetWriter could not add writerInput_Audio_left")
+        }
         assetWriter.startWriting()
         assetWriter.startSession(atSourceTime: .zero)
+        // Readers for the two source movies
         let leftEyeReader = try AVAssetReader(asset: leftEyeAsset)
         let rightEyeReader = try AVAssetReader(asset: rightEyeAsset)
@@ -60,14 +92,39 @@ class SpatialVideoWriter {
         ]
         let leftEyeOutput = AVAssetReaderTrackOutput(track: leftVideoTrack, outputSettings: readerOutputSettings)
-        let rightEyeOutput = AVAssetReaderTrackOutput(track: rightEyeAsset.tracks(withMediaType: .video).first!, outputSettings: readerOutputSettings)
+        let rightEyeOutput = AVAssetReaderTrackOutput(track: rightVideoTrack, outputSettings: readerOutputSettings)
         leftEyeReader.add(leftEyeOutput)
         rightEyeReader.add(rightEyeOutput)
+        // Reader output that decodes the left-eye audio track to stereo PCM
+        let outputSettings_Audio = [
+            AVFormatIDKey: kAudioFormatLinearPCM,
+            AVSampleRateKey: 44100,
+            AVNumberOfChannelsKey: 2,
+        ] as [String: Any]
+        let output_audio_left = AVAssetReaderTrackOutput(
+            track: letAudioTrack,
+            outputSettings: outputSettings_Audio
+        )
+        if leftEyeReader.canAdd(output_audio_left) {
+            leftEyeReader.add(output_audio_left)
+            print("Added audio reader output output_audio_left")
+        } else {
+            print("Could not add audio reader output output_audio_left")
+        }
         leftEyeReader.startReading()
         rightEyeReader.startReading()
         while let leftBuffer = leftEyeOutput.copyNextSampleBuffer(),
               let rightBuffer = rightEyeOutput.copyNextSampleBuffer() {
@@ -87,16 +144,20 @@ class SpatialVideoWriter {
             let left = CMTaggedBuffer(tags: [.stereoView(.leftEye), .videoLayerID(0)], pixelBuffer: leftCVPixelBuffer)
             let right = CMTaggedBuffer(tags: [.stereoView(.rightEye), .videoLayerID(1)], pixelBuffer: rightCVPixelBuffer)
-            while !adaptor.assetWriterInput.isReadyForMoreMediaData {
+            while !adaptor_inputVideo.assetWriterInput.isReadyForMoreMediaData {
                 // Wait until the writerInput can accept more media data
                 Thread.sleep(forTimeInterval: 0.1)
             }
-            adaptor.appendTaggedBuffers([left, right], withPresentationTime: leftBuffer.presentationTimeStamp)
+            adaptor_inputVideo.appendTaggedBuffers([left, right], withPresentationTime: leftBuffer.presentationTimeStamp)
         }
+        self.addAudio(assetTrackOutput: output_audio_left, audio_input: writerInput_Audio_left)
         // All samples have been appended
         print("Finished writing")
-        input.markAsFinished()
+        writerInput_Audio_left.markAsFinished()
+        input_video.markAsFinished()
         outputVideoURL.stopAccessingSecurityScopedResource()
         assetWriter.finishWriting { [self] in
             print("Ready to save")
@@ -110,15 +171,32 @@ class SpatialVideoWriter {
             }
         }
-    private func saveVideoToLibrary(videoURL: URL, completion: @escaping (Bool, Error?) -> Void) {
-        PHPhotoLibrary.shared().performChanges({
-            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
-        }) { success, error in
-            if success {
-                print("Saved successfully")
-            } else if let error = error {
-                print("Save failed")
-            }
-        }
-    }
+    // Drain the audio reader output and append every audio sample to the writer input
+    func addAudio(assetTrackOutput: AVAssetReaderTrackOutput, audio_input: AVAssetWriterInput) {
+        while let sample = assetTrackOutput.copyNextSampleBuffer() {
+            print("Read an audio sample buffer")
+            let formatDesc: CMFormatDescription = CMSampleBufferGetFormatDescription(sample)!
+            let mediaType: CMMediaType = CMFormatDescriptionGetMediaType(formatDesc)
+            if mediaType == kCMMediaType_Audio {
+                if audio_input.isReadyForMoreMediaData {
+                    if audio_input.append(sample) == false {
+                        print("Failed to append audio sample")
+                    } else {
+                        print("Appended audio sample")
+                    }
+                } else {
+                    print("Audio input is not ready for more data yet")
+                }
+            } else {
+                print("Sample is not audio")
+            }
+        }
+        print("addAudio finished")
+    }
 }
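
For reference, the change above is an instance of the standard AVFoundation pattern: drain sample buffers from an AVAssetReaderTrackOutput and append them to an AVAssetWriterInput of the destination writer. The sketch below shows the same pattern in isolation for a single audio track. It is a minimal illustration, not code from this commit: the function name copyAudioTrack, the AAC output settings, and the 0.05 s back-off interval are assumptions chosen for the example.

import AVFoundation

// Minimal sketch: copy the audio track of one movie into a new .mov file.
// Mirrors the reader-output -> writer-input append loop used in the commit above.
func copyAudioTrack(from sourceURL: URL, to outputURL: URL) throws {
    let asset = AVURLAsset(url: sourceURL)
    guard let audioTrack = asset.tracks(withMediaType: .audio).first else { return }

    // Reader that decodes the source audio into PCM sample buffers.
    let reader = try AVAssetReader(asset: asset)
    let readerOutput = AVAssetReaderTrackOutput(
        track: audioTrack,
        outputSettings: [AVFormatIDKey: kAudioFormatLinearPCM] as [String: Any]
    )
    reader.add(readerOutput)

    // Writer input that re-encodes the samples to AAC (illustrative settings).
    let writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
    let writerInput = AVAssetWriterInput(
        mediaType: .audio,
        outputSettings: [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVSampleRateKey: 44100,
            AVNumberOfChannelsKey: 2,
        ] as [String: Any]
    )
    writerInput.expectsMediaDataInRealTime = false
    writer.add(writerInput)

    writer.startWriting()
    writer.startSession(atSourceTime: .zero)
    reader.startReading()

    // Pull sample buffers from the reader and hand them to the writer input.
    while let sample = readerOutput.copyNextSampleBuffer() {
        while !writerInput.isReadyForMoreMediaData {
            Thread.sleep(forTimeInterval: 0.05) // simple back-off, as in the loop above
        }
        if !writerInput.append(sample) {
            print("append failed: \(String(describing: writer.error))")
            break
        }
    }

    writerInput.markAsFinished()
    writer.finishWriting {
        print("audio copy finished, status: \(writer.status.rawValue)")
    }
}

In the commit itself the same loop lives in addAudio, which is called after all video frames have been appended so that the audio samples are written into the already-open writer session.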