diff --git a/SwiftProject/SwiftProject/Project/Controller/RecordingVideo/CCSpatialVideoDisplayController.swift b/SwiftProject/SwiftProject/Project/Controller/RecordingVideo/CCSpatialVideoDisplayController.swift index d84ec2c..a3b3871 100644 --- a/SwiftProject/SwiftProject/Project/Controller/RecordingVideo/CCSpatialVideoDisplayController.swift +++ b/SwiftProject/SwiftProject/Project/Controller/RecordingVideo/CCSpatialVideoDisplayController.swift @@ -286,39 +286,60 @@ class CCSpatialVideoDisplayController: BaseController, AVPlayerViewControllerDel // self.quickLoadAReader(timeRange: tr) - while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { -// print("compositon......copy samplebuffer") - } -// if(self.assetReader?.status == .completed){ -// print("okkkssss...") - self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) -// } -// else{ -// -// print("status:\(self.assetReader?.error?.localizedDescription)") -// } - - var ciImg:CIImage? = nil switch self.selectedIndex { case .crossedEyes://交叉眼 + while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { + } + self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) + + ciImg = videoTranserConvertor.convertVideo(asset: videoOriginalAsset, assetOutput: self.assetOutput!, type: self.selectedIndex, time: compositionTime) break + + case .fsbs: + while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { + } + self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) + ciImg = videoTranserConvertor.convertVideo(asset: videoOriginalAsset, assetOutput: self.assetOutput!, type: self.selectedIndex, time: compositionTime) break + + case .hsbs: + while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { + } + self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) + + ciImg = videoTranserConvertor.convertVideo(asset: videoOriginalAsset, assetOutput: self.assetOutput!, type: self.selectedIndex, time: compositionTime) 
break + + case .parallelEyes://平行眼 + while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { +// print("平行眼...") + } + self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) + + ciImg = videoTranserConvertor.convertVideo(asset: videoOriginalAsset, assetOutput: self.assetOutput!, type: self.selectedIndex, time: compositionTime) break + + case .monocular2D: ciImg = request.sourceImage break + + case .redBlueSolid://红蓝立体 + while let nextSampleBuffer = self.assetOutput!.copyNextSampleBuffer() { + } + self.assetOutput?.reset(forReadingTimeRanges:[NSValue(timeRange: tr)]) + ciImg = videoTranserConvertor.convertVideo(asset: videoOriginalAsset, assetOutput: self.assetOutput!, type: self.selectedIndex, time: compositionTime) break