618 lines
25 KiB
Swift
618 lines
25 KiB
Swift
//
|
||
// CCSpatialPhotoDisplayController.swift
|
||
// SwiftProject
|
||
//
|
||
// Created by Zhang, Joyce on 2024/3/2.
|
||
//
|
||
|
||
import UIKit
|
||
import AVFoundation
|
||
import VideoToolbox
|
||
import CoreVideo
|
||
import UIKit
|
||
import ImageIO
|
||
import CoreImage
|
||
import Photos
|
||
|
||
|
||
/// Errors thrown while reading or converting a source video asset.
enum VideoReaderError : Error {
    /// The asset could not be read or has no video track.
    case invalidVideo
    /// The asset is not a spatial (MV-HEVC) video.
    /// NOTE(review): "Spacial" is a typo, kept because renaming a case breaks callers.
    case notSpacialVideo
}
|
||
|
||
class CCSpatialPhotoDisplayController: BaseController {
|
||
|
||
// Player used for the spatial-video path; paused when switching photo display modes.
var player:AVPlayer = AVPlayer()

// Destination file for converted output.
// NOTE(review): viewDidLoad points this at "output11114.jpg" — confirm the intended extension.
var outputVideoURL:URL?

// Image source data (the presenter sets exactly one of the two).
var photoOriginalURL:URL?
var photoOriginalData:Data?

// Resolved image bytes, loaded in viewDidLoad from whichever source above was provided.
var imgData:Data?

// Display mode: spatial video / cross-eyed / red-blue anaglyph / gaussian blur.
var type = 0
|
||
|
||
|
||
// Decorative top background image. Asset aspect ratio is 393x236.
lazy var mTopImgView:UIImageView = {
    //393*236
    // NOTE(review): width is SCREEN_Width but height is SCREEN_Height * 236/393 —
    // if the goal is to preserve the 393:236 asset aspect this should likely be
    // SCREEN_Width * 236/393. Confirm the intended layout before changing.
    let view = UIImageView(frame: CGRect(x: 0, y: 0, width: SCREEN_Width, height: SCREEN_Height * 236/393))

    view.image = UIImage.init(named: "BG_Top")
    return view
}()
|
||
|
||
|
||
|
||
|
||
// Round "transform" button (tag 201) pinned to the right edge of the nav bar.
lazy var transformButton: UIButton = {
    let btn = UIButton(type: .custom)
    btn.tag = 201
    btn.isSelected = false
    btn.backgroundColor = UIColor(hexString: "#5326D6")
    btn.setImage(UIImage(named: "transform_button")!, for: .normal)
    btn.addTarget(self, action: #selector(navgationButtonClick2(sender:)), for: .touchUpInside)

    // 56x36 pill shape, vertically centered in the nav bar, 24pt from the right edge.
    btn.frame = CGRect(x: 0, y: 0, width: 56, height: 36)
    btn.layer.cornerRadius = 18
    btn.layer.masksToBounds = true
    btn.centerY = StatuBar_Height + NavBar_Height * 0.5
    btn.right = SCREEN_Width - 24

    return btn
}()
|
||
|
||
// Center nav-bar button (tag 202) showing the current display type; tapping it
// opens the type menu. Title defaults to mono 2D.
lazy var mTopCenterTypeButton: UIButton = {
    let btn = UIButton()
    btn.tag = 202
    btn.isSelected = false
    btn.backgroundColor = UIColor.hexStringToColor(hexString: "#1F1E20")
    btn.addTarget(self, action: #selector(navgationButtonClick2(sender:)), for: .touchUpInside)

    // Pill outline, centered horizontally in the nav bar.
    btn.frame = CGRect(x: 2, y: 10, width: SCREEN_Width * 0.4, height: 36)
    btn.centerY = StatuBar_Height + NavBar_Height * 0.5
    btn.centerX = SCREEN_Width * 0.5
    btn.clipsToBounds = true
    btn.layer.cornerRadius = 18
    btn.layer.borderWidth = 1
    btn.layer.borderColor = UIColor.white.cgColor

    // Title + trailing arrow icon.
    btn.setTitle("单眼2D", for: .normal)
    btn.setImage(UIImage(named: "type_button_arrow_down"), for: .normal)
    btn.updateBtnEdgeInsets(style: .Right, space: 10)
    btn.setTitleColor(UIColor.white, for: .normal)
    btn.titleLabel?.font = KFont_Medium(14)

    return btn
}()
|
||
|
||
// Main display area for the (processed) spatial photo.
lazy var mImgView:UIImageView = {
    let imgView = UIImageView(frame: CGRect(x: 0, y: 250, width: self.view.frame.size.width, height: 240))
    imgView.contentMode = .scaleAspectFit
    imgView.backgroundColor = UIColor.hexStringToColor(hexString: "#383739")
    imgView.isUserInteractionEnabled = true
    return imgView
}()
|
||
|
||
// Menu entries for the display-type popup: mono 2D, parallel view, red-blue
// anaglyph, cross-eyed. Row order must stay in sync with
// selectedSpatialType(selectedIndex:). ("isHiden" label is part of the tuple type.)
var typeData:[(icon:String,title:String,isHiden:Bool)] = [(icon:"type_check",title:"单眼2D",isHiden:false),
(icon:"type_check",title:"平行眼",isHiden:false),
(icon:"type_check",title:"红蓝立体",isHiden:false),
(icon:"type_check",title:"交叉眼",isHiden:false)]
|
||
|
||
// Popup menu for choosing the display type, anchored under the nav title view.
lazy var menuView: CCSpatialDisplayTypeView = {
    // Appearance configuration for the popup.
    let parameters:[CCSpatialDisplayTypeConfigure] = [
        .PopMenuTextColor(UIColor.white),
        .popMenuItemHeight(40),
        .PopMenuTextFont(KFont_Medium(12)),
        .PopMenuBackgroudColor(UIColor(hexString: "#1F1E20"))
    ]

    // Arrow anchor is expressed in *screen* coordinates (converted to the key window).
    // NOTE(review): navtionImgView is force-unwrapped — confirm BaseController always
    // creates it before this lazy var is first touched.
    let pointOnScreen = navtionImgView!.convert(CGPointMake(navtionImgView!.centerX, navtionImgView!.bottom), to: KWindow)
    let popMenu = CCSpatialDisplayTypeView(menuWidth: SCREEN_Width * 0.4, arrow: pointOnScreen, datas: typeData,configures: parameters)
    return popMenu
}()
|
||
|
||
/// Resolves the source image bytes, shows the photo, and — only when it is a
/// spatial photo — installs the nav-bar controls and display views.
override func viewDidLoad() {
    super.viewDidLoad()

    self.view.backgroundColor = UIColor(hexString: "#060507")
    outputVideoURL = URL.documentsDirectory.appending(path:"output11114.jpg")

    // Resolve image bytes: prefer the URL source, fall back to in-memory data.
    var originalData:Data?
    if let url = photoOriginalURL {
        do {
            originalData = try Data(contentsOf: url)
        } catch let error as NSError {
            print(error)
        }
    } else if photoOriginalData != nil {
        originalData = photoOriginalData
    }

    // FIX: guard instead of force-unwrapping — the original crashed with
    // `originalData!` when neither photoOriginalURL nor photoOriginalData was set
    // (or when reading the URL failed).
    guard let data = originalData else {
        print("no source image data")
        return
    }

    imgData = data
    // Show the raw photo immediately.
    mImgView.image = UIImage(data: data)

    // Non-spatial photos get no controls (same early-out as before).
    guard isSpatialImage(originalData: data) else {
        print("这不是一张空间图片")
        return
    }

    // Back button and transparent nav-bar styling.
    self.setLeftOneBtnImg(imgStr: "spatial_back_button")
    self.setNavgationBarColorImg(color: .clear)
    self.setNavgationBarLine(color: .clear)

    self.view.addSubview(mTopImgView)
    self.view.bringSubviewToFront(self.navtionBar!)
    navtionBar?.addSubview(transformButton)
    navtionBar?.addSubview(mTopCenterTypeButton)
    self.view.addSubview(mImgView)
}
|
||
|
||
|
||
//MARK: - action
/// Handles the three nav-bar buttons: 200 = back (BaseController), 201 = push
/// the transform screen, 202 = toggle the display-type menu.
@objc public func navgationButtonClick2(sender:UIButton){
    switch sender.tag {
    case 200:
        // Left (back) button — navigation handled by the base controller.
        break
    case 201:
        // Right button: push the transform screen.
        let transVC = VRPhotoTransformController()
        // FIX: pass the resolved bytes. The original passed photoOriginalData,
        // which is nil whenever the photo was supplied via photoOriginalURL
        // (viewDidLoad stores the resolved bytes in imgData).
        transVC.sourceImageData = imgData ?? photoOriginalData
        self.navigationController?.pushViewController(transVC, animated: true)
    case 202:
        // Center button: show the display-type menu with the arrow flipped up.
        mTopCenterTypeButton.setImage(UIImage(named: "type_button_arrow_up"), for: .normal)
        // Install the selection handler before presenting.
        menuView.didSelectMenuBlock = { [weak self] (index:Int)->Void in
            print("block select \(index)")
            self?.mTopCenterTypeButton.setImage(UIImage(named: "type_button_arrow_down"), for: .normal)
            self?.selectedSpatialType(selectedIndex: index)
        }
        menuView.show()
    default:
        break
    }
}
|
||
|
||
/// Re-renders the photo for the chosen display mode.
/// - Parameter selectedIndex: 0 = mono 2D, 1 = parallel view, 2 = red-blue
///   anaglyph, 3 = cross-eyed (eyes swapped).
func selectedSpatialType(selectedIndex:Int) {
    print("选中了第 \(selectedIndex) 个选项")
    player.pause()
    NotificationCenter.default.removeObserver(self)
    mImgView.frame = CGRect.init(x: 0, y: 200, width: self.view.frame.size.width, height: 240)

    // FIX: guard instead of force-unwrapping imgData (crashed when no source data).
    guard let data = imgData,
          let imageSource = CGImageSourceCreateWithData(data as CFData, nil) else {
        return
    }

    // A spatial photo decodes as (at least) a left/right frame pair.
    let frameCount = CGImageSourceGetCount(imageSource)
    var frames: [CGImage] = []
    for index in 0..<frameCount {
        if let frameImage = CGImageSourceCreateImageAtIndex(imageSource, index, nil) {
            frames.append(frameImage)
        }
    }
    guard frames.count >= 2 else { return }

    let lciImage = CIImage(cgImage: frames[0])
    let rciImage = CIImage(cgImage: frames[1])

    switch selectedIndex {
    case 0:
        // Mono 2D: show the original photo.
        // FIX: use the resolved bytes — the original force-unwrapped
        // photoOriginalData, which is nil for URL-sourced photos.
        mImgView.image = UIImage(data: data)
    case 1:
        // Parallel view: left eye on the left.
        showJoined(left: lciImage, right: rciImage)
    case 2:
        // Red-blue anaglyph.
        if let anaglyph = makeAnaglyph(left: lciImage, right: rciImage) {
            DispatchQueue.main.async { [weak self] in
                self?.mImgView.image = UIImage(ciImage: anaglyph)
            }
        }
    case 3:
        // Cross-eyed view: eyes swapped.
        showJoined(left: rciImage, right: lciImage)
    default:
        break
    }
}

/// Joins two eye images side by side and displays the result.
/// FIX: replaces duplicated blocks that used `self!` inside `[weak self]`
/// closures (defeating the weak capture) and force-unwrapped the conversion.
private func showJoined(left: CIImage, right: CIImage) {
    let joined = joinImages(leftImage: left, rightImage: right)
    guard let lastImg = convertCIImageToUIImage(ciImage: joined) else { return }
    DispatchQueue.main.async { [weak self] in
        self?.mImgView.image = lastImg
    }
}

/// Builds a red-blue anaglyph: channel-masks each eye with CIColorMatrix and
/// screen-blends the two results. Filter parameters are unchanged from the
/// original implementation.
private func makeAnaglyph(left lciImage: CIImage, right rciImage: CIImage) -> CIImage? {
    // Left eye: keep (half-strength) blue only.
    let redColorMatrix: [CGFloat] = [
        0.0, 0.0, 0.0, 0.0, 0.0, // red channel
        0.0, 0.0, 0.0, 0.0, 0.0, // green channel
        0.0, 0.0, 0.5, 0.0, 0.0, // blue channel
        0.0, 0.0, 0.0, 1.0, 0.0  // alpha channel
    ]
    // Right eye: keep (half-strength) red only.
    let blueColorMatrix: [CGFloat] = [
        0.5, 0.0, 0.0, 0.0, 0.0, // red channel
        0.0, 0.0, 0.0, 0.0, 0.0, // green channel
        0.0, 0.0, 0.0, 0.0, 0.0, // blue channel
        0.0, 0.0, 0.0, 1.0, 0.0  // alpha channel
    ]

    let redFilter = CIFilter(name: "CIColorMatrix")!
    redFilter.setValue(lciImage, forKey: kCIInputImageKey)
    redFilter.setValue(CIVector(values: redColorMatrix, count: redColorMatrix.count), forKey: "inputRVector")

    let blueFilter = CIFilter(name: "CIColorMatrix")!
    blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
    blueFilter.setValue(CIVector(values: blueColorMatrix, count: blueColorMatrix.count), forKey: "inputBVector")

    guard let redOutputImage = redFilter.outputImage,
          let blueOutputImage = blueFilter.outputImage else {
        return nil
    }

    // CIScreenBlendMode: inverts, multiplies, and re-inverts the channels,
    // which merges the two single-channel images without clipping.
    let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
    compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
    compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)
    return compositeFilter.outputImage
}
|
||
|
||
//MARK: - Spatial-photo detection
/// Returns true when `originalData` decodes as a spatial photo.
///
/// A spatial photo has at least two frames (left/right eye) AND carries a
/// "{HEIF}" properties dictionary (camera extrinsics) on the second frame.
/// Asking ImageIO for properties at index 1 fails for single-frame images,
/// which covers the frame-count check.
func isSpatialImage(originalData: Data) -> Bool {
    guard let imageSource = CGImageSourceCreateWithData(originalData as CFData, nil) else {
        return false
    }

    // Index 1 only exists for multi-frame images; nil here means a plain photo.
    guard let properties = CGImageSourceCopyPropertiesAtIndex(imageSource, 1, nil) as? [CFString: Any] else {
        return false
    }

    // FIX: the original returned true for ANY two-frame image (an animated GIF
    // passed), despite its own comment that "{HEIF}" marks a spatial photo.
    // Require the HEIF dictionary as documented.
    return properties["{HEIF}" as CFString] != nil
}
|
||
|
||
//MARK: - UIImage -> CVPixelBuffer
/// Renders `image` into a new 32BGRA pixel buffer sized to `frame`, translating
/// the draw by -frame.origin (i.e. `frame` selects a region of the image).
/// Returns nil when the buffer, the CGImage, or the bitmap context cannot be created.
func createCVPixelBuffer(from image: UIImage, with frame: CGRect) -> CVPixelBuffer? {
    // FIX: guard the CGImage up front instead of force-unwrapping mid-draw.
    guard let cgImage = image.cgImage else {
        return nil
    }

    let options: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
    ]

    var pixelBuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     Int(frame.width),
                                     Int(frame.height),
                                     kCVPixelFormatType_32BGRA,
                                     options as CFDictionary,
                                     &pixelBuffer)

    guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
        return nil
    }

    CVPixelBufferLockBaseAddress(buffer, [])
    // FIX: unlock on every exit path — the original returned early without
    // unlocking when CGContext creation failed, leaking the base-address lock.
    defer { CVPixelBufferUnlockBaseAddress(buffer, []) }

    let pixelData = CVPixelBufferGetBaseAddress(buffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()

    guard let context = CGContext(data: pixelData,
                                  width: Int(frame.width),
                                  height: Int(frame.height),
                                  bitsPerComponent: 8,
                                  bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                  space: colorSpace,
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue) else {
        return nil
    }

    context.translateBy(x: -frame.origin.x, y: -frame.origin.y)
    context.draw(cgImage, in: CGRect(origin: .zero, size: image.size))

    return buffer
}
|
||
|
||
|
||
|
||
// Joins two images into one, side by side.
/// Kept for source compatibility with existing callers.
/// FIX: this was a byte-for-byte duplicate of `joinImages(leftImage:rightImage:)`;
/// it now delegates to that single implementation.
func joinImages2( leftImage:CIImage, rightImage:CIImage) -> CIImage {
    return joinImages(leftImage: leftImage, rightImage: rightImage)
}
|
||
|
||
|
||
/// Reads a spatial (MV-HEVC) video from `inputFile`, joins each left/right eye
/// frame pair side by side, and writes the result to `outputFile`.
/// - Parameters:
///   - inputFile: Source video URL.
///   - outputFile: Destination URL; any existing file there is deleted first.
///   - progress: Optional callback with a 0...1 fraction per written frame.
/// - Throws: Reader/writer and metadata-loading errors from AVFoundation.
func convertVideo( inputFile : URL, outputFile: URL, progress: ((Float)->())? = nil ) async throws {
    // Best-effort delete of a stale output file; failure (e.g. file absent) is only logged.
    do {
        try FileManager.default.removeItem(atPath: outputFile.path)
        print("视频文件删除成功")
    } catch {
        print("删除视频文件出错:\(error)")
    }

    // Load the AVAsset.
    let asset = AVAsset(url: inputFile)
    let assetReader = try AVAssetReader(asset: asset)

    // Check whether this is a spatial video via its QuickTime metadata.
    // NOTE(review): a non-spatial asset is only logged — conversion still proceeds;
    // confirm whether this should throw VideoReaderError.notSpacialVideo instead.
    let userDataItems = try await asset.loadMetadata(for:.quickTimeMetadata)
    let spacialCharacteristics = userDataItems.filter { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
    if spacialCharacteristics.count == 0 {
        print("该视频不是空间视频")
    }

    // Input orientation and display size (used to configure the writer).
    let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)

    // Two side-by-side eye views: keep the aspect ratio by halving the height,
    // except for type 3 which writes at full height.
    let vw:VideoWriter?
    if(type == 3){
        vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30 ), isRealTime: false, queue: .main)
    }
    else{
        vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height/2), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30 ), isRealTime: false, queue: .main)
    }

    // Video track output requesting both MV-HEVC layers (left=0, right=1).
    let output = try await AVAssetReaderTrackOutput(
        track: asset.loadTracks(withMediaType: .video).first!,
        outputSettings: [
            AVVideoDecompressionPropertiesKey: [
                kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
            ],
        ]
    )
    assetReader.add(output)
    assetReader.startReading()
    let duration = try await asset.load(.duration)

    // NOTE(review): this composition block only prints each source frame — it
    // appears to be leftover debug code; confirm before removing.
    if let playerItem = player.currentItem {
        playerItem.videoComposition = AVVideoComposition(asset: playerItem.asset) { request in
            print(request.sourceImage)
        }
    }

    // Pull sample buffers, split out the per-eye tagged pixel buffers, join them
    // side by side, and feed the writer.
    // NOTE(review): the `return` below exits the whole function (skipping
    // vw.finish()) when a sample has no tagged buffers — confirm this is intended
    // rather than `continue`-style skipping.
    while let nextSampleBuffer = output.copyNextSampleBuffer() {
        guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { return }

        let leftEyeBuffer = taggedBuffers.first(where: {
            $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
        })?.buffer
        let rightEyeBuffer = taggedBuffers.first(where: {
            $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
        })?.buffer

        if let leftEyeBuffer,
           let rightEyeBuffer,
           case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
           case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {

            let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
            let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)
            // Join the eye images side by side at this frame's timestamp.
            let newpb = joinImages( leftImage: lciImage, rightImage:rciImage )
            let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
            _ = vw!.add(image: newpb, presentationTime: time)
            progress?( Float(time.value)/Float(duration.value))
        }
    }

    _ = try await vw!.finish()
    print( "Finished")
}
|
||
|
||
|
||
/// Returns the first video track's preferred transform together with its
/// display resolution (natural size with the transform applied, absolute-valued).
/// - Throws: `VideoReaderError.invalidVideo` when the asset has no video track.
func getOrientationAndResolutionSizeForVideo(asset:AVAsset) async throws -> (CGAffineTransform, CGSize) {
    guard let videoTrack = try await asset.loadTracks(withMediaType: AVMediaType.video).first else {
        throw VideoReaderError.invalidVideo
    }
    let rawSize = try await videoTrack.load(.naturalSize)
    let transform = try await videoTrack.load(.preferredTransform)
    let oriented = rawSize.applying(transform)
    let displaySize = CGSize(width: abs(oriented.width), height: abs(oriented.height))
    return (transform, displaySize)
}
|
||
|
||
|
||
/// Renders a CIImage to a CGImage-backed UIImage; returns nil when rendering fails.
func convertCIImageToUIImage(ciImage: CIImage) -> UIImage? {
    let renderer = CIContext(options: nil)
    guard let cgImage = renderer.createCGImage(ciImage, from: ciImage.extent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}
|
||
|
||
// Joins two images into one, side by side.
/// Draws `leftImage` and `rightImage` next to each other (each scaled into half
/// of a half-size canvas) and returns the combined picture as a CIImage.
func joinImages( leftImage:CIImage, rightImage:CIImage) -> CIImage {
    let leftUI = UIImage(ciImage: leftImage)
    let rightUI = UIImage(ciImage: rightImage)

    // Canvas: half of each source width placed side by side, at half height.
    let canvasWidth = leftUI.size.width / 2 + rightUI.size.width / 2
    let canvasHeight = leftUI.size.height / 2
    let canvasSize = CGSize(width: canvasWidth, height: canvasHeight)

    UIGraphicsBeginImageContextWithOptions(canvasSize, false, 1)
    leftUI.draw(in: CGRect(x: 0, y: 0, width: canvasWidth / 2, height: canvasHeight))
    rightUI.draw(in: CGRect(x: canvasWidth / 2, y: 0, width: canvasWidth / 2, height: canvasHeight))
    let merged = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()

    return CIImage(cgImage: merged.cgImage!)
}
|
||
|
||
/// Creates a 32ARGB CVPixelBuffer sized to `ciImage`'s extent and renders the
/// image into it; returns nil when buffer creation fails.
func pixelBuffer(from ciImage: CIImage) -> CVPixelBuffer? {
    let attributes: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: kCFBooleanTrue,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: kCFBooleanTrue
    ]
    let bufferWidth = Int(ciImage.extent.width)
    let bufferHeight = Int(ciImage.extent.height)

    var buffer: CVPixelBuffer?
    let result = CVPixelBufferCreate(kCFAllocatorDefault, bufferWidth, bufferHeight, kCVPixelFormatType_32ARGB, attributes as CFDictionary, &buffer)
    guard result == kCVReturnSuccess, let target = buffer else {
        return nil
    }

    CIContext().render(ciImage, to: target)
    return target
}
|
||
|
||
|
||
/*
|
||
// MARK: - Navigation
|
||
|
||
// In a storyboard-based application, you will often want to do a little preparation before navigation
|
||
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
|
||
// Get the new view controller using segue.destination.
|
||
// Pass the selected object to the new view controller.
|
||
}
|
||
*/
|
||
|
||
}
|