//
//  PlayByTransferConvertor.swift
//  SwiftProject
//
//  Created by aaa on 2024/3/12.
//
import AVKit
import CoreImage
import Foundation
import ImageIO
import UIKit
import VideoToolbox
class PlayByTransferConvertor {
|
||
    /// Reads sample buffers from `assetOutput` looking for the one whose
    /// presentation timestamp equals `time`, then splits its spatial-video
    /// tagged buffers into left/right eye pixel buffers and composes them
    /// into a single `CIImage` in the requested stereo layout.
    ///
    /// NOTE(review): both branches of the `if presentationTime == time`
    /// check end in `break`, so the `while` loop consumes at most ONE sample
    /// buffer per call. Per the original comment, a mismatch is intentional:
    /// the caller is expected to re-align `time` and call again.
    ///
    /// - Parameters:
    ///   - asset: Source asset (not referenced inside this method body).
    ///   - assetOutput: Reader track output supplying sample buffers.
    ///   - type: Target stereo layout (HSBS, FSBS, parallel/crossed eyes, red-blue).
    ///   - time: Presentation timestamp of the frame to convert.
    ///   - scale: Zoom factor forwarded to the composition helpers.
    ///   - ed: Eye-distance adjustment forwarded to the composition helpers.
    /// - Returns: The composed frame, or `nil` when no matching buffer was
    ///   found or the eye buffers could not be extracted.
    func convertVideo(asset:AVAsset, assetOutput:AVAssetReaderTrackOutput,type:SpatialType,time: CMTime,scale:CGFloat,ed:Int)->(CIImage?) {
        var newpb:CIImage? = nil
        // var presentationTime:CMTime? = nil
        // print("sta.....>>>>>>>thread")

        while let nextSampleBuffer = assetOutput.copyNextSampleBuffer() {
            let presentationTime = CMSampleBufferGetPresentationTimeStamp(nextSampleBuffer)
            // print("presentationTime: \(presentationTime) \ntime: \(time)")
            // Earlier variant (kept for reference): bail out as soon as the
            // buffer timestamp passes `time`.
            // if presentationTime! > time {
            //     break
            // }

            if presentationTime == time {
                // Spatial (MV-HEVC) frames carry per-eye pixel buffers as
                // tagged buffers; without them there is nothing to compose.
                guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { break }

                let leftEyeBuffer = taggedBuffers.first(where: {
                    $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
                })?.buffer
                let rightEyeBuffer = taggedBuffers.first(where: {
                    $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
                })?.buffer

                if let leftEyeBuffer,
                   let rightEyeBuffer,
                   case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
                   case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {

                    let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
                    let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)

                    let left = UIImage(ciImage: lciImage )
                    let right = UIImage(ciImage: rciImage )

                    var cwidth:CGFloat
                    var cheight:CGFloat
                    switch type {
                    case .hsbs:
                        // Half side-by-side: squeeze both eyes into one frame.
                        cwidth = left.size.width
                        cheight = left.size.height
                        newpb = joinImages_sbs(left: left, right: right, imgWidth: cwidth, imgHeight:cheight )
                        break
                    case .fsbs:
                        // Earlier FSBS path (kept for reference):
                        // cwidth = left.size.width
                        // cheight = left.size.height
                        // newpb = joinImages_fsbs(left: left, right: right, imgWidth: cwidth, imgHeight: cheight)
                        newpb = joinImages(leftImage: lciImage, rightImage: rciImage,scale: scale,ed: ed)
                        break
                    case .parallelEyes: // parallel-eye free viewing
                        newpb = joinImages(leftImage: lciImage, rightImage: rciImage,scale: scale,ed: ed)
                        break
                    case .crossedEyes: // cross-eye free viewing: eyes swapped
                        newpb = joinImages(leftImage: rciImage, rightImage: lciImage,scale: scale,ed: ed)
                        break
                    case .redBlueSolid: // red-blue anaglyph
                        newpb = joinImages_red_blue(lciImage: lciImage, rciImage: rciImage,scale: scale)
                        break
                    default:
                        break
                    }
                }
                CMSampleBufferInvalidate(nextSampleBuffer)
                break
            }
            else{ // Timestamp mismatch: break and let the caller re-align `time`.
                break
            }
        }
        // print("PlayByTransferConvertor test whether anything is returned....")
        return (newpb)
    }
|
||
|
||
|
||
//合成红蓝立体图片
|
||
func joinImages_red_blue(lciImage:CIImage,rciImage:CIImage,scale: CGFloat) -> CIImage {
|
||
// 创建红色和蓝色滤镜
|
||
let redColorMatrix: [CGFloat] = [
|
||
0.0, 0.0, 0.0, 0.0, 0.0, // 红色通道
|
||
0.0, 0.0, 0.0, 0.0, 0.0, // 绿色通道
|
||
0.0, 0.0, 1.0, 0.0, 0.0, // 蓝色通道
|
||
0.0, 0.0, 0.0, 1.0, 0.0 // 透明通道
|
||
]
|
||
|
||
let blueColorMatrix: [CGFloat] = [
|
||
1.0, 0.0, 0.0, 0.0, 0.0, // 红色通道
|
||
0.0, 0.0, 0.0, 0.0, 0.0, // 绿色通道
|
||
0.0, 0.0, 0.0, 0.0, 0.0, // 蓝色通道
|
||
0.0, 0.0, 0.0, 1.0, 0.0 // 透明通道
|
||
]
|
||
|
||
let redFilter = CIFilter(name: "CIColorMatrix")!
|
||
redFilter.setValue(lciImage, forKey: kCIInputImageKey)
|
||
redFilter.setValue(CIVector(values: redColorMatrix, count: redColorMatrix.count), forKey: "inputRVector")
|
||
|
||
let blueFilter = CIFilter(name: "CIColorMatrix")!
|
||
blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
|
||
blueFilter.setValue(CIVector(values: blueColorMatrix, count: blueColorMatrix.count), forKey: "inputBVector")
|
||
var lastImg:CIImage? = nil
|
||
// 获取处理后的图像
|
||
if let redOutputImage = redFilter.outputImage,
|
||
let blueOutputImage = blueFilter.outputImage {
|
||
|
||
let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
|
||
compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
|
||
compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)
|
||
|
||
let sharpenedFilter = CIFilter(name: "CISharpenLuminance")!
|
||
sharpenedFilter.setValue(compositeFilter.outputImage, forKey: kCIInputImageKey)
|
||
sharpenedFilter.setValue(2, forKey: kCIInputSharpnessKey)
|
||
lastImg = sharpenedFilter.outputImage!
|
||
}
|
||
if scale == 1 {
|
||
return lastImg!
|
||
}
|
||
else {
|
||
var drawImg = UIImage(ciImage: lastImg!)
|
||
let drawImageSize = drawImg.size
|
||
let newImgSize = CGSizeMake(drawImageSize.width * scale, drawImageSize.height * scale)
|
||
let newRect = CGRect(origin: CGPoint(x: (drawImageSize.width - newImgSize.width) * 0.5, y: (drawImageSize.height - newImgSize.height) * 0.5), size: newImgSize)
|
||
UIGraphicsBeginImageContextWithOptions(drawImageSize, false, 1);
|
||
drawImg.draw(in: newRect)
|
||
let image = UIGraphicsGetImageFromCurrentImageContext()!
|
||
UIGraphicsEndImageContext();
|
||
|
||
|
||
let ci = CIImage(cgImage: image.cgImage!)
|
||
return ci
|
||
}
|
||
}
|
||
|
||
|
||
//将两张图片合成一张图片 SBS
|
||
func joinImages_sbs( left:UIImage, right:UIImage,imgWidth:CGFloat,imgHeight:CGFloat) -> CIImage {
|
||
let newImageSize = CGSize(width:imgWidth, height: imgHeight);
|
||
UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1);
|
||
left.draw(in: CGRect(x:0, y:0, width:imgWidth/2, height:imgHeight))
|
||
right.draw(in: CGRect(x:imgWidth/2, y:0, width:imgWidth/2, height:imgHeight))
|
||
let image = UIGraphicsGetImageFromCurrentImageContext()!
|
||
UIGraphicsEndImageContext();
|
||
|
||
|
||
let ci = CIImage(cgImage: image.cgImage!)
|
||
return ci
|
||
}
|
||
|
||
//FSBS
|
||
func joinImages_fsbs( left:UIImage, right:UIImage,imgWidth:CGFloat,imgHeight:CGFloat) -> CIImage {
|
||
let newImageSize = CGSize(width:imgWidth, height: imgHeight);//在播放过程中,务必保证宽、高尺寸不变
|
||
UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1);
|
||
left.draw(in: CGRect(x:0, y:imgHeight/4, width:imgWidth/2, height:imgHeight/2))
|
||
right.draw(in: CGRect(x:imgWidth/2, y:imgHeight/4, width:imgWidth/2, height:imgHeight/2))
|
||
let image = UIGraphicsGetImageFromCurrentImageContext()!
|
||
UIGraphicsEndImageContext();
|
||
|
||
|
||
let ci = CIImage(cgImage: image.cgImage!)
|
||
return ci
|
||
}
|
||
|
||
|
||
//将两张图片合成一张图片 OU
|
||
func joinImages_ou( left:UIImage, right:UIImage,imgWidth:CGFloat,imgHeight:CGFloat) -> CIImage {
|
||
let newImageSize = CGSize(width:imgWidth, height: imgHeight);
|
||
UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1);
|
||
left.draw(in: CGRect(x:0, y:0, width:imgWidth, height:imgHeight/2))
|
||
right.draw(in: CGRect(x:0, y:imgHeight/2, width:imgWidth, height:imgHeight/2))
|
||
let image = UIGraphicsGetImageFromCurrentImageContext()!
|
||
UIGraphicsEndImageContext();
|
||
|
||
let ci = CIImage(cgImage: image.cgImage!)
|
||
return ci
|
||
}
|
||
|
||
    /// Composes the two eye images into one side-by-side frame, applying the
    /// zoom (`scale`) and eye-distance (`ed`) adjustments.
    ///
    /// - Parameters:
    ///   - leftImage / rightImage: source eye images.
    ///   - scale: zoom factor; per the original note it must be > 0.5
    ///     (see `getImgWithScale` for the cropping math).
    ///   - ed: pupil-distance adjustment, an integer expected in -30...+30;
    ///     one unit maps to `edS` (= 20) pixels of horizontal offset.
    /// - Returns: the composed frame at the ORIGINAL frame size (the canvas
    ///   size must stay constant during playback).
    func joinImages( leftImage:CIImage, rightImage:CIImage,scale:CGFloat,ed:Int) -> CIImage {

        // Zoom parameter: must be greater than 0.5.
        // Eye-distance parameter: tentatively 1 unit : 2 pixels per the
        // original note, value range -30 ... +30 (integer).
        let edS = 20 // pixels of horizontal offset per unit of `ed`

        let left = UIImage(ciImage: leftImage )
        let right = UIImage(ciImage: rightImage )

        // Zoomed copies of each eye plus the size they should be drawn at.
        let (n_left,n_left_size) = getImgWithScale(image: left, scale: scale)
        let (n_right,n_right_size) = getImgWithScale(image: right, scale: scale)

        // Horizontal offset implied by the eye-distance setting.
        let x_offset = CGFloat(abs(ed) * edS)
        // Width available for each half-frame after the ED offset is applied.
        let imageWidth = left.size.width*0.5 - x_offset
        print("imageWidth:\(imageWidth) ,left.size.width*0.5:\(left.size.width*0.5) ")
        let imageHeight = left.size.height
        // Each eye cropped (if necessary) to fit the reduced half-width.
        let (ed_left,last_left_size) = getImgWithED(image: n_left, drawImgWidth: imageWidth,drawSize: n_left_size)
        let (ed_right,last_right_size) = getImgWithED(image: n_right, drawImgWidth: imageWidth,drawSize: n_right_size)
        print("n_left_size:\(n_left_size)\nlast_left_size\(last_left_size)")
        print("n_right_size:\(n_right_size)\nlast_right_size\(last_right_size)")
        // testImageUi(image: ed_left)
        // Canvas keeps the full original size so the output never resizes.
        let newImageSize = left.size
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1)

        if (ed > 0) {// ed > 0: each half narrows; left hugs x = 0, right hugs the right edge.
            ed_left.draw(in: CGRect(x:0, y:(imageHeight - last_left_size.height) * 0.5, width:last_left_size.width, height:last_left_size.height))
            ed_right.draw(in: CGRect(x:newImageSize.width - last_right_size.width, y:(imageHeight - last_right_size.height) * 0.5, width:last_right_size.width, height:last_right_size.height))

            print("ed>>>>> 0....")
        }
        else if (ed < 0) {// ed < 0: each half narrows and hugs the centre line instead.
            // NOTE(review): the y-centring here uses n_*_size.height while the
            // ed > 0 branch uses last_*_size.height — confirm this asymmetry
            // is intentional.
            ed_left.draw(in: CGRect(x:newImageSize.width*0.5 - last_left_size.width, y:(imageHeight - n_left_size.height) * 0.5, width:last_left_size.width, height:last_left_size.height))
            ed_right.draw(in: CGRect(x:newImageSize.width*0.5, y:(imageHeight - n_right_size.height) * 0.5, width:last_right_size.width, height:last_right_size.height))
            print("ed < 0....")
        }
        else {// ed == 0 (unchanged eye distance): centre each eye in its own half.
            let leftRect = CGRect(x:(imageWidth - n_left_size.width) * 0.5, y:(imageHeight - n_left_size.height) * 0.5 , width:n_left_size.width, height:n_left_size.height)
            ed_left.draw(in:leftRect )

            let rightRect = CGRect(x:imageWidth + (imageWidth - n_right_size.width) * 0.5 , y:(imageHeight - n_right_size.height) * 0.5, width:n_right_size.width, height:n_right_size.height)
            ed_right.draw(in:rightRect )

            print("newImageSize:\(newImageSize) \nimageWidth:\(imageWidth) \ned_left.size:\(ed_left.size) \nleftRect:\(leftRect) \nrightRect:\(rightRect)")
        }

        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext();

        let ci = CIImage(cgImage: image.cgImage!)
        return ci
    }
|
||
|
||
    /// Image zoom. Adjusts one eye image for the zoom factor `scale`
    /// (conceptually defaulting to 1).
    ///
    /// - Parameters:
    ///   - image: source eye image.
    ///   - scale: zoom factor (per `joinImages`, expected to be > 0.5).
    /// - Returns: the (possibly cropped) image and the size at which it
    ///   should be drawn on the half-frame canvas.
    func getImgWithScale(image:UIImage,scale:CGFloat) -> (UIImage,CGSize) {
        // Base draw size: one eye occupies half the source in each dimension.
        let originSize = CGSize(width: image.size.width*0.5, height: image.size.height*0.5)

        var newImage = image
        var newSize = originSize// drawing area on the canvas

        if scale > 1 {// Zoom in: the image is cropped; the draw width stays fixed while the height grows with scale.
            // Height increment on the canvas for this zoom step.
            let Hs_add = (image.size.height - image.size.height*0.5)*(scale - 1)
            // Recompute the draw size from the height increment.
            newSize = CGSizeMake(originSize.width, originSize.height + Hs_add)

            // Crop region derived from `scale`.
            // NOTE(review): the (2 - scale) factor reaches zero at scale == 2
            // and goes negative beyond — confirm the allowed range is < 2.
            let cutSize = CGSizeMake((2 - scale)*(image.size.height * newSize.width/newSize.height),(2 - scale) * image.size.height)

            // Crop is centred in the source image.
            let cutRect = CGRectMake((image.size.width - cutSize.width)*0.5, (image.size.height - cutSize.height) * 0.5, cutSize.width, cutSize.height)
            newImage = image.imageAtRect(rect: cutRect)!
        }
        else if scale < 1 {// Zoom out: no cropping; the draw size shrinks proportionally.
            // Proportionally reduced draw size.
            newSize = CGSizeMake(scale*originSize.width, scale*originSize.height)
            // newImage = image.imageAtRect(rect: CGRect(origin: .zero, size: image.size))!
        }
        // testImageUi(image: newImage)
        return (newImage,newSize)
    }
|
||
|
||
//图像瞳距,参数条件必须满足:image能够等比缩放后填充满drawSize,即image的真实size与drawSize预期size并不要求一致,但要求等比
|
||
func getImgWithED(image:UIImage,drawImgWidth:CGFloat,drawSize:CGSize) -> (UIImage,CGSize) {
|
||
if drawSize.width <= drawImgWidth {
|
||
|
||
print("瞳距返回,不用处理")
|
||
return (image,drawSize)
|
||
}
|
||
print("瞳距要处理")
|
||
// let s = drawSize.height / image.size.height
|
||
let newImageWidth = drawImgWidth * image.size.height / drawSize.height
|
||
let dRect = CGRectMake((image.size.width - newImageWidth) * 0.5, 0, newImageWidth, image.size.height)
|
||
let newImage = image.imageAtRect(rect: dRect)
|
||
return (newImage!,CGSize(width: drawImgWidth, height: drawSize.height))
|
||
}
|
||
|
||
func testImageUi(image:UIImage) {
|
||
DispatchQueue.main.async {
|
||
// var imageView = KWindow?.viewWithTag(9988) as? UIImageView
|
||
// if let iv = imageView {
|
||
// iv.image = image
|
||
// }
|
||
// else {
|
||
// imageView = UIImageView(frame: CGRect(x: 20, y: 100, width: KScreenWidth*0.5, height: 260))
|
||
// imageView?.tag = 9988
|
||
// imageView?.backgroundColor = .red
|
||
// imageView?.contentMode = .scaleAspectFit
|
||
// imageView?.clipsToBounds = true
|
||
// imageView?.image = image
|
||
// KWindow?.addSubview(imageView!)
|
||
// }
|
||
var line = KWindow?.viewWithTag(9981 ) as? UIView
|
||
guard line == nil else {
|
||
return
|
||
}
|
||
|
||
line = UIView()
|
||
KWindow?.addSubview(line!)
|
||
line?.backgroundColor = .red
|
||
line!.snp.makeConstraints { make in
|
||
make.centerX.equalToSuperview()
|
||
make.width.equalTo(2)
|
||
make.top.bottom.equalToSuperview()
|
||
}
|
||
}
|
||
|
||
}
|
||
|
||
    /// Legacy side-by-side composer kept as a backup; not referenced by
    /// `convertVideo` in this file (superseded by `joinImages`).
    /// NOTE(review): the draw rects place both eyes at y = imageHeight/2 with
    /// height `imageHeight` (= half the source height) on a canvas of full
    /// source height — whether that letterboxing was intentional cannot be
    /// confirmed from this file; verify before reviving this path.
    func joinImages_backup( leftImage:CIImage, rightImage:CIImage) -> CIImage {
        let left = UIImage(ciImage: leftImage )
        let right = UIImage(ciImage: rightImage )

        // Combined width: half of each source's width; half height for rects.
        let imageWidth = left.size.width/2 + right.size.width/2
        let imageHeight = left.size.height/2

        let newImageSize = CGSize(width:imageWidth, height: left.size.height);
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1);
        left.draw(in: CGRect(x:0, y:imageHeight/2, width:imageWidth/2, height:imageHeight))
        right.draw(in: CGRect(x:imageWidth/2, y:imageHeight/2, width:imageWidth/2, height:imageHeight))
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext();

        let ci = CIImage(cgImage: image.cgImage!)
        return ci
    }
|
||
}