506 lines
20 KiB
Swift
506 lines
20 KiB
Swift
//
|
||
// PlayController.swift
|
||
// tdvideo
|
||
//
|
||
// Created by aaa on 2024/1/25.
|
||
//
|
||
|
||
import Foundation
|
||
import AVKit
|
||
import VideoToolbox
|
||
import CoreVideo
|
||
import UIKit
|
||
import ImageIO
|
||
import CoreImage
|
||
import Photos
|
||
|
||
|
||
// Image transcoding / spatial-photo viewer: shows a bundled spatial (HEIC)
// photo and re-renders it as parallel-eye, cross-eye, or red-blue anaglyph.
class PlayController: UIViewController {

    // Raw bytes of the image currently on screen (the bundled sample, or the
    // one delivered by PlayControllerImg's selection callback).
    var imgData:Data?

    /// Left-eye view of the current frame being processed in a stereo video.
    var leftEyeImage: CVPixelBuffer?

    /// Right-eye view of the current frame being processed in a stereo video.
    var rightEyeImage: CVPixelBuffer?

    // Core Image filter name; defaults to Gaussian blur.
    var lvjing = "CIGaussianBlur"

    // Mode flag: spatial video / cross-eye / red-blue anaglyph / Gaussian blur.
    // NOTE(review): only `type == 3` is consulted (in convertVideo) — confirm
    // what the other values are meant to select.
    var type = 0

    // var playerItem:AVPlayerItem?

    var playerLay:AVPlayerLayer?
    var player:AVPlayer = AVPlayer()
    var btn3:UIButton?

    // Conversion input file and output destination.
    var sourceVideoURL:URL?
    var outputVideoURL:URL?
    var playerLooper: AVPlayerLooper?

    // Preview image view whose contents are driven by the segmented control.
    var mImgView: UIImageView?
||
// Determines whether the file at `imageURL` is a spatial photo.
// A non-spatial photo carries a single image; a spatial photo carries one
// image per eye plus a "{HEIF}" dictionary (camera extrinsics/rotation) in
// the second image's properties, e.g.:
//   {HEIF}: { CameraExtrinsics = { Position = (...); Rotation = (...) } }
// - Parameter imageURL: file URL of the candidate image.
// - Returns: true only when the file has >= 2 frames AND the "{HEIF}" marker.
func isSpatialImage(imageURL: URL) -> Bool {
    guard let imageSource = CGImageSourceCreateWithURL(imageURL as CFURL, nil) else {
        return false
    }
    // A spatial photo must contain at least two images (left/right eye).
    // This also keeps the index-1 property read below in bounds.
    guard CGImageSourceGetCount(imageSource) >= 2 else {
        return false
    }
    guard let properties = CGImageSourceCopyPropertiesAtIndex(imageSource, 1, nil) as? [CFString: Any] else {
        return false
    }
    print(properties)
    // The original returned true for ANY readable second frame, which lets
    // two-frame GIFs through (its own comment noted this). Require the
    // "{HEIF}" marker that only spatial photos carry.
    return properties["{HEIF}" as CFString] != nil
}
|
||
|
||
|
||
/// Loads the bundled sample spatial photo, shows a tappable preview, and
/// installs the display-mode segmented control.
override func viewDidLoad() {
    super.viewDidLoad()
    self.view.backgroundColor = UIColor.brown

    // Bail out instead of force-unwrapping when the bundled sample is missing;
    // the original crashed on `path!` in that case.
    guard let path = Bundle.main.path(forResource: "img3", ofType: "HEIC") else {
        print("img3.HEIC not found in bundle")
        return
    }
    let sourceURL = URL(filePath: path)
    sourceVideoURL = sourceURL
    outputVideoURL = URL.documentsDirectory.appending(path: "output11114.jpg")
    // Data(contentsOf:) replaces the NSData + conditional-cast round trip and
    // leaves imgData nil (instead of crashing) when the read fails.
    imgData = try? Data(contentsOf: sourceURL)

    let isSpatial = isSpatialImage(imageURL: sourceURL)
    if !isSpatial {
        print("这不是一张空间图片")
        return
    }

    // Preview image view for the spatial photo; tapping it opens the picker.
    let imageView = UIImageView()
    imageView.frame = CGRect(x: 0, y: 100, width: self.view.frame.size.width, height: 180)
    self.view.addSubview(imageView)
    imageView.image = UIImage(contentsOfFile: sourceURL.path)
    imageView.isUserInteractionEnabled = true
    let tapGesture = UITapGestureRecognizer(target: self, action: #selector(imageTapped(_:)))
    imageView.addGestureRecognizer(tapGesture)
    mImgView = imageView

    // Mode selector: spatial photo / parallel-eye / cross-eye / red-blue anaglyph.
    let segmentedControl = UISegmentedControl(items: ["空间照片", "平行眼", "交叉眼", "红蓝立体"])
    segmentedControl.frame = CGRect(x: 20, y: 700, width: 360, height: 45)
    segmentedControl.selectedSegmentIndex = 0
    self.view.addSubview(segmentedControl)
    segmentedControl.layer.borderWidth = 1.0
    segmentedControl.layer.borderColor = UIColor.blue.cgColor
    segmentedControl.tintColor = UIColor.blue
    let normalTextAttributes = [NSAttributedString.Key.foregroundColor: UIColor.white]
    let selectedTextAttributes = [NSAttributedString.Key.foregroundColor: UIColor.blue]
    segmentedControl.setTitleTextAttributes(normalTextAttributes, for: .normal)
    segmentedControl.setTitleTextAttributes(selectedTextAttributes, for: .selected)
    segmentedControl.addTarget(self, action: #selector(segmentedControlValueChanged(_:)), for: .valueChanged)
}
|
||
|
||
/// Presents the image picker and swaps in whatever the user selects.
@objc func imageTapped(_ sender: UITapGestureRecognizer) {
    let vc: PlayControllerImg = PlayControllerImg()
    self.present(vc, animated: true, completion: nil)
    // Capture self weakly: the presented controller retains this closure and
    // the original's strong `[self]` capture formed a retain cycle
    // (self -> presentedViewController -> handler -> self) for as long as the
    // picker stayed on screen.
    vc.mediaSelectedHandler = { [weak self] data in
        guard let self else { return }
        print("回调")
        print(data)
        self.imgData = data
        // Same effect as the original's force-unwrapped assignment, minus the
        // crash when the preview view was never created.
        self.mImgView?.image = UIImage(data: data)
    }
}
|
||
|
||
//"空间照片", "平行眼", "交叉眼", "红蓝立体"
|
||
// Segments: 0 = spatial photo, 1 = parallel-eye, 2 = cross-eye,
// 3 = red-blue anaglyph.
@objc func segmentedControlValueChanged(_ sender: UISegmentedControl) {
    let selectedIndex = sender.selectedSegmentIndex
    print("选中了第 \(selectedIndex) 个选项")
    player.pause()
    NotificationCenter.default.removeObserver(self)
    mImgView?.frame = CGRect(x: 0, y: 100, width: self.view.frame.size.width, height: 180)

    // Guard instead of force-unwrapping: imgData is nil when neither the
    // bundled sample nor a picked image could be loaded.
    guard let data = imgData,
          let imageSource = CGImageSourceCreateWithData(data as CFData, nil) else {
        return
    }
    print(imageSource)

    // Decode every frame; a spatial photo yields one image per eye.
    let frameCount = CGImageSourceGetCount(imageSource)
    var frames: [CGImage] = []
    for index in 0..<frameCount {
        if let frameImage = CGImageSourceCreateImageAtIndex(imageSource, index, nil) {
            frames.append(frameImage)
        }
    }
    if frames.count < 2 { return }
    let lciImage = CIImage(cgImage: frames[0])
    let rciImage = CIImage(cgImage: frames[1])

    switch selectedIndex {
    case 0:
        // Spatial photo: show the original file untouched.
        if let url = sourceVideoURL {
            mImgView?.image = UIImage(contentsOfFile: url.path)
        }
    case 1, 2:
        // Parallel-eye keeps left|right order; cross-eye swaps the eyes.
        mImgView?.frame = CGRect(x: 0, y: 100, width: self.view.frame.size.width, height: 130)
        let joined = (selectedIndex == 1)
            ? joinImages(leftImage: lciImage, rightImage: rciImage)
            : joinImages(leftImage: rciImage, rightImage: lciImage)
        guard let lastImg = convertCIImageToUIImage(ciImage: joined) else { return }
        DispatchQueue.main.async { [weak self] in
            // The original force-unwrapped the weakly captured self (`self!`),
            // which crashes if the controller is gone — defeating [weak self].
            self?.mImgView?.image = lastImg
        }
    case 3:
        // Red-blue anaglyph: tint each eye, then screen-blend the two.
        mImgView?.frame = CGRect(x: 0, y: 180, width: self.view.frame.size.width, height: 380)
        // NOTE(review): CIColorMatrix vector inputs are 4-component; these
        // 20-value CIVectors rely on the filter reading only the leading
        // components — confirm the intended matrices.
        let redColorMatrix: [CGFloat] = [
            0.0, 0.0, 0.0, 0.0, 0.0, // red channel
            0.0, 0.0, 0.0, 0.0, 0.0, // green channel
            0.0, 0.0, 0.5, 0.0, 0.0, // blue channel
            0.0, 0.0, 0.0, 1.0, 0.0  // alpha channel
        ]
        let blueColorMatrix: [CGFloat] = [
            0.5, 0.0, 0.0, 0.0, 0.0, // red channel
            0.0, 0.0, 0.0, 0.0, 0.0, // green channel
            0.0, 0.0, 0.0, 0.0, 0.0, // blue channel
            0.0, 0.0, 0.0, 1.0, 0.0  // alpha channel
        ]

        let redFilter = CIFilter(name: "CIColorMatrix")!
        redFilter.setValue(lciImage, forKey: kCIInputImageKey)
        redFilter.setValue(CIVector(values: redColorMatrix, count: redColorMatrix.count), forKey: "inputRVector")

        let blueFilter = CIFilter(name: "CIColorMatrix")!
        blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
        blueFilter.setValue(CIVector(values: blueColorMatrix, count: blueColorMatrix.count), forKey: "inputBVector")

        guard let redOutputImage = redFilter.outputImage,
              let blueOutputImage = blueFilter.outputImage else { return }

        // CIScreenBlendMode inverts, multiplies, and re-inverts the channel
        // values, merging the two tinted eye images into one anaglyph frame.
        // (Alternatives tried per the original notes: hard light, lighten,
        // color dodge/burn, darken, linear dodge, multiply, source-over.)
        let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
        compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
        compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)

        // Guard replaces the original `outputImage!` force unwrap.
        guard let lastImg = compositeFilter.outputImage else { return }
        DispatchQueue.main.async { [weak self] in
            self?.mImgView?.image = UIImage(ciImage: lastImg)
        }
    default:
        break
    }
}
|
||
|
||
/// Renders `image` into a freshly allocated 32BGRA pixel buffer sized to
/// `frame`, translating so that `frame.origin` maps to the buffer's origin.
/// - Returns: the filled buffer, or nil when allocation/context creation
///   fails or the image has no backing CGImage.
func createCVPixelBuffer(from image: UIImage, with frame: CGRect) -> CVPixelBuffer? {
    // UIImages built from a CIImage have no cgImage; fail gracefully instead
    // of force-unwrapping (the original crashed here).
    guard let cgImage = image.cgImage else { return nil }

    let options: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
    ]

    var pixelBuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     Int(frame.width),
                                     Int(frame.height),
                                     kCVPixelFormatType_32BGRA,
                                     options as CFDictionary,
                                     &pixelBuffer)
    guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
        return nil
    }

    CVPixelBufferLockBaseAddress(buffer, [])
    // Unlock on every exit path — the original leaked the lock when CGContext
    // creation failed (early return without the unlock).
    defer { CVPixelBufferUnlockBaseAddress(buffer, []) }

    let pixelData = CVPixelBufferGetBaseAddress(buffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()

    guard let context = CGContext(data: pixelData,
                                  width: Int(frame.width),
                                  height: Int(frame.height),
                                  bitsPerComponent: 8,
                                  bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                  space: colorSpace,
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue) else {
        return nil
    }

    // Shift so the requested sub-rect of the image lands at the buffer origin.
    context.translateBy(x: -frame.origin.x, y: -frame.origin.y)
    context.draw(cgImage, in: CGRect(origin: .zero, size: image.size))

    return buffer
}
|
||
|
||
|
||
|
||
// Joins two eye images side by side into one half-resolution image.
/// Identical in behavior to `joinImages(leftImage:rightImage:)` — the body
/// was a verbatim copy — so it now delegates to it; kept for source
/// compatibility with existing callers.
func joinImages2( leftImage:CIImage, rightImage:CIImage) -> CIImage {
    return joinImages(leftImage: leftImage, rightImage: rightImage)
}
|
||
|
||
/// Demo-button action: tag 10 pushes a PlayContoller4 onto the nav stack;
/// every other tag is ignored.
@objc func buttonPressed(sender:UIButton){
    guard sender.tag == 10 else { return }
    let nextController = PlayContoller4()
    self.navigationController?.pushViewController(nextController, animated: true)
}
|
||
|
||
/// Converts an MV-HEVC spatial movie into a flat side-by-side video file.
/// - Parameters:
///   - inputFile: source movie URL (expected to be a spatial / MV-HEVC asset).
///   - outputFile: destination URL; any existing file there is deleted first.
///   - progress: optional callback fed a 0…1 fraction per written frame.
/// - Throws: reader/loader errors from AVFoundation, or writer errors from
///   `VideoWriter.finish()`.
func convertVideo( inputFile : URL, outputFile: URL, progress: ((Float)->())? = nil ) async throws {
    // Best-effort removal of a stale output file; a failure (e.g. file absent)
    // is only logged, not fatal.
    do {
        try FileManager.default.removeItem(atPath: outputFile.path)
        print("视频文件删除成功")
    } catch {
        print("删除视频文件出错:\(error)")
    }

    // Load the AVAsset
    let asset = AVAsset(url: inputFile)
    let assetReader = try AVAssetReader(asset: asset)

    // Check whether this is a spatial video: spatial assets carry the
    // QuickTime metadata key "spatial.format-version". Note this only warns
    // and then proceeds regardless.
    let userDataItems = try await asset.loadMetadata(for:.quickTimeMetadata)
    let spacialCharacteristics = userDataItems.filter { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
    if spacialCharacteristics.count == 0 {
        print("该视频不是空间视频")
    }

    // Fetch the input's orientation transform and display size (used to set
    // the output orientation).
    let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)

    // Two videos placed side by side while keeping the aspect ratio: for all
    // modes except type 3, the output height is half the input height.
    // NOTE(review): the comment in the original said "output width is half",
    // but the code halves the HEIGHT — confirm which is intended.
    let vw:VideoWriter?
    if(type == 3){
        vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30 ), isRealTime: false, queue: .main)
    }
    else{
        vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height/2), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30 ), isRealTime: false, queue: .main)
    }

    // Reader output for the VIDEO track (the original comment said "audio"),
    // requesting both MV-HEVC layers (0 = left eye, 1 = right eye).
    let output = try await AVAssetReaderTrackOutput(
        track: asset.loadTracks(withMediaType: .video).first!,
        outputSettings: [
            AVVideoDecompressionPropertiesKey: [
                kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
            ],
        ]
    )
    assetReader.add(output)
    assetReader.startReading()
    let duration = try await asset.load(.duration)

    // Debug leftover: logs each composited source frame of the current player
    // item, if any; it never returns a composed frame to the request.
    if let playerItem = player.currentItem {
        playerItem.videoComposition = AVVideoComposition(asset: playerItem.asset) { request in
            print(request.sourceImage)
        }
    }

    // Pull samples until the reader is exhausted; each sample should carry
    // per-eye tagged pixel buffers.
    while let nextSampleBuffer = output.copyNextSampleBuffer() {
        // NOTE(review): this `return` exits the WHOLE function on the first
        // sample without tagged buffers, skipping vw.finish() and leaving the
        // output unfinalized — `continue` was presumably intended; confirm.
        guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { return }

        let leftEyeBuffer = taggedBuffers.first(where: {
            $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
        })?.buffer
        let rightEyeBuffer = taggedBuffers.first(where: {
            $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
        })?.buffer

        if let leftEyeBuffer,
           let rightEyeBuffer,
           case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
           case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {

            let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
            let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)
            // Side-by-side composite (left | right) at half resolution.
            let newpb = joinImages( leftImage: lciImage, rightImage:rciImage )
            let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
            _ = vw!.add(image: newpb, presentationTime: time)
            // print( "Added frame at \(time)")
            progress?( Float(time.value)/Float(duration.value))

            // try await Task.sleep(nanoseconds: 3_000_000)
        }
    }

    _ = try await vw!.finish()
    print( "Finished")
}
|
||
|
||
|
||
/// Loads the first video track's preferred transform together with its
/// display resolution (natural size with the transform applied, made
/// positive via `abs`).
/// - Throws: `VideoReaderError.invalidVideo` when the asset has no video track.
func getOrientationAndResolutionSizeForVideo(asset:AVAsset) async throws -> (CGAffineTransform, CGSize) {
    let videoTracks = try await asset.loadTracks(withMediaType: AVMediaType.video)
    guard let videoTrack = videoTracks.first else {
        throw VideoReaderError.invalidVideo
    }
    let rawSize = try await videoTrack.load(.naturalSize)
    let transform = try await videoTrack.load(.preferredTransform)
    let displaySize = rawSize.applying(transform)
    let resolution = CGSize(width: abs(displaySize.width), height: abs(displaySize.height))
    return (transform, resolution)
}
|
||
|
||
|
||
/// Renders a CIImage over its full extent into a CGImage-backed UIImage.
/// - Returns: the rendered UIImage, or nil when Core Image fails to render.
func convertCIImageToUIImage(ciImage: CIImage) -> UIImage? {
    let renderer = CIContext(options: nil)
    guard let rendered = renderer.createCGImage(ciImage, from: ciImage.extent) else {
        return nil
    }
    return UIImage(cgImage: rendered)
}
|
||
|
||
// Composites the two eye images side by side into a single CIImage.
/// Each input is drawn at half its original width and height, so the result
/// is (leftW/2 + rightW/2) x (leftH/2) at bitmap scale 1 — roughly the size
/// of a single source frame.
func joinImages( leftImage:CIImage, rightImage:CIImage) -> CIImage {
    // Wrap as UIImage so both can be drawn into a UIKit bitmap context.
    let left = UIImage(ciImage: leftImage )
    let right = UIImage(ciImage: rightImage )

    // Deliberate half-resolution downscale (each eye fills half the canvas).
    let imageWidth = left.size.width/2 + right.size.width/2
    let imageHeight = left.size.height/2

    let newImageSize = CGSize(width:imageWidth, height: imageHeight);
    // opaque = false, scale = 1 (one pixel per point, regardless of screen).
    UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1);
    // Left eye in the left half, right eye in the right half.
    left.draw(in: CGRect(x:0, y:0, width:imageWidth/2, height:imageHeight))
    right.draw(in: CGRect(x:imageWidth/2, y:0, width:imageWidth/2, height:imageHeight))
    // Non-nil while an image context is current, hence the force unwraps here
    // and on cgImage below (context-produced UIImages are CGImage-backed).
    let image = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext();

    let ci = CIImage(cgImage: image.cgImage!)
    return ci
}
|
||
|
||
/// Renders a CIImage into a freshly allocated 32ARGB CVPixelBuffer whose
/// dimensions match the image extent.
/// - Returns: the filled buffer, or nil when allocation fails.
func pixelBuffer(from ciImage: CIImage) -> CVPixelBuffer? {
    let bufferAttributes: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: kCFBooleanTrue,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: kCFBooleanTrue
    ]

    var newBuffer: CVPixelBuffer?
    let creationStatus = CVPixelBufferCreate(kCFAllocatorDefault,
                                             Int(ciImage.extent.width),
                                             Int(ciImage.extent.height),
                                             kCVPixelFormatType_32ARGB,
                                             bufferAttributes as CFDictionary,
                                             &newBuffer)
    guard creationStatus == kCVReturnSuccess, let target = newBuffer else {
        return nil
    }

    // Render directly into the buffer with a throwaway context.
    CIContext().render(ciImage, to: target)
    return target
}
|
||
|
||
}
|
||
|