//
// PlayController.swift
// tdvideo
//
// Created by aaa on 2024/1/25.
//
import Foundation
import AVKit
import VideoToolbox
import CoreVideo
import UIKit
import ImageIO
import CoreImage
import Photos
class PlayController: UIViewController {
    var imgData: Data?
    /// Left-eye frame decoded from the spatial asset
    var leftEyeImage: CVPixelBuffer?
    /// Right-eye frame decoded from the spatial asset
    var rightEyeImage: CVPixelBuffer?
    // Core Image filter name ("lvjing" is pinyin for "filter")
    var lvjing = "CIGaussianBlur"
    // Conversion mode; 3 keeps the full frame height in convertVideo, otherwise the height is halved
    var type = 0
    // var playerItem:AVPlayerItem?
    var playerLay: AVPlayerLayer?
    var player: AVPlayer = AVPlayer()
    var btn3: UIButton?
    var sourceVideoURL: URL?
    var outputVideoURL: URL?
    var playerLooper: AVPlayerLooper?
    var mImgView: UIImageView?
    // let makerAppleProperties = imageProperties["{HEIF}"]
    /// Heuristic check for a spatial (stereo) HEIC: reading properties at
    /// index 1 only succeeds when the file contains a second image (the
    /// right-eye frame), so the guard doubles as the stereo test.
    func isSpatialImage(imageURL: URL) -> Bool {
        guard let imageSource = CGImageSourceCreateWithURL(imageURL as CFURL, nil) else {
            return false
        }
        guard let properties = CGImageSourceCopyPropertiesAtIndex(imageSource, 1, nil) as? [CFString: Any] else {
            return false
        }
        print(properties)
        /*
        Sample output for a spatial photo:
        [ProfileName: sRGB IEC61966-2.1, {TIFF}: {
            Orientation = 1;
            TileLength = 512;
            TileWidth = 512;
        }, PixelWidth: 4032, PixelHeight: 3024, {HEIF}: {
            CameraExtrinsics = {
                CoordinateSystemID = 0;
                Position = (
                    "-0.019238",
                    0,
                    0
                );
                Rotation = (
                    1, 0, 0,
                    0, 1, 0,
                    0, 0, 1
                );
            };
        }, Depth: 8, Orientation: 1, ColorModel: RGB]
        */
        // GIFs also contain multiple frames, so a frame-count check alone is not enough:
        // let frameCount = CGImageSourceGetCount(imageSource)
        // if(frameCount == 1){
        //     return false
        // }
        return true
    }
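    // A stricter variant (a sketch, not wired into the UI): besides requiring a
    // second image, look for the "{HEIF}" dictionary with the CameraExtrinsics
    // entry that the property dump above shows for spatial photos. The "{HEIF}"
    // key string follows the commented-out lookup near the top of this class.
    func isSpatialImageStrict(imageURL: URL) -> Bool {
        guard let imageSource = CGImageSourceCreateWithURL(imageURL as CFURL, nil),
              CGImageSourceGetCount(imageSource) >= 2,
              let properties = CGImageSourceCopyPropertiesAtIndex(imageSource, 1, nil) as? [CFString: Any],
              let heifProperties = properties["{HEIF}" as CFString] as? [CFString: Any] else {
            return false
        }
        // Spatial photos carry per-eye camera extrinsics (position and rotation)
        return heifProperties["CameraExtrinsics" as CFString] != nil
    }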
    override func viewDidLoad() {
        super.viewDidLoad()
        self.view.backgroundColor = UIColor.brown
        let path = Bundle.main.path(forResource: "img3", ofType: "HEIC")
        sourceVideoURL = URL(filePath: path!)
        outputVideoURL = URL.documentsDirectory.appending(path: "output11114.jpg")
        imgData = try? Data(contentsOf: sourceVideoURL!)
        let isSpatial = isSpatialImage(imageURL: sourceVideoURL!)
        if !isSpatial {
            print("This is not a spatial photo")
            return
        }
        mImgView = UIImageView()
        mImgView!.frame = CGRect(x: 0, y: 100, width: self.view.frame.size.width, height: 180)
        self.view.addSubview(mImgView!)
        let image = UIImage(contentsOfFile: sourceVideoURL!.path)
        mImgView!.image = image
        mImgView!.isUserInteractionEnabled = true
        let tapGesture = UITapGestureRecognizer(target: self, action: #selector(imageTapped(_:)))
        mImgView!.addGestureRecognizer(tapGesture)
        // Viewing-mode switcher: spatial photo, parallel view, cross-eye view, red-blue anaglyph
        let segmentedControl = UISegmentedControl(items: ["Spatial Photo", "Parallel View", "Cross-eye View", "Red-Blue Anaglyph"])
        segmentedControl.frame = CGRect(x: 20, y: 700, width: 360, height: 45)
        // Default to the spatial photo view
        segmentedControl.selectedSegmentIndex = 0
        self.view.addSubview(segmentedControl)
        segmentedControl.layer.borderWidth = 1.0
        segmentedControl.layer.borderColor = UIColor.blue.cgColor
        segmentedControl.tintColor = UIColor.blue
        let normalTextAttributes = [NSAttributedString.Key.foregroundColor: UIColor.white]
        let selectedTextAttributes = [NSAttributedString.Key.foregroundColor: UIColor.blue]
        segmentedControl.setTitleTextAttributes(normalTextAttributes, for: .normal)
        segmentedControl.setTitleTextAttributes(selectedTextAttributes, for: .selected)
        // React to selection changes
        segmentedControl.addTarget(self, action: #selector(segmentedControlValueChanged(_:)), for: .valueChanged)
    }
    @objc func imageTapped(_ sender: UITapGestureRecognizer) {
        let vc: PlayControllerImg = PlayControllerImg()
        self.present(vc, animated: true, completion: nil)
        vc.mediaSelectedHandler = { [self] data in
            print("callback")
            print(data)
            imgData = data
            let image = UIImage(data: imgData!)
            mImgView!.image = image
        }
    }
//"", "", "", ""
@objc func segmentedControlValueChanged(_ sender: UISegmentedControl) {
//
let selectedIndex = sender.selectedSegmentIndex
print("选中了第 \(selectedIndex) 个选项")
player.pause()
NotificationCenter.default.removeObserver(self)
mImgView!.frame = CGRect.init(x: 0, y: 100, width: self.view.frame.size.width, height: 180)
// guard let imageSource = CGImageSourceCreateWithURL(sourceVideoURL! as CFURL, nil) else {
// return
// }
guard let imageSource = CGImageSourceCreateWithData(imgData! as CFData, nil) else {
return
}
print(imageSource)
let frameCount = CGImageSourceGetCount(imageSource)
var frames: [CGImage] = []
for index in 0..<frameCount {
guard let frameImage = CGImageSourceCreateImageAtIndex(imageSource, index, nil) else {
continue
}
frames.append(frameImage)
}
if(frames.count < 2){return}
let lciImage = CIImage(cgImage: frames.first!)
let rciImage = CIImage(cgImage: frames[1])
//
if(selectedIndex == 0){
let image = UIImage(contentsOfFile: sourceVideoURL!.path)
mImgView!.image = image
}
//
if(selectedIndex == 1){
mImgView!.frame = CGRect.init(x: 0, y: 100, width: self.view.frame.size.width, height: 130)
let newpb = joinImages( leftImage: lciImage, rightImage:rciImage )
let lastImg = convertCIImageToUIImage(ciImage: newpb)!
DispatchQueue.main.async { [weak self] in
self!.mImgView!.image = lastImg
}
}
//
if(selectedIndex == 2){
mImgView!.frame = CGRect.init(x: 0, y: 100, width: self.view.frame.size.width, height: 130)
let newpb = joinImages( leftImage:rciImage , rightImage:lciImage )
let lastImg = convertCIImageToUIImage(ciImage: newpb)!
DispatchQueue.main.async { [weak self] in
self!.mImgView!.image = lastImg
}
}
//
if(selectedIndex == 3){
//
mImgView!.frame = CGRect.init(x: 0, y: 180, width: self.view.frame.size.width, height: 380)
let redColorMatrix: [CGFloat] = [
0.0, 0.0, 0.0, 0.0, 0.0, //
0.0, 0.0, 0.0, 0.0, 0.0, // 绿
0.0, 0.0, 0.5, 0.0, 0.0, //
0.0, 0.0, 0.0, 1.0, 0.0 //
]
let blueColorMatrix: [CGFloat] = [
0.5, 0.0, 0.0, 0.0, 0.0, //
0.0, 0.0, 0.0, 0.0, 0.0, // 绿
0.0, 0.0, 0.0, 0.0, 0.0, //
0.0, 0.0, 0.0, 1.0, 0.0 //
]
let redFilter = CIFilter(name: "CIColorMatrix")!
redFilter.setValue(lciImage, forKey: kCIInputImageKey)
redFilter.setValue(CIVector(values: redColorMatrix, count: redColorMatrix.count), forKey: "inputRVector")
let blueFilter = CIFilter(name: "CIColorMatrix")!
blueFilter.setValue(rciImage, forKey: kCIInputImageKey)
blueFilter.setValue(CIVector(values: blueColorMatrix, count: blueColorMatrix.count), forKey: "inputBVector")
//
if let redOutputImage = redFilter.outputImage,
let blueOutputImage = blueFilter.outputImage {
// CIScreenBlendMode:
// CIHardLightBlendMode: 使
// CILightenBlendMode:
// CIColorDodgeBlendMode: 使
// CIColorBurnBlendMode: 使
// CIDarkenBlendMode:
// CILinearDodgeBlendMode: 使线
// CIMultiplyBlendMode:
// CISourceOverCompositing:
let compositeFilter = CIFilter(name: "CIScreenBlendMode")!
compositeFilter.setValue(redOutputImage, forKey: kCIInputImageKey)
compositeFilter.setValue(blueOutputImage, forKey: kCIInputBackgroundImageKey)
// let sharpenedFilter = CIFilter(name: "CISharpenLuminance")!
// sharpenedFilter.setValue(compositeFilter.outputImage, forKey: kCIInputImageKey)
// sharpenedFilter.setValue(2, forKey: kCIInputSharpnessKey)
// let colorControlsFilter = CIFilter(name: "CIColorControls")!
// colorControlsFilter.setValue(sharpenedFilter.outputImage, forKey: kCIInputImageKey)
// colorControlsFilter.setValue(0.7, forKey: kCIInputSaturationKey)
let lastImg = compositeFilter.outputImage!
DispatchQueue.main.async { [weak self] in
self!.mImgView!.image = UIImage(ciImage: lastImg)
}
}
}
}
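    // Alternative sketch of the anaglyph branch above, using explicit
    // 4-component vectors (the only components CIColorMatrix reads): keep red
    // from the left eye, green and blue from the right eye, and add the two
    // halves with CILinearDodgeBlendMode from the list above. Not wired into
    // the UI; offered as a conventional red-cyan formulation.
    func makeAnaglyph(leftEye: CIImage, rightEye: CIImage) -> CIImage? {
        let redOnly = CIFilter(name: "CIColorMatrix")!
        redOnly.setValue(leftEye, forKey: kCIInputImageKey)
        redOnly.setValue(CIVector(x: 1, y: 0, z: 0, w: 0), forKey: "inputRVector")
        redOnly.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputGVector")
        redOnly.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputBVector")
        let cyanOnly = CIFilter(name: "CIColorMatrix")!
        cyanOnly.setValue(rightEye, forKey: kCIInputImageKey)
        cyanOnly.setValue(CIVector(x: 0, y: 0, z: 0, w: 0), forKey: "inputRVector")
        cyanOnly.setValue(CIVector(x: 0, y: 1, z: 0, w: 0), forKey: "inputGVector")
        cyanOnly.setValue(CIVector(x: 0, y: 0, z: 1, w: 0), forKey: "inputBVector")
        guard let red = redOnly.outputImage, let cyan = cyanOnly.outputImage else { return nil }
        let add = CIFilter(name: "CILinearDodgeBlendMode")!
        add.setValue(red, forKey: kCIInputImageKey)
        add.setValue(cyan, forKey: kCIInputBackgroundImageKey)
        return add.outputImage
    }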
    func createCVPixelBuffer(from image: UIImage, with frame: CGRect) -> CVPixelBuffer? {
        let options: [String: Any] = [
            kCVPixelBufferCGImageCompatibilityKey as String: true,
            kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
        ]
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         Int(frame.width),
                                         Int(frame.height),
                                         kCVPixelFormatType_32BGRA,
                                         options as CFDictionary,
                                         &pixelBuffer)
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else {
            return nil
        }
        CVPixelBufferLockBaseAddress(buffer, [])
        // Unlock on every exit path, including the context-creation failure below
        defer { CVPixelBufferUnlockBaseAddress(buffer, []) }
        let pixelData = CVPixelBufferGetBaseAddress(buffer)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        guard let context = CGContext(data: pixelData,
                                      width: Int(frame.width),
                                      height: Int(frame.height),
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                      space: colorSpace,
                                      bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue) else {
            return nil
        }
        // Shift the origin so only the requested frame region is drawn
        context.translateBy(x: -frame.origin.x, y: -frame.origin.y)
        context.draw(image.cgImage!, in: CGRect(origin: .zero, size: image.size))
        return buffer
    }
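    // Usage sketch for createCVPixelBuffer (hypothetical region size): copy the
    // top-left 720p region of a UIImage into a BGRA buffer, e.g. before handing
    // frames to VideoToolbox.
    func demoRegionBuffer(from image: UIImage) -> CVPixelBuffer? {
        createCVPixelBuffer(from: image, with: CGRect(x: 0, y: 0, width: 1280, height: 720))
    }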
    @objc func buttonPressed(sender: UIButton) {
        if sender.tag == 10 {
            let vc: PlayContoller4 = PlayContoller4()
            self.navigationController?.pushViewController(vc, animated: true)
        }
    }
    func convertVideo(inputFile: URL, outputFile: URL, progress: ((Float) -> ())? = nil) async throws {
        do {
            try FileManager.default.removeItem(atPath: outputFile.path)
            print("Deleted existing output file")
        } catch {
            print("Error deleting output file: \(error)")
        }
        // Load the AVAsset
        let asset = AVAsset(url: inputFile)
        let assetReader = try AVAssetReader(asset: asset)
        // Check the QuickTime metadata for the spatial-video marker
        let userDataItems = try await asset.loadMetadata(for: .quickTimeMetadata)
        let spatialCharacteristics = userDataItems.filter { $0.identifier?.rawValue == "mdta/com.apple.quicktime.spatial.format-version" }
        if spatialCharacteristics.count == 0 {
            print("This video is not a spatial video")
        }
        // Orientation and display size (after applying the preferred transform)
        let (orientation, videoSize) = try await getOrientationAndResolutionSizeForVideo(asset: asset)
        // Mode 3 keeps the full frame height; other modes halve it
        let vw: VideoWriter?
        if type == 3 {
            vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30), isRealTime: false, queue: .main)
        } else {
            vw = VideoWriter(url: outputFile, width: Int(videoSize.width), height: Int(videoSize.height / 2), orientation: orientation, sessionStartTime: CMTime(value: 1, timescale: 30), isRealTime: false, queue: .main)
        }
        // Ask the decoder for both MV-HEVC layers (left and right eye)
        let output = try await AVAssetReaderTrackOutput(
            track: asset.loadTracks(withMediaType: .video).first!,
            outputSettings: [
                AVVideoDecompressionPropertiesKey: [
                    kVTDecompressionPropertyKey_RequestedMVHEVCVideoLayerIDs: [0, 1] as CFArray,
                ],
            ]
        )
        assetReader.add(output)
        assetReader.startReading()
        let duration = try await asset.load(.duration)
        if let playerItem = player.currentItem {
            playerItem.videoComposition = AVVideoComposition(asset: playerItem.asset) { request in
                print(request.sourceImage)
                // Pass the frame through unmodified; without this the request never completes
                request.finish(with: request.sourceImage, context: nil)
            }
        }
        while let nextSampleBuffer = output.copyNextSampleBuffer() {
            // Skip samples that carry no tagged (per-eye) buffers
            guard let taggedBuffers = nextSampleBuffer.taggedBuffers else { continue }
            let leftEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.leftEye)
            })?.buffer
            let rightEyeBuffer = taggedBuffers.first(where: {
                $0.tags.first(matchingCategory: .stereoView) == .stereoView(.rightEye)
            })?.buffer
            if let leftEyeBuffer,
               let rightEyeBuffer,
               case let .pixelBuffer(leftEyePixelBuffer) = leftEyeBuffer,
               case let .pixelBuffer(rightEyePixelBuffer) = rightEyeBuffer {
                let lciImage = CIImage(cvPixelBuffer: leftEyePixelBuffer)
                let rciImage = CIImage(cvPixelBuffer: rightEyePixelBuffer)
                // Compose the two eyes side by side and append the frame
                let newpb = joinImages(leftImage: lciImage, rightImage: rciImage)
                let time = CMSampleBufferGetOutputPresentationTimeStamp(nextSampleBuffer)
                _ = vw!.add(image: newpb, presentationTime: time)
                // print("Added frame at \(time)")
                progress?(Float(time.value) / Float(duration.value))
                // try await Task.sleep(nanoseconds: 3_000_000)
            }
        }
        _ = try await vw!.finish()
        print("Finished")
    }
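    // Usage sketch for convertVideo (the file names are hypothetical): run the
    // conversion from a Task so the async call has a concurrency context, and
    // log the reported progress fraction.
    func demoConvertSpatialVideo() {
        let input = URL.documentsDirectory.appending(path: "spatial.mov")      // assumed name
        let output = URL.documentsDirectory.appending(path: "sideBySide.mov")  // assumed name
        Task {
            do {
                try await convertVideo(inputFile: input, outputFile: output) { fraction in
                    print("Progress: \(Int(fraction * 100))%")
                }
            } catch {
                print("Conversion failed: \(error)")
            }
        }
    }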
    func getOrientationAndResolutionSizeForVideo(asset: AVAsset) async throws -> (CGAffineTransform, CGSize) {
        guard let track = try await asset.loadTracks(withMediaType: AVMediaType.video).first
        else { throw VideoReaderError.invalidVideo }
        let naturalSize = try await track.load(.naturalSize)
        let naturalTransform = try await track.load(.preferredTransform)
        // Apply the transform so rotated videos report their display size
        let size = naturalSize.applying(naturalTransform)
        return (naturalTransform, CGSize(width: abs(size.width), height: abs(size.height)))
    }
    func convertCIImageToUIImage(ciImage: CIImage) -> UIImage? {
        let context = CIContext(options: nil)
        if let cgImage = context.createCGImage(ciImage, from: ciImage.extent) {
            let uiImage = UIImage(cgImage: cgImage)
            return uiImage
        }
        return nil
    }
    // Draw the two eyes side by side. Note: each eye is rendered at half its
    // source width and height, so the pair fits within one eye's original width.
    func joinImages(leftImage: CIImage, rightImage: CIImage) -> CIImage {
        let left = UIImage(ciImage: leftImage)
        let right = UIImage(ciImage: rightImage)
        let imageWidth = left.size.width / 2 + right.size.width / 2
        let imageHeight = left.size.height / 2
        let newImageSize = CGSize(width: imageWidth, height: imageHeight)
        UIGraphicsBeginImageContextWithOptions(newImageSize, false, 1)
        left.draw(in: CGRect(x: 0, y: 0, width: imageWidth / 2, height: imageHeight))
        right.draw(in: CGRect(x: imageWidth / 2, y: 0, width: imageWidth / 2, height: imageHeight))
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        let ci = CIImage(cgImage: image.cgImage!)
        return ci
    }
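    // Alternative sketch for joinImages, staying entirely in Core Image: shift
    // the right eye past the left eye's extent and composite the two. The
    // result's extent is the union of both, i.e. the full-resolution
    // side-by-side pair, avoiding the UIKit context and the half-size downscale.
    func joinImagesCoreImage(leftImage: CIImage, rightImage: CIImage) -> CIImage {
        let shiftedRight = rightImage.transformed(by: CGAffineTransform(translationX: leftImage.extent.width, y: 0))
        return leftImage.composited(over: shiftedRight)
    }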
    func pixelBuffer(from ciImage: CIImage) -> CVPixelBuffer? {
        var pixelBuffer: CVPixelBuffer?
        let attributes: [String: Any] = [
            kCVPixelBufferCGImageCompatibilityKey as String: kCFBooleanTrue,
            kCVPixelBufferCGBitmapContextCompatibilityKey as String: kCFBooleanTrue
        ]
        let width = Int(ciImage.extent.width)
        let height = Int(ciImage.extent.height)
        let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32ARGB, attributes as CFDictionary, &pixelBuffer)
        if status == kCVReturnSuccess, let pixelBuffer = pixelBuffer {
            let context = CIContext()
            context.render(ciImage, to: pixelBuffer)
            return pixelBuffer
        }
        return nil
    }
}