//
//  FrameProcessor.swift
//  SpatialVideoGist
//
//  Created by Bryan on 12/15/23.
//

import AVFoundation
import CoreImage
/// A processor that can crop and manipulate video frames and convert between
/// underlying image types.
final class FrameProcessor {

    // MARK: - Properties

    // MARK: Public

    /// A flag that indicates whether the processor has been configured via
    /// `prepare(with:outputRetainedBufferCountHint:)`.
    var isPrepared = false

    // MARK: Private

    /// The `CIContext` used for image rendering; created in `prepare`.
    private var ciContext: CIContext?

    /// The color space that processed images are rendered into.
    private var outputColorSpace: CGColorSpace?

    /// A `CVPixelBufferPool` that holds the in-process pixel buffers.
    private var outputPixelBufferPool: CVPixelBufferPool?

    /// The input `CMFormatDescription` the processor was configured with.
    private(set) var inputFormatDescription: CMFormatDescription?

    /// The output `CMFormatDescription` derived from the allocated buffer pool.
    private(set) var outputFormatDescription: CMFormatDescription?

    /// The system's default Metal device, used for GPU processing.
    /// NOTE(review): force-unwrapped — this crashes on hardware without Metal
    /// support; acceptable only if the deployment targets guarantee a Metal
    /// device. Confirm against the app's supported devices.
    private let metalDevice = MTLCreateSystemDefaultDevice()!

    /// A cache that holds `CVMetalTexture` items.
    private var textureCache: CVMetalTextureCache!

    // MARK: - Methods

    /// Prepares this processor with the provided format description and
    /// buffer-retention hint.
    ///
    /// - Parameters:
    ///   - formatDescription: The input `CMFormatDescription` used to size and
    ///     configure the output buffer pool.
    ///   - outputRetainedBufferCountHint: The number of buffers expected to be
    ///     held during processing.
    func prepare(
        with formatDescription: CMFormatDescription,
        outputRetainedBufferCountHint: Int
    ) {
        // Drop any state from a previous configuration before building a new one.
        reset()

        (outputPixelBufferPool,
         outputColorSpace,
         outputFormatDescription) = allocateOutputBufferPool(
            with: formatDescription,
            outputRetainedBufferCountHint: outputRetainedBufferCountHint
        )

        // Pool allocation failed; leave the processor unprepared.
        guard outputPixelBufferPool != nil else { return }

        inputFormatDescription = formatDescription
        ciContext = CIContext()

        var metalTextureCache: CVMetalTextureCache?
        if CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, metalDevice, nil, &metalTextureCache) != kCVReturnSuccess {
            assertionFailure("Unable to allocate texture cache")
        } else {
            textureCache = metalTextureCache
        }

        isPrepared = true
    }

    /// Crops the provided `CIImage` to the given `CGRect`, then converts it to
    /// a `CVPixelBuffer`.
    ///
    /// - Parameters:
    ///   - pixelBufferImage: The source pixel buffer, as a `CIImage`.
    ///   - targetRect: The target `CGRect` the pixel-buffer image should be
    ///     cropped to.
    /// - Returns: The cropped image as a `CVPixelBuffer`, or `nil` if it could
    ///   not be processed.
    func cropPixelBuffer(
        pixelBufferImage: CIImage,
        targetRect: CGRect
    ) -> CVPixelBuffer? {
        // Binding the pool here removes the force-unwrap the original code
        // performed further down; `prepare` guarantees the pool is non-nil
        // whenever `isPrepared` is true.
        guard let ciContext = ciContext,
              let outputPixelBufferPool = outputPixelBufferPool,
              isPrepared
        else {
            isPrepared = false
            return nil
        }

        var croppedImage = pixelBufferImage.cropped(to: targetRect)

        // Move the cropped extent back to the origin so the render fills the
        // output buffer starting at (0, 0).
        let originTransform = CGAffineTransform(
            translationX: -croppedImage.extent.origin.x,
            y: -croppedImage.extent.origin.y
        )
        croppedImage = croppedImage.transformed(by: originTransform)

        var pbuf: CVPixelBuffer?
        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool, &pbuf)
        guard let outputPixelBuffer = pbuf else {
            print("Allocation failure")
            return nil
        }

        // Render the filtered image into the pixel buffer (no locking needed;
        // CIContext's render method does that itself).
        ciContext.render(
            croppedImage,
            to: outputPixelBuffer,
            bounds: croppedImage.extent,
            colorSpace: outputColorSpace
        )

        return outputPixelBuffer
    }

    // MARK: - Private

    /// Resets the processor to an unprepared state, releasing all derived
    /// resources.
    private func reset() {
        ciContext = nil
        outputColorSpace = nil
        outputPixelBufferPool = nil
        outputFormatDescription = nil
        inputFormatDescription = nil
        textureCache = nil
        isPrepared = false
    }
}

/// Helper methods that set up the processor.
private extension FrameProcessor {

    /// Allocates the output pixel-buffer pool and derives the matching color
    /// space and format description.
    ///
    /// - Parameters:
    ///   - inputFormatDescription: The `CMFormatDescription` provided to
    ///     configure the processor.
    ///   - outputRetainedBufferCountHint: The number of buffers to use as the
    ///     pool's minimum buffer count.
    /// - Returns: A tuple containing the output pixel-buffer pool, color space,
    ///   and format description (all `nil` on allocation failure).
    func allocateOutputBufferPool(
        with inputFormatDescription: CMFormatDescription,
        outputRetainedBufferCountHint: Int
    ) -> (
        outputBufferPool: CVPixelBufferPool?,
        outputColorSpace: CGColorSpace?,
        outputFormatDescription: CMFormatDescription?
    ) {
        let inputDimensions = CMVideoFormatDescriptionGetDimensions(inputFormatDescription)
        // Output is half the input width — presumably one eye of a side-by-side
        // spatial-video frame; TODO confirm against the capture pipeline.
        var pixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: UInt(kCVPixelFormatType_32BGRA),
            kCVPixelBufferWidthKey as String: Int(inputDimensions.width / 2),
            kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
            kCVPixelBufferIOSurfacePropertiesKey as String: [:]
        ]

        // Derive propagated attachments and the color space from the input
        // format description's extensions.
        var cgColorSpace = CGColorSpaceCreateDeviceRGB()
        if let inputFormatDescriptionExtension = CMFormatDescriptionGetExtensions(inputFormatDescription) as Dictionary? {
            let colorPrimaries = inputFormatDescriptionExtension[kCVImageBufferColorPrimariesKey]

            if let colorPrimaries = colorPrimaries {
                var colorSpaceProperties: [String: AnyObject] = [kCVImageBufferColorPrimariesKey as String: colorPrimaries]

                if let yCbCrMatrix = inputFormatDescriptionExtension[kCVImageBufferYCbCrMatrixKey] {
                    colorSpaceProperties[kCVImageBufferYCbCrMatrixKey as String] = yCbCrMatrix
                }

                if let transferFunction = inputFormatDescriptionExtension[kCVImageBufferTransferFunctionKey] {
                    colorSpaceProperties[kCVImageBufferTransferFunctionKey as String] = transferFunction
                }

                pixelBufferAttributes[kCVBufferPropagatedAttachmentsKey as String] = colorSpaceProperties
            }

            // Type-check before casting instead of the original unconditional
            // `as!`, which would crash on a malformed extension value.
            if let cvColorspace = inputFormatDescriptionExtension[kCVImageBufferCGColorSpaceKey],
               CFGetTypeID(cvColorspace) == CGColorSpace.typeID {
                cgColorSpace = (cvColorspace as! CGColorSpace)
            } else if (colorPrimaries as? String) == (kCVImageBufferColorPrimaries_P3_D65 as String),
                      let p3ColorSpace = CGColorSpace(name: CGColorSpace.displayP3) {
                cgColorSpace = p3ColorSpace
            }
        }

        // Create a pixel-buffer pool whose buffers match the attributes derived
        // from the input format description.
        let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
        var cvPixelBufferPool: CVPixelBufferPool?
        CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, pixelBufferAttributes as NSDictionary?, &cvPixelBufferPool)
        guard let pixelBufferPool = cvPixelBufferPool else {
            assertionFailure("Allocation failure: Could not allocate pixel buffer pool.")
            return (nil, nil, nil)
        }

        preallocateBuffers(pool: pixelBufferPool, allocationThreshold: outputRetainedBufferCountHint)

        // Create one buffer to obtain the output format description, then
        // release it back to the pool.
        var pixelBuffer: CVPixelBuffer?
        var outputFormatDescription: CMFormatDescription?
        let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: outputRetainedBufferCountHint] as NSDictionary
        CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pixelBufferPool, auxAttributes, &pixelBuffer)
        if let pixelBuffer = pixelBuffer {
            CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault,
                                                         imageBuffer: pixelBuffer,
                                                         formatDescriptionOut: &outputFormatDescription)
        }
        pixelBuffer = nil

        return (pixelBufferPool, cgColorSpace, outputFormatDescription)
    }

    /// Pre-allocates pixel buffers from the pool so they are resident before
    /// processing begins.
    ///
    /// - Parameters:
    ///   - pool: The `CVPixelBufferPool` to pre-allocate from.
    ///   - allocationThreshold: The maximum number of buffers the pool may
    ///     allocate before the loop stops.
    func preallocateBuffers(
        pool: CVPixelBufferPool,
        allocationThreshold: Int
    ) {
        var pixelBuffers = [CVPixelBuffer]()
        var error: CVReturn = kCVReturnSuccess
        let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: allocationThreshold] as NSDictionary
        var pixelBuffer: CVPixelBuffer?
        // Keep allocating until the pool reports the threshold has been reached,
        // holding every buffer in the array so the pool cannot recycle one
        // mid-loop.
        while error == kCVReturnSuccess {
            error = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pool, auxAttributes, &pixelBuffer)
            if let pixelBuffer = pixelBuffer {
                pixelBuffers.append(pixelBuffer)
            }
            pixelBuffer = nil
        }
        // Releasing the array returns the now-warm buffers to the pool.
        pixelBuffers.removeAll()
    }
}