Implementing Screen Recording with ScreenCaptureKit
2024-05-23
蓝雪清晨
import Cocoa
import AVFoundation
import ScreenCaptureKit
class InairScreenCaptureRecord: NSObject, SCStreamDelegate, SCStreamOutput {
@objc static let shareManager = InairScreenCaptureRecord()
var screen: SCDisplay?
var availableContent: SCShareableContent?
var filter: SCContentFilter?
var stream: SCStream!
var audioSettings: [String : Any]!
var tag:Int = 0
var recordingBufferCallBack: ((_ buffer:CMSampleBuffer, _ tag:Int) -> Void)?
private let videoSampleBufferQueue = DispatchQueue(label: "screenCaptureKit-samplecode.VideoSampleBufferQueue")
private let audioSampleBufferQueue = DispatchQueue(label: "screenCaptureKit-samplecode.AudioSampleBufferQueue")
// Check whether the app has screen-recording permission.
var canRecord: Bool {
get async {
do {
// If the app does not have screen-recording permission, this call throws.
_ = try await SCShareableContent.excludingDesktopWindows(false, onScreenWindowsOnly: true)
return true
} catch {
return false
}
}
}
deinit {
self.stopRecording()
}
override init() {
super.init()
}
@objc func prepRecord(displayID:CGDirectDisplayID, tag:Int) {
self.tag = tag
Task {
if await self.canRecord {
do {
// Retrieve the shareable content (the displays available for capture).
let availableContent = try await SCShareableContent.excludingDesktopWindows(false, onScreenWindowsOnly: true)
self.availableContent = availableContent
self.updateAudioSettings()
// Find the display to record; bail out if it is not among the shareable displays.
guard let screen = availableContent.displays.first(where: { displayID == $0.displayID }) else { return }
self.screen = screen
self.filter = SCContentFilter(display: screen, excludingApplications: [], exceptingWindows: [])
Task { await self.record(audioOnly: false, filter: self.filter!) }
} catch {
print("Failed to get the shareable content: \(error.localizedDescription)")
}
} else {
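// No screen-recording permission. One option is to trigger the system prompt
// via the CoreGraphics permission APIs; afterwards the user has to enable the
// app in System Settings > Privacy & Security > Screen Recording.
if !CGPreflightScreenCaptureAccess() {
_ = CGRequestScreenCaptureAccess()
}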
}
}
}
// Check whether the CPU is Apple silicon (ARM) or Intel.
@objc open func getCPUTypeIsARM() -> Bool {
// Ask sysctl for the value size first, then read hw.cputype into an Int32.
var size: size_t = 0
sysctlbyname("hw.cputype", nil, &size, nil, 0)
var type: Int32 = 0
sysctlbyname("hw.cputype", &type, &size, nil, 0)
if (type == CPU_TYPE_ARM64) {
print("ARM===ARM===ARM===ARM")
return true
} else {
print("X86_64===X86_64===X86_64===X86_64")
return false
}
}
func record(audioOnly: Bool, filter: SCContentFilter) async {
if (self.screen == nil) {
return
}
let streamConfig = SCStreamConfiguration()
streamConfig.pixelFormat = OSType(kCVPixelFormatType_32BGRA) // output pixel format
streamConfig.width = self.screen!.width
streamConfig.height = self.screen!.height
streamConfig.minimumFrameInterval = CMTime(value: 1, timescale: 60) // frame rate: 60 fps
streamConfig.showsCursor = true
streamConfig.queueDepth = 5
// To capture audio as well, enable the following:
// streamConfig.capturesAudio = true
// streamConfig.sampleRate = audioSettings[AVSampleRateKey] as! Int
// streamConfig.channelCount = audioSettings[AVNumberOfChannelsKey] as! Int
self.stream = SCStream(filter: filter, configuration: streamConfig, delegate: self)
do {
try self.stream.addStreamOutput(self, type: .screen, sampleHandlerQueue: videoSampleBufferQueue)
// To receive audio buffers, also add:
// try self.stream.addStreamOutput(self, type: .audio, sampleHandlerQueue: audioSampleBufferQueue)
try await self.stream.startCapture()
} catch {
assertionFailure("capture failed".local)
return
}
}
@objc func stopRecording() {
if self.stream != nil {
// stopCapture is asynchronous; use the completion-handler variant so this method can stay synchronous.
self.stream.stopCapture { error in
if let error = error {
print("Failed to stop capture: \(error.localizedDescription)")
}
}
}
self.stream = nil
self.screen = nil
self.availableContent = nil
}
// Audio capture settings.
func updateAudioSettings() {
self.audioSettings = [AVSampleRateKey : 48000, AVNumberOfChannelsKey : 2] // reset audioSettings
self.audioSettings[AVFormatIDKey] = kAudioFormatMPEG4AAC
self.audioSettings[AVEncoderBitRateKey] = 256 * 1000
}
func stream(_ stream: SCStream, didOutputSampleBuffer sampleBuffer: CMSampleBuffer, of outputType: SCStreamOutputType) {
guard sampleBuffer.isValid else { return }
switch outputType {
case .screen:
if self.screen == nil {
break
}
print("===========视频=====================");
self.recordingBufferCallBack?(sampleBuffer,self.tag)
break
case .audio:
print("===========音频(没做处理)=====================");
break
@unknown default:
assertionFailure("unknown stream type".local)
}
}
func stream(_ stream: SCStream, didStopWithError error: Error) { // stream error
print("关闭流时出现错误:\n".local, error,
"\n 这可能是由于窗口关闭或用户从UI停止".local)
DispatchQueue.main.async {
self.stopRecording()
}
}
/// Get the frame's width, height and base address from a sample buffer.
/// - Parameters:
///   - tag: identifier of the recorded screen
///   - sampleBuffer: one video frame
///   - complation: (data: base address, width, height, newTag: screen identifier, sampleSize: frame size in bytes, RawSampleBuffer: the original sampleBuffer)
func convertCMSampleBufferToData(_ tag:Int, sampleBuffer: CMSampleBuffer,complation: @escaping ((_ data: UnsafeMutableRawPointer?, _ width:Int, _ height:Int, _ newTag:Int, _ sampleSize:Int,_ RawSampleBuffer:CMSampleBuffer) -> Void)) {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
let height = CVPixelBufferGetHeight(imageBuffer)
var width = CVPixelBufferGetWidth(imageBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
print("======(\(width),\(height)) ===== PerRow = \(bytesPerRow)")
// Total size of the pixel data in bytes.
let sampleSize1 = CVPixelBufferGetDataSize(imageBuffer)
// Pad the width to account for row alignment (fixes garbled frames at some resolutions on Apple silicon); not needed on iOS.
width = width + (sampleSize1 - width*height*4)/(height*4)
CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
if (baseAddress == nil) {
complation(nil, width, height,tag,sampleSize1,sampleBuffer)
return
}
complation(baseAddress!, width, height,tag,sampleSize1,sampleBuffer)
}
}
extension String {
var local: String { return NSLocalizedString(self, comment: "") }
}
Usage example
var ScreenCaptureRecord:InairScreenCaptureRecord?
var ScreenCaptureRecord1:InairScreenCaptureRecord?
@objc public func start() {
// Get the screens to record.
let customScreenArray = self.getDisplayScreen()
print("-------- start screen recording --------")
for screen in customScreenArray {
let displayID = screen.deviceDescription[NSDeviceDescriptionKey(rawValue: "NSScreenNumber")] as! CGDirectDisplayID
let name = screen.localizedName.lowercased()
if (name.contains("screenname1")) {
self.ScreenCaptureRecord1 = InairScreenCaptureRecord()
self.ScreenCaptureRecord1!.prepRecord(displayID: displayID,tag: 1)
} else {
// Record the main screen.
self.ScreenCaptureRecord = InairScreenCaptureRecord()
self.ScreenCaptureRecord!.prepRecord(displayID: displayID,tag: 0)
}
}
self.recordingBufferReceiveProcessing()
}
@objc public func stop() {
print("--------停止录屏---------")
self.ScreenCaptureRecord?.stopRecording()
self.ScreenCaptureRecord1?.stopRecording()
self.ScreenCaptureRecord = nil
self.ScreenCaptureRecord1 = nil
}
// Handle the received sample buffers.
func recordingBufferReceiveProcessing() {
self.ScreenCaptureRecord?.recordingBufferCallBack = { (buffer, tag) in
// Render directly into an NSView (using a layer or Metal).
self.metalRender(with: buffer)
}
self.ScreenCaptureRecord1?.recordingBufferCallBack = { (buffer, tag) in
self.metalRender(with: buffer)
}
}
func getDisplayScreen() -> [NSScreen] {
var customScreenArray: [NSScreen] = []
for screen in NSScreen.screens {
let displayId = screen.deviceDescription[NSDeviceDescriptionKey(rawValue: "NSScreenNumber")] as! CGDirectDisplayID
// Check whether this is the built-in display.
if ((CGDisplayIsBuiltin(displayId)) != 0) {
customScreenArray.append(screen)
}
let displayName:String = screen.localizedName
if (displayName.contains("screenname1")) {
customScreenArray.append(screen)
}
}
return customScreenArray
}
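If you need the raw pixel bytes rather than handing the CMSampleBuffer straight to a renderer, the convertCMSampleBufferToData helper defined above can be called from the same callback. A minimal sketch; sendFrame(_:width:height:) is a hypothetical stand-in for whatever consumer you have (an encoder, a network sender, etc.):

self.ScreenCaptureRecord?.recordingBufferCallBack = { (buffer, tag) in
    // Extract the base address, padded width, height and byte size of the frame.
    self.ScreenCaptureRecord?.convertCMSampleBufferToData(tag, sampleBuffer: buffer) { data, width, height, newTag, sampleSize, rawBuffer in
        guard let data = data else { return }
        // sendFrame(data, width: width, height: height)  // hypothetical consumer
        print("frame \(newTag): \(width)x\(height), \(sampleSize) bytes, base address \(data)")
    }
}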
Rendering
Layer rendering
var displayLayer: AVSampleBufferDisplayLayer?
func metalRender(with sampleBuffer: CMSampleBuffer) {
// The callback arrives on the capture queue, so hop to the main thread for UI work.
DispatchQueue.main.sync {
if self.displayLayer == nil {
self.displayLayer = AVSampleBufferDisplayLayer()
self.displayLayer?.frame = self.view.bounds // frame of the view to render into
// self.displayLayer?.videoGravity = .resizeAspectFill
self.view.layer.addSublayer(self.displayLayer!)
}
self.displayLayer?.enqueue(sampleBuffer)
}
}
Metal rendering
import MetalKit
import CoreMedia
import MetalPerformanceShaders
var mtkView: MTKView?                   // rendering view
var processQueue: DispatchQueue?        // processing queue
var textureCache: CVMetalTextureCache?  // Core Video Metal texture cache
var commandQueue: MTLCommandQueue?      // command queue
var texture: MTLTexture?                // texture of the current frame
// Call this during initialization, otherwise the MTKView delegate callbacks will not fire.
setupMetal()
func setupMetal() {
// 1. Create the MTKView.
self.mtkView = MTKView(frame: self.view.bounds) // frame of the view to render into
self.mtkView?.device = MTLCreateSystemDefaultDevice()
self.view.addSubview(self.mtkView!)
self.mtkView?.delegate = self
// 2. Make the MTKView's drawable texture writable (it is read-only by default).
self.mtkView?.framebufferOnly = false
// 3. Create the command queue.
self.commandQueue = self.mtkView?.device?.makeCommandQueue()
// 4. Create the Core Video Metal texture cache.
CVMetalTextureCacheCreate(nil, nil, self.mtkView!.device!, nil, &textureCache)
}
func metalRender(with sampleBuffer: CMSampleBuffer) {
// 1. Get the pixel buffer from the sample buffer.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
// 2. Get the width and height of the captured frame.
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
// 3. Create a Core Video Metal texture from the pixel buffer.
var tmpTexture: CVMetalTexture?
let status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, self.textureCache!, pixelBuffer, nil, .bgra8Unorm, width, height, 0, &tmpTexture)
// Check that the texture was created successfully.
if status == kCVReturnSuccess {
// 4. Match the drawable size to the frame size.
self.mtkView?.drawableSize = CGSize(width: CGFloat(width), height: CGFloat(height))
// 5. Get the Metal texture out of the Core Video texture.
self.texture = CVMetalTextureGetTexture(tmpTexture!)
}
}
extension ScreenRecordingViewController: MTKViewDelegate {
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
print("视图大小发生改变时会调用此方法")
}
func draw(in view: MTKView) {
// Check whether a captured texture is available.
if let texture = self.texture {
// 1. Create a command buffer.
let commandBuffer = commandQueue?.makeCommandBuffer()
// 2. Use the MTKView's drawable as the destination texture.
let drawingTexture = view.currentDrawable?.texture
// 3. Create a Gaussian blur filter; the higher the sigma, the blurrier the image.
let filter = MPSImageGaussianBlur(device: mtkView!.device!, sigma: 1)
// 4. The Gaussian filter takes Metal textures as input and output.
//    Input: the captured frame, self.texture
//    Output: drawingTexture (i.e. view.currentDrawable.texture)
filter.encode(commandBuffer: commandBuffer!, sourceTexture: texture, destinationTexture: drawingTexture!)
// 5. Present the drawable and commit the command buffer.
commandBuffer?.present(view.currentDrawable!)
commandBuffer?.commit()
// 6. Clear the current texture, ready for the next frame.
self.texture = nil
}
}
}
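In draw(in:) above, the Gaussian blur with sigma 1 is effectively just being used to move the captured texture into the drawable. If no filtering is wanted, a plain blit copy does the same job; a minimal alternative sketch, assuming the drawable and the captured texture share the same size and bgra8Unorm pixel format:

func draw(in view: MTKView) {
    guard let texture = self.texture,
          let drawable = view.currentDrawable,
          let commandBuffer = commandQueue?.makeCommandBuffer(),
          let blitEncoder = commandBuffer.makeBlitCommandEncoder() else { return }
    // Copy the captured frame straight into the drawable's texture.
    blitEncoder.copy(from: texture, to: drawable.texture)
    blitEncoder.endEncoding()
    commandBuffer.present(drawable)
    commandBuffer.commit()
    // Clear the current texture, ready for the next frame.
    self.texture = nil
}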