iOS ReplayKit2 screen recording + H264 encode/decode + socket
2024-01-28
沉船无数
Preface
There are very few Swift implementations of H264 encoding out there. I put this together by following another Jianshu author's article; it runs fine, so feel free to copy-paste. If you don't know ReplayKit2 or app extensions yet, look them up first.
Demo implementation
Launching the app extension
import ReplayKit

let picker = RPSystemBroadcastPickerView(frame: CGRect(x: 0, y: 0, width: 1, height: 1))
picker.preferredExtension = "your app extension bundle id"
picker.showsMicrophoneButton = true
// Programmatically tap the picker's hidden button to present the broadcast list
for subView in picker.subviews {
    if let btn = subView as? UIButton {
        btn.sendActions(for: .allTouchEvents)
    }
}
The code above pops up the system broadcast picker. Select your extension and tap the start-broadcast button; the system then launches your app extension and screen recording begins.
Sharing code files
An app extension can use the host app's code; just configure it as shown below:
![](https://img.haomeiwen.com/i8135718/8a5f30bd70e37a3f.jpg)
Using the host project's pod libraries in the app extension
Just add the third-party libraries under the app extension target in the host Podfile, as shown below:
![](https://img.haomeiwen.com/i8135718/0a4f812cebd10e41.jpg)
RPBroadcastSampleHandler
import ReplayKit

class SampleHandler: RPBroadcastSampleHandler {
    private var socketManager: SocketManager?
    private var encoder: LYH264Encoder?

    override func broadcastStarted(withSetupInfo setupInfo: [String : NSObject]?) {
        // User has requested to start the broadcast. Setup info from the UI extension can be supplied but optional.
        setup()
    }

    override func broadcastPaused() {
        // User has requested to pause the broadcast. Samples will stop being delivered.
    }

    override func broadcastResumed() {
        // User has requested to resume the broadcast. Samples delivery will resume.
    }

    override func broadcastFinished() {
        // User has requested to finish the broadcast.
        socketManager?.dispose()
        encoder?.dispose()
    }

    override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
        switch sampleBufferType {
        case RPSampleBufferType.video:
            // Handle video sample buffer
            encode(sampleBuffer)
        case RPSampleBufferType.audioApp:
            // Handle audio sample buffer for app audio
            break
        case RPSampleBufferType.audioMic:
            // Handle audio sample buffer for mic audio
            break
        @unknown default:
            // Handle other sample buffer types
            fatalError("Unknown type of sample buffer")
        }
    }

    func setup() {
        socketManager = SocketManager(isServer: true)
        let scale = UIScreen.main.scale
        encoder = LYH264Encoder(width: Int32(UIScreen.main.bounds.width * scale), height: Int32(UIScreen.main.bounds.height * scale), bitRate: nil, fps: 30)
        // Register the encode callback once here, instead of re-assigning it on every frame
        encoder?.videoEncodeCallback = { [weak self] (data) in
            self?.socketManager?.sendParam([KSocketDataType : LYSocketDataType.sampleBuffer.rawValue, KSocketDataKey: data])
        }
    }

    func encode(_ sampleBuffer: CMSampleBuffer) {
        encoder?.encodeVideo(sampleBuffer: sampleBuffer)
    }

    func sendData(_ sampleBuffer: CMSampleBuffer) {
        // Alternative path: send single frames as base64 images (sampleBufferToBase64 is not shown here)
        if let string = self.sampleBufferToBase64(sampleBuffer: sampleBuffer) {
            self.socketManager?.sendParam([KSocketDataType : LYSocketDataType.image.rawValue, KSocketDataKey: string])
        }
    }
}
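The `sendData(_:)` method above calls a `sampleBufferToBase64(sampleBuffer:)` helper that is never shown in this post. A minimal sketch of what it could look like, assuming the intent is to JPEG-compress the frame's pixel buffer and base64-encode it (this implementation is my guess, not the original author's):

import CoreImage
import CoreMedia
import UIKit

// Hypothetical helper: render the sample buffer's pixel buffer to a JPEG, then base64 it
func sampleBufferToBase64(sampleBuffer: CMSampleBuffer) -> String? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    guard let jpegData = UIImage(cgImage: cgImage).jpegData(compressionQuality: 0.5) else { return nil }
    return jpegData.base64EncodedString()
}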
H264 encoding
I'll skip the theory; the code looks much the same everywhere. The following runs fine on Swift 5.x.
import VideoToolbox

class LYH264Encoder {
    public var videoEncodeCallback: ((Data) -> Void)?
    public var width: Int32 = 375
    public var height: Int32 = 852
    private var bitRate: Int32 = 375 * 852 * 3 * 4
    private var fps: Int32 = 10
    private var frameID: Int64 = 0
    private var encodeCallBack: VTCompressionOutputCallback?
    private var encodeQueue = DispatchQueue(label: "com.ly_encode")
    private var callBackQueue = DispatchQueue(label: "com.ly_encode_callBack")
    private var encodeSession: VTCompressionSession?

    public init(width: Int32 = 375, height: Int32 = 852, bitRate: Int32?, fps: Int32?) {
        self.width = width
        self.height = height
        self.bitRate = bitRate ?? (width * height * 3 * 4)
        self.fps = fps ?? 10
        setupCallBack()
        initVideoToolBox()
    }
    public func encodeVideo(sampleBuffer: CMSampleBuffer) {
        if self.encodeSession == nil {
            initVideoToolBox()
        }
        encodeQueue.async {
            guard let encodeSession = self.encodeSession else {
                return
            }
            guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
                return
            }
            var time = CMTime(value: self.frameID, timescale: 1000)
            self.frameID += 1
            if #available(iOS 15, *) { // on iOS 15, timescale = 1000 produced a very low bitrate and blocky output
                time = CMTime(value: self.frameID, timescale: 100)
            }
            var flags: VTEncodeInfoFlags = []
            // Encode one frame
            let state = VTCompressionSessionEncodeFrame(encodeSession, imageBuffer: imageBuffer, presentationTimeStamp: time, duration: .invalid, frameProperties: nil, sourceFrameRefcon: nil, infoFlagsOut: &flags)
            if state != noErr {
                print("VTCompression: encode failed: status \(state)")
            }
        }
    }

    func dispose() {
        frameID = 0
        if let session = encodeSession {
            VTCompressionSessionCompleteFrames(session, untilPresentationTimeStamp: .invalid)
            VTCompressionSessionInvalidate(session)
            encodeSession = nil
        }
    }

    deinit {
        dispose()
    }
    private func initVideoToolBox() {
        // Create the compression session
        let status = VTCompressionSessionCreate(allocator: kCFAllocatorDefault,    // session allocator
                                                width: width,                      // frame width
                                                height: height,                    // frame height
                                                codecType: kCMVideoCodecType_H264, // codec type: encode with H.264
                                                encoderSpecification: nil,         // require a specific encoder; nil lets VideoToolbox choose
                                                imageBufferAttributes: nil,        // required source pixel buffer attributes, used to create a pixel buffer pool
                                                compressedDataAllocator: nil,      // allocator for the compressed data
                                                outputCallback: encodeCallBack,    // called when one frame finishes encoding; write the data out there
                                                refcon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self),
                                                compressionSessionOut: &self.encodeSession)
        if status != noErr {
            print("failed to create compression session")
            return
        }
        guard let encodeSession = encodeSession else {
            return
        }
        // Real-time encoding output
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        // Profile/level
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ProfileLevel, value: kVTProfileLevel_H264_Baseline_AutoLevel)
        // Disable B-frames (B-frames are not required for decoding and can be dropped)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AllowFrameReordering, value: kCFBooleanFalse)
        // Keyframe interval
        var frameInterval: Int32 = 10
        let number = CFNumberCreate(kCFAllocatorDefault, CFNumberType.sInt32Type, &frameInterval)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_MaxKeyFrameInterval, value: number)
        // Expected frame rate (a hint, not the actual frame rate)
        let fpscf = CFNumberCreate(kCFAllocatorDefault, CFNumberType.sInt32Type, &fps)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ExpectedFrameRate, value: fpscf)
        // Average bitrate in bps: a high bitrate is sharp but the stream gets big; a low one looks blurry
        let bitrateAverage = CFNumberCreate(kCFAllocatorDefault, CFNumberType.sInt32Type, &bitRate)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AverageBitRate, value: bitrateAverage)
        // Hard data-rate limit, as a [bytes, seconds] pair
        let bitRatesLimit: CFArray = [bitRate * 5 / 8, 1] as CFArray
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_DataRateLimits, value: bitRatesLimit)
        // Compression quality (0.0-1.0); this must be a Float, or the Int bit pattern gets misread by .floatType
        var quality: Float = 1.0
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_Quality, value: CFNumberCreate(kCFAllocatorDefault, CFNumberType.floatType, &quality))
    }
    private func gotSpsPps(sps: Data, pps: Data) {
        // Annex-B stream layout: startcode sps pps i b p startcode ...
        let startCode = Data([0x00, 0x00, 0x00, 0x01])
        var h264Data = Data()
        h264Data.append(startCode)
        h264Data.append(sps)
        callBackQueue.async {
            self.videoEncodeCallback?(h264Data)
        }
        var ppsData = Data()
        ppsData.append(startCode)
        ppsData.append(pps)
        callBackQueue.async {
            self.videoEncodeCallback?(ppsData)
        }
    }

    private func gotEncodedData(_ data: Data, isKeyFrame: Bool) {
        let startCode = Data([0x00, 0x00, 0x00, 0x01])
        var h264Data = Data()
        h264Data.append(startCode)
        h264Data.append(data)
        callBackQueue.async {
            self.videoEncodeCallback?(h264Data)
        }
    }
    private func setupCallBack() {
        // Called when a frame finishes encoding
        encodeCallBack = { (outputCallbackRefCon, sourceFrameRefCon, status, infoFlags, sampleBuffer) in
            // 1. Bail out on encoder errors
            if status != noErr {
                print("encode with err")
                return
            }
            guard let sampleBuffer = sampleBuffer else {
                print("no buffer")
                return
            }
            guard CMSampleBufferDataIsReady(sampleBuffer) else {
                print("didCompressH264 data is not ready")
                return
            }
            // 2. Recover the encoder instance from the refcon pointer
            let encoder: LYH264Encoder = unsafeBitCast(outputCallbackRefCon, to: LYH264Encoder.self)
            // 3. Keyframe check: a keyframe has no kCMSampleAttachmentKey_NotSync attachment
            let attachmentsArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: true)
            let attachments = unsafeBitCast(CFArrayGetValueAtIndex(attachmentsArray, 0), to: CFDictionary.self)
            let isKeyframe = !CFDictionaryContainsKey(attachments, Unmanaged.passUnretained(kCMSampleAttachmentKey_NotSync).toOpaque())
            // For keyframes, extract the SPS & PPS first
            if isKeyframe {
                print("got a keyframe")
                // The encoded parameters live in the CMFormatDescription
                let format = CMSampleBufferGetFormatDescription(sampleBuffer)
                // SPS
                var sparameterSetSize: Int = 0
                var sparameterSetCount: Int = 0
                var sparameterSet: UnsafePointer<UInt8>?
                CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format!, parameterSetIndex: 0, parameterSetPointerOut: &sparameterSet, parameterSetSizeOut: &sparameterSetSize, parameterSetCountOut: &sparameterSetCount, nalUnitHeaderLengthOut: nil)
                // PPS
                var pparameterSetSize: Int = 0
                var pparameterSetCount: Int = 0
                var pparameterSet: UnsafePointer<UInt8>?
                CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format!, parameterSetIndex: 1, parameterSetPointerOut: &pparameterSet, parameterSetSizeOut: &pparameterSetSize, parameterSetCountOut: &pparameterSetCount, nalUnitHeaderLengthOut: nil)
                // Wrap sps/pps into Data
                let sps = Data(bytes: sparameterSet!, count: sparameterSetSize)
                let pps = Data(bytes: pparameterSet!, count: pparameterSetSize)
                // Emit them ahead of the frame data
                encoder.gotSpsPps(sps: sps, pps: pps)
            }
            // Get the data block
            guard let dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else {
                return
            }
            var length: size_t = 0
            var totalLength: size_t = 0
            var dataPointer: UnsafeMutablePointer<Int8>?
            let statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, atOffset: 0, lengthAtOffsetOut: &length, totalLengthOut: &totalLength, dataPointerOut: &dataPointer)
            if statusCodeRet == noErr {
                var bufferOffset: size_t = 0
                let AVCCHeaderLength: Int = 4 // the first four bytes of each NALU are not a 0001 startcode but a big-endian frame length
                // Walk every NALU in the block buffer
                while bufferOffset < totalLength - AVCCHeaderLength {
                    var NALUnitLength: UInt32 = 0
                    // Read the NAL unit length
                    memcpy(&NALUnitLength, dataPointer! + bufferOffset, AVCCHeaderLength)
                    // Convert from big-endian to host byte order
                    NALUnitLength = CFSwapInt32BigToHost(NALUnitLength)
                    let data = Data(bytes: dataPointer! + bufferOffset + AVCCHeaderLength, count: Int(NALUnitLength))
                    encoder.gotEncodedData(data, isKeyFrame: isKeyframe)
                    // Move to the next NAL unit in the block buffer
                    bufferOffset += AVCCHeaderLength + Int(NALUnitLength)
                }
            }
        }
    }
}
H264 decoding
import VideoToolbox

enum LYH264DecodeType {
    /// deliver decoded frames as CVPixelBuffer images
    case imageBuffer
    /// deliver CMSampleBuffers that still wrap the encoded H264 (for AVSampleBufferDisplayLayer)
    case sampleBuffer
}

class LYH264Decoder {
    /// output type
    public var returnType: LYH264DecodeType = .sampleBuffer
    /// callback with the decoded image buffer
    public var videoDecodeCallback: ((CVImageBuffer?) -> Void)?
    /// callback with the re-wrapped sample buffer
    public var videoDecodeSampleBufferCallback: ((CMSampleBuffer?) -> Void)?
    private var width: Int32 = 375
    private var height: Int32 = 852
    private var spsData: Data?
    private var ppsData: Data?
    private var decompressionSession: VTDecompressionSession?
    private var decodeDesc: CMVideoFormatDescription?
    private var callback: VTDecompressionOutputCallback?
    private var decodeQueue = DispatchQueue(label: "com.ly_decode")
    private var callBackQueue = DispatchQueue(label: "com.ly_decode_callBack")

    public init(width: Int32, height: Int32) {
        self.width = width
        self.height = height
    }
    public func decode(data: Data) {
        decodeQueue.async {
            let length: UInt32 = UInt32(data.count)
            self.decodeByte(data: data, size: length)
        }
    }

    private func decodeByte(data: Data, size: UInt32) {
        // Each frame arrives with a 4-byte Annex-B startcode (00 00 00 01);
        // replace it with a 4-byte big-endian NALU length (AVCC format)
        let naluSize = size - 4
        let length: [UInt8] = [
            UInt8(truncatingIfNeeded: naluSize >> 24),
            UInt8(truncatingIfNeeded: naluSize >> 16),
            UInt8(truncatingIfNeeded: naluSize >> 8),
            UInt8(truncatingIfNeeded: naluSize)
        ]
        var frameByte: [UInt8] = length
        [UInt8](data).suffix(from: 4).forEach { (item) in
            frameByte.append(item)
        }
        let bytes = frameByte
        // The 5th byte carries the NALU type: 7 is SPS, 8 is PPS, 5 is IDR (keyframe)
        let type: Int = Int(bytes[4] & 0x1f)
        switch type {
        case 0x05:
            print("- got keyframe, preparing decoder -")
            if initDecoder() {
                print("- got keyframe, decoding -")
                decode(frame: bytes, size: size)
            }
        case 0x06:
            // SEI (supplemental enhancement information), ignored
            break
        case 0x07:
            print("- sps -")
            spsData = data
        case 0x08:
            print("- pps -")
            ppsData = data
        default:
            if initDecoder() {
                decode(frame: bytes, size: size)
            }
        }
    }
    private func decode(frame: [UInt8], size: UInt32) {
        var blockBuffer: CMBlockBuffer?
        var frame1 = frame
        // Create a block buffer over the raw frame bytes
        /*
         param 1: structureAllocator, kCFAllocatorDefault
         param 2: memoryBlock, the frame bytes
         param 3: block length
         param 4: blockAllocator, kCFAllocatorNull (the buffer borrows frame1's storage without copying)
         param 5: customBlockSource, nil
         param 6: offsetToData
         param 7: dataLength
         param 8: feature and control flags
         param 9: out blockBuffer, must not be nil
         */
        let blockState = CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault,
                                                            memoryBlock: &frame1,
                                                            blockLength: Int(size),
                                                            blockAllocator: kCFAllocatorNull,
                                                            customBlockSource: nil,
                                                            offsetToData: 0,
                                                            dataLength: Int(size),
                                                            flags: 0,
                                                            blockBufferOut: &blockBuffer)
        if blockState != noErr {
            print("failed to create blockBuffer")
        }
        var sampleSizeArray: [Int] = [Int(size)]
        var sampleBuffer: CMSampleBuffer?
        // Wrap the block buffer into a sample buffer
        /*
         param 1: allocator, kCFAllocatorDefault
         param 2: the blockBuffer holding the data to decode, must not be nil
         param 3: formatDescription, the video format
         param 4: numSamples, sample count
         param 5: numSampleTimingEntries, must be 0, 1, or numSamples
         param 6: sampleTimingArray, nil here
         param 7: numSampleSizeEntries, 1 here
         param 8: sampleSizeArray
         param 9: out sampleBuffer
         */
        let readyState = CMSampleBufferCreateReady(allocator: kCFAllocatorDefault,
                                                   dataBuffer: blockBuffer,
                                                   formatDescription: decodeDesc,
                                                   sampleCount: CMItemCount(1),
                                                   sampleTimingEntryCount: CMItemCount(),
                                                   sampleTimingArray: nil,
                                                   sampleSizeEntryCount: CMItemCount(1),
                                                   sampleSizeArray: &sampleSizeArray,
                                                   sampleBufferOut: &sampleBuffer)
        guard let buffer = sampleBuffer, readyState == kCMBlockBufferNoErr else {
            print("failed to create sampleBuffer")
            return
        }
        if returnType == .sampleBuffer {
            // Hand the still-encoded sample buffer straight to AVSampleBufferDisplayLayer
            if let attachmentArray = CMSampleBufferGetSampleAttachmentsArray(buffer, createIfNecessary: true) {
                let dic = unsafeBitCast(CFArrayGetValueAtIndex(attachmentArray, 0), to: CFMutableDictionary.self)
                CFDictionarySetValue(dic,
                                     Unmanaged.passUnretained(kCMSampleAttachmentKey_DisplayImmediately).toOpaque(),
                                     Unmanaged.passUnretained(kCFBooleanTrue).toOpaque())
            }
            videoDecodeSampleBufferCallback?(buffer)
            return
        }
        // Decode into a CVPixelBuffer
        /*
         param 1: the decompression session
         param 2: source CMSampleBuffer containing one or more video frames
         param 3: decode flags
         param 4: frameRefcon, passed through to the output callback
         param 5: out flags indicating sync/async decoding
         */
        let sourceFrame: UnsafeMutableRawPointer? = nil
        var infoFlag = VTDecodeInfoFlags.asynchronous
        let decodeState = VTDecompressionSessionDecodeFrame(self.decompressionSession!,
                                                            sampleBuffer: buffer,
                                                            flags: VTDecodeFrameFlags._EnableAsynchronousDecompression,
                                                            frameRefcon: sourceFrame,
                                                            infoFlagsOut: &infoFlag)
        if decodeState != noErr {
            print("decode failed")
        }
    }
    private func initDecoder() -> Bool {
        if decompressionSession != nil {
            return true
        }
        guard spsData != nil, ppsData != nil else {
            return false
        }
        // Strip the 4-byte startcode off sps/pps
        let sps = [UInt8](spsData!.dropFirst(4))
        let pps = [UInt8](ppsData!.dropFirst(4))
        /**
         Build the format description from sps/pps
         param allocator kCFAllocatorDefault
         param parameterSetCount 2
         param parameterSetPointers pointers to the parameter sets
         param parameterSetSizes sizes of the parameter sets
         param nalUnitHeaderLength length of the NALU length header, 4
         param formatDescriptionOut the decoder description
         return status
         */
        // Create the description inside the closures so the buffer base addresses stay valid
        let descriptionState = sps.withUnsafeBufferPointer { spsPointer -> OSStatus in
            pps.withUnsafeBufferPointer { ppsPointer -> OSStatus in
                let parameterSets = [spsPointer.baseAddress!, ppsPointer.baseAddress!]
                let parameterSizes = [sps.count, pps.count]
                return CMVideoFormatDescriptionCreateFromH264ParameterSets(allocator: kCFAllocatorDefault,
                                                                           parameterSetCount: 2,
                                                                           parameterSetPointers: parameterSets,
                                                                           parameterSetSizes: parameterSizes,
                                                                           nalUnitHeaderLength: 4,
                                                                           formatDescriptionOut: &decodeDesc)
            }
        }
        if descriptionState != noErr {
            print("failed to create format description")
            return false
        }
        // Set up the decode callback
        /*
         VTDecompressionOutputCallbackRecord is a simple struct with a pointer (decompressionOutputCallback)
         to the method called when a frame finishes decompressing, plus the instance that can be found
         from the callback (decompressionOutputRefCon)
         */
        setupCallBack()
        var callbackRecord = VTDecompressionOutputCallbackRecord(decompressionOutputCallback: callback, decompressionOutputRefCon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self))
        /*
         Decode parameters:
         * kCVPixelBufferPixelFormatTypeKey: output pixel format; known-working values are
           kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange (420v),
           kCVPixelFormatType_420YpCbCr8BiPlanarFullRange (420f),
           kCVPixelFormatType_32BGRA (iOS converts YUV to BGRA internally).
           YUV420 is generally used for SD video and YUV422 for HD; under the same conditions,
           YUV420 costs less to compute and transfer than YUV422.
         * kCVPixelBufferWidthKey / kCVPixelBufferHeightKey: the source resolution, width * height
         * kCVPixelBufferOpenGLCompatibilityKey: lets decoded images be drawn directly in an OpenGL
           context instead of copying data between the bus and the CPU; sometimes called a zero-copy
           path because no decoded image is copied while drawing.
         */
        let imageBufferAttributes = [
            kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
            // kCVPixelBufferOpenGLCompatibilityKey: true
        ] as [CFString : Any]
        // Create the session
        /*!
         @function VTDecompressionSessionCreate
         @abstract Creates a session for decompressing video frames.
         @discussion Decompressed frames are emitted through the output callback.
         @param allocator memory allocator; kCFAllocatorDefault
         @param videoFormatDescription describes the source video frames
         @param videoDecoderSpecification requires a specific decoder; nil
         @param destinationImageBufferAttributes requirements for the output pixel buffers
         @param outputCallback callback invoked with decompressed frames
         @param decompressionSessionOut receives the new session
         */
        let state = VTDecompressionSessionCreate(allocator: kCFAllocatorDefault,
                                                 formatDescription: decodeDesc!,
                                                 decoderSpecification: nil,
                                                 imageBufferAttributes: imageBufferAttributes as CFDictionary,
                                                 outputCallback: &callbackRecord,
                                                 decompressionSessionOut: &decompressionSession)
        if state != noErr {
            print("failed to create decompression session")
            return false
        }
        VTSessionSetProperty(self.decompressionSession!, key: kVTDecompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        return true
    }
    // Decode-complete callback
    private func setupCallBack() {
        /*
         The VTDecompressionOutputCallback takes seven parameters:
         param 1: the callback's refcon
         param 2: the frame's refcon
         param 3: a status flag (with undefined codes)
         param 4: flags indicating sync/async decoding, or whether the decoder dropped the frame
         param 5: the actual image buffer
         param 6: presentation timestamp
         param 7: presentation duration
         */
        // (UnsafeMutableRawPointer?, UnsafeMutableRawPointer?, OSStatus, VTDecodeInfoFlags, CVImageBuffer?, CMTime, CMTime) -> Void
        callback = { decompressionOutputRefCon, sourceFrameRefCon, status, infoFlags, imageBuffer, presentationTimeStamp, presentationDuration in
            let decoder: LYH264Decoder = unsafeBitCast(decompressionOutputRefCon, to: LYH264Decoder.self)
            guard imageBuffer != nil else {
                return
            }
            if let block = decoder.videoDecodeCallback {
                decoder.callBackQueue.async {
                    block(imageBuffer)
                }
            }
        }
    }

    deinit {
        if decompressionSession != nil {
            VTDecompressionSessionInvalidate(decompressionSession!)
            decompressionSession = nil
        }
    }
}
socket
The only real work is handling sticky TCP packets, which I also covered in my previous article; take a look if you're interested.
import CocoaAsyncSocket
import SVProgressHUD
import Foundation

fileprivate let ipStr = "172.20.10.1" // server address (the iOS personal-hotspot host address here)
fileprivate let myPort: UInt16 = 12345
/// dictionary key "type"
let KSocketDataType = "type"
/// dictionary key "data"
let KSocketDataKey = "data"
fileprivate let bodyLength = 4 // size of the length-prefix header, in bytes

enum LYSocketDataType: String, CaseIterable {
    /// ping
    case ping
    /// image
    case image
    /// container for recorded video frame data
    case sampleBuffer
    /// string message
    case string
    /// single tap
    case touch
    /// double tap
    case doubleTap
    /// swipe
    case slide
}

protocol SocketManagerDelegate: AnyObject {
    func receivedImage(_ image: UIImage)
    func receivedPoint(_ x: Double, _ y: Double)
    func receivedKeyWord(_ keyWord: String)
    func receivedSampleBuffer(_ sampleBuffer: Data)
}

class SocketManager: NSObject {
    // MARK: - property
    private var socket: GCDAsyncSocket!
    // When acting as the server, the connected client
    private var clientSocket: GCDAsyncSocket?
    /// whether this instance is the server
    public var isServer: Bool
    /// delegate
    public weak var delegate: SocketManagerDelegate?
    /// whether a client is connected
    private(set) var isConnectClient = false
    // Receive buffer, used to split sticky packets
    private lazy var dataBuffer = Data()
    // Heartbeat timer
    private lazy var timer: Timer = {
        let timer = Timer(timeInterval: 30, target: self, selector: #selector(timerAction), userInfo: nil, repeats: true)
        RunLoop.current.add(timer, forMode: .common)
        return timer
    }()
    // MARK: - cycle
    init(isServer: Bool) {
        self.isServer = isServer
        super.init()
        let queue = DispatchQueue(label: "com.lanyou.socket")
        socket = GCDAsyncSocket(delegate: self, delegateQueue: queue)
        if isServer {
            setupServer()
        } else {
            setupClient()
        }
    }

    fileprivate func setupServer() {
        do {
            try socket.accept(onPort: myPort)
        } catch {
            print("socket server failed to start: \(error.localizedDescription)")
        }
    }

    fileprivate func setupClient() {
        do {
            try socket.connect(toHost: ipStr, onPort: myPort)
        } catch {
            print("socket failed to connect to server: \(error.localizedDescription)")
        }
    }

    deinit {
        dispose()
    }

    public func dispose() {
        socket.disconnect()
        clientSocket?.disconnect()
        if !isServer {
            timer.invalidate()
        }
    }
    // MARK: - sendData
    public func sendParam(_ param: [String : Any]) {
        if isServer && !isConnectClient {
            return
        }
        var dict = param
        // Base64-encode any Data values so the dictionary is JSON-serializable
        for (key, value) in dict {
            if let dataValue = value as? Data {
                if let base64String = dataToBase64String(dataValue) {
                    dict[key] = base64String
                }
            }
        }
        // dict -> data
        do {
            let jsonData = try JSONSerialization.data(withJSONObject: dict, options: [])
            let jsonString = String(data: jsonData, encoding: .utf8)
            guard let json = jsonString else { return }
            if let data = json.data(using: .utf8) {
                sendData(data)
            }
        } catch {
            print("failed to serialize dictionary: \(error)")
        }
    }

    fileprivate func dataToBase64String(_ data: Data) -> String? {
        return data.base64EncodedString()
    }

    // Frame each message with a length prefix so the receiver can split sticky packets
    fileprivate func sendData(_ data: Data) {
        // Prepend a 4-byte payload length to the packet
        var messageLength: UInt32 = UInt32(data.count)
        let lengthData = Data(bytes: &messageLength, count: MemoryLayout<UInt32>.size) // 4 bytes
        var sendData = lengthData
        sendData.append(data)
        if isServer {
            clientSocket?.write(sendData, withTimeout: -1, tag: 0)
        } else {
            socket.write(sendData, withTimeout: -1, tag: 0)
        }
    }

    fileprivate func jsonToDictionary(_ jsonString: String) -> [String : Any]? {
        if let jsonData = jsonString.data(using: .utf8) {
            do {
                if let jsonDictionary = try JSONSerialization.jsonObject(with: jsonData, options: []) as? [String: Any] {
                    return jsonDictionary
                } else {
                    print("failed to cast JSON to dictionary")
                }
            } catch {
                print("JSON-to-dictionary error: \(error.localizedDescription)")
            }
        }
        return nil
    }

    // MARK: - Timer
    @objc fileprivate func timerAction() {
        sendParam([KSocketDataType : LYSocketDataType.ping.rawValue])
    }
}
extension SocketManager: GCDAsyncSocketDelegate {
    func socket(_ sock: GCDAsyncSocket, didAcceptNewSocket newSocket: GCDAsyncSocket) {
        print("didAcceptNewSocket: \(newSocket.connectedHost ?? "")")
        DispatchQueue.main.async {
            SVProgressHUD.showSuccess(withStatus: "didAcceptNewSocket: \(newSocket.connectedHost ?? "")")
        }
        if isServer {
            clientSocket = newSocket
            isConnectClient = true
            newSocket.readData(withTimeout: -1, tag: 0)
        }
    }

    func socketDidDisconnect(_ sock: GCDAsyncSocket, withError err: Error?) {
        if let errStr = err?.localizedDescription {
            print("disconnected with error: \(errStr)")
            DispatchQueue.main.async {
                SVProgressHUD.showError(withStatus: errStr)
            }
        }
    }

    func socket(_ sock: GCDAsyncSocket, didConnectToHost host: String, port: UInt16) {
        print("connected to server: \(host):\(port)")
        sock.readData(withTimeout: -1, tag: 0)
        if !isServer {
            timer.fire()
        }
    }

    // MARK: - splitting sticky packets
    func socket(_ sock: GCDAsyncSocket, didRead data: Data, withTag tag: Int) {
        // Append to the receive buffer first
        dataBuffer.append(data)
        while true {
            guard dataBuffer.count >= bodyLength else { break } // need at least the 4-byte length header
            // Read the header, i.e. the payload length
            var messageLength: UInt32 = 0
            (dataBuffer as NSData).getBytes(&messageLength, length: MemoryLayout<UInt32>.size)
            guard dataBuffer.count >= Int(messageLength) + bodyLength else { break } // wait until the full message has arrived
            // Extract the complete message
            let messageData = dataBuffer.subdata(in: bodyLength..<(Int(messageLength) + bodyLength))
            // Handle it
            handleData(messageData, socket: sock)
            // Drop the consumed bytes
            dataBuffer = Data(dataBuffer.subdata(in: (Int(messageLength) + bodyLength)..<dataBuffer.count))
        }
        // Keep listening
        sock.readData(withTimeout: -1, tag: 0)
    }
    fileprivate func handleData(_ data: Data, socket: GCDAsyncSocket) {
        // dict, e.g. {"type" : "image", "data" : "base64"}
        guard let receivedString = String(data: data, encoding: .utf8) else {
            return
        }
        guard let dic = jsonToDictionary(receivedString) else {
            return
        }
        // Dispatch by event type
        if let type = dic[KSocketDataType] as? LYSocketDataType.RawValue {
            switch type {
            case LYSocketDataType.string.rawValue: // string message, e.g. ping/pong
                guard let str = dic[KSocketDataKey] as? String else { return }
                if isServer {
                    delegate?.receivedKeyWord(str)
                }
                /* DispatchQueue.main.async {
                    SVProgressHUD.showSuccess(withStatus: self.isServer ? "server received: \(str)" : "client received: \(str)")
                } */
            case LYSocketDataType.image.rawValue:
                // base64 -> image
                let dataString = dic[KSocketDataKey] as? String
                guard let base64Str = dataString else { return }
                guard let imageData = Data(base64Encoded: base64Str) else { return }
                guard let image = UIImage(data: imageData) else { return }
                delegate?.receivedImage(image)
            case LYSocketDataType.touch.rawValue, LYSocketDataType.slide.rawValue: // touch events
                handlerTouch(type: type, jsonDic: dic)
            case LYSocketDataType.sampleBuffer.rawValue:
                handlerSample(type: type, jsonDic: dic)
            default:
                print("- handleData -")
            }
        }
    }

    fileprivate func handlerTouch(type: String, jsonDic: [String : Any]) {
        if isServer {
            guard let dataDic = jsonDic[KSocketDataKey] as? [String : Any] else { return }
            guard let x = dataDic["x"] as? String, let y = dataDic["y"] as? String else { return }
            delegate?.receivedPoint(Double(x) ?? 0, Double(y) ?? 0)
            /* DispatchQueue.main.async {
                SVProgressHUD.showSuccess(withStatus: "client \(type) x: \(x) y: \(y)")
            } */
        }
    }

    fileprivate func handlerSample(type: String, jsonDic: [String : Any]) {
        let dataString = jsonDic[KSocketDataKey] as? String
        guard let base64Str = dataString else { return }
        guard let imageData = Data(base64Encoded: base64Str) else { return }
        delegate?.receivedSampleBuffer(imageData)
    }

    func socket(_ sock: GCDAsyncSocket, didWriteDataWithTag tag: Int) {
        // print("data sent")
    }
}
Decoding
Implement the socket delegate method:
func receivedSampleBuffer(_ sampleBuffer: Data) {
    self.decoder.decode(data: sampleBuffer)
}
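For context, this delegate method lives on the viewer side, which runs the client end of the socket. A minimal sketch of that wiring, assuming a plain view controller (the ViewerController name and stub methods are hypothetical, and decoder/displayLayer are the properties defined in the next section):

class ViewerController: UIViewController, SocketManagerDelegate {
    private var socketManager: SocketManager?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Client end: connects to ipStr:myPort as configured in SocketManager above
        let manager = SocketManager(isServer: false)
        manager.delegate = self
        socketManager = manager
    }

    func receivedSampleBuffer(_ sampleBuffer: Data) {
        self.decoder.decode(data: sampleBuffer)
    }

    // Remaining delegate requirements, unused in this sketch
    func receivedImage(_ image: UIImage) {}
    func receivedPoint(_ x: Double, _ y: Double) {}
    func receivedKeyWord(_ keyWord: String) {}
}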
Playback with AVSampleBufferDisplayLayer
private var displayLayer: AVSampleBufferDisplayLayer?

private lazy var decoder: LYH264Decoder = {
    let decoder = LYH264Decoder(width: 375, height: 852)
    decoder.returnType = .sampleBuffer
    decoder.videoDecodeSampleBufferCallback = { [weak self] (buffer) in
        guard let displayLayer = self?.displayLayer, let buffer = buffer else {
            return
        }
        if displayLayer.isReadyForMoreMediaData {
            displayLayer.enqueue(buffer)
        } else {
            print("failed to enqueue H264 frame for playback")
        }
    }
    return decoder
}()
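One detail worth noting: displayLayer above is only declared, never created. A minimal sketch of attaching it, assuming this helper is called once from the viewer's viewDidLoad (hypothetical setup, not from the original):

import AVFoundation

private func setupDisplayLayer() {
    // Create the display layer once and attach it full-screen to the view hierarchy
    let layer = AVSampleBufferDisplayLayer()
    layer.frame = view.bounds
    layer.videoGravity = .resizeAspect
    view.layer.addSublayer(layer)
    displayLayer = layer
}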
Wrap-up
All of the code above is demo-grade, and I've only just switched to Swift, so adapt it to your own project as needed.