
iOS - Building Live Streaming: Complete Steps and Implementation

2017-04-16  zerocc2014

Preface

A good memory is no match for a written record. I've had a bit of free time recently to organize what I've learned, and I'll be writing up more summaries gradually, starting with audio and video, an area I'm still not very familiar with. 2016 was called the year of live streaming, so none of this technology is really new anymore; as a latecomer I've only recently started studying it. Quite a few C function calls are involved. For encoding and container formats, C is generally the better choice, being cross-platform and more efficient, and I may try a C implementation later. This article covers only one part of the live-streaming stack and is to be continued. Corrections are welcome, and if you find it helpful, please leave a like ❤️.

Technical Overview

Audio and Video Capture

Capture Overview

Capture Steps
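
With AVFoundation, the capture flow breaks down into the following steps, which map one-to-one onto the numbered comments in the code below:

1. Create an AVCaptureSession and choose a session preset.
2. Wrap the camera and microphone AVCaptureDevices in AVCaptureDeviceInputs and add them to the session.
3. Create an AVCaptureVideoDataOutput and an AVCaptureAudioDataOutput, set their sample-buffer delegates and serial queues, and add them to the session.
4. Grab the video AVCaptureConnection, used to tell video from audio in the delegate callback and to fix the video orientation.
5. Insert an AVCaptureVideoPreviewLayer into the preview view.
6. Call startRunning on the session.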

Capture Code

//
//  CCVideoCapture.m
//  01.视频采集
//
//  Created by zerocc on 2017/3/29.
//  Copyright © 2017 zerocc. All rights reserved.
//

#import "CCVideoCapture.h"
#import <AVFoundation/AVFoundation.h>

@interface CCVideoCapture () <AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureAudioDataOutputSampleBufferDelegate>
@property (nonatomic, strong) AVCaptureSession *session;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *layer;
@property (nonatomic, strong) AVCaptureConnection *videoConnection;

@end

@implementation CCVideoCapture

- (void)startCapturing:(UIView *)preView
{    
    // ===============  Video/audio capture  ===========================
    // 1. Create the capture session
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    session.sessionPreset = AVCaptureSessionPreset1280x720;
    self.session = session;
    
    
    // 2. Set up the audio and video inputs
    AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
 
    NSError *error;
    AVCaptureDeviceInput *videoInput = [[AVCaptureDeviceInput alloc] initWithDevice:videoDevice error:&error];
    if ([self.session canAddInput:videoInput]) {
        [self.session addInput:videoInput];
    }
    
    AVCaptureDevice *audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    AVCaptureDeviceInput *audioInput = [[AVCaptureDeviceInput alloc] initWithDevice:audioDevice error:&error];
    if ([self.session canAddInput:audioInput]) {
        [self.session addInput:audioInput];
    }

    
    // 3. Set up the audio and video outputs
    AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
    dispatch_queue_t videoQueue = dispatch_queue_create("Video Capture Queue", DISPATCH_QUEUE_SERIAL);
    [videoOutput setSampleBufferDelegate:self queue:videoQueue];
    [videoOutput setAlwaysDiscardsLateVideoFrames:YES];
    if ([session canAddOutput:videoOutput]) {
        [self.session addOutput:videoOutput];
    }
    
    AVCaptureAudioDataOutput *audioOutput = [[AVCaptureAudioDataOutput alloc] init];
    dispatch_queue_t audioQueue = dispatch_queue_create("Audio Capture Queue", DISPATCH_QUEUE_SERIAL);
    [audioOutput setSampleBufferDelegate:self queue:audioQueue];
    if ([session canAddOutput:audioOutput]) {
        [session addOutput:audioOutput];
    }
    
    // 4. Grab the video connection, used later to tell video buffers from audio buffers.
    //    Also fix the video orientation (the default is rotated); this must be set
    //    after the output has been added to the session.
    AVCaptureConnection *videoConnection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
    self.videoConnection = videoConnection;
    if (videoConnection.isVideoOrientationSupported) {
        videoConnection.videoOrientation = AVCaptureVideoOrientationPortrait;
    } else {
        NSLog(@"Setting the video orientation is not supported");
    }
    
    // 5. Add the preview layer
    AVCaptureVideoPreviewLayer *layer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
    self.layer = layer;
    layer.frame = preView.bounds;
    [preView.layer insertSublayer:layer atIndex:0];
    
    // 6. Start capturing
    [self.session startRunning];
}

// Called when a video frame is dropped (e.g. under load)
- (void)captureOutput:(AVCaptureOutput *)captureOutput
  didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    if (self.videoConnection == connection) {
        dispatch_sync(queue, ^{
            // Video encoding will be hooked in here later.
        });
        
        NSLog(@"Captured video data");
    } else {
        dispatch_sync(queue, ^{
            // Audio encoding will be hooked in here later.
        });

        NSLog(@"Captured audio data");
    }
}

- (void)stopCapturing
{
    [self.session stopRunning];
    [self.layer removeFromSuperlayer];
}

@end
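
As a minimal usage sketch of the class above (the view controller and its property are hypothetical; only startCapturing: and stopCapturing come from CCVideoCapture):

#import <UIKit/UIKit.h>
#import "CCVideoCapture.h"

// Hypothetical host view controller, for illustration only.
@interface CCLiveViewController : UIViewController
@property (nonatomic, strong) CCVideoCapture *videoCapture;
@end

@implementation CCLiveViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    self.videoCapture = [[CCVideoCapture alloc] init];
    // Inserts the preview layer into self.view and starts the session.
    [self.videoCapture startCapturing:self.view];
}

- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
    [self.videoCapture stopCapturing];
}

@end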

Audio and Video Encoding

Introduction to Audio/Video Compression Coding
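
In brief: raw captured frames are far too large to push over a network, so the video sample buffers are compressed to H.264 with VideoToolbox's hardware encoder, and the PCM audio is compressed to AAC with AudioToolbox, before being muxed and streamed.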

Video Compression Coding: H.264 (AVC)

//
//  CCH264Encoder.h
//  01.视频采集
//
//  Created by zerocc on 2017/4/4.
//  Copyright © 2017 zerocc. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <CoreMedia/CoreMedia.h>

@interface CCH264Encoder : NSObject

- (void)prepareEncodeWithWidth:(int)width height:(int)height;

- (void)encodeFrame:(CMSampleBufferRef)sampleBuffer;

- (void)endEncode;

@end

//
//  CCH264Encoder.m
//  01.视频采集
//
//  Created by zerocc on 2017/4/4.
//  Copyright © 2017 zerocc. All rights reserved.
//

#import "CCH264Encoder.h"
#import <VideoToolbox/VideoToolbox.h>
#import <AudioToolbox/AudioToolbox.h>

@interface CCH264Encoder () {
    int     _spsppsFound;
}

@property (nonatomic, assign) VTCompressionSessionRef compressionSession;  // A Core Foundation object, i.e. a C-level object. VTCompressionSessionRef is itself a pointer type, so no extra * is needed; strong only applies to Objective-C objects, hence assign.
@property (nonatomic, assign) int frameIndex;

@property (nonatomic, strong) NSFileHandle *fileHandle;
@property (nonatomic, strong) NSString *documentDictionary;

@end

@implementation CCH264Encoder

#pragma mark - lazyload

- (NSFileHandle *)fileHandle {
    if (!_fileHandle) {
        // Write a raw H.264 elementary stream here: video only, no audio track.
        NSString *filePath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, true) firstObject] stringByAppendingPathComponent:@"video.h264"];
        NSFileManager *fileManager = [NSFileManager defaultManager];
        BOOL isExistPath = [fileManager fileExistsAtPath:filePath];
        if (isExistPath) {
            [fileManager removeItemAtPath:filePath error:nil];
        }
        
        [[NSFileManager defaultManager] createFileAtPath:filePath contents:nil attributes:nil];
        _fileHandle = [NSFileHandle fileHandleForWritingAtPath:filePath];
    }
    
    return _fileHandle;
}


/**
 Prepare the encoder

 @param width capture width
 @param height capture height
 */
- (void)prepareEncodeWithWidth:(int)width height:(int)height
{
    // 0. Start from frame 0
    self.frameIndex = 0;
    
    // 1. Create the VTCompressionSessionRef
    
    /**
     VTCompressionSessionCreate

     @param NULL CFAllocatorRef - Core Foundation allocator; NULL means the default
     @param width int32_t - video width
     @param height int32_t - video height
     @param kCMVideoCodecType_H264 CMVideoCodecType - the codec to encode with
     @param NULL CFDictionaryRef - encoderSpecification
     @param NULL CFDictionaryRef - sourceImageBufferAttributes
     @param NULL CFAllocatorRef - compressedDataAllocator
     @param didCompressionCallback VTCompressionOutputCallback - C callback invoked for each encoded frame
     @param _Nullable void * - user data passed through to the callback
     @return session
     */
    VTCompressionSessionCreate(NULL,
                               width,
                               height,
                               kCMVideoCodecType_H264,
                               NULL, NULL, NULL,
                               didCompressionCallback,
                               (__bridge void * _Nullable)(self),
                               &_compressionSession);
    
    // 2. Set session properties
      // 2.1 Real-time encoding
    VTSessionSetProperty(_compressionSession,
                         kVTCompressionPropertyKey_RealTime,
                         kCFBooleanTrue);
      // 2.2 Expected frame rate
    VTSessionSetProperty(_compressionSession,
                         kVTCompressionPropertyKey_ExpectedFrameRate,
                         (__bridge CFTypeRef _Nonnull)(@24));
      // 2.3 Average bit rate: 1,500,000 bits per second
    VTSessionSetProperty(_compressionSession,
                         kVTCompressionPropertyKey_AverageBitRate,
                         (__bridge CFTypeRef _Nonnull)(@1500000));
      // 2.4 Hard rate limit: an array of [bytes, seconds]; 1500000 bits / 8 = bytes per 1 second
    VTSessionSetProperty(_compressionSession,
                         kVTCompressionPropertyKey_DataRateLimits,
                         (__bridge CFTypeRef _Nonnull)(@[@(1500000/8), @1]));
      // 2.5 GOP size: the maximum interval between key frames (I-frames)
    VTSessionSetProperty(_compressionSession,
                         kVTCompressionPropertyKey_MaxKeyFrameInterval,
                         (__bridge CFTypeRef _Nonnull)(@20));

    // 3. Tell the encoder to get ready to encode frames
    VTCompressionSessionPrepareToEncodeFrames(_compressionSession);
}


/**
 Encode one frame

 @param sampleBuffer CMSampleBufferRef from the capture callback
 */
- (void)encodeFrame:(CMSampleBufferRef)sampleBuffer
{
    
    // Convert the CMSampleBufferRef into the CVImageBufferRef that carries the pixel data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CMTime pts = CMTimeMake(self.frameIndex++, 24);  // presentation timestamp; the index must advance per frame
    CMTime duration = kCMTimeInvalid;
    VTEncodeInfoFlags flags;
    
    /**
     Hardware encoder - hand the imageBuffer carrying the frame data to the encoder.
     
     @param session#> VTCompressionSessionRef
     @param imageBuffer#> CVImageBufferRef
     @param presentationTimeStamp#> CMTime - pts
     @param duration#> CMTime - pass an invalid constant time
     @param frameProperties#> CFDictionaryRef
     @param sourceFrameRefCon#> void * - becomes the second parameter of the output callback
     @param infoFlagsOut#> VTEncodeInfoFlags - receives flags about the encode operation
     @return OSStatus
     */
    VTCompressionSessionEncodeFrame(self.compressionSession,
                                    imageBuffer,
                                    pts,
                                    duration,
                                    NULL,
                                    NULL,
                                    &flags);
    NSLog(@"Handed one frame to the encoder");
}

#pragma mark - Encoded-data output callback (C function)

void didCompressionCallback (void * CM_NULLABLE outputCallbackRefCon,
                             void * CM_NULLABLE sourceFrameRefCon,
                             OSStatus status,
                             VTEncodeInfoFlags infoFlags,
                             CM_NULLABLE CMSampleBufferRef sampleBuffer)
{
    // self cannot be used inside a C function; recover the current CCH264Encoder
    // instance from the refCon that was passed to VTCompressionSessionCreate.
    CCH264Encoder *encoder = (__bridge CCH264Encoder *)(outputCallbackRefCon);

    // 1. Check whether this frame is a key frame
    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
    CFDictionaryRef dict = CFArrayGetValueAtIndex(attachments, 0);
    BOOL isKeyFrame = !CFDictionaryContainsKey(dict, kCMSampleAttachmentKey_NotSync);
    
    // 2. For a key frame, extract the SPS/PPS parameter sets (they carry the metadata
    //    a decoder needs to parse the H.264 stream) and write them to the file first.
    if (isKeyFrame && !encoder->_spsppsFound) {
        encoder->_spsppsFound = 1;
        // 2.1 Get the CMFormatDescriptionRef from the CMSampleBufferRef
        CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
        
        // 2.2 Extract the SPS and PPS data
        const uint8_t *spsOut;
        size_t spsSize, spsCount;
        CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format,
                                                           0,
                                                           &spsOut,
                                                           &spsSize,
                                                           &spsCount,
                                                           NULL);
        const uint8_t *ppsOut;
        size_t ppsSize, ppsCount;
        CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format,
                                                           1,
                                                           &ppsOut,
                                                           &ppsSize,
                                                           &ppsCount,
                                                           NULL);
        
        // 2.3 Wrap the SPS/PPS in NSData
        NSData *spsData = [NSData dataWithBytes:spsOut length:spsSize];
        NSData *ppsData = [NSData dataWithBytes:ppsOut length:ppsSize];
        
        // 2.4 Write to the file (every NALU is prefixed with the 0x00 00 00 01 start code)
        [encoder writeH264Data:spsData];
        [encoder writeH264Data:ppsData];
    }
    
    // 3. Get the encoded frame data and write it to the file
     // 3.1 Get the CMBlockBufferRef
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    
     // 3.2 Get the base address and total length of the block buffer
    size_t totalLength = 0;
    char *dataPointer;
    CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &totalLength, &dataPointer);
    
     // 3.3 One frame may contain several NALUs (slices), so walk the whole buffer
    static const int H264HeaderLength = 4;  // the AVCC length prefix is 4 bytes
    size_t bufferOffset = 0;
    while (bufferOffset < totalLength - H264HeaderLength) {
        // 3.4 Copy the 4-byte length prefix to get this NALU's length
        uint32_t NALULength = 0;
        memcpy(&NALULength, dataPointer + bufferOffset, H264HeaderLength);
        
        // H.264 data is big-endian; convert to host byte order. iOS is little-endian,
        // while network streams and files are usually big-endian.
        NALULength = CFSwapInt32BigToHost(NALULength);
        
        // 3.5 Create an NSData for this NALU's payload
        NSData *data = [NSData dataWithBytes:(dataPointer + bufferOffset + H264HeaderLength) length:NALULength];
        
        // 3.6 Write it to the file
        [encoder writeH264Data:data];
        
        // 3.7 Advance past this NALU
        bufferOffset += NALULength + H264HeaderLength;
    }
    NSLog(@"........ encoded one frame");
};

- (void)writeH264Data:(NSData *)data
{
    // 1. The Annex B start code that prefixes every NALU
    const char bytes[] = "\x00\x00\x00\x01";
    
    // 2. Build the header data.
    //    sizeof(bytes) - 1 because a C string literal carries a trailing \0.
    NSData *headerData = [NSData dataWithBytes:bytes length:(sizeof(bytes) - 1)];
    [self.fileHandle writeData:headerData];
    [self.fileHandle writeData:data];
}

- (void)endEncode
{
    VTCompressionSessionCompleteFrames(self.compressionSession, kCMTimeInvalid); // flush pending frames
    VTCompressionSessionInvalidate(self.compressionSession);
    CFRelease(_compressionSession);
    _compressionSession = NULL;
}
@end

(Screenshots: in Xcode's Devices window, download the app's container to the desktop and right-click "Show Package Contents"; drag the sandboxed files into VLC to test playback.)
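
To locate those files more quickly, a one-line sketch that logs the sandbox Documents path (run it anywhere in the app):

    // Log the app's Documents directory so video.h264 / audio.aac can be found.
    NSLog(@"Documents: %@", [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                                 NSUserDomainMask, YES) firstObject]);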

Audio Coding: AAC

(Figure: ADTS header structure)
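
Each AAC packet written below is prefixed with a 7-byte ADTS header (built by adtsDataForPacketLength: further down). As a sanity check, here is a small hypothetical helper that parses such a header back out; the bit layout is the standard ADTS one:

#import <Foundation/Foundation.h>

// Hypothetical debugging helper: decode the fields of a 7-byte ADTS header.
static void CCLogADTSHeader(const uint8_t *p) {
    unsigned syncword = (p[0] << 4) | (p[1] >> 4);              // 12 bits, must be 0xFFF
    unsigned profile  = ((p[2] >> 6) & 0x3) + 1;                // stored as profile - 1; 2 = AAC LC
    unsigned freqIdx  = (p[2] >> 2) & 0xF;                      // sampling frequency index; 4 = 44.1 kHz
    unsigned chanCfg  = ((p[2] & 0x1) << 2) | (p[3] >> 6);      // channel configuration; 1 = mono
    unsigned frameLen = ((p[3] & 0x3) << 11) | (p[4] << 3) | (p[5] >> 5); // total length incl. header
    NSLog(@"syncword=0x%X profile=%u freqIdx=%u chanCfg=%u frameLength=%u",
          syncword, profile, freqIdx, chanCfg, frameLen);
}
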
//
//  CCAACEncoder.h
//  01.视频采集
//
//  Created by zerocc on 2017/4/4.
//  Copyright © 2017 zerocc. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

@interface CCAACEncoder : NSObject
- (void)encodeAAC:(CMSampleBufferRef)sampleBuffer;
- (void)endEncodeAAC;

@end

//
//  CCAACEncoder.m
//  01.视频采集
//
//  Created by zerocc on 2017/4/4.
//  Copyright © 2017 zerocc. All rights reserved.
//

#import "CCAACEncoder.h"

@interface CCAACEncoder()
@property (nonatomic, assign) AudioConverterRef audioConverter;
@property (nonatomic, assign) uint8_t *aacBuffer;
@property (nonatomic, assign) NSUInteger aacBufferSize;
@property (nonatomic, assign) char *pcmBuffer;
@property (nonatomic, assign) size_t pcmBufferSize;
@property (nonatomic, strong) NSFileHandle *audioFileHandle;

@end

@implementation CCAACEncoder

- (void)dealloc {
    AudioConverterDispose(_audioConverter);
    free(_aacBuffer);
}

- (NSFileHandle *)audioFileHandle {
    if (!_audioFileHandle) {
        NSString *audioFile = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingPathComponent:@"audio.aac"];
        [[NSFileManager defaultManager] removeItemAtPath:audioFile error:nil];
        [[NSFileManager defaultManager] createFileAtPath:audioFile contents:nil attributes:nil];
        _audioFileHandle = [NSFileHandle fileHandleForWritingAtPath:audioFile];
    }
    
    return _audioFileHandle;
}

- (id)init {
    if (self = [super init]) {
        _audioConverter = NULL;
        _pcmBufferSize = 0;
        _pcmBuffer = NULL;
        _aacBufferSize = 1024;
        _aacBuffer = malloc(_aacBufferSize * sizeof(uint8_t));
        memset(_aacBuffer, 0, _aacBufferSize);
    }
    
    return self;
}

- (void)encodeAAC:(CMSampleBufferRef)sampleBuffer
{
    CFRetain(sampleBuffer);
    // 1. Lazily create the audio converter (the actual AAC encoder)
    if (!_audioConverter) {
        // 1.1 Input format: taken from the incoming PCM sample buffer
        AudioStreamBasicDescription inputAudioStreamBasicDescription = *CMAudioFormatDescriptionGetStreamBasicDescription((CMAudioFormatDescriptionRef)CMSampleBufferGetFormatDescription(sampleBuffer));
    
        // 1.2 Output format: AAC-LC, 1024 frames per packet, mono
        AudioStreamBasicDescription outputAudioStreamBasicDescription = {
            .mSampleRate = inputAudioStreamBasicDescription.mSampleRate,
            .mFormatID = kAudioFormatMPEG4AAC,
            .mFormatFlags = kMPEG4Object_AAC_LC,
            .mBytesPerPacket = 0,
            .mFramesPerPacket = 1024,
            .mBytesPerFrame = 0,
            .mChannelsPerFrame = 1,
            .mBitsPerChannel = 0,
            .mReserved = 0
        };
        
        // 1.3 Find Apple's software AAC codec among the registered encoders
        static AudioClassDescription description;
        UInt32 encoderSpecifier = kAudioFormatMPEG4AAC;
        
        UInt32 size;
        AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders,
                                   sizeof(encoderSpecifier),
                                   &encoderSpecifier,
                                   &size);
        
        unsigned int count = size / sizeof(AudioClassDescription);
        AudioClassDescription descriptions[count];
        AudioFormatGetProperty(kAudioFormatProperty_Encoders,
                               sizeof(encoderSpecifier),
                               &encoderSpecifier,
                               &size,
                               descriptions);
        
        for (unsigned int i = 0; i < count; i++) {
            if ((kAudioFormatMPEG4AAC == descriptions[i].mSubType) &&
                (kAppleSoftwareAudioCodecManufacturer == descriptions[i].mManufacturer)) {
                memcpy(&description, &(descriptions[i]), sizeof(description));
            }
        }
        
        // 1.4 Create the converter with the chosen codec
        AudioConverterNewSpecific(&inputAudioStreamBasicDescription,
                                  &outputAudioStreamBasicDescription,
                                  1,
                                  &description,
                                  &_audioConverter);
    }
    
    // 2. Get a pointer to the raw PCM data in the sample buffer
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    CFRetain(blockBuffer);
    OSStatus status = CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &_pcmBufferSize, &_pcmBuffer);
    NSError *error = nil;
    if (status != kCMBlockBufferNoErr) {
        error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
    }
    memset(_aacBuffer, 0, _aacBufferSize);
    
    // 3. Set up the output buffer list that the converter fills with AAC data
    AudioBufferList outAudioBufferList = {0};
    outAudioBufferList.mNumberBuffers = 1;
    outAudioBufferList.mBuffers[0].mNumberChannels = 1;
    outAudioBufferList.mBuffers[0].mDataByteSize = (int)_aacBufferSize;
    outAudioBufferList.mBuffers[0].mData = _aacBuffer;
    AudioStreamPacketDescription *outPacketDescription = NULL;
    UInt32 ioOutputDataPacketSize = 1;

    // 4. Run the conversion; inInputDataProc feeds the PCM into the converter
    status = AudioConverterFillComplexBuffer(_audioConverter, inInputDataProc, (__bridge void *)(self), &ioOutputDataPacketSize, &outAudioBufferList, outPacketDescription);
    if (status == 0) {
        // 5. Prefix each raw AAC packet with an ADTS header and write it to the file
        NSData *rawAAC = [NSData dataWithBytes:outAudioBufferList.mBuffers[0].mData
                                        length:outAudioBufferList.mBuffers[0].mDataByteSize];
        NSData *adtsHeader = [self adtsDataForPacketLength:rawAAC.length];
        NSMutableData *fullData = [NSMutableData dataWithData:adtsHeader];
        [fullData appendData:rawAAC];
        [self.audioFileHandle writeData:fullData];
    } else {
        error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
    }
    
    CFRelease(sampleBuffer);
    CFRelease(blockBuffer);
}

/**
 Converter input callback (C function): supplies PCM data to the converter
 
 @param inAudioConverter the converter requesting data
 @param ioNumberDataPackets in: packets requested; out: packets actually provided
 @param ioData buffer list to be filled with the input PCM
 @param outDataPacketDescription packet descriptions (unused for PCM input)
 @param inUserData the CCAACEncoder instance passed through FillComplexBuffer
 @return noErr on success
 */
OSStatus inInputDataProc(AudioConverterRef inAudioConverter,
                         UInt32 *ioNumberDataPackets,
                         AudioBufferList *ioData,
                         AudioStreamPacketDescription **outDataPacketDescription,
                         void *inUserData)
{
    CCAACEncoder *encoder = (__bridge CCAACEncoder *)(inUserData);
    UInt32 requestedPackets = *ioNumberDataPackets;
    
    // Hand the pending PCM buffer to the converter
    size_t copiedSamples = encoder.pcmBufferSize;
    ioData->mBuffers[0].mData = encoder.pcmBuffer;
    ioData->mBuffers[0].mDataByteSize = (int)encoder.pcmBufferSize;
    encoder.pcmBuffer = NULL;
    encoder.pcmBufferSize = 0;

    if (copiedSamples < requestedPackets) {
        // Not enough PCM buffered yet; report no packets available
        *ioNumberDataPackets = 0;
        return -1;
    }
    }
    *ioNumberDataPackets = 1;
    
    return noErr;
}

/**
 *  Add ADTS header at the beginning of each and every AAC packet.
 *  This is needed as MediaCodec encoder generates a packet of raw
 *  AAC data.
 */
- (NSData *)adtsDataForPacketLength:(NSUInteger)packetLength {
    int adtsLength = 7;
    char *packet = malloc(sizeof(char) * adtsLength);
    int profile = 2;  // AAC LC (stored in the header as profile - 1)
    int freqIdx = 4;  // sampling frequency index 4 = 44.1 kHz
    int chanCfg = 1;  // MPEG-4 channel configuration: 1 = mono, front-center
    NSUInteger fullLength = adtsLength + packetLength;
    // fill in ADTS data
    packet[0] = (char)0xFF; // 11111111     = syncword
    packet[1] = (char)0xF9; // 1111 1 00 1  = syncword MPEG-2 Layer CRC
    packet[2] = (char)(((profile-1)<<6) + (freqIdx<<2) +(chanCfg>>2));
    packet[3] = (char)(((chanCfg&3)<<6) + (fullLength>>11));
    packet[4] = (char)((fullLength&0x7FF) >> 3);
    packet[5] = (char)(((fullLength&7)<<5) + 0x1F);
    packet[6] = (char)0xFC;
    NSData *data = [NSData dataWithBytesNoCopy:packet length:adtsLength freeWhenDone:YES];
    
    return data;
}

- (void)endEncodeAAC
{
    AudioConverterDispose(_audioConverter);
    _audioConverter = NULL;
}

@end
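
Finally, the two encoders are wired into CCVideoCapture. The updated capture class looks like this (unchanged parts are elided):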

#import "CCVideoCapture.h"
#import <AVFoundation/AVFoundation.h>
#import "CCH264Encoder.h"
#import "CCAACEncoder.h"

@interface CCVideoCapture () <AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureAudioDataOutputSampleBufferDelegate>

@property (nonatomic, strong) AVCaptureSession *session;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *layer;
@property (nonatomic, strong) AVCaptureConnection *videoConnection;

@property (nonatomic, strong) CCH264Encoder *videoEncoder;
@property (nonatomic , strong) CCAACEncoder *audioEncoder;

@end

@implementation CCVideoCapture

#pragma mark - lazyload

- (CCH264Encoder *)videoEncoder {
    if (!_videoEncoder) {
        _videoEncoder = [[CCH264Encoder alloc] init];
    }
    
    return _videoEncoder;
}

- (CCAACEncoder *)audioEncoder {
    if (!_audioEncoder) {
        _audioEncoder = [[CCAACEncoder alloc] init];
    }
    
    return _audioEncoder;
}

- (void)startCapturing:(UIView *)preView
{
    // ===============  Prepare the encoders  ==========================
    [self.videoEncoder prepareEncodeWithWidth:720 height:1280];
    
    
    // ===============  Video/audio capture  ===========================
    // 1. Create the capture session
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    session.sessionPreset = AVCaptureSessionPreset1280x720;
    self.session = session;
    .
    .
    .

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    if (self.videoConnection == connection) {
        dispatch_sync(queue, ^{
            [self.videoEncoder encodeFrame:sampleBuffer];
        });
        
        NSLog(@"Captured video data");
    } else {
        dispatch_sync(queue, ^{
            [self.audioEncoder encodeAAC:sampleBuffer];
        });

        NSLog(@"Captured audio data");
    }
}

- (void)stopCapturing
{
    [self.videoEncoder endEncode];
    [self.audioEncoder endEncodeAAC];
    [self.session stopRunning];
    [self.layer removeFromSuperlayer];
}

@end

To be continued

References

Encoding and Muxing, Pushing and Transport (编码和封装、推流和传输)
Analysis of AAC Packaged in ADTS Format (AAC 以 ADTS 格式封装的分析)
[Introduction to Audio](https://wenku.baidu.com/view/278f415e804d2b160b4ec0ac.html)
