AVFoundation Audio/Video Capture

2020-03-02  pengxiaochao

Audio/Video Development, Chapter 1


1. Audio Concepts

A sound wave has three key attributes: frequency, amplitude, and waveform.

Audio has to be captured by a microphone and converted into a digital signal before it can be transmitted over the internet. There are many audio encoding formats (MP3, WAV, AAC, Ogg, etc.), but the original raw audio stream is generally pulse-code modulation (PCM) data. Capturing PCM requires configuring three parameters that also come up constantly in real development: the quantization format (bit depth), the sample rate, and the number of channels.

Example

Take CD quality as an example: the quantization format is 16-bit, the sample rate is 44,100 Hz, and the channel count is 2. These values describe CD quality. So what is the bitrate of CD-quality data?

44100 * 16 * 2 / 1024 = 1378.125 Kbps

And how much storage does one minute of this CD-quality data occupy?

1378.125 * 60 / 8 / 1024 ≈ 10.09 MB

In general, the quantization format is 8-bit or 16-bit, the sample rate is 44,100 Hz, and the channel count is 1 or 2 (mono or stereo).
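
The same arithmetic expressed in code (a minimal sketch; the variable names are illustrative):

double sampleRate   = 44100;  //samples per second
double bitDepth     = 16;     //bits per sample (quantization format)
double channelCount = 2;      //stereo

//Bitrate in Kbps, dividing by 1024 as in the example above
double kbps = sampleRate * bitDepth * channelCount / 1024.0;   //1378.125

//Storage for one minute of audio, in MB
double mbPerMinute = kbps * 60 / 8 / 1024;                     //about 10.09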

2.1 Capturing Audio Data (AVFoundation)

//0. Create the capture session AVCaptureSession (both audio and video capture go through this session)
self.captureSession = [[AVCaptureSession alloc] init];


//1. Get the microphone device
AVCaptureDevice *audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];

//2. Set the audio input device to the iPhone's microphone
self.audioInputDevice = [AVCaptureDeviceInput deviceInputWithDevice:audioDevice error:nil];

//3. Create the audio data output and set self as the AVCaptureAudioDataOutputSampleBufferDelegate
//self.captureQueue is a custom queue

self.audioDataOutput = [[AVCaptureAudioDataOutput alloc] init];
[self.audioDataOutput setSampleBufferDelegate:self queue:self.captureQueue];

//4. Add the audio input and output to the AVCaptureSession and commit the configuration
[self.captureSession beginConfiguration];
if ([self.captureSession canAddInput:self.audioInputDevice]) {
    [self.captureSession addInput:self.audioInputDevice];
}
if([self.captureSession canAddOutput:self.audioDataOutput]){
    [self.captureSession addOutput:self.audioDataOutput];
}
[self.captureSession commitConfiguration];


//self.audioConnection is the configured output connection; it can be stored as a view controller property and used inside the delegate method to tell whether a sample buffer came from the audio connection or the video connection
self.audioConnection = [self.audioDataOutput connectionWithMediaType:AVMediaTypeAudio];



//5. Implement the AVCaptureAudioDataOutputSampleBufferDelegate method (captured audio data is delivered through its sampleBuffer parameter)


#pragma mark - Delegate method
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
    
    //The PCM data lives in the sampleBuffer object
    
}

    
//Call to start capturing audio data
[self.captureSession startRunning];
//Call to stop capturing audio data
[self.captureSession stopRunning];
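
In a project that captures both audio and video, the same delegate method also receives video sample buffers, and the audioConnection obtained above is how the two streams are told apart. A hedged sketch of that pattern (self.videoConnection and self.audioEncoder are assumed properties that are not part of this article; audioEncoder would be the AACAudioEncode instance described in section 2.2):

-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
    if (connection == self.audioConnection) {
        //Audio: forward the PCM sample buffer to the AAC encoder (section 2.2)
        [self.audioEncoder encodeAudioSamepleBuffer:sampleBuffer];
    } else if (connection == self.videoConnection) {
        //Video: hand off to a video encoder (not covered in this article)
    }
}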

2.2 Encoding Audio Data

Create a class, AACAudioEncode, that converts the captured sampleBuffer into AAC-formatted data.

2.2.1 Initialization

The AACAudioEncode header (.h) file. The encoder is configured by passing in an AudioConfig configuration object at initialization:

@protocol AACAudioEncodeDelegate <NSObject>
- (void)audioEncodeCallback:(NSData *)aacData;
@end


@interface AACAudioEncode : NSObject
@property (nonatomic, strong) AudioConfig *config;
@property (nonatomic, weak) id<AACAudioEncodeDelegate> delegate;

/** Initialize with an encoder configuration */
- (instancetype)initWithConfig:(AudioConfig*)config;
/** Encode a captured sample buffer */
- (void)encodeAudioSamepleBuffer: (CMSampleBufferRef)sampleBuffer;
@end
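
The AudioConfig class referenced here is not defined in this article. A minimal sketch of what it might contain, based only on the properties the encoder reads later (sampleRate, channelCount, bitrate); the default values are illustrative assumptions:

@interface AudioConfig : NSObject
@property (nonatomic, assign) NSUInteger sampleRate;    //e.g. 44100
@property (nonatomic, assign) NSUInteger channelCount;  //1 (mono) or 2 (stereo)
@property (nonatomic, assign) NSUInteger bitrate;       //encoded bitrate in bits per second, e.g. 96000
@end

@implementation AudioConfig
- (instancetype)init {
    self = [super init];
    if (self) {
        //Illustrative defaults; adjust to match the capture settings
        _sampleRate = 44100;
        _channelCount = 1;
        _bitrate = 96000;
    }
    return self;
}
@end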

AACAudioEncode implementation details:

- (instancetype)initWithConfig:(AudioConfig*)config {
    self = [super init];
    if (self) {

        _encoderQueue = dispatch_queue_create("aac hard encoder queue", DISPATCH_QUEUE_SERIAL);
        _callbackQueue = dispatch_queue_create("aac hard encoder callback queue", DISPATCH_QUEUE_SERIAL);
        _audioConverter = NULL;
        _pcmBufferSize = 0;
        _pcmBuffer = NULL;
        _config = config;
        if (config == nil) {
            _config = [[AudioConfig alloc] init];
        }
        
    }
    return self;
}
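
The implementation accesses several instance variables that this excerpt never declares. A plausible class extension for the .m file, a sketch whose synthesized backing ivars match the _encoderQueue, _callbackQueue, _audioConverter, _pcmBufferSize and _pcmBuffer names the code uses directly:

#import <AudioToolbox/AudioToolbox.h>

@interface AACAudioEncode ()
@property (nonatomic, strong) dispatch_queue_t encoderQueue;    //serial queue that runs the encoding work
@property (nonatomic, strong) dispatch_queue_t callbackQueue;   //serial queue that runs the delegate callbacks
@property (nonatomic, assign) AudioConverterRef audioConverter; //the AAC converter, created lazily
@property (nonatomic, assign) size_t pcmBufferSize;             //size in bytes of the current PCM chunk
@property (nonatomic, assign) char *pcmBuffer;                  //pointer to the current PCM chunk inside the CMBlockBuffer
@end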


//Encode audio (called after AVFoundation has captured audio content)
- (void)encodeAudioSamepleBuffer: (CMSampleBufferRef)sampleBuffer {
    CFRetain(sampleBuffer);
    
    //1. If the audio converter has not been created yet, configure the encoding parameters and create it
    if (!_audioConverter) {
        [self setupEncoderWithSampleBuffer:sampleBuffer];
    }
    
    //2. Hop onto the asynchronous encoding queue
    dispatch_async(_encoderQueue, ^{
    
    //3. Get the CMBlockBuffer, which holds the PCM data
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    CFRetain(blockBuffer);
    //4. Get the size and address of the audio data inside the block buffer
    OSStatus status = CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &_pcmBufferSize, &_pcmBuffer);
    //5. Check the status
    NSError *error = nil;
    if (status != kCMBlockBufferNoErr) {
        error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
        NSLog(@"Error: ACC encode get data point error: %@",error);
        return;
    }
    //6. Allocate a buffer of _pcmBufferSize bytes and zero it; the converter's output will be written here
    uint8_t *pcmBuffer = malloc(_pcmBufferSize);
    memset(pcmBuffer, 0, _pcmBufferSize);
    

        //7. Point outAudioBufferList at the pcmBuffer allocated above
        AudioBufferList outAudioBufferList = {0};
        outAudioBufferList.mNumberBuffers = 1;
        outAudioBufferList.mBuffers[0].mNumberChannels = (uint32_t)_config.channelCount;
        outAudioBufferList.mBuffers[0].mDataByteSize = (UInt32)_pcmBufferSize;
        outAudioBufferList.mBuffers[0].mData = pcmBuffer;
        
        //Request one output packet
        UInt32 outputDataPacketSize = 1;
        
        //Ask the converter to fill the output buffer, pulling input through the callback
        /*
         Parameter 1: inAudioConverter, the audio converter
         Parameter 2: inInputDataProc, the callback that supplies the audio data to convert; the converter calls it whenever it is ready for new input
         Parameter 3: inInputDataProcUserData, user data passed through to the callback (here: self)
         Parameter 4: ioOutputDataPacketSize, on input the capacity of the output buffer in packets, on output the number of packets actually written
         Parameter 5: outOutputData, the buffer that receives the converted (AAC) data
         Parameter 6: outPacketDescription, packet descriptions for the output (may be NULL)
         */
        status = AudioConverterFillComplexBuffer(_audioConverter, aacEncodeInputDataProc, (__bridge void * _Nullable)(self), &outputDataPacketSize, &outAudioBufferList, NULL);
    
        if (status == noErr) {
            //Wrap the encoded AAC bytes in an NSData
            NSData *rawAAC = [NSData dataWithBytes: outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
            //Free pcmBuffer
            free(pcmBuffer);
            //Deliver the data to the delegate on the callback queue
            dispatch_async(_callbackQueue, ^{
                [_delegate audioEncodeCallback:rawAAC];
            });
        } else {
            error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
        }
    
        CFRelease(blockBuffer);
        CFRelease(sampleBuffer);
        if (error) {
            NSLog(@"error: AAC编码失败 %@",error);
        }
    });
}
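
The input callback aacEncodeInputDataProc passed to AudioConverterFillComplexBuffer above is not shown in this excerpt. Its job is to hand the converter the PCM chunk that encodeAudioSamepleBuffer: stored in _pcmBuffer / _pcmBufferSize. A hedged sketch following the standard AudioConverterComplexInputDataProc signature (it assumes the pcmBuffer, pcmBufferSize and config properties from the class extension sketched earlier, and 16-bit interleaved PCM from the capture session):

static OSStatus aacEncodeInputDataProc(AudioConverterRef inAudioConverter,
                                       UInt32 *ioNumberDataPackets,
                                       AudioBufferList *ioData,
                                       AudioStreamPacketDescription **outDataPacketDescription,
                                       void *inUserData) {
    //Recover the encoder instance passed in as user data (self)
    AACAudioEncode *encoder = (__bridge AACAudioEncode *)(inUserData);

    //No PCM available: report zero packets and return an error so the converter stops asking
    if (!encoder.pcmBuffer || encoder.pcmBufferSize == 0) {
        *ioNumberDataPackets = 0;
        return -1;
    }

    //Hand the converter the PCM chunk captured in encodeAudioSamepleBuffer:
    ioData->mBuffers[0].mData = encoder.pcmBuffer;
    ioData->mBuffers[0].mDataByteSize = (UInt32)encoder.pcmBufferSize;
    ioData->mBuffers[0].mNumberChannels = (UInt32)encoder.config.channelCount;

    //For LPCM input one packet is one frame; report how many frames were provided
    //(assuming 16-bit samples, i.e. 2 bytes per channel per frame)
    UInt32 bytesPerFrame = 2 * (UInt32)encoder.config.channelCount;
    *ioNumberDataPackets = (UInt32)(encoder.pcmBufferSize / bytesPerFrame);

    return noErr;
}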

//Configure the audio encoding parameters and create the converter
- (void)setupEncoderWithSampleBuffer: (CMSampleBufferRef)sampleBuffer {
    
    //Get the input format description from the sample buffer
    AudioStreamBasicDescription inputAduioDes = *CMAudioFormatDescriptionGetStreamBasicDescription( CMSampleBufferGetFormatDescription(sampleBuffer));
    
    //Set up the output format
    AudioStreamBasicDescription outputAudioDes = {0};
    outputAudioDes.mSampleRate = (Float64)_config.sampleRate;       //Sample rate
    outputAudioDes.mFormatID = kAudioFormatMPEG4AAC;                //Output format: AAC
    outputAudioDes.mFormatFlags = kMPEG4Object_AAC_LC;              //AAC-LC object type
    outputAudioDes.mBytesPerPacket = 0;                             //0: the encoder determines each packet's size
    outputAudioDes.mFramesPerPacket = 1024;                         //Frames per packet: 1024 for AAC
    outputAudioDes.mBytesPerFrame = 0;                              //Bytes per frame: 0 for compressed formats
    outputAudioDes.mChannelsPerFrame = (uint32_t)_config.channelCount; //Output channel count
    outputAudioDes.mBitsPerChannel = 0;                             //Bits per channel per frame: 0 for compressed formats
    outputAudioDes.mReserved =  0;                                  //Pads the struct to 8-byte alignment; must be 0
    
    //Let Core Audio fill in the remaining output format fields
    UInt32 outDesSize = sizeof(outputAudioDes);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &outDesSize, &outputAudioDes);
    
    //Get the encoder's class description (only the software codec can be used here)
    AudioClassDescription *audioClassDesc = [self getAudioCalssDescriptionWithType:outputAudioDes.mFormatID fromManufacture:kAppleSoftwareAudioCodecManufacturer];
    
    /** Create the converter
     Parameter 1: input audio format description
     Parameter 2: output audio format description
     Parameter 3: number of class descriptions
     Parameter 4: class descriptions
     Parameter 5: the created converter (encoder)
     */
    OSStatus status = AudioConverterNewSpecific(&inputAduioDes, &outputAudioDes, 1, audioClassDesc, &_audioConverter);
    if (status != noErr) {
        NSLog(@"Error!:硬编码AAC创建失败, status= %d", (int)status);
        return;
    }
    
    // Set the codec quality
    /*
     kAudioConverterQuality_Max                              = 0x7F,
     kAudioConverterQuality_High                             = 0x60,
     kAudioConverterQuality_Medium                           = 0x40,
     kAudioConverterQuality_Low                              = 0x20,
     kAudioConverterQuality_Min                              = 0
     */
    UInt32 temp = kAudioConverterQuality_High;
    //Rendering quality of the codec
    AudioConverterSetProperty(_audioConverter, kAudioConverterCodecQuality, sizeof(temp), &temp);
    
    //Set the output bitrate
    uint32_t audioBitrate = (uint32_t)self.config.bitrate;
    uint32_t audioBitrateSize = sizeof(audioBitrate);
    status = AudioConverterSetProperty(_audioConverter, kAudioConverterEncodeBitRate, audioBitrateSize, &audioBitrate);
    if (status != noErr) {
        NSLog(@"Error!:硬编码AAC 设置比特率失败");
    }

}
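
The helper getAudioCalssDescriptionWithType:fromManufacture: used above is not shown in the excerpt either. It asks Core Audio for the available AAC encoders and picks the one from the requested manufacturer (the software codec in this case). A hedged sketch of a typical implementation:

- (AudioClassDescription *)getAudioCalssDescriptionWithType:(AudioFormatID)type fromManufacture:(uint32_t)manufacturer {
    static AudioClassDescription desc;

    UInt32 encoderSpecifier = type;
    UInt32 size;
    //Ask how much data the list of encoders for this format occupies
    OSStatus status = AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders, sizeof(encoderSpecifier), &encoderSpecifier, &size);
    if (status != noErr) {
        NSLog(@"Error: cannot get encoder property info, status = %d", (int)status);
        return nil;
    }

    //Fetch the descriptions of all available encoders for this format
    unsigned int count = size / sizeof(AudioClassDescription);
    AudioClassDescription descriptions[count];
    status = AudioFormatGetProperty(kAudioFormatProperty_Encoders, sizeof(encoderSpecifier), &encoderSpecifier, &size, descriptions);
    if (status != noErr) {
        NSLog(@"Error: cannot get encoder descriptions, status = %d", (int)status);
        return nil;
    }

    //Pick the encoder matching the requested format and manufacturer
    for (unsigned int i = 0; i < count; i++) {
        if (descriptions[i].mSubType == type && descriptions[i].mManufacturer == manufacturer) {
            desc = descriptions[i];
            return &desc;
        }
    }
    return nil;
}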

//Extract the PCM data from the sampleBuffer and return it to the ViewController; the PCM data can be played back directly
- (NSData *)convertAudioSamepleBufferToPcmData: (CMSampleBufferRef)sampleBuffer {
    //Get the size of the PCM data
    size_t size = CMSampleBufferGetTotalSampleSize(sampleBuffer);
    //Allocate a buffer
    int8_t *audio_data = (int8_t *)malloc(size);
    memset(audio_data, 0, size);
    
    //Get the CMBlockBuffer, which holds the PCM data
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    //Copy the data into the buffer we allocated
    CMBlockBufferCopyDataBytes(blockBuffer, 0, size, audio_data);
    NSData *data = [NSData dataWithBytes:audio_data length:size];
    free(audio_data);
    return data;
}

To be continued...
