AudioQueue实现录音

2020-04-01  本文已影响0人  桔子橙子柚子_F
// Recording defaults: mono, 16 kHz, 16-bit linear PCM (typical speech-capture format).
#define DEFAULT_CHANNELS (1)
#define DEFAULT_SAMPLE_RATE (16000)
#define DEFAULT_SAMPLE_LENGTH (16)
// NOTE(review): not referenced in this excerpt — presumably a max recording
// duration in seconds; confirm against the full source.
#define DEFAULT_DURATION_TIME (10)
#define kNumberAudioQueueBuffers 3 // number of reusable in-flight capture buffers

// Aggregates all AudioQueue recording state in a single plain-C struct,
// shared between the ObjC recorder object and the C input callback.
typedef struct OVSAQCallbackStruct {
    AudioStreamBasicDescription mDataFormat; // capture format (linear PCM settings)
    AudioQueueRef queue;                     // the input (recording) audio queue
    AudioQueueBufferRef mBuffers[kNumberAudioQueueBuffers]; // reusable capture buffers
    AudioFileID outputFile; // NOTE(review): never touched in this excerpt — confirm it is used elsewhere

    unsigned long frameSize; // NOTE(review): unused in this excerpt
    long long recPtr;        // recording position; reset to 0 when recording starts
    int run;                 // nonzero while recording; callback re-enqueues buffers only when set
} OVSAQCallbackStruct;
/// AudioQueue input callback: receives each filled capture buffer.
/// Forwards non-empty buffers to the recorder for processing, then hands
/// the buffer back to the queue while recording is still active.
static void AQInputCallback(void *inUserData, AudioQueueRef inAudioQueue, AudioQueueBufferRef inBuffer,
    const AudioTimeStamp *inStartTime, UInt32 nNumPackets, const AudioStreamPacketDescription *inPacketDesc) {
    OVSVADAQRecorder *recorder = (__bridge OVSVADAQRecorder *)inUserData;

    // Only forward buffers that actually contain captured audio.
    if (inBuffer->mAudioDataByteSize > 0) {
        [recorder processAudioBuffer:inBuffer withQueue:inAudioQueue];
    }

    // Recycle the buffer so capture continues; once `run` is cleared the
    // buffer is intentionally not re-enqueued and the queue winds down.
    if (recorder.aqc.run) {
        AudioQueueEnqueueBuffer(recorder.aqc.queue, inBuffer, 0, NULL);
    }
}
// Start recording.
// NOTE(review): the method header for this body is missing from this
// excerpt — the code below appears to be the interior of a "start" method
// on the recorder class; confirm the signature against the full source.
 // Open the recording channel: configure the shared AVAudioSession.
    NSError *error = nil;
    // Set the audio session category. PlayAndRecord supports simultaneous
    // playback and capture; Record alone would suffice for capture-only use.
    int ret = [[AVAudioSession sharedInstance]
        setCategory:AVAudioSessionCategoryPlayAndRecord
              error: &error]; // BOOL result held in an int; zero means failure
    if (!ret) {
        NSLog(@"设置声音环境失败");
        return;
    }
    // Activate the audio session before creating the queue.
    ret = [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (!ret) {
        NSLog(@"启动失败");
        return;
    }

    // Describe the capture format (uncompressed linear PCM).
    //OVSAQCallbackStruct aqc
    // Sample frames per second.
    _aqc.mDataFormat.mSampleRate = DEFAULT_SAMPLE_RATE;
    // Frames per packet; always 1 for uncompressed PCM.
    _aqc.mDataFormat.mFramesPerPacket = 1;
    // Channels per frame (mono here).
    _aqc.mDataFormat.mChannelsPerFrame = DEFAULT_CHANNELS;
    // Bits per sample in each channel: 16 bits = 2 bytes per channel per
    // frame; multiplied by the channel count below to get bytes per frame.
    _aqc.mDataFormat.mBitsPerChannel = DEFAULT_SAMPLE_LENGTH;
    // PCM format, signed-integer samples, packed.
    _aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    _aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    // Bytes per frame = (bits per channel / 8) * channel count.
    _aqc.mDataFormat.mBytesPerFrame = (_aqc.mDataFormat.mBitsPerChannel / 8) * _aqc.mDataFormat.mChannelsPerFrame;
    // With one frame per packet, bytes per packet equals bytes per frame.
    _aqc.mDataFormat.mBytesPerPacket = _aqc.mDataFormat.mBytesPerFrame;

    // Create the input (recording) queue; AQInputCallback receives filled
    // buffers with `self` as user data.
    // NOTE(review): the OSStatus result of AudioQueueNewInput is not checked.
    AudioQueueNewInput(&_aqc.mDataFormat, AQInputCallback, (__bridge void *)(self), NULL, NULL, 0, &_aqc.queue);

    // Capture-buffer size in bytes. NOTE(review): 360 * 8 = 2880 is a magic
    // number (~90 ms of 16 kHz / 16-bit mono audio); consider deriving it
    // from the format and a target buffer duration.
    int bufferByteSize = 360 * 8;

    // Allocate the fixed set of capture buffers and prime the queue.
    for (int i = 0; i < kNumberAudioQueueBuffers; i++) {
        OSStatus status = AudioQueueAllocateBuffer(_aqc.queue, bufferByteSize, &_aqc.mBuffers[i]);
        if (status != noErr) {
            // Allocation failed: tear down the queue and abandon the loop.
            AudioQueueDispose(_aqc.queue, true);
            _aqc.queue = NULL;
            break;
        }
        // Hand the empty buffer to the queue so it can be filled.
        AudioQueueEnqueueBuffer(_aqc.queue, _aqc.mBuffers[i], 0, NULL);
    }

    _aqc.recPtr = 0;
    _aqc.run = 1;

    // Start capturing — only if the queue survived buffer allocation.
    if (_aqc.queue) {
        AudioQueueStart(_aqc.queue, NULL);
        NSLog(@"OVS AQ Recorder-> 开始录音");
    } else {
        // Queue initialization failed; nothing to start.
        }
    }
// Receives one captured audio segment.
/// Copies the raw PCM bytes out of `inBuffer` into a heap buffer for
/// further processing, then releases the copy.
/// NOTE(review): the copied data is currently unused — presumably the
/// empty branch is where encoding/VAD/upload belongs; confirm with caller.
/// @param inBuffer The filled AudioQueue buffer (owned by the queue).
/// @param queue    The queue that produced the buffer (unused here).
- (void)processAudioBuffer:(AudioQueueBufferRef)inBuffer withQueue:(AudioQueueRef)queue {
    UInt32 byteCount = inBuffer->mAudioDataByteSize;
    if (byteCount == 0) {
        return;
    }

    Byte *data = (Byte *)malloc(byteCount);
    // BUG FIX: the original dereferenced `data` (memset/memcpy) BEFORE the
    // NULL check, crashing on allocation failure. Check first.
    if (data == NULL) {
        return;
    }
    // memcpy overwrites every byte, so the original's memset was redundant.
    memcpy(data, inBuffer->mAudioData, byteCount);

    // ... consume `data` (byteCount bytes of captured PCM) here ...

    free(data);
}
// Stop recording.
// Clear the run flag first so the input callback stops re-enqueuing
// buffers, then stop the queue (second argument true = stop immediately,
// synchronously, rather than after queued buffers play out).
// NOTE(review): AudioQueueDispose and session deactivation are not shown
// in this excerpt — confirm cleanup happens elsewhere.
_aqc.run = 0;
 AudioQueueStop(_aqc.queue, true);
上一篇 下一篇

猜你喜欢

热点阅读