
How to Record Audio with Audio Unit

2020-02-13  zhonglaoban

Implementing audio recording with Audio Unit

(Figure: audio recording flow diagram)

Recording with Audio Unit is relatively simple: a single Audio Unit is enough. The steps are as follows (one prerequisite, configuring the audio session, is sketched right after this list):

  1. Fill in an AudioComponentDescription to specify which type of Audio Unit to use.
  2. Get an Audio Unit instance. There are two ways: through an AUGraph, or directly through an AudioComponent.
  3. Set the Audio Unit's properties, telling the system which features we need and what kind of data to capture.
  4. Control starting and stopping the recording.
  5. Fetch the audio data in the callback function.
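On iOS, capture only works if the app's audio session permits recording. A minimal sketch, assuming the standard AVFoundation session APIs (error handling omitted for brevity):

#import <AVFoundation/AVFoundation.h>

// Configure the shared audio session for recording before starting the unit.
NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
[session setActive:YES error:&error];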

Initialization

- (instancetype)initWithAsbd:(AudioStreamBasicDescription)asbd {
    self = [super init];
    if (self) {
        _asbd = asbd;
        _queue = dispatch_queue_create("zf.audioRecorder", DISPATCH_QUEUE_SERIAL);
        [self setupAcd];
        dispatch_async(_queue, ^{
//            [self createInputUnit];
            [self getAudioUnits];
            [self setupAudioUnits];
        });
    }
    return self;
}
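For reference, here is one way a caller might fill in the AudioStreamBasicDescription this initializer expects. The concrete format (16-bit signed, mono, 44.1 kHz linear PCM) is an assumption for illustration, not something the post prescribes:

AudioStreamBasicDescription asbd = {0};
asbd.mSampleRate       = 44100;                  // 44.1 kHz
asbd.mFormatID         = kAudioFormatLinearPCM;  // uncompressed PCM
asbd.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mChannelsPerFrame = 1;                      // mono
asbd.mBitsPerChannel   = 16;
asbd.mBytesPerFrame    = 2;                      // 16 bits * 1 channel
asbd.mBytesPerPacket   = 2;                      // PCM: 1 frame per packet
asbd.mFramesPerPacket  = 1;

ZFAudioUnitRecorder *recorder = [[ZFAudioUnitRecorder alloc] initWithAsbd:asbd];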

Setting up the AudioComponentDescription

- (void)setupAcd {
    _ioUnitDesc.componentType = kAudioUnitType_Output;
    // Voice-Processing I/O (VPIO): I/O with echo cancellation and noise suppression
    _ioUnitDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    _ioUnitDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
    _ioUnitDesc.componentFlags = 0;
    _ioUnitDesc.componentFlagsMask = 0;
}
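If you don't need echo cancellation, the plain Remote I/O unit works too; only the subtype changes (an alternative, not what this post uses):

_ioUnitDesc.componentSubType = kAudioUnitSubType_RemoteIO; // plain I/O, no voice processing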

Getting an Audio Unit instance

Via AUGraph

- (void)getAudioUnits {
    OSStatus status = NewAUGraph(&_graph);
    printf("create graph %d \n", (int)status);
    
    AUNode ioNode;
    status = AUGraphAddNode(_graph, &_ioUnitDesc, &ioNode);
    printf("add ioNote %d \n", (int)status);

    //instantiate the audio units
    status = AUGraphOpen(_graph);
    printf("open graph %d \n", (int)status);
    
    //obtain references to the audio unit instances
    status = AUGraphNodeInfo(_graph, ioNode, NULL, &_ioUnit);
    printf("get ioUnit %d \n", (int)status);
}

Via AudioComponent

- (void)createInputUnit {
    AudioComponent comp = AudioComponentFindNext(NULL, &_ioUnitDesc);
    if (comp == NULL) {
        printf("can't get AudioComponent \n");
    }
    OSStatus status = AudioComponentInstanceNew(comp, &(_ioUnit));
    printf("create audio unit %d \n", (int)status);
}

Setting the Audio Unit's properties

On an I/O unit, bus 1 is the input element (microphone) and bus 0 is the output element (speaker). The capture format is set on the output scope of bus 1: that is the side of the input bus where data flows out of the unit toward the app.

- (void)setupAudioUnits {
    OSStatus status;
    // Audio input is disabled by default and must be enabled. 0: disabled, 1: enabled.
    UInt32 enableInput = 1;
    UInt32 propertySize;
    status = AudioUnitSetProperty(_ioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  1, // bus 1: input (microphone)
                                  &enableInput,
                                  sizeof(enableInput));
    printf("enable input %d \n", (int)status);
    
    // Disable audio output, since we only record.
    UInt32 disableOutput = 0;
    status = AudioUnitSetProperty(_ioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  0, // bus 0: output (speaker)
                                  &disableOutput,
                                  sizeof(disableOutput));
    printf("disable output %d \n", (int)status);
    
    // Set the stream format on the output scope of bus 1,
    // i.e. the format of the captured data handed to the app.
    propertySize = sizeof (AudioStreamBasicDescription);
    status = AudioUnitSetProperty(_ioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  1,
                                  &_asbd,
                                  propertySize);
    printf("set input format %d \n", (int)status);
    // Read the format back to verify it was accepted.
    AudioStreamBasicDescription deviceFormat;
    status = AudioUnitGetProperty(_ioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  1,
                                  &deviceFormat,
                                  &propertySize);
    printf("get input format %d \n", (int)status);
    
    // Set the maximum number of frames per render slice.
    UInt32 maxFramesPerSlice = 4096;
    propertySize = sizeof(UInt32);
    status = AudioUnitSetProperty(_ioUnit,
                                  kAudioUnitProperty_MaximumFramesPerSlice,
                                  kAudioUnitScope_Global,
                                  0,
                                  &maxFramesPerSlice,
                                  propertySize);
    printf("set max frame per slice: %d, %d \n", (int)maxFramesPerSlice, (int)status);
    status = AudioUnitGetProperty(_ioUnit,
                                  kAudioUnitProperty_MaximumFramesPerSlice,
                                  kAudioUnitScope_Global,
                                  0,
                                  &maxFramesPerSlice,
                                  &propertySize);
    printf("get max frame per slice: %d, %d \n", (int)maxFramesPerSlice, (int)status);
    
    // Register the input callback that fires when captured data is ready.
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = &inputCallback;
    callbackStruct.inputProcRefCon = (__bridge void *_Nullable)(self);
    
    status = AudioUnitSetProperty(_ioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Input,
                                  0,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    printf("set render callback %d \n", (int)status);
}

Starting recording

The commented-out lines show the non-AUGraph path (direct Audio Unit calls).

- (void)startRecord {
    dispatch_async(_queue, ^{
        OSStatus status;
//        status = AudioUnitInitialize(self.ioUnit);
//        printf("AudioUnitInitialize %d \n", (int)status);
//        status = AudioOutputUnitStart(self.ioUnit);
//        printf("AudioOutputUnitStart %d \n", (int)status);
        
        status = AUGraphInitialize(self.graph);
        printf("AUGraphInitialize %d \n", (int)status);
        status = AUGraphStart(self.graph);
        printf("AUGraphStart %d \n", (int)status);
    });
}

Stopping recording

- (void)stopRecord {
    dispatch_async(_queue, ^{
        OSStatus status;
        status = AUGraphStop(self.graph);
        printf("AUGraphStop %d \n", (int)status);
    });
}
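The post stops at stopping the graph. For completeness, a minimal teardown sketch, assuming a dealloc on the same class and using only the standard AUGraph lifecycle calls:

- (void)dealloc {
    // Undo what getAudioUnits/startRecord set up:
    // stop, uninitialize, close and dispose of the graph.
    if (_graph) {
        AUGraphStop(_graph);
        AUGraphUninitialize(_graph);
        AUGraphClose(_graph);
        DisposeAUGraph(_graph);
        _graph = NULL;
    }
}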

The data callback (AURenderCallback)

OSStatus inputCallback(void *inRefCon,
                       AudioUnitRenderActionFlags *ioActionFlags,
                       const AudioTimeStamp *inTimeStamp,
                       UInt32 inBusNumber,
                       UInt32 inNumberFrames,
                       AudioBufferList *__nullable ioData) {

    ZFAudioUnitRecorder *recorder = (__bridge ZFAudioUnitRecorder *)inRefCon;

    AudioBuffer buffer;
    
    /**
     Here we define the number of channels, which is mono for the
     iPhone's microphone. inNumberFrames is usually 512 or 1024.
     */
    UInt32 size = inNumberFrames * recorder.asbd.mBytesPerFrame;
    buffer.mDataByteSize = size;    // total byte size of this slice
    buffer.mNumberChannels = 1;     // one channel (mono)
    buffer.mData = malloc(size);    // buffer for AudioUnitRender to fill
    
    // we put our buffer into a bufferlist array for rendering
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;
    
    OSStatus status = noErr;
    
    // The callback delivers no data by itself; pull it from bus 1 (input) with AudioUnitRender.
    status = AudioUnitRender(recorder.ioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, &bufferList);
    
    if (status != noErr) {
        printf("AudioUnitRender %d \n", (int)status);
        return status;
    }
    if ([recorder.delegate respondsToSelector:@selector(audioRecorder:didRecoredAudioData:length:)]) {
        [recorder.delegate audioRecorder:recorder didRecoredAudioData:buffer.mData length:buffer.mDataByteSize];
    }
    free(buffer.mData);
    
    return status;
}
  1. The callback does not itself carry the audio data; you must call AudioUnitRender to fetch it.
  2. We allocated the buffer with malloc, so it must be released with free.
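The delegate protocol itself is not shown in the post. Judging from the selector used in the callback, it presumably looks something like this (the parameter types are an assumption):

@class ZFAudioUnitRecorder;

@protocol ZFAudioUnitRecorderDelegate <NSObject>
// Called on the audio I/O thread with one slice of captured PCM data.
- (void)audioRecorder:(ZFAudioUnitRecorder *)recorder
  didRecoredAudioData:(void *)data
               length:(UInt32)length;
@end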

For the complete code, see the project repository.
