Echo Cancellation on Android with WebRTC

2022-02-18  老祝读书

I previously used Speex for echo cancellation. Whether I was using it wrong or something else was off, the echo cancellation worked very poorly.

Fortunately, I found an example of WebRTC-based echo cancellation on GitHub: Android-Audio-Processing-Using-WebRTC.

After a fair amount of hacking, I finally got it working in my own project, and the echo cancellation is now vastly better than before.
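The Tracker class below references a Constants class that is not shown in this post. As a reading aid, here is a minimal sketch of what it might contain. Every concrete value is an assumption on my part (8 kHz mono 16-bit PCM), picked only because the processing loops below step through the buffers 80 samples at a time, and 80 samples is exactly one 10 ms WebRTC frame at 8 kHz:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder;

// Hypothetical sketch: the real Constants class is not part of this post.
public final class Constants {
    // assumed: 80-sample frames = 10 ms at 8 kHz
    public static final int sampleRateInHz = 8000;
    public static final int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    public static final int inputChannelConfig = AudioFormat.CHANNEL_IN_MONO;
    public static final int outputChannelConfig = AudioFormat.CHANNEL_OUT_MONO;
    // assumed: VOICE_COMMUNICATION coexists well with platform echo control
    public static final int audioSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
    public static final int streamType = AudioManager.STREAM_VOICE_CALL; // assumed
    public static final int trackMode = AudioTrack.MODE_STREAM;
    // in bytes; ideally rounded up to a multiple of frameSize * 2
    public static final int inAudioBufferSize = AudioRecord.getMinBufferSize(
            sampleRateInHz, inputChannelConfig, audioFormat);

    private Constants() {}
}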


/**
 * AudioTrack playback and AudioRecord capture.
 *
 * @author Robbie
 */
public class Tracker extends JobHandler {

    private static AudioTrack audioTrack;

    private Apm _apm;
    private ApmViewModel vm;
    private AudioRecord audioRecord;
    // samples per 10 ms WebRTC processing frame (80 samples = 10 ms at 8 kHz)
    int frameSize = 80;
    // recording buffer size (bytes)
    private int inAudioBufferSize;
    // playback buffer size (bytes)
    private static int outAudioBufferSize;
    // playing flag
    private boolean isPlaying = true;

    public Tracker(Handler handler) {
        super(handler);
        outAudioBufferSize = Constants.inAudioBufferSize;
        inAudioBufferSize = Constants.inAudioBufferSize;
        this.init();
        initAudioTrack(AudioManager.AUDIO_SESSION_ID_GENERATE);
    }

    public static void initAudioTrack(int sessionId) {
        audioTrack = new AudioTrack(new AudioAttributes.Builder()
                        .setLegacyStreamType(Constants.streamType)
                        .build(),
                new AudioFormat.Builder()
                        .setChannelMask(Constants.outputChannelConfig)
                        .setEncoding(Constants.audioFormat)
                        .setSampleRate(Constants.sampleRateInHz)
                        .build(),
                outAudioBufferSize,
                Constants.trackMode, sessionId);
    }

    public boolean isPlaying() {
        return isPlaying;
    }

    public void setPlaying(boolean playing) {
        isPlaying = playing;
    }

    public void init() {
        // initialize audio capture
        audioRecord = new AudioRecord(Constants.audioSource,
                Constants.sampleRateInHz, Constants.inputChannelConfig,
                Constants.audioFormat, inAudioBufferSize);
    }

    private void initApm() throws Exception {
        int ret;
        vm = ApmViewModel.getInstance();
        try {

            _apm = new Apm(vm.getAecExtendFilter(), vm.getSpeechIntelligibilityEnhance(), vm.getDelayAgnostic(), vm.getBeamForming(),
                    vm.getNextGenerationAEC(), vm.getExperimentalNS(), vm.getExperimentalAGC());

            ret = _apm.HighPassFilter(vm.getHighPassFilter());

            if (vm.getAecPC()) {
                ret = _apm.AECClockDriftCompensation(false);
                ret = _apm.AECSetSuppressionLevel(Apm.AEC_SuppressionLevel.values()[2]);
                ret = _apm.AEC(true);
            } else if (vm.getAecMobile()) {
                ret = _apm.AECMSetSuppressionLevel(Apm.AECM_RoutingMode.values()[2]);
                ret = _apm.AECM(true);
            }

            ret = _apm.NSSetLevel(Apm.NS_Level.values()[1]);
            ret = _apm.NS(vm.getNs());

            ret = _apm.VAD(vm.getVad());

            if (vm.getAgc()) {
                ret = _apm.AGCSetAnalogLevelLimits(0, 255);
                ret = _apm.AGCSetMode(Apm.AGC_Mode.values()[2]);
                ret = _apm.AGCSetTargetLevelDbfs(vm.getAgcTargetLevelInt());
                ret = _apm.AGCSetcompressionGainDb(vm.getAgcCompressionGainInt());
                ret = _apm.AGCEnableLimiter(true);
                ret = _apm.AGC(true);
            }
        } catch (Exception ex) {
            // the demo pops an AlertDialog here; just bail out on configuration errors
            return;
        }
    }

    @Override
    public void run() {
        AudioData currentAudioData;

        try {
            initApm();
        } catch (Exception e) {
            e.printStackTrace();
        }

        while ((currentAudioData = MessageQueue.getInstance(MessageQueue.TRACKER_DATA_QUEUE).take()) != null) {

            if(currentAudioData.isClose()){
                MessageQueue.getInstance(MessageQueue.TRACKER_DATA_QUEUE).clear();
                break;
            }
            if (!IntercomService.connecting) {
                break;
            }

            if (audioTrack == null) {
                continue;
            }
            try {
                if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
                    audioTrack.play();
                }
                short[] bytesPkg = currentAudioData.getRawData();
                if (bytesPkg != null) {
                    // feed the far-end (playback) signal to the AEC, one 10 ms frame at a time
                    for (int i = 0; i < bytesPkg.length / frameSize; i++) {
                        try {
                            _apm.ProcessRenderStream(bytesPkg, i * frameSize);
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    }
                    audioTrack.write(bytesPkg, 0, bytesPkg.length);
                }
                this.record(bytesPkg);
            } catch (Exception e) {
                e.printStackTrace();
            }

        }
    }

    private void record(short[] bytesPkg) throws Exception {
        if(audioRecord == null){
            this.init();
        }
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            audioRecord.startRecording();
        }

        AudioData audioData = new AudioData();
        // allocate the capture buffer (shorts, so bytes / 2)
        short[] rawData = new short[inAudioBufferSize / 2];
        // number of shorts actually read; the loop below assumes a full buffer
        int size = audioRecord.read(rawData, 0, inAudioBufferSize / 2);

        if (bytesPkg != null) {
            int out_analog_level = 200;
            // process the near-end (microphone) signal, one 10 ms frame at a time
            for (int i = 0; i < rawData.length / frameSize; i++) {
                short delay = Short.valueOf(ApmViewModel.getInstance().getAceBufferDelay());
                _apm.SetStreamDelay(delay);
                try {
                    if (vm.getAgc()) {
                        _apm.AGCSetStreamAnalogLevel(out_analog_level);
                    }
                    _apm.ProcessCaptureStream(rawData, i * frameSize);
                    if (vm.getAgc()) {
                        out_analog_level = _apm.AGCStreamAnalogLevel();
                        Log.i("AGC", out_analog_level + "");
                    }
                    if (vm.getVad()) {
                        if (!_apm.VADHasVoice()) continue;
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        audioData.setRawData(rawData);
        audioData.setClose(!IntercomService.connecting);
        MessageQueue.getInstance(MessageQueue.ENCODER_DATA_QUEUE).put(audioData);
    }
}

The Apm and ApmViewModel classes are copied verbatim from the demo.
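One caveat about the Tracker class above: it never stops or releases the AudioTrack and AudioRecord it creates. A minimal shutdown sketch (my addition, not from the demo; guard with state checks in production) would be:

    // Sketch only: release audio resources when the intercom session ends.
    public void release() {
        setPlaying(false);
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
            audioRecord = null;
        }
        if (audioTrack != null) {
            audioTrack.stop();
            audioTrack.release();
            audioTrack = null;
        }
        // If the demo's Apm wrapper exposes a close/free method, call it here too.
    }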

However, driving the echo cancellation the way the demo does caused audible problems in my project, so I invoke the processing like this instead.

Before playing audio, run:

int frameSize = 80;
for (int i = 0; i < bytesPkg.length / frameSize; i++) {
    try {
        _apm.ProcessRenderStream(bytesPkg, i * frameSize);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
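The step of 80 samples is not arbitrary: WebRTC's audio processing module consumes exactly 10 ms of audio per call, and 80 samples is 10 ms at 8 kHz (at 16 kHz a frame would be 160 samples). Any trailing samples beyond the last whole frame are silently skipped by this loop; a slightly more defensive variant (my sketch, not the demo's code) would at least log that:

private void processRender(short[] pcm) {
    // warn when the buffer does not split into whole 10 ms frames
    if (pcm.length % frameSize != 0) {
        Log.w("Tracker", "dropping " + (pcm.length % frameSize)
                + " trailing samples (buffer not a multiple of frameSize)");
    }
    for (int i = 0; i < pcm.length / frameSize; i++) {
        try {
            _apm.ProcessRenderStream(pcm, i * frameSize);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}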

After recording, run:

int frameSize = 80;
int out_analog_level = 200;
for (int i = 0; i < rawData.length / frameSize; i++) {
    short delay = Short.valueOf(ApmViewModel.getInstance().getAceBufferDelay());
    _apm.SetStreamDelay(delay);
    try {
        if (vm.getAgc()) {
            _apm.AGCSetStreamAnalogLevel(out_analog_level);
        }
        _apm.ProcessCaptureStream(rawData, i * frameSize);
        if (vm.getAgc()) {
            out_analog_level = _apm.AGCStreamAnalogLevel();
            Log.i("AGC", out_analog_level + "");
        }
        if (vm.getVad()) {
            if (!_apm.VADHasVoice()) continue;
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
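SetStreamDelay tells the AEC how much later the microphone hears the signal that was handed to ProcessRenderStream; here the value comes from a hand-tuned field in ApmViewModel. If you would rather start from an estimate than from trial and error, the playback plus recording buffer latency is a reasonable first guess. A sketch of that idea (the hardware offset is an assumed constant you would tune per device):

// Sketch: estimate the render-to-capture delay in milliseconds.
// Buffer sizes are in bytes; 16-bit mono PCM means 2 bytes per sample.
private static int estimateStreamDelayMs(int outBufferBytes, int inBufferBytes,
                                         int sampleRateInHz) {
    final int hardwareOffsetMs = 50; // assumed fixed device latency; tune per device
    int playbackMs = (outBufferBytes / 2) * 1000 / sampleRateInHz;
    int captureMs = (inBufferBytes / 2) * 1000 / sampleRateInHz;
    return playbackMs + captureMs + hardwareOffsetMs;
}

You would then call something like _apm.SetStreamDelay((short) estimateStreamDelayMs(outAudioBufferSize, inAudioBufferSize, Constants.sampleRateInHz)) in place of the hand-tuned value.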

As for the native library files the Android side needs, they can be built by following the steps in the GitHub repo.
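After building, the .so files go under src/main/jniLibs/<abi>/ in the app module, and the JNI wrapper must load them before the first Apm call. The library name below is a placeholder; check the repo's CMake/ndk-build output for the real one:

// Placeholder name: match it to the actual .so produced by the build.
static {
    System.loadLibrary("webrtc_apm");
}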
