
Android Notes: AudioFlinger and AudioTrack

2017-05-28  我在等你回复可你没回

This post is just meant to get the topic started.
AudioTrack (AT) is the client; AudioFlinger (AF) is the server.
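
Before going into the source, here is a minimal sketch of how a native client might drive AudioTrack in blocking-write (TRANSFER_SYNC) mode; this is my own illustration, not code from the framework, and the sample rate, format and helper name are assumptions chosen for the example. Everything after write() is serviced by AudioFlinger in the audio server process.

#include <media/AudioTrack.h>

using namespace android;

// Hypothetical helper: play a chunk of 16-bit stereo PCM and block until it is written.
void playPcmSketch(const void* pcm, size_t bytes) {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,          // streamType
            44100,                       // sampleRate (assumed)
            AUDIO_FORMAT_PCM_16_BIT,     // format
            AUDIO_CHANNEL_OUT_STEREO,    // channelMask
            0 /* frameCount: let createTrack_l() pick a minimum */);
    if (track->initCheck() != NO_ERROR) {
        return;                          // set()/createTrack_l() failed
    }
    track->start();                      // the server-side Track becomes active
    track->write(pcm, bytes);            // blocking write into the shared buffer
    track->stop();
}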

/frameworks/av/media/libmedia/AudioTrack.cpp
The AudioTrack constructor initializes a few members and then hands all the real work off to set(). The parameters are annotated below.

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,  // stream type, e.g. AUDIO_STREAM_MUSIC
        uint32_t sampleRate,                // sample rate
        audio_format_t format,            // sample format
        audio_channel_mask_t channelMask,  // channel mask (which output channels, e.g. stereo)
        size_t frameCount,                             // requested track buffer size, in frames
        audio_output_flags_t flags,                    // output flags (FAST, DIRECT, COMPRESS_OFFLOAD, ...)
        callback_t cbf,                                    // event callback
        void* user,                                          // opaque context handed back to the callback
        uint32_t notificationFrames,              // how often (in frames) to notify the callback
        int sessionId,                                    // audio session id
        transfer_type transferType,               // how data reaches the track (SYNC/CALLBACK/SHARED/OBTAIN)
        const audio_offload_info_t *offloadInfo,         // extra info for compressed offload playback
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,       // audio attributes (usage/content type)
        bool doNotReconnect)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),  // previous thread priority
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect);
}

Now let's walk through the rather long set() function.

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    switch (transferType) {
    // how audio data is transferred to the track; music playback comes in as
    // TRANSFER_DEFAULT (resolved below), while FM playback uses TRANSFER_SYNC
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;  // shared memory buffer for static tracks; 0 for streaming
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {                // avoid re-initializing a track that is already set up
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {  // validate streamType
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        // (audio_attributes_t carries usage/content type and replaces the legacy stream type)
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    // the DIRECT flag routes the stream straight to a dedicated HAL output, bypassing the mixer
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync (hardware audio/video synchronization) is requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format); // frame size in bytes
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs;
    // for mixed outputs a rate of 0 falls back to the output's rate further below
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;

    // offload playback info
    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }
    // left/right channel volume
    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    // assign the session id (allocate a new unique one if requested)
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    // resolve the client pid/uid
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;
     
    // create the AudioTrackThread when a callback is supplied
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    // if createTrack_l() failed, shut down the AudioTrackThread
    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    // initialize the remaining state
    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;

    return NO_ERROR;
}
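
To round out the TRANSFER_CALLBACK path that set() prepares: when a cbf is supplied, the AudioTrackThread created above later invokes it with events such as EVENT_MORE_DATA, and the client fills the supplied buffer inside the callback. The sketch below is my own example (the callback name and its body are illustrative), not code from the framework.

#include <string.h>
#include <media/AudioTrack.h>

using namespace android;

// Hypothetical callback matching AudioTrack::callback_t; it would be passed as
// the cbf argument, typically together with TRANSFER_CALLBACK.
static void myAudioCallback(int event, void* user, void* info) {
    if (event == AudioTrack::EVENT_MORE_DATA) {
        // info points at an AudioTrack::Buffer: fill buffer->raw with up to
        // buffer->size bytes of PCM and set buffer->size to what was written.
        AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
        memset(buffer->raw, 0, buffer->size);   // silence, for illustration only
    } else if (event == AudioTrack::EVENT_UNDERRUN) {
        // the server consumed data faster than the callback produced it
    }
}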

createTrack_l() asks AudioFlinger to create the server-side track and hands back an IAudioTrack.

status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); // get the IAudioFlinger proxy
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) { // drop the device callback registered on the previous output
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    status_t status;
    // select an output (audio_io_handle_t) for these attributes
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           (audio_session_t)mSessionId, &streamType, mClientUid,
                                           mSampleRate, mFormat, mChannelMask,
                                           mFlags, mSelectedDeviceId, mOffloadInfo);
    // no usable output
    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    // mAfLatency is the output latency reported by AudioFlinger, in milliseconds
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    // frames per HAL buffer on this output
    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }
    // sample rate of the output
    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }
    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN)) &&
            // matching sample rate
            (mSampleRate == mAfSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
                mTransfer, mSampleRate, mAfSampleRate);
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, (including those with sample rate conversion)
    //  n >= 3  very high latency or very small notification interval (unused).
    const uint32_t nBuffering = 2;
   
    mNotificationFramesAct = mNotificationFramesReq;
    // requested frame count
    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {
        // for compressed (non-linear-PCM) data, derive the frame count from the buffer size
        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        // For fast tracks the frame count calculations and checks are done by server

        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
            // for normal tracks precompute the frame count based on speed.
            const size_t minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    mPlaybackRate.mSpeed);
            if (frameCount < minFrameCount) {
                frameCount = minFrameCount;
            }
        }
    }

    // track flags to pass to AudioFlinger
    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    int originalSessionId = mSessionId;
    // the key step: ask AudioFlinger to create the server-side track, getting back an IAudioTrack
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.
    // shared memory containing the control block
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            mAwaitBoost = true;
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    // Make sure that application is notified with sufficient margin before underrun
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        // Theoretically double-buffering is not required for fast tracks,
        // due to tighter scheduling.  But in practice, to accommodate kernels with
        // scheduling jitter, and apps with computation jitter, we use double-buffering
        // for fast tracks just like normal streaming tracks.
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
            mNotificationFramesAct = frameCount / nBuffering;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }
    // attach the auxiliary effect (e.g. reverb), if any
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
    // FIXME don't believe this lie
    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }
    // initial proxy settings: volume, send level, sample rate, playback rate
    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
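
The essential output of createTrack_l() is the shared memory region obtained through track->getCblk(): a control block followed, for streaming tracks, by the PCM buffer itself, which is why the code above sets buffers = cblk + 1. The layout below is my rough picture of that region (an assumption drawn from the code above, not copied from a header):

// iMem->pointer()
// +---------------------------+
// | audio_track_cblk_t        |  read/write positions and flags shared by
// |                           |  AudioTrackClientProxy on the client side and
// |                           |  the server-side proxy in AudioFlinger's Track
// +---------------------------+  <- cblk + 1 == buffers (streaming case)
// | frameCount * mFrameSize   |  PCM data written by AudioTrack::write() and
// | bytes of audio data       |  drained by AudioFlinger's playback thread
// +---------------------------+
//
// For static tracks (mSharedBuffer != 0) the data lives in the app-supplied
// shared buffer instead, and only the control block sits in this region.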

/frameworks/av/services/audioflinger/AudioFlinger.cpp
The AudioFlinger constructor just initializes a handful of members; nothing much happens here.


AudioFlinger::AudioFlinger()
    : BnAudioFlinger(),   // AudioFlinger is the Binder server side (BnAudioFlinger)
      mPrimaryHardwareDev(NULL), // primary audio hardware device
      mAudioHwDevs(NULL),    // loaded audio hardware devices
      mHardwareStatus(AUDIO_HW_IDLE), // hardware state, starts out idle
      mMasterVolume(1.0f),   // master volume
      mMasterMute(false),     // master mute
      mNextUniqueId(1),
      mMode(AUDIO_MODE_INVALID),  // audio mode, set to AUDIO_MODE_NORMAL in onFirstRef()
      mBtNrecIsOff(false),
      mIsLowRamDevice(true),            // low-RAM device flag
      mIsDeviceTypeKnown(false),
      mGlobalEffectEnableTime(0),
      mSystemReady(false)
{                                                      // the constructor itself does very little
    getpid_cached = getpid();
    char value[PROPERTY_VALUE_MAX];
    bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
    if (doLog) {
        mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
                MemoryHeapBase::READ_ONLY);
    }

#ifdef TEE_SINK
    (void) property_get("ro.debuggable", value, "0");
    int debuggable = atoi(value);
    int teeEnabled = 0;
    if (debuggable) {
        (void) property_get("af.tee", value, "0");
        teeEnabled = atoi(value);
    }
    // FIXME symbolic constants here
    if (teeEnabled & 1) {
        mTeeSinkInputEnabled = true;
    }
    if (teeEnabled & 2) {
        mTeeSinkOutputEnabled = true;
    }
    if (teeEnabled & 4) {
        mTeeSinkTrackEnabled = true;
    }
#endif
}

AudioFlinger is really initialized in onFirstRef(): it reads the standby-time property, creates the PatchPanel, and sets the mode to AUDIO_MODE_NORMAL.

void AudioFlinger::onFirstRef()
{
    int rc = 0;

    Mutex::Autolock _l(mLock);

    /* TODO: move all this work into an Init() function */
    char val_str[PROPERTY_VALUE_MAX] = { 0 };
    if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
        uint32_t int_val;
        if (1 == sscanf(val_str, "%u", &int_val)) {
            mStandbyTimeInNsecs = milliseconds(int_val);
            ALOGI("Using %u mSec as standby time.", int_val);
        } else {
            mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
            ALOGI("Using default %u mSec as standby time.",
                    (uint32_t)(mStandbyTimeInNsecs / 1000000));
        }
    }

    mPatchPanel = new PatchPanel(this);

    mMode = AUDIO_MODE_NORMAL;
}
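
For completeness, here is a condensed sketch of how the two ends find each other over Binder; it is simplified from memory rather than quoted from the sources. The server process publishes AudioFlinger under the name "media.audio_flinger", and AudioSystem::get_audio_flinger(), called at the top of createTrack_l(), looks that name up on the client side.

#include <binder/IServiceManager.h>
#include <media/IAudioFlinger.h>

using namespace android;

// Server side (the media/audio server's main()):
//   AudioFlinger::instantiate();   // BinderService<AudioFlinger> registers
//                                  // "media.audio_flinger" with servicemanager

// Client side (simplified from AudioSystem::get_audio_flinger()):
sp<IAudioFlinger> getAudioFlingerSketch() {
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
    return interface_cast<IAudioFlinger>(binder);
}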

AudioFlinger code structure diagram (AudioFlinger.png)

DEMO

Repo: https://github.com/wenfengtou/AudioDemo
A simple demo that uses AudioRecord and AudioTrack.


Log from the start of a recording to the end:

12-31 20:14:32.669 14860 17009 D AudioRecord: getMinFrameCount 1024
12-31 20:14:32.670 14860 17009 D AudioRecord: set(): inputSource 1, sampleRate 8000, format 0x1, channelMask 0x10, frameCount 1024, notificationFrames 0, sessionId 0, transferType 0, flags 0, opPackageName com.wenfengtou.audiodemo uid -1, pid -1
12-31 20:14:32.675 14860 17009 D AudioRecord: set: Create AudioRecordThread
12-31 20:14:32.677 14860 17009 D AudioRecord: openRecord_l
12-31 20:14:32.678   513  1348 V APM_AudioPolicyManager: getInputForAttr() source 1, samplingRate 8000, format 1, channelMask 10,session 41, flags 0
12-31 20:14:32.680   513  1348 D AudioALSAStreamManager: +openInputStream()
12-31 20:14:32.680   513  1348 D AudioALSAStreamManager: openInputStream(), devices = 0x80000004, format = 0x1, channels = 0x10, sampleRate = 8000, status = 38, acoustics = 0x0
12-31 20:14:32.680   513  1348 D AudioALSAStreamIn: AudioALSAStreamIn()
12-31 20:14:32.680   513  1348 D AudioALSAStreamIn: set(), devices = 0x80000004, format = 0x1, channels = 0x10, sampleRate = 8000, acoustics = 0x0, flags = 0
12-31 20:14:32.680   513  1348 D AudioALSAStreamIn: +CheckBesRecordInfo()
12-31 20:14:32.680   513  1348 D AudioSpeechEnhanceInfo: GetBesRecScene() -1
12-31 20:14:32.680   513  1348 D AudioSpeechEnhanceInfo: ResetBesRecScene()
12-31 20:14:32.680   513  1348 D AudioSpeechEnhanceInfo: IsBesRecTuningEnable()+
12-31 20:14:32.681   513  1348 D AudioSpeechEnhanceInfo: IsBesRecTuningEnable()- 0
12-31 20:14:32.681   513  1348 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.681   513  1348 D AudioSpeechEnhanceInfo: GetBesRecVMFileName()+
12-31 20:14:32.681   513  1348 D AudioSpeechEnhanceInfo: GetBesRecVMFileName(), mVMFileName=, VMFileName=
12-31 20:14:32.681   513  1348 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.681   513  1348 D AudioSpeechEnhanceInfo: GetForceMagiASRState(), feature_support=a090055, 0, mForceMagiASR=0
12-31 20:14:32.681   513  1348 D AudioSpeechEnhanceInfo: GetForceAECRecState(), mForceAECRec=0
12-31 20:14:32.681   513  1348 D AudioALSAStreamIn: -CheckBesRecordInfo()
12-31 20:14:32.681   513  1348 D AudioALSAStreamIn: checkOpenStreamSampleRate(), origin sampleRate 8000, kDefaultInputSourceSampleRate 48000.
12-31 20:14:32.681   513  1348 D AudioALSAStreamIn: set() done, sampleRate = 8000, num_channels = 1, buffer_size=320
12-31 20:14:32.681   513  1348 E AudioALSAStreamManager: openInputStream(), SetInputMute(mAllInputMute)
12-31 20:14:32.683   513  1348 D AudioALSAStreamIn: SetInputMute(), 0
12-31 20:14:32.683   513  1348 D AudioALSAStreamIn: -SetInputMute()
12-31 20:14:32.685   513  1348 D AudioALSAStreamManager: -openInputStream(), in = 0xe7114180, status = 0x0, mStreamInVector.size() = 2
12-31 20:14:32.685   513  1348 D AudioALSAStreamIn: +setParameters(): input_source=1
12-31 20:14:32.686   513  1348 D AudioALSAStreamIn: setParameters() InputSource = 1
12-31 20:14:32.686   513  1348 D AudioALSAStreamIn: -setParameters(): input_source=1
12-31 20:14:32.699   513 17011 I AudioFlinger_Threads: AudioFlinger's thread 0xe6283700 ready to run
12-31 20:14:32.700   513 17011 D AudioALSAStreamIn: +standby()
12-31 20:14:32.701   513 17011 D AudioALSAStreamIn: -standby()
12-31 20:14:32.711   513 17011 D AudioALSAStreamIn: +standby()
12-31 20:14:32.711   513 17011 D AudioALSAStreamIn: -standby()
12-31 20:14:32.720   513  1348 V AudioPolicyService: AudioCommandThread() adding update audio port list
12-31 20:14:32.720   513  1348 V APM_AudioPolicyManager: getInputForAttr() returns input type = 0 device =0x80000004 *input = 38
12-31 20:14:32.721   513   794 V AudioPolicyService: AudioCommandThread() processing update audio port list
12-31 20:14:32.741   513  1349 D AudioFlinger: track(0xe6f83180): mIsOut 0, mFrameCount 1024, mSampleRate 8000, mFormat 1, mChannelCount 1, mTrackCount 5, thread 0xe6283700, sessionId 41
12-31 20:14:32.745 14860 17009 D AudioRecord: start, sync event 0 trigger session 0
12-31 20:14:32.763   513   513 V APM_AudioPolicyManager: startInput() input 38
12-31 20:14:32.763   513   793 V AudioPolicyService: AudioCommandThread() processing create audio patch
12-31 20:14:32.766   513 17011 D AudioALSAHardware: +createAudioPatch num_sources [1] , num_sinks [1]
12-31 20:14:32.766   513 17011 D AudioALSAHardware: +routing createAudioPatch 80000004->Mixer Src 1
12-31 20:14:32.766   513 17011 D AudioALSAStreamManager: +setParameters(), IOport = 38, keyValuePairs = input_source=1;routing=-2147483644
12-31 20:14:32.766   513 17011 D AudioALSAStreamManager: Send to mStreamInVector [1]
12-31 20:14:32.766   513 17011 D AudioALSAStreamIn: +setParameters(): input_source=1;routing=-2147483644
12-31 20:14:32.767   513 17011 D AudioALSAStreamIn: setParameters() InputSource = 1
12-31 20:14:32.767   513 17011 D AudioALSAStreamManager: +routingInputDevice(), input_device: 0x80000004 => 0x80000004
12-31 20:14:32.767   513 17011 W AudioALSAStreamManager: -routingInputDevice(), input_device == current_input_device(0x80000004), return
12-31 20:14:32.767   513 17011 D AudioALSAStreamIn: -setParameters(): input_source=1;routing=-2147483644
12-31 20:14:32.767   513 17011 D AudioALSAStreamManager: -setParameters()
12-31 20:14:32.767   513 17011 D AudioALSAHardware: handlecheck createAudioPatch sucess new *handle 0x4
12-31 20:14:32.773   513   513 V APM_AudioPolicyManager: setInputDevice() createAudioPatch returned 0 patchHandle 36
12-31 20:14:32.774   513   513 V APM_AudioPolicyManager: AudioPolicyManager::startInput() input source = 1
12-31 20:14:32.774   513   513 V AudioPolicyService: AudioCommandThread() adding recording configuration update event 1, source 1
12-31 20:14:32.775   513   794 V AudioPolicyService: AudioCommandThread() processing recording configuration update
12-31 20:14:32.777   513 17011 D AudioALSAStreamIn: open()
12-31 20:14:32.777   513 17011 D AudioALSAStreamManager: +createCaptureHandler(), mAudioMode = 0, input_source = 1, input_device = 0x80000004, mBypassDualMICProcessUL=0, sample_rate=8000
12-31 20:14:32.777   513 17011 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.777   513 17011 D AudioALSAStreamManager: -EnableBesRecord(), 1
12-31 20:14:32.778   513 17011 D AudioALSACaptureHandlerBase: AudioALSACaptureHandlerBase()
12-31 20:14:32.778   513 17011 D AudioALSACaptureHandlerBase: init()
12-31 20:14:32.778   513 17011 D AudioALSACaptureHandlerNormal: AudioALSACaptureHandlerNormal()
12-31 20:14:32.778   513 17011 D AudioALSACaptureHandlerNormal: init()
12-31 20:14:32.778   513 17011 D AudioALSAStreamManager: -createCaptureHandler(), mCaptureHandlerVector.size() = 1
12-31 20:14:32.778   513 17011 D AudioALSACaptureHandlerNormal: +open(), input_device = 0x80000004, input_source = 0x1, sample_rate=8000, num_channels=1
12-31 20:14:32.778   513 17011 D AudioALSACaptureDataClient: AudioALSACaptureDataClient()
12-31 20:14:32.778   513 17011 D AudioSPELayer: SPELayer+
12-31 20:14:32.778   513 17011 D AudioSPELayer: GetVoIPJitterTime JitterTime=0,ret=1
12-31 20:14:32.783   513 17011 D AudioSPELayer: SPELayer-
12-31 20:14:32.784   513 17011 D AudioMTKGainController: +SetCaptureGain(), mode=0, source=1, input device=0x80000004, output device=0x2
12-31 20:14:32.784   513 17011 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.784   513 17011 D AudioMTKGainController: getGainDevice(), input devices = 0x80000004, return gainDevice = 2
12-31 20:14:32.784   513 17011 D AudioMTKGainController: ApplyMicGain(), _mic_mode = 0, _gain_device = 2, mode = 0, micgain = 26, mULTotalGain = 176
12-31 20:14:32.784   513 17011 D AudioMTKGainController: SetAdcPga1 = 3
12-31 20:14:32.784   513 17011 D AudioMTKGainController: SetAdcPga2 = 3
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: AudioALSACaptureDataClient(), besrecord_enable=1, besrecord_scene=-1
12-31 20:14:32.785   513 17011 D AudioSPELayer: SetVMDumpEnable(), 0
12-31 20:14:32.785   513 17011 D AudioSPELayer: SetVMDumpFileName()+, VMFileName=
12-31 20:14:32.783 14860 17009 D AudioRecord: return status 0
12-31 20:14:32.785   513 17011 D AudioSPELayer: SetVMDumpFileName()-, VMFileName=, mVMDumpFileName=
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: CheckBesRecordBypass() 0
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: +ConfigBesRecordParams()
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: +GetBesRecordRoutePath(), output device = 0x2, input device = 0x80000004
12-31 20:14:32.785   513 17011 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: CheckBesRecordStereoModeEnable(), mBesRecordStereoMode = 0
12-31 20:14:32.785   513 17011 D AudioALSACaptureDataClient: ConfigBesRecordParams(), categoryPath = Application,SndRecNormal,Profile,Handset, mBesRecordStereoMode = 0, input_source = 1, input_devices = 80000004, bVoIPEnable = 0, bypassDualProcess = 0
12-31 20:14:32.785   513 17011 D AudioSPELayer: SetEnhPara, SPE_MODE=1
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetCompFilter, SPE_MODE=1
12-31 20:14:32.786   513 17011 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetDMNRPara, SPE_MODE=1
12-31 20:14:32.786   513 17011 D AudioALSACaptureDataClient: SetDMNREnable(), type=0, bypassDMNR=0
12-31 20:14:32.786   513 17011 D AudioYusuParam: QueryFeatureSupportInfo(),feature support a090055
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetSampleRate, SPE_MODE=1
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetAPPTable, SPE_MODE=1, App_table=4
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetRoute, 2
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetMICDigitalGain, MIC_DG, SPE_MODE=1, gain=12
12-31 20:14:32.786   513 17011 D AudioSPELayer: SetUpLinkTotalGain, SPE_MODE=1, gain=176
12-31 20:14:32.786   513 17011 D AudioALSACaptureDataClient: -ConfigBesRecordParams()
12-31 20:14:32.786   513 17011 D AudioALSACaptureDataClient: +StartBesRecord()
12-31 20:14:32.786   513 17011 D AudioSPELayer: Start+, mode=1
12-31 20:14:32.787   513 17011 D AudioSPELayer: mRecordSampleRate=48000, mRecordFrameRate=20, mRecordApp_table=4
12-31 20:14:32.787   513 17011 D AudioSPELayer: Start, SWIP ver = 9.1
12-31 20:14:32.787   513 17011 D AudioSPELayer: Start, going to configure,mSphCtrlBuffer=0xe6349d80,mem_size=121536
12-31 20:14:32.787   513 17011 D AudioSPELayer: dump, State=0, mode=0
12-31 20:14:32.787   513 17011 D AudioSPELayer: Record:Samplerate = 48000, FrameRate=20,App_table=4, Fea_Cfg_table=1ff
12-31 20:14:32.787   513 17011 D AudioSPELayer: Record:EnhanceParas
12-31 20:14:32.787   513 17011 D AudioSPELayer: 0
12-31 20:14:32.787   513 17011 D AudioSPELayer: 479
12-31 20:14:32.788   513 17011 D AudioSPELayer: 16388
12-31 20:14:32.788   513 17011 D AudioSPELayer: 36892
12-31 20:14:32.788   513 17011 D AudioSPELayer: 37639
12-31 20:14:32.788   513 17011 D AudioSPELayer: 8192
12-31 20:14:32.788   513 17011 D AudioSPELayer: 896
12-31 20:14:32.788   513 17011 D AudioSPELayer: 4
12-31 20:14:32.788   513 17011 D AudioSPELayer: 36624
12-31 20:14:32.788   513 17011 D AudioSPELayer: 197
12-31 20:14:32.788   513 17011 D AudioSPELayer: 611
12-31 20:14:32.788   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 8192
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.789   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.790   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.791   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 55997
12-31 20:14:32.792   513 17011 D AudioSPELayer: 31000
12-31 20:14:32.792   513 17011 D AudioSPELayer: 10752
12-31 20:14:32.792   513 17011 D AudioSPELayer: 32769
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.792   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: 0
12-31 20:14:32.793   513 17011 D AudioSPELayer: Using:Samplerate = 48000, FrameRate=20,MIC_DG=12, App_table=4, Fea_Cfg_table=1ff, MMI_table=ffffffff, Device_mode=2, MMI_MIC_GAIN=176
12-31 20:14:32.793   513 17011 D AudioSPELayer: Using:EnhanceParas
12-31 20:14:32.793   513 17011 D AudioSPELayer: [index 0] 0,479,16388,36892,37639,8192,896
12-31 20:14:32.793   513 17011 D AudioSPELayer: [index 1] 4,36624,197,611,0,0,0
12-31 20:14:32.793   513 17011 D AudioSPELayer: [index 2] 0,8192,0,0,0,0,0
12-31 20:14:32.793   513 17011 D AudioSPELayer: [index 3] 0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 4] 0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 5] 0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 6] 0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 7] 55997,31000,10752,32769,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: Using:DMNRCalData
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 0] 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 1] 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
12-31 20:14:32.794   513 17011 D AudioSPELayer: [index 2] 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
12-31 20:14:32.795   513 17011 D AudioSPELayer: [index 3] 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
12-31 20:14:32.795   513 17011 D AudioSPELayer: mSph_Enh_ctrl.frame_rate 20
12-31 20:14:32.795   513 17011 D AudioSPELayer: mNsecPerSample=62500
12-31 20:14:32.795   513 17011 D AudioSPELayer: mSPEProcessBufSize=3840
12-31 20:14:32.796   513 17011 D AudioALSACaptureDataClient: -StartBesRecord()
12-31 20:14:32.796   513 17011 D AudioALSACaptureDataClient: sample rate = 8000, drop ms = 25, channels = 1, byts per sample = 2, dropBesRecordDataSize = 400
12-31 20:14:32.796   513 17011 D AudioPreProcess: AudioPreProcess()+
12-31 20:14:32.796   513 17011 D AudioPreProcess: AudioPreProcess()-
12-31 20:14:32.796   513 17011 D AudioALSACaptureDataProviderBase: attach(), 0xe6699000
12-31 20:14:32.796   513 17011 D AudioALSACaptureDataClient: setIdentity(), mIsIdentitySet=0, identity=1
12-31 20:14:32.796   513 17011 D AudioALSACaptureDataProviderBase: attach(), mCaptureDataClientIndex=1, mCaptureDataClientVector.size()=0, Identity=1
12-31 20:14:32.797   513 17011 D AudioALSACaptureDataProviderNormal: open()
12-31 20:14:32.797   513 17011 D AudioALSADeviceParser: GetPcmIndexByString() stringpair = MultiMedia1_Capture
12-31 20:14:32.797   513 17011 D AudioALSADeviceParser: compare success = 1
12-31 20:14:32.797   513 17011 D AudioALSADeviceParser: GetCardIndexByString() stringpair = MultiMedia1_Capture
12-31 20:14:32.797   513 17011 D AudioALSADeviceParser:  compare success Cardindex = 0
12-31 20:14:32.797   513 17011 D AudioALSACaptureDataProviderNormal: open cardindex = 0  pcmindex = 1
12-31 20:14:32.801   513 17011 D AudioALSACaptureDataProviderNormal: buffersizemax = 49152
12-31 20:14:32.801   513 17011 D AudioSpeechEnhanceInfo: GetHifiRecord, mHiFiRecordEnable=0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderNormal: bHifiRecord = 0
12-31 20:14:32.802   513 17011 D AudioSpeechEnhanceInfo: GetDebugStatus, mDebugflag=0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderNormal: btempDebug = 0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderNormal: getLatencyTime(), audiomode=0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderBase: +HasLowLatencyCapture()
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataClient: +IsLowLatencyCapture()
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataClient: -IsLowLatencyCapture(), bRet=0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderBase: -HasLowLatencyCapture(), bRet=0
12-31 20:14:32.802   513 17011 D AudioALSACaptureDataProviderNormal: open(), format = 1, channels=2, rate=48000, period_size=960, period_count=4,latency=20 kReadBufferSize=7680
12-31 20:14:32.803   513 17011 D AudioALSACaptureDataProviderNormal: open(), mCaptureDropSize=0, CAPTURE_DROP_MS=0
12-31 20:14:32.803   513 17011 D AudioALSACaptureDataProviderNormal: open(), period_count=4, period_size=960, samplerate = 48000
12-31 20:14:32.803   513 17011 D AudioALSADeviceParser: GetPcmIndexByString() stringpair = MultiMedia1_Capture
12-31 20:14:32.803   513 17011 D AudioALSADeviceParser: compare success = 1
12-31 20:14:32.803   513 17011 D AudioALSADeviceParser: GetCardIndexByString() stringpair = MultiMedia1_Capture
12-31 20:14:32.803   513 17011 D AudioALSADeviceParser:  compare success Cardindex = 0
12-31 20:14:32.806   513 17011 D AudioALSACaptureDataProviderBase: -attach()
12-31 20:14:32.806   513 17011 D AudioSPELayer: SetUPLinkDropTime, 0
12-31 20:14:32.807   513 17011 D AudioSPELayer: SetUPLinkIntrStartTime, sec=969, nsec=623045211
12-31 20:14:32.807   513 17012 D AudioALSACaptureDataProviderNormal: +readThread(), pid: 513, tid: 17012, kReadBufferSize=0x1e00, open_index=2
12-31 20:14:32.807   513 17011 D AudioALSACaptureDataClient: sample_rate: 48000 => 8000, num_channels: 2 => 2, audio_format: 0x4 => 0x1
12-31 20:14:32.807   513 17011 D MtkAudioSrc: MtkAudioSrc Constructor in SR 48000, CH 2; out SR 8000, CH 2; format 0
12-31 20:14:32.807   513 17011 D MtkAudioSrc: +open()
12-31 20:14:32.807   513 17011 D MtkAudioSrc: -open()
12-31 20:14:32.807   513 17011 D AudioALSACaptureDataClient: CheckNeedBesRecordSRC(), mStreamAttributeSource->sample_rate=48000, mStreamAttributeSource->num_channels=2, mStreamAttributeTarget->sample_rate=8000,mStreamAttributeTarget->num_channels=1, BesRecord_usingsamplerate=48000
12-31 20:14:32.807   513 17011 D MtkAudioSrc: MtkAudioSrc Constructor in SR 48000, CH 2; out SR 8000, CH 2; format 0
12-31 20:14:32.807   513 17011 D MtkAudioSrc: +open()
12-31 20:14:32.808   513 17011 D MtkAudioSrc: -open()
12-31 20:14:32.808   513 17011 D AudioALSACaptureDataClient: CheckNeedBesRecordSRC(), 0, 48000, mBesRecSRCSizeFactor=2
12-31 20:14:32.808   513 17011 D AudioALSACaptureDataClient: CheckChannelRemixOp(), ch 2->1 (mBesRecordStereoMode = 0, BesRecord enable = 1), ChannelRemixOp = 4
12-31 20:14:32.808   513 17011 D AudioSpeechEnhanceInfo: GetDebugStatus, mDebugflag=0
12-31 20:14:32.808   513 17011 D AudioALSAHardwareResourceManager: winsn:setInNormalRecord(), state=1
12-31 20:14:32.808   513 17011 D AudioALSAHardwareResourceManager: +startInputDevice(), new_device = 0x80000004, mInputDevice = 0x0, mStartInputDeviceCount = 0, mMicInverse=0
12-31 20:14:32.809   513 17011 D AudioALSAHardwareResourceManager: setMIC1Mode(), isphonemic 0, micmode 1
12-31 20:14:32.809   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceSettingByName() DeviceName = Mic1TypeACCMode descriptor->DeviceStatusCounte = 0
12-31 20:14:32.809   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_MIC1_Mode_Select cltvalue = ACCMODE
12-31 20:14:32.809   513 17011 D AudioALSAHardwareResourceManager: setMIC2Mode(), isphonemic 0, micmode 1
12-31 20:14:32.809   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceSettingByName() DeviceName = Mic2TypeACCMode descriptor->DeviceStatusCounte = 0
12-31 20:14:32.809   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_MIC2_Mode_Select cltvalue = ACCMODE
12-31 20:14:32.809   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnonSequenceByName() DeviceName = builtin_Mic_SingleMic descriptor->DeviceStatusCounte = 0
12-31 20:14:32.810   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_MicSource1_Setting cltvalue = ADC1
12-31 20:14:32.810   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnonSequence  cltname.string () = Audio_MicSource1_Setting cltvalue.string () = ADC1
12-31 20:14:32.810   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_ADC_1_Switch cltvalue = On
12-31 20:14:32.810   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnonSequence  cltname.string () = Audio_ADC_1_Switch cltvalue.string () = On
12-31 20:14:32.810   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_Preamp1_Switch cltvalue = IN_ADC1
12-31 20:14:32.811   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnonSequence  cltname.string () = Audio_Preamp1_Switch cltvalue.string () = IN_ADC1
12-31 20:14:32.811   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_Preamp2_Switch cltvalue = IN_ADC1
12-31 20:14:32.811   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnonSequence  cltname.string () = Audio_Preamp2_Switch cltvalue.string () = IN_ADC1
12-31 20:14:32.811   513 17011 D AudioALSAHardwareResourceManager: -startInputDevice(), mInputDevice = 0x80000004, mStartInputDeviceCount = 1
12-31 20:14:32.812   513 17011 D AudioVoiceUIDL: [SetUplinkStartTime] mULtime sec 0 nsec 0
12-31 20:14:32.812   513 17011 D AudioALSACaptureHandlerNormal: -open()
12-31 20:14:32.868   513 17011 D AudioVoiceUIDL: [SetUplinkStartTime] mULtime sec 969 nsec 634113903
12-31 20:14:41.577 14860 17009 D AudioRecord: stop
12-31 20:14:41.579 14860 17009 D AudioTrackShared: this(0x77f822c0e0), mCblk(0x77f76019c0), front(69632), mIsOut 0, interrupt() FUTEX_WAKE
12-31 20:14:41.587   513 17011 D AudioALSAStreamIn: +standby()
12-31 20:14:41.587   513 17011 D AudioALSAStreamIn: standby(), keep the mStandbyFrameCount = 70240, ret = 0
12-31 20:14:41.588   513 17011 D AudioALSAStreamIn: close()
12-31 20:14:41.588   513 17011 D AudioALSACaptureHandlerNormal: +close()
12-31 20:14:41.588   513 17011 D AudioALSAHardwareResourceManager: +stopInputDevice(), mInputDevice = 0x80000004, stop_device = 0x80000004, mStartInputDeviceCount = 1, mMicInverse=0
12-31 20:14:41.588   513 17011 D AudioALSAHardwareResourceManager: setMIC1Mode(), isphonemic 0, micmode 1
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceSettingByName() DeviceName = Mic1TypeACCMode descriptor->DeviceStatusCounte = 0
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_MIC1_Mode_Select cltvalue = ACCMODE
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnoffSequenceByName() DeviceName = builtin_Mic_SingleMic descriptor->DeviceStatusCounte = 1
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_Preamp1_Switch cltvalue = OPEN
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnoffSequence  cltname.string () = Audio_Preamp1_Switch cltvalue.string () = OPEN
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_Preamp2_Switch cltvalue = OPEN
12-31 20:14:41.588   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnoffSequence  cltname.string () = Audio_Preamp2_Switch cltvalue.string () = OPEN
12-31 20:14:41.589   513 17011 D AudioALSADeviceConfigManager: cltname = Audio_ADC_1_Switch cltvalue = Off
12-31 20:14:41.589   513 17011 D AudioALSADeviceConfigManager: ApplyDeviceTurnoffSequence  cltname.string () = Audio_ADC_1_Switch cltvalue.string () = Off
12-31 20:14:41.589   513 17011 D AudioALSAHardwareResourceManager: -stopInputDevice(), mInputDevice = 0x0, mStartInputDeviceCount = 0
12-31 20:14:41.589   513 17011 D AudioALSACaptureDataClient: ~AudioALSACaptureDataClient()
12-31 20:14:41.589   513 17011 D AudioALSACaptureDataProviderBase: detach(),0xe6699000, Identity=1, mCaptureDataClientVector.size()=1,mCaptureDataProviderType=0, 0xe630c000
12-31 20:14:41.589   513 17011 D AudioALSACaptureDataProviderNormal: close()
12-31 20:14:41.611   513 17011 D AudioALSACaptureDataProviderNormal: -close()
12-31 20:14:41.612   513 17011 D AudioALSACaptureDataProviderBase: detach(), close finish
12-31 20:14:41.612   513 17011 D AudioALSACaptureDataProviderBase: -detach()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: +close()
12-31 20:14:41.612   513 17012 D AudioALSACaptureDataProviderNormal: -readThread(), pid: 513, tid: 17012
12-31 20:14:41.612   513 17011 D MtkAudioSrc: -close()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: +~MtkAudioSrc()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: -~MtkAudioSrc()
12-31 20:14:41.612   513 17011 D AudioALSACaptureDataClient: +StopBesRecord()
12-31 20:14:41.612   513 17011 D AudioSPELayer: Stop
12-31 20:14:41.612   513 17011 D AudioSPELayer: Clear
12-31 20:14:41.612   513 17011 D AudioSPELayer: free mSphCtrlBuffer 0xe6349d80
12-31 20:14:41.612   513 17011 D AudioSPELayer: ~free mSphCtrlBuffer
12-31 20:14:41.612   513 17011 D AudioSPELayer: ~Clear
12-31 20:14:41.612   513 17011 D AudioALSACaptureDataClient: -StopBesRecord()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: +close()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: -close()
12-31 20:14:41.612   513 17011 D MtkAudioSrc: +~MtkAudioSrc()
12-31 20:14:41.613   513 17011 D MtkAudioSrc: -~MtkAudioSrc()
12-31 20:14:41.613   513 17011 D AudioSPELayer: ~SPELayer+
12-31 20:14:41.613   513 17011 D AudioSPELayer: Clear
12-31 20:14:41.613   513 17011 D AudioSPELayer: ~Clear
12-31 20:14:41.613   513 17011 D AudioSPELayer: FlushBufferQ+
12-31 20:14:41.613   513 17011 D AudioSPELayer: FlushBufferQ mULOutBufferQ size=1,mULInBufferQ.size=1,mDLOutBufferQ.size()=0,mDLInBufferQ.size()=0,mDLDelayBufferQ.size()=0
12-31 20:14:41.613   513 17011 D AudioSPELayer: FlushBufferQ-
12-31 20:14:41.614   513 17011 D AudioSPELayer: ~SPELayer-
12-31 20:14:41.614   513 17011 D AudioPreProcess: ~AudioPreProcess()+
12-31 20:14:41.614   513 17011 D AudioPreProcess: ~AudioPreProcess()-
12-31 20:14:41.614   513 17011 D AudioALSACaptureDataClient: -~AudioALSACaptureDataClient()
12-31 20:14:41.614   513 17011 D AudioALSAHardwareResourceManager: winsn:setInNormalRecord(), state=0
12-31 20:14:41.614   513 17011 D AudioVoiceUIDL: [SetUplinkStartTime] mULtime sec 0 nsec 0
12-31 20:14:41.614   513 17011 D AudioALSACaptureHandlerNormal: -close()
12-31 20:14:41.614   513 17011 D AudioALSAStreamManager: +destroyCaptureHandler(), mode = 0, pCaptureHandler = 0xe630f040
12-31 20:14:41.614   513 17011 D AudioALSACaptureHandlerNormal: +~AudioALSACaptureHandlerNormal()
12-31 20:14:41.614   513 17011 D AudioALSACaptureHandlerNormal: ~AudioALSACaptureHandlerNormal()
12-31 20:14:41.614   513 17011 D AudioALSACaptureHandlerBase: ~AudioALSACaptureHandlerBase()
12-31 20:14:41.615   513 17011 D AudioALSAStreamManager: -destroyCaptureHandler(), mCaptureHandlerVector.size() = 0
12-31 20:14:41.615   513 17011 D AudioALSAStreamIn: -standby()
12-31 20:14:41.615   513  1349 V APM_AudioPolicyManager: stopInput() input 38
12-31 20:14:41.615   513  1349 V AudioPolicyService: AudioCommandThread() adding recording configuration update event 0, source 1
12-31 20:14:41.615   513  1349 V AudioPolicyService: AudioCommandThread() adding release patch delay 0
12-31 20:14:41.615   513   794 V AudioPolicyService: AudioCommandThread() processing recording configuration update
12-31 20:14:41.615   513   793 V AudioPolicyService: AudioCommandThread() processing release audio patch
12-31 20:14:41.616   513 17011 D AudioALSAHardware: -releaseAudioPatch handle [0x4]
12-31 20:14:41.616   513 17011 D AudioALSAHardware: +routing releaseAudioPatch 80000004->Mixer
12-31 20:14:41.616   513 17011 D AudioALSAStreamManager: +setParameters(), IOport = 38, keyValuePairs = routing=0
12-31 20:14:41.616   513 17011 D AudioALSAStreamManager: Send to mStreamInVector [1]
12-31 20:14:41.616   513 17011 D AudioALSAStreamIn: +setParameters(): routing=0
12-31 20:14:41.616   513 17011 D AudioALSAStreamManager: +routingInputDevice(), input_device: 0x80000004 => 0x0
12-31 20:14:41.616   513 17011 W AudioALSAStreamManager: -routingInputDevice(), input_device == AUDIO_DEVICE_NONE(0x0), return
12-31 20:14:41.616   513 17011 D AudioALSAStreamIn: -setParameters(): routing=0
12-31 20:14:41.617   513 17011 D AudioALSAStreamManager: -setParameters()
12-31 20:14:41.617   513 17011 D AudioALSAHardware: handlecheck releaseAudioPatch remove handle [4] OK
12-31 20:14:41.617   513 17011 D AudioALSAHardware: -releaseAudioPatch handle [0x4] status [0]
12-31 20:14:41.620   513  1349 V APM_AudioPolicyManager: resetInputDevice() releaseAudioPatch returned 0
12-31 20:14:41.621   513  1349 V APM_AudioPolicyManager: removeAudioPatch() handle 4 af handle 36
12-31 20:14:41.623 14860 17009 D AudioRecord: -stop