2023-03-22

2023-04-22  本文已影响0人  周蛋蛋
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) 2016-2021, The Linux Foundation. All rights reserved
     Not a Contribution.
-->
<!-- Copyright (C) 2015 The Android Open Source Project

     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     See the License for the specific language governing permissions and
     limitations under the License.
-->

<audioPolicyConfiguration version="7.0" xmlns:xi="http://www.w3.org/2001/XInclude">
    <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->

    <!-- Global configuration Declaration -->
    <globalConfiguration speaker_drc_enabled="true" call_screen_mode_supported="true"/>


    <!-- Modules section:
        There is one section per audio HW module present on the platform.
        Each module section will contains two mandatory tags for audio HAL “halVersion” and “name”.
        The module names are the same as in current .conf file:
                “primary”, “A2DP”, “remote_submix”, “USB”
        Each module will contain the following sections:
        “devicePorts”: a list of device descriptors for all input and output devices accessible via this
        module.
        This contains both permanently attached devices and removable devices.
        “mixPorts”: listing all output and input streams exposed by the audio HAL
        “routes”: list of possible connections between input and output devices or between stream and
        devices.
            "route": is defined by an attribute:
                -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
                -"sink": the sink involved in this route
                -"sources": all the sources that can be connected to the sink via this route
        “attachedDevices”: permanently attached devices.
        The attachedDevices section is a list of devices names. The names correspond to device names
        defined in <devicePorts> section.
        “defaultOutputDevice”: device to be used by default when no policy rule applies
    -->
    <modules>
        <!-- Primary Audio HAL -->
        <module name="primary" halVersion="2.0">
            <attachedDevices>
                <item>Earpiece</item>
                <item>Speaker</item>
                <item>Telephony Tx</item>
                <item>Built-In Mic</item>
                <item>Built-In Back Mic</item>
                <item>FM Tuner</item>
                <item>Telephony Rx</item>
            </attachedDevices>
            <defaultOutputDevice>Speaker</defaultOutputDevice>
            <mixPorts>
                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_FAST AUDIO_OUTPUT_FLAG_PRIMARY">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="haptics output" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A"/>
                </mixPort>
                <mixPort name="deep_buffer" role="source"
                        flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
                    <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="mmap_no_irq_out" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_MMAP_NOIRQ">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="hifi_playback" role="source" />
                <mixPort name="compress_passthrough" role="source"
                        flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD AUDIO_OUTPUT_FLAG_NON_BLOCKING">
                </mixPort>
                <mixPort name="direct_pcm" role="source"
                        flags="AUDIO_OUTPUT_FLAG_DIRECT">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_8_24_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000 352800 384000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000 352800 384000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                  <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000 352800 384000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                </mixPort>
                <mixPort name="compressed_offload" role="source"
                         flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD AUDIO_OUTPUT_FLAG_NON_BLOCKING AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD">
                    <profile name="" format="AUDIO_FORMAT_MP3"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_FLAC"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_ALAC"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_APE"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_LC"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_HE_V1"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_HE_V2"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_DTS"
                             samplingRates="32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_DTS_HD"
                             samplingRates="32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_WMA"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_WMA_PRO"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_2POINT1 AUDIO_CHANNEL_OUT_QUAD AUDIO_CHANNEL_OUT_PENTA AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_6POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
                    <profile name="" format="AUDIO_FORMAT_VORBIS"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000 128000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_ADTS_LC"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_ADTS_HE_V1"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                    <profile name="" format="AUDIO_FORMAT_AAC_ADTS_HE_V2"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 64000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
                </mixPort>
                <mixPort name="voice_tx" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 48000" channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="voip_rx" role="source"
                         flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_VOIP_RX">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 32000 44100 48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
                </mixPort>
                <mixPort name="incall_music_uplink" role="source"
                        flags="AUDIO_OUTPUT_FLAG_INCALL_MUSIC">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 48000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>

                <mixPort name="primary input" role="sink" maxOpenCount="2" maxActiveCount="2">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                </mixPort>
                <mixPort name="fast input" role="sink"
                         flags="AUDIO_INPUT_FLAG_FAST">
                     <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                              samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                              channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                </mixPort>
                <mixPort name="voip_tx" role="sink"
                         flags="AUDIO_INPUT_FLAG_VOIP_TX">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 32000 48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
                </mixPort>
                <mixPort name="usb_surround_sound" role="sink" maxOpenCount="2" maxActiveCount="2">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 88200 96000 176400 192000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK AUDIO_CHANNEL_INDEX_MASK_3 AUDIO_CHANNEL_INDEX_MASK_4 AUDIO_CHANNEL_IN_5POINT1 AUDIO_CHANNEL_INDEX_MASK_6 AUDIO_CHANNEL_INDEX_MASK_8"/>
                </mixPort>
                <mixPort name="record_24" role="sink" maxOpenCount="2" maxActiveCount="2">
                    <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 96000 192000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK AUDIO_CHANNEL_INDEX_MASK_3 AUDIO_CHANNEL_INDEX_MASK_4"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_8_24_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 96000 192000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK AUDIO_CHANNEL_INDEX_MASK_3 AUDIO_CHANNEL_INDEX_MASK_4"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_FLOAT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000 96000 192000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK AUDIO_CHANNEL_INDEX_MASK_3 AUDIO_CHANNEL_INDEX_MASK_4"/>
                </mixPort>
                <mixPort name="voice_rx" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 48000" channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
                </mixPort>
                <mixPort name="mmap_no_irq_in" role="sink" flags="AUDIO_INPUT_FLAG_MMAP_NOIRQ">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK AUDIO_CHANNEL_INDEX_MASK_3"/>
                </mixPort>
                <mixPort name="hifi_input" role="sink" />
                <mixPort name="quad mic" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                        samplingRates="48000"
                        channelMasks="AUDIO_CHANNEL_INDEX_MASK_4"/>
                </mixPort>
            </mixPorts>

            <devicePorts>
                <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
                <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
                    <!-- Output (sink) device ports must use AUDIO_CHANNEL_OUT_* masks;
                         the original AUDIO_CHANNEL_IN_MONO is a capture-side mask and is
                         invalid for an OUT device. Earpiece is a mono output. -->
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
                </devicePort>
                <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="Line" type="AUDIO_DEVICE_OUT_LINE" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
                </devicePort>
                <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
                </devicePort>
                <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
                </devicePort>
                <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="HDMI" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 16000 22050 32000 44100 48000 64000 88200 96000 128000 176400 192000"/>
                </devicePort>
                <devicePort tagName="FM" type="AUDIO_DEVICE_OUT_FM" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="USB Device Out" type="AUDIO_DEVICE_OUT_USB_DEVICE" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000 64000 88200 96000 128000 176400 192000"/>
                </devicePort>
                <devicePort tagName="USB Headset Out" type="AUDIO_DEVICE_OUT_USB_HEADSET" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000 64000 88200 96000 128000 176400 192000"/>
                </devicePort>

                <!-- Input devices declaration, i.e. Source DEVICE PORT -->
                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                    <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                </devicePort>
                <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                </devicePort>
                <devicePort tagName="FM Tuner" type="AUDIO_DEVICE_IN_FM_TUNER" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
                </devicePort>
                <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
                </devicePort>
                <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
                </devicePort>
                <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000 16000 48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
                </devicePort>
                <devicePort tagName="USB Device In" type="AUDIO_DEVICE_IN_USB_DEVICE" role="source">
                </devicePort>
                <devicePort tagName="USB Headset In" type="AUDIO_DEVICE_IN_USB_HEADSET" role="source">
                </devicePort>
                <devicePort tagName="A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source"
                            encodedFormats="AUDIO_FORMAT_LC3">
                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                            samplingRates="44100 48000" channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
                </devicePort>
            </devicePorts>
            <!-- route declaration, i.e. list all available sources for a given sink -->
            <routes>
                <route type="mix" sink="Earpiece"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,haptics output"/>
                <route type="mix" sink="Speaker"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,haptics output"/>
                <route type="mix" sink="Wired Headset"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,haptics output"/>
                <route type="mix" sink="Wired Headphones"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,haptics output"/>
                <route type="mix" sink="Line"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,haptics output"/>
                <route type="mix" sink="HDMI"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,compress_passthrough,voip_rx"/>
                <route type="mix" sink="FM"
                       sources="primary output"/>
                <route type="mix" sink="BT SCO"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx"/>
                <route type="mix" sink="BT SCO Headset"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx"/>
                <route type="mix" sink="BT SCO Car Kit"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx"/>
                <route type="mix" sink="USB Device Out"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,hifi_playback"/>
                <route type="mix" sink="USB Headset Out"
                       sources="primary output,deep_buffer,direct_pcm,compressed_offload,voip_rx,mmap_no_irq_out,hifi_playback"/>
                <route type="mix" sink="Telephony Tx"
                       sources="voice_tx,incall_music_uplink"/>
                <route type="mix" sink="voice_rx"
                       sources="Telephony Rx"/>
                <route type="mix" sink="primary input"
                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic,FM Tuner,Telephony Rx,A2DP In"/>
                <route type="mix" sink="usb_surround_sound"
                       sources="USB Device In,USB Headset In"/>
                <route type="mix" sink="fast input"
                       sources="Built-In Mic,Built-In Back Mic,USB Device In,USB Headset In,Wired Headset Mic"/>
                <route type="mix" sink="quad mic"
                       sources="Built-In Mic,Built-In Back Mic,BT SCO Headset Mic,USB Device In,USB Headset In,Wired Headset Mic"/>
                <route type="mix" sink="voip_tx"
                       sources="Built-In Mic,Built-In Back Mic,BT SCO Headset Mic,USB Device In,USB Headset In,Wired Headset Mic"/>
                <route type="mix" sink="record_24"
                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,A2DP In"/>
                <route type="mix" sink="mmap_no_irq_in"
                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,USB Device In,USB Headset In"/>
                <route type="mix" sink="hifi_input" sources="USB Device In,USB Headset In" />
            </routes>

        </module>

        <!-- A2DP Audio HAL -->
        <!-- <module name="a2dp" halVersion="2.0">
            <mixPorts>
                <mixPort name="a2dp input" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000" channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
                </mixPort>
            </mixPorts>

            <devicePorts>
                <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000" channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
                </devicePort>
            </devicePorts>

            <routes>
                <route type="mix" sink="a2dp input"
                       sources="BT A2DP In"/>
            </routes>
        </module> -->

        <!-- Bluetooth Audio HAL Audio Policy Configuration file -->
        <module name="bluetooth" halVersion="2.0">
            <mixPorts>
                <!-- A2DP Audio Ports: profiles are negotiated with the remote device,
                     so no static profile is declared for the a2dp output stream. -->
                <mixPort name="a2dp output" role="source"/>
                <!-- Hearing Aids Audio Ports -->
                <mixPort name="hearing aid output" role="source">
                    <!-- NOTE: audioPolicyConfiguration version="7.0" defines samplingRates /
                         channelMasks as whitespace-separated lists (not comma-separated, as
                         in pre-7.0 files). Separators below fixed to match the schema and
                         the rest of this file. -->
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="24000 16000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
            </mixPorts>
            <devicePorts>
                <!-- A2DP Audio Ports -->
                <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100 48000 88200 96000"
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
                <!-- Hearing Aids Audio Ports -->
                <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
            </devicePorts>
            <routes>
                <route type="mix" sink="BT A2DP Out"
                       sources="a2dp output"/>
                <route type="mix" sink="BT A2DP Headphones"
                       sources="a2dp output"/>
                <route type="mix" sink="BT A2DP Speaker"
                       sources="a2dp output"/>
                <route type="mix" sink="BT Hearing Aid Out"
                       sources="hearing aid output"/>
            </routes>
        </module>

        <!-- Usb Audio HAL -->
        <module name="usb" halVersion="2.0">
            <mixPorts>
                <mixPort name="usb_accessory output" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
            </mixPorts>
            <devicePorts>
                <devicePort tagName="USB Host Out" type="AUDIO_DEVICE_OUT_USB_ACCESSORY" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </devicePort>
            </devicePorts>
            <routes>
                <route type="mix" sink="USB Host Out"
                       sources="usb_accessory output"/>
            </routes>
        </module>

        <!-- Remote Submix Audio HAL -->
        <xi:include href="/vendor/etc/r_submix_audio_policy_configuration.xml"/>
        <!-- Virtual Audio HAL -->
        <xi:include href="/odm/etc/audio/r_virtual_audio_policy_configuration.xml"/>
        <!-- Bluetooth Audio HAL for hearing aid -->
        <xi:include href="/vendor/etc/bluetooth_qti_hearing_aid_audio_policy_configuration.xml"/>

    </modules>
    <!-- End of Modules section -->

    <!-- Volume section -->

    <xi:include href="/odm/etc/audio/audio_policy_volumes_anp.xml"/>
    <xi:include href="/odm/etc/audio/default_volume_tables.xml"/>

    <!-- End of Volume section -->

</audioPolicyConfiguration>

简单的看下日志打印
我们首先看下onNewAudioModulesAvailableInt 解析xml的第一行代码
void android::AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices)
{
    for (const auto& hwModule : mHwModulesAll) {
    ALOGE(" zhou hwModule name  %s", hwModule->getName());
   这个对应的就是module name="..."
  for (const auto& outProfile : hwModule->getOutputProfiles()) {
                          ALOGE("zhou Invalid Output profile max open count %u for profile 
     %s",outProfile->maxOpenCount, outProfile->getTagName().c_str());
    这个值拿的是<mixPorts> <mixPort name="primary output"</mixPort>
          ALOGE("zhou mOutputDevicesAll  %s",mOutputDevicesAll.toString().c_str());
        
            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
            ALOGE("zhou supportedDevices  %s",supportedDevices.toString().c_str());
这个getSupportedDevices 获取的是<routes> <route sources="primary output " 的值sink="Earpiece" 这种设备,解析出来刚好12个</routes>
           outProfile->getSupportedDevices() 是获取;这个 setSupportedDevices 可以看下 HwModule.cpp 中的 refreshSupportedDevices,其实就是 for 循环了 mOutputProfiles,根据 route 拿到了 sink Device 设备
            DeviceVector availProfileDevices = supportedDevices.filter(mOutputDevicesAll);
这个其实就是判断 contains,最后获取出来的设备只有0x1和0x2
            ALOGE("zhou availProfileDevices  %s",availProfileDevices.toString().c_str());
sp<DeviceDescriptor> supportedDevice = 0;
            if (supportedDevices.contains(mDefaultOutputDevice)) {
                supportedDevice = mDefaultOutputDevice;
            } else {
                // choose first device present in profile's SupportedDevices also part of
                // mAvailableOutputDevices.
                if (availProfileDevices.isEmpty()) {
                    continue;
                }
                supportedDevice = availProfileDevices.itemAt(0);
            }
            if (!mOutputDevicesAll.contains(supportedDevice)) {
                continue;
            }
         ALOGE("zhou supportedDevice222  %s     outputDesc->mIoHandle      %s",supportedDevice->toString().c_str(),outputDesc->mIoHandle);

这个逻辑是获取一个最终的设备,经过判断获取到了module 中的defaultOutputDevice  
}

    Line 13741: 04-12 14:13:26.360  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  primary
    Line 23099: 04-12 14:13:28.513  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  bluetooth
    Line 23113: 04-12 14:13:28.517  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  usb
    Line 23120: 04-12 14:13:28.519  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  r_submix
    Line 23161: 04-12 14:13:28.530  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  r_virtual
    Line 23173: 04-12 14:13:28.532  1587  1587 E APM_AudioPolicyManager:  zhou hwModule name  bluetooth_qti

------------------------------------------------------
APM_AudioPolicyManager: zhou Invalid Output profile max open count 1 for profile primary output
------------------------
APM_AudioPolicyManager: zhou mOutputDevicesAll  {type:0x1,@:;type:0x2,@:;type:0x10000,@:}
--------------------------------------------------
Line 19469: 04-12 14:13:27.974  1587  1587 E APM_AudioPolicyManager: zhou supportedDevices  {type:0x1,@:;type:0x2,@:;type:0x4,@:;type:0x8,@:;type:0x10,@:;type:0x20,@:;type:0x40,@:;type:0x400,@:;type:0x4000,@:;type:0x20000,@:;type:0x100000,@:;type:0x4000000,@:}
---------------------------------
Line 19470: 04-12 14:13:27.974  1587  1587 E APM_AudioPolicyManager: zhou availProfileDevices  {type:0x1,@:;type:0x2,@:}

Binder IPC 代理用于促进跨越进程边界的通信。代理位于 frameworks/av/media/libmedia 中,并以字母“I”开头。
Audio服务在Android N(7.0)之前存在于mediaserver中,Android N开始以audioserver形式存在
所涉及文件形象讲解
系统服务APP:

audioserver daemon进程启动

frameworks/av/media/mediaserver/main_mediaserver.cpp
通过init进程fork出来audioserver进程。

AudioFlinger

是Audio数据消费者,消费音频数据并推进Audio hal、并与Audio Hal对接完成一些控制操作。也是运行在audioserver 进程中一个服务。内部涉及的关键模块:
AudioFlinger :
AudioFlinger.cpp (frameworks/av/services/audioflinger/AudioFlinger.cpp)
Threads.cpp (frameworks/av/services/audioflinger/Threads.cpp)
Tracks.cpp (frameworks/av/services/audioflinger/Tracks.cpp)
audio_hw_hal.cpp (hardware/libhardware_legacy/audio/Audio_hw_hal.cpp)
AudioHardware.cpp (device/friendly-arm/common/libaudio/AudioHardware.cpp)
1.Tracks.cpp:音频流管理,可控制音频流的状态,如 start、stop、pause
2.Threads.cpp:回放线程和录制线程;回放线程从 FIFO 读取回放数据并混音处理,然后写数据到输出流设备;录制线程从输入流设备读取录音数据并重采样处理,然后写数据到 FIFO。
3.AudioMixer.cpp:混音处理,包括重采样、音量调节、声道转换等,其中的重采样复用了 AudioResampler;由playback线程 AudioFlinger::MixerThread 直接使用
4.AudioResampler.cpp:重采样处理,可进行采样率转换和声道转换;由录制线程 AudioFlinger::RecordThread 直接使用
5.Effects.cpp:音效处理

AudioPolicyService

负责音频路由,音频录音或播音设备的选择。是运行在audioserver 进程中一个服务。在手机场景负责插上耳机的音频通道切换,车载场景负责音频的路由即选择何种音源在哪个设备播放
AudioPolicyService:
AudioPolicyService.cpp (frameworks/av/services/audiopolicy/AudioPolicyService.cpp)
AudioPolicyClientImpl.cpp (frameworks/av/services/audiopolicy/AudioPolicyClientImpl.cpp)
AudioPolicyInterfaceImpl.cpp(frameworks/av/services/audiopolicy/AudioPolicyInterfaceImpl.cpp)

AudioPolicyManager.cpp:
AudioPolicyManager.cpp (device/friendly-arm/common/libaudio/AudioPolicyManager.cpp)
AudioPolicyManager.h (device/friendly-arm/common/libaudio/AudioPolicyManager.h)

AudioPolicyManagerBase.cpp (hardware/libhardware_legacy/audio/AudioPolicyManagerBase.cpp)
1.那么这个声音从哪个设备播放出来呢?这个设备的选择,是由音频的策略服务(AudioPolicyService: )提供的。

AudioPolicyService在Audio中的几大任务:

AudioPolicyManager 是在AudioPolicyService 中创建的

2.在其选择某个设备之后,我们需要把声音写给这个设备,那么谁把这些声音写给这个设备?由AudioFlinger实现。
3.所以在android系统中会涉及两个服务,AudioFlinger与AudioPolicyService服务。
4.那么谁去启动,或者向系统添加这两个服务呢?由系统服务APP:
frameworks/av/media/mediaserver/main_mediaserver.cpp

两个服务如何加载

启动AudioFlinger子服务
//frameworks\av\media\audioserver\main_audioserver.cpp
...
android::hardware::configureRpcThreadpool(4, false /*callerWillJoin*/);
        sp<ProcessState> proc(ProcessState::self());
        sp<IServiceManager> sm = defaultServiceManager();
        ALOGI("ServiceManager: %p", sm.get());
       //1.初始化AudioFlinger
        AudioFlinger::instantiate();
     //2.初始化AudioPolicyService
        AudioPolicyService::instantiate();
void AudioFlinger::instantiate() {
    sp<IServiceManager> sm(defaultServiceManager());
   //创建AudioFlinger服务并加入ServiceManager中
    sm->addService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME),
                   new AudioFlingerServerAdapter(new AudioFlinger()), false,
                   IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT);
}
AudioFlinger构造函数如下:

主要进行与audio hal及音效hal服务的绑定。
// frameworks/av/services/audioflinger/AudioFlinger.cpp

AudioFlinger::AudioFlinger()
    : BnAudioFlinger(),.....{
    // 创建mDevicesFactoryHal示例,与audio hal服务绑定。
    mDevicesFactoryHal = DevicesFactoryHalInterface::create();
    mEffectsFactoryHal = EffectsFactoryHalInterface::create();
}
启动AudioPolicyService子服务

JAVA应用层经过JNI,经由IAudioPolicyService接口,访问AudioPolicyService提供的服务:输入输出设备的连接状态、系统音频策略(strategy)的切换、音量/音频参数的设置

AudioPolicyService的构成

   1. AudioPolicyService继承了IAudioPolicyService接口,这样AudioPolicyService就能够基于Android的Binder机制,向外部提供服务;
   2. AudioPolicyService同时也继承了AudioPolicyClientInterface类,他有一个AudioPolicyInterface类的成员指针mpPolicyManager,实际上就是指向了AudioPolicyManager;
   3. AudioPolicyManager类继承了AudioPolicyInterface类以便向AudioPolicyService提供服务,反过来同时还有一个AudioPolicyClientInterface指针,该指针在构造函数中被初始化,指向了AudioPolicyService,实际上,AudioPolicyService是经过成员指针mpPolicyManager访问AudioPolicyManager,而 AudioPolicyManager则经过AudioPolicyClientInterface(mpClientInterface)访问 AudioPolicyService;
   4. AudioPolicyClientImpl.cpp实现了 AudioPolicyClient,直接与AudioFlinger交互。对于AudioPolicyIntefaceImpl实现了AudioPolicyInterface直接与AudioPolicyManager交互,
   5. AudioPolicyClient继承了AudioPolicyClientInterface
a.可以看到一共继承了三个类,而instantiate方法在BinderService中,所以继续进入查看
class AudioPolicyService :
    public BinderService<AudioPolicyService>,
    public media::BnAudioPolicyService,
    public IBinder::DeathRecipient
{
b.AudioPolicyService添加到service_manager
namespace android {

// 模板类,相当于泛型
template<typename SERVICE>
class BinderService
{
public:
    // 调用到这边的静态方法
    static status_t publish(bool allowIsolated = false,
                            int dumpFlags = IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT) {
        sp<IServiceManager> sm(defaultServiceManager());
        // 把Service添加到service_manager中
        return sm->addService(String16(SERVICE::getServiceName()), new SERVICE(), allowIsolated,
                              dumpFlags);
    }

    static void publishAndJoinThreadPool(
            bool allowIsolated = false,
            int dumpFlags = IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT) {
        publish(allowIsolated, dumpFlags);
        joinThreadPool();
    }

    // 初始化方法,都会调用到这边来,接着调用自己的静态方法
    static void instantiate() { publish(); }

    static status_t shutdown() { return NO_ERROR; }

private:
    static void joinThreadPool() {
        sp<ProcessState> ps(ProcessState::self());
        ps->startThreadPool();
        ps->giveThreadPoolName();
        IPCThreadState::self()->joinThreadPool();
    }
};
}

为什么AudioPolicyService 会执行onFirstRef()函数

#include <utils/RefBase.h>
 
//1.初始化强指针
void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);
 
    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
 
    refs->mBase->onFirstRef();//调用onFirstRef
}
 
//2.weakref_impl类
class RefBase::weakref_impl : public RefBase::weakref_type
{
  public:
    RefBase* const  mBase; //RefBase定义全局变量mBase.
};
------------------------------------------
RefBase基类中定义调用onFirstRef(),如果有实例化sp<T> ttt = new Test()后,将会自动调用onFirstRef()函数。
void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
    //在调用析构函数时,自动调用onLastStrongRef()函数。
    refs->mBase->onLastStrongRef(id);
    }
    refs->decWeak(id);
}
----------------------------------------------------
在sp<T>的析构函数时,会自动调用onLastStrongRef();
#include <utils/LightRefBase.h>
#include <utils/StrongPointer.h>
  
class RefBase{
   protected:
    // Invoked after creation of initial strong pointer/reference.
    virtual void     onFirstRef();//定义虚函数onFirstRef
 
    // Invoked when either the last strong reference goes away, or we need to undo
    // the effect of an unnecessary onIncStrongAttempted.
    virtual void     onLastStrongRef(const void* id);//定义onLastStrongRef
   private:
     weakref_impl* const mRefs;
 
  };
//frameworks/av/services/audiopolicy/service/AudioPolicyService.cpp
void AudioPolicyService::onFirstRef()
{
    {
        Mutex::Autolock _l(mLock);
        // start audio commands thread 所有的命令(音量控制,输入、输出的切换等)最终都会在该线程中排队执行;
        mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
        // start output activity command thread
        mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
      
      //AudioFlinger客户端实现,调用AudioFlinger的一些服务
        mAudioPolicyClient = new AudioPolicyClient(this);
 // 创建AudioPolicyManager,policy中大部分核心工作均在AudioPolicyManager中实现
//createAudioPolicyManager这个方法的具体实现在AudioPolicyFactory里面,可以在Factory里面修改使用的AudioPolicyManager
        //并且会执行AudioPolicyManager的初始化方法
//class AudioPolicyClient : public AudioPolicyClientInterface
        mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
    }
    // load audio processing modules
    sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
    {
        Mutex::Autolock _l(mLock);
        mAudioPolicyEffects = audioPolicyEffects;
    }

    mUidPolicy = new UidPolicy(this);
    mUidPolicy->registerSelf();

    mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
    mSensorPrivacyPolicy->registerSelf();
}
AudioPolicyFactory帮助构造AudioPolicyManager
//所以查看一下AudioPolicyFactory.cpp中的方法,总结一下干了三件事
//构建AudioPolicyManager实例对象
//调用AudioPolicyManager初始化方法initialize
//成功后把对象返回出去
# frameworks/av/services/audiopolicy/manager/AudioPolicyFactory.cpp

extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);
//在上面创建了构造函数之后,执行AudioPolicyManager的initialize方法
    status_t status = apm->initialize();
    if (status != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}


在AudioPolicyService.cpp中创建了AudioPolicyManager.cpp

static AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
{
    //Here we need to redesign how to use the create function createAudioPolicyManager of the vendor extension. BEGIN
    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface, nullptr);
    //Here we need to redesign how to use the create function createAudioPolicyManager of the vendor extension. END
    status_t status = apm->initialize();
    if (status != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}

AudioPolicyManager的新建和初始化
1.构造方法主要是加载了配置文件
2.初始化方法主要是创建了引擎,和Manager绑定,并且通过onNewAudioModulesAvailableInt打开输入输出流

# frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager

AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
        : AudioPolicyManager(clientInterface, false /*forTesting*/)
{
    // 加载配置文件 audio_policy.conf
    // 系统会首先加载vendor/etc目录下的configure文件,再加载system/etc目录下的configure文件。
    // 若这两者加载都发生错误的话,系统会加载default配置文件,并命名为primary module,从这可以看
   //  出,音频系统中一定必须存在的module就是primary了
   mpCustInterface = g_custInterface;
    mpCustInterface->SetApm(*this);
    loadConfig();
}

----------------------------------------------------------------------------------------------------------------------

status_t AudioPolicyManager::initialize() {
    {
        // 加载engine
        auto engLib = EngineLibrary::load("libaudiopolicyengine" + getConfig().getEngineLibraryNameSuffix() + ".so");

        // 创建engine
        mEngine = engLib->createEngine();
    }
    // 引擎和Manger绑定
    mEngine->setObserver(this);
    status_t status = mEngine->initCheck();

    // after parsing the config, mOutputDevicesAll and mInputDevicesAll contain all known devices;
    // open all output streams needed to access attached devices
    // 重要方法,打开输入输出流的,但是为什么传null还没搞清楚
    onNewAudioModulesAvailableInt(nullptr /*newDevices*/);

    // 更新所有信息
    updateDevicesAndOutputs();
    return status;
}


在AudioPolicyManager的onNewAudioModulesAvailableInt主要做了着几件事

加载硬件抽象库,通过loadHwModule方法
遍历配置声明中加载出来的输入输出设备
找到符合的输入输出设备并打开

---------------------------------------------------------------------------------------------------------------
void AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices)
{
    // mHwModulesAll 配置声明中的所有模块,猜测是加载配置文件的时候赋值的
    for (const auto& hwModule : mHwModulesAll) {
        if (std::find(mHwModules.begin(), mHwModules.end(), hwModule) != mHwModules.end()) {
            continue;
        }
        // 加载硬件抽象库,
        // 在构造函数中传入的mpClientInterface == AudioPolicyClient,其中实现类AudioPolicyClientImpl.cpp
        //  AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
       //mAudioPolicyClient = new AudioPolicyClient(this)------- 
      //>createAudioPolicyManager(mAudioPolicyClient)-------->new 
   // AudioPolicyManager(clientInterface),因此mpClientInterface->loadHwModule实际上是调用了 
   // AudioPolicyClient类中的成员函数。
        hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));(核心函数A)
        mHwModules.push_back(hwModule);

        // 打开访问连接设备所需的所有输出流,
        for (const auto& outProfile : hwModule->getOutputProfiles()) {
            // 支持的设备(猜测的)
            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
            // 在支持的设备中筛选我们的所有设备,筛选出来的就是可用的设备(猜测的)
            DeviceVector availProfileDevices = supportedDevices.filter(mOutputDevicesAll);
            sp<DeviceDescriptor> supportedDevice = 0;
            if (supportedDevices.contains(mDefaultOutputDevice)) {
                supportedDevice = mDefaultOutputDevice;
            } else {
                if (availProfileDevices.isEmpty()) {
                    continue;
                }
                supportedDevice = availProfileDevices.itemAt(0);
            }
            if (!mOutputDevicesAll.contains(supportedDevice)) {
                continue;
            }
            // 设备描述符对象,传入了mClientInterface
            sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile, mpClientInterface);
            // output表示这个设备的输出流的句柄
            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
            // 打开输出流,这里实际也是调用了mpClientInterface->openOutput(核心函数B)
            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
                                               AUDIO_STREAM_DEFAULT,
                                               AUDIO_OUTPUT_FLAG_NONE, &output);

            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    // 添加到可用设备列表中
                    mAvailableOutputDevices.add(device);
                    // 还没进去看过,猜测是给设备设置一个可以跟HAL层通讯的对象
                    device->setEncapsulationInfoFromHal(mpClientInterface);
                    // newDevices是传入的值,还不清楚什么意思
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }

            if (mPrimaryOutput == 0 &&outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
                mPrimaryOutput = outputDesc;
            }
            // direct直接输出到设备的,就把流关闭了
            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                outputDesc->close();
            } else {
                addOutput(output, outputDesc);
                // 设置输出设备
                setOutputDevices(outputDesc,DeviceVector(supportedDevice),true,0,NULL);
            }
        }

        // 打开所有输入流,流程和输出流一样
        for (const auto& inProfile : hwModule->getInputProfiles()) {

            sp<AudioInputDescriptor> inputDesc =  new AudioInputDescriptor(inProfile, mpClientInterface);

            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
            // 和output同理
            status_t status = inputDesc->open(nullptr,
                                              availProfileDevices.itemAt(0),
                                              AUDIO_SOURCE_MIC,
                                              AUDIO_INPUT_FLAG_NONE,
                                              &input);

            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    device->importAudioPortAndPickAudioProfile(inProfile, true);
                    mAvailableInputDevices.add(device);
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }
            inputDesc->close();
        }
    }
}

打开所有的输出设备核心函数A
audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        ALOGW("%s: could not get AudioFlinger", __func__);
        return AUDIO_MODULE_HANDLE_NONE;
    }
 
 return af->loadHwModule(name);
最终调用到 AudioFlinger 源码位于: 
\frameworks\av\services\audioflinger\AudioFlinger.cpp
audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
{
    if (name == NULL) {
        return AUDIO_MODULE_HANDLE_NONE;
    }
    if (!settingsAllowed()) {
        return AUDIO_MODULE_HANDLE_NONE;
    }
    Mutex::Autolock _l(mLock);
    return loadHwModule_l(name);
}
 
// loadHwModule_l() must be called with AudioFlinger::mLock held
audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{
    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
        if (strncmp(mAudioHwDevs.valueAt(i)->moduleName(), name, strlen(name)) == 0) {
            ALOGW("loadHwModule() module %s already loaded", name);
            return mAudioHwDevs.keyAt(i);
        }
    }
 
    audio_hw_device_t *dev;
 
    int rc = load_audio_interface(name, &dev);
    if (rc) {
        ALOGE("loadHwModule() error %d loading module %s", rc, name);
        return AUDIO_MODULE_HANDLE_NONE;
    }
 
    mHardwareStatus = AUDIO_HW_INIT;
    rc = dev->init_check(dev);
    mHardwareStatus = AUDIO_HW_IDLE;
    if (rc) {
        ALOGE("loadHwModule() init check error %d for module %s", rc, name);
        return AUDIO_MODULE_HANDLE_NONE;
    }
 
    // Check and cache this HAL's level of support for master mute and master
    // volume.  If this is the first HAL opened, and it supports the get
    // methods, use the initial values provided by the HAL as the current
    // master mute and volume settings.
 
    AudioHwDevice::Flags flags = static_cast<AudioHwDevice::Flags>(0);
    {  // scope for auto-lock pattern
        AutoMutex lock(mHardwareLock);
 
        if (0 == mAudioHwDevs.size()) {
            mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
            if (NULL != dev->get_master_volume) {
                float mv;
                if (OK == dev->get_master_volume(dev, &mv)) {
                    mMasterVolume = mv;
                }
            }
 
            mHardwareStatus = AUDIO_HW_GET_MASTER_MUTE;
            if (NULL != dev->get_master_mute) {
                bool mm;
                if (OK == dev->get_master_mute(dev, &mm)) {
                    mMasterMute = mm;
                }
            }
        }
 
        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
        if ((NULL != dev->set_master_volume) &&
            (OK == dev->set_master_volume(dev, mMasterVolume))) {
            flags = static_cast<AudioHwDevice::Flags>(flags |
                    AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
        }
 
        mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
        if ((NULL != dev->set_master_mute) &&
            (OK == dev->set_master_mute(dev, mMasterMute))) {
            flags = static_cast<AudioHwDevice::Flags>(flags |
                    AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
        }
 
        mHardwareStatus = AUDIO_HW_IDLE;
    }
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
    mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
    ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
          name, dev->common.module->name, dev->common.module->id, handle);
 
    return handle;
 
}

可以看到 mAudioHwDevs 是一个 
DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*> mAudioHwDevs; 
key value 的集合容器,

通过 mAudioHwDevs 容器 装载所有的系统音频对象 AudioHwDevice 
AudioHwDevice对象位于: 
\frameworks\av\services\audioflinger\AudioHwDevice.h

加载每个音频库之前,会遍历 mAudioHwDevs 中是否已经加载,加载成功则不会再次加载

如果没有加载,则来到 load_audio_interface 函数

static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
    const hw_module_t *mod;
    int rc;
 
    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
    ALOGE_IF(rc, "%s couldn't load audio hw module %s.%s (%s)", __func__,
                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
    if (rc) {
        goto out;
    }
    rc = audio_hw_device_open(mod, dev);
    ALOGE_IF(rc, "%s couldn't open audio hw device in %s.%s (%s)", __func__,
                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
    if (rc) {
        goto out;
    }
    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
        rc = BAD_VALUE;
        goto out;
    }
    return 0;
 
out:
    *dev = NULL;
    return rc;
}

最后添加到mAudioHwDevs容器,其中一个音频抽象层动态连接库就加载完成。
打开所有的输出设备核心函数B
for(modulesAll){
     for(profile){
     outputDes->open()(打开output)
    }
}
outputDesc->open的流程
outputDesc = SwAudioOutputDescriptor
mClientInterface = AudioPolicyClientImpl.cpp
status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
                                       const DeviceVector &devices,
                                       audio_stream_type_t stream,
                                       audio_output_flags_t flags,
                                       audio_io_handle_t *output)
{
        // 最终回到AudioFligner,打开输出
    status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                   output,
                                                   &lConfig,
                                                   device,
                                                   &mLatency,
                                                   mFlags);
}
下面看一下openOutput_l()这个函数----------------------------------------------------------------------------------
//输入参数中的module是由前面loadNodule来获得的,它是一个audio interface的id号
//可以通过此id在mAudioHwSevs中查找对应的AudioHwDevice对象
//这个方法中会将打开的output加到mPlaybackThreads线程中
sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
    //1.查找相应的audio interface,找到合适的device
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
    if (outHwDev == NULL) {
        return 0;
    }
 
    AudioStreamOut *outputStream = NULL;
    //2.为设备打开一个输出流,创建Audio HAL的音频输出对象
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());
 
    mHardwareStatus = AUDIO_HW_IDLE;
 
    //3. 创建playbackThread
    if (status == NO_ERROR) {
 
        PlaybackThread *thread;
        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
            thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                || !isValidPcmSinkFormat(config->format)
                || !isValidPcmSinkChannelMask(config->channel_mask)) {
            thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
        } else {
            //一般是创建混音线程,代表AudioStreamOut对象的output也传递进去了,混音线程是PlaybackThread 的子类。
            thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
        }
        mPlaybackThreads.add(*output, thread);
        return thread;
    }
 
    return 0;
}

APM 去实现AudioPolicyInterface, AudioPolicyClient去实现AudioPolicyClientInterface

loadConfig()

audio_policy_configuration.xml的格式

//从接口功能入手 .以
1. AudioDeviceInventory.java
final int res = mAudioSystem.setDeviceConnectionState(device,
                        AudioSystem.DEVICE_STATE_AVAILABLE, address, deviceName,
                        AudioSystem.AUDIO_FORMAT_DEFAULT);
2. AudioSystem.java
  public static native int setDeviceConnectionState(int device, int state,
                                                      String device_address, String device_name,
                                                      int codecFormat);
3.
status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char* device_address,
                                               const char* device_name,
                                               audio_format_t encodedFormat) {
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
   

    if (aps == 0) return PERMISSION_DENIED;

 
    return statusTFromBinderStatus(
            aps->setDeviceConnectionState(
                    deviceAidl,
                    VALUE_OR_RETURN_STATUS(
                            legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(state)),
                    name,
                    VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
}
//核心
// establish binder interface to AudioPolicy service
const sp<IAudioPolicyService> AudioSystem::get_audio_policy_service() {
    sp<IAudioPolicyService> ap;
    sp<AudioPolicyServiceClient> apc;
    {
        nsecs_t time_start = systemTime();
        Mutex::Autolock _l(gLockAPS);
        if (gAudioPolicyService == 0) {
            sp<IServiceManager> sm = defaultServiceManager();
            sp<IBinder> binder;
            do {
                binder = sm->getService(String16("media.audio_policy"));
                if (binder != 0)
                    break;
                ALOGW("AudioPolicyService not published, waiting...");
                usleep(500000); // 0.5 s
            } while (true);
            if (gAudioPolicyServiceClient == NULL) {
                gAudioPolicyServiceClient = new AudioPolicyServiceClient();
            }
            binder->linkToDeath(gAudioPolicyServiceClient);
            gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
            LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
            apc = gAudioPolicyServiceClient;
            // Make sure callbacks can be received by gAudioPolicyServiceClient
            ProcessState::self()->startThreadPool();
        }
        ap = gAudioPolicyService;
        nsecs_t time_interval = (systemTime() - time_start) / TIME_CONVERSION;
        if (time_interval > BINDER_TIME) {
            ALOGW("(gLockAPS) AudioSystem get_audio_policy_service() "
                "ms:  %" PRId64 "ms", time_interval);
        }
    }
    if (apc != 0) {
        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        ap->registerClient(apc);
        ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
        ap->setAudioVolumeGroupCallbacksEnabled(apc->isAudioVolumeGroupCbEnabled());
        IPCThreadState::self()->restoreCallingIdentity(token);
    }

    return ap;
}

AudioPolicyClient 的实现是AudioPolicyClientImpl 其实是指向的是AudioFlinger

比如

status_t AudioPolicyService::AudioPolicyClient::closeOutput(audio_io_handle_t output)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        return PERMISSION_DENIED;
    }

    return af->closeOutput(output);
}
-------------------------------------------------------------------------------------------------------------
系统是通过调用了AudioPolicyManager调过去的

 mpClientInterface->closeOutput(duplicatedOutput);
最后调用到了AudioFlinger 中

一般接口函数的调用流程遵循如下关系:AudioSystem-》AudioPolicyService-》AudioPolicyManager-》AudioPolicyClient-》AudioFlinger,接下来我们主要分析接口的实现。

AudioPolicyClientInterface.h 定义的方法是AudioFlinger.h 中也会定义,但是只是AudioFlinger.h 的一部分
比如:openOutput
AudioPolicyInterface.h 定义方法是AudioPolicyManager.h中也会定义,但是也只是AudioPolicyManager.h的一部分
创建 AudioPolicyManager(clientInterface) 的时候将clientInterface 传进来,也就用clientInterface 来调用AudioFlinger
AudioPolicy总结

1、AudioPolicyService才是真正调用AudioFlinger的地方,Audio_poliy_hal、AudioPolicyManagerBase最终都会绕到AudioPolicyService里面去。

2、AduioPolicyService调用AudioFlinger里面的函数,是通过Binder机制,即调用接口IAudioFlinger接口。

3、AudioPolicyService只提供服务给AudioSystem.cpp,通过Binder接口调用。其他类不可以直接调用AudioPolicyService类,只能调用AudioSystem里面的方法间接调用AudioPolicyService。


上面是我们之前讲解过的配置文件,其中primary,a2dp,usb,r_submix节点我们称为module,在primary中存在一个outputs与inputs结点。

1.在AudioPolicyService中存在一个mHwModules数组,其中有多个mHwModule,每个mHwModule包含.mName,.mOutputProfiles(对应一个output结点配置:IOProfiles类),.mInputProfiles(对应一个或者多个input结点配置:IOProfiles类).等成员。
2.由AudioFlinger实现,根据mHwModules数组中每个mHwModule的名字,打开厂家提供的so文件。然后构建硬件封装的对象mAudioHwDevice放入mAudioHwDevices数组中。mAudioHwDevice中含有audio_hw_device结构体,audio_hw_device又包含了厂家提供的AudioHardWare对象。
3.打开module中的output创建playbackThread线程(由AudioFlinger实现,但是由AudioPolicyService调用):对每一个module中的每一个output Profile,如果flag不是特殊的,则调用status_t status = outHwDev->openOutputStream(&outputStream,output,devices,flags,config,address.string())。然后创建线程thread = new DirectOutputThread,再通过mPlaybackThreads.add(output, thread)添加到mPlaybackThreads线程数组之中。所以,每一个Output都对应一个线程(猜测input也是如此)。
————————————————

AudioTrack 的创建过程-
播放声音时都要创建AudioTrack对象,
java的AudioTrack对象创建时会导致c++的AudioTrack对象被创建,AudioTrack要播放声音需要和Thread 进行挂钩;
所以分析的核心是c++的AudioTrack类,
创建AudioTrack时涉及一个重要函数: set
对于Android 系统,其可能有一个或多个声卡,系统中使用output 描述声卡的输出通道,对于每一个outPut 都有一个PlayBackThread,他们都是由AudioFlinger 来管理。
对于应用程序,他们不关心声音从哪里播放,
1.AudioTrack 的创建过程,在android_media_AudioTrack_setup 中有
a.new AudioTrack () 中直接调用set函数,和硬件产生关联
b.set 函数
c.在set 中调用createTrack_l的方法
d. 在createTrack_l 中创建一个AudioFlinger 。然后调用 status = audioFlinger->createTrack(VALUE_OR_FATAL(input.toAidl()), response); 
AudioFlinger 会调用 AudioSystem.getOutputForAttr的接口
e.然后aps.getOutputForAttr
d.mAudioPolicyManager->getOutputForAttributes
f.getStrategyForAtt 获取一个strategy
g.getDeviceForStrategy
h.getOutputForDevice
i.selectOutput
以上createTrack是获取一个audio_io_handle_t,其实就是获取一个out.outputId
通过这个获取一个
j.checkPlaybackThread(outPut.outputID) = PlayBackThread playthread*

k.track =  thread->createTrack -----new track 创建出一个track
获取一个track 

l.mTracks.add(track)

output.audioTrack  = new TrackHandle (mTrack)

APP的AudioTrack 和 playbackThread的mTracks中的track之间建立共享内存


if(mShareBuffer ==0){
mProxy = new AudioTrackClientProxy
}else{
mStaticproxy = new StaticAudioTrackClientProxy
}


数据之间如何传递

之前提到过,作为声音的应用程序,他需要给playbackThread线程提供数据,他是怎么提供的呢?APP给AudioTrack提供音频数据有2种方式: 一次性提供(MODE_STATIC)、边播放边提供(MODE_STREAM)
对于简单的提示音,数据是十分少的,我们可以把所有的数据,一次性提供给播放线程,那么创建的时候就可以使用MODE_STATIC
对于播放音乐,播放网络传递过来的声音,那么我们需要边播放,边提供。

数据同步问题:
1.音频数据保存在buffer中,这个buffer 由谁提供了?APP 还是PlayBackThread
2.APP 提供数据。PlaybackThread 消耗数据,如何同步。

Android 系统之中包含多个设备,每个设备都有对应的output。同时每个Output 对应一个播放线程PlaybackThread,PlaybackThread 中存在一个数组mTracks,其中包含一个或多个Track,每一个Track都对应应用程序中创建的AudioTrack,正是因为应用创建了AudioTrack 才会导致PlayBackThread中的track 创建。
App 与PlayBackThread 处于不同的进程。

他们在不同的进程,所以使用binder通信
1.如果APP提供AudioTrack的数据是一次性提供(Mode_STATIC:一次性,提前提供数据),那么显然这个buffer当前是由应用程序提供。因为应用程序才知道这个buffer有多大,这样APP先构造,PlaybackThread 再消费,则无需进行同步。
2.如果应用程序,是一边播放,一边提供数据(Mode_Stream),那么就由PlaybackThread 创建共享内存,这样使用环形缓冲区进行同步。

这个在创建createTrack 的时候,如果mShareBuffer == 0 会指向playbackThread 提供的buffer

如果不是0 的话,直接使用应用的buffer


    /*其会导致playbackThread线程中对应的Track与共享内存的创建*/
    AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 2*minBuffSize, TEST_MODE);
    /*写数据,该函数在AudioTrack.java中实现*/
    track.write(data, 0, data.length);
        /*可以知道,其最终都是调用到C++中实现的native_write_byte函数*/
        int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,writeMode == WRITE_BLOCKING);

static jint android_media_AudioTrack_writeArray(JNIEnv *env, jobject thiz,T javaAudioData,jint offsetInSamples, jint sizeInSamples,jint javaAudioFormat,jboolean isWriteBlocking) {
    /*把java的AudioTrack转化为c++实现的AudioTrack*/
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);
    jint samplesWritten = writeToTrack(lpTrack, javaAudioFormat, cAudioData,offsetInSamples, sizeInSamples, isWriteBlocking == JNI_TRUE /* blocking */);
        /*如果应用程序没有提供共享内存*/
        if (track->sharedBuffer() == 0) {
            /*调用AudioTrack中write函数*/
            written = track->write(data + offsetInSamples, sizeInBytes, blocking);
            // for compatibility with earlier behavior of write(), return 0 in this case
            if (written == (ssize_t) WOULD_BLOCK) {
                written = 0;
            }
        } else {//如果应用程序提供了共享内存
            // writing to shared memory, check for capacity
            if ((size_t)sizeInBytes > track->sharedBuffer()->size()) {
                sizeInBytes = track->sharedBuffer()->size();
            }
            /*直接使用memcpy写入数据*/
            memcpy(track->sharedBuffer()->pointer(), data + offsetInSamples, sizeInBytes);
            written = sizeInBytes;
        }
从上面我们可以看到,如果应用程序提供了共享内存,直接使用memcpy写入数据;如果应用程序没有提供共享内存,则调用AudioTrack中的write函数,如下
    ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
    /*获得空白buffer*/
    status_t err = obtainBuffer(&audioBuffer,blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
    /*把数据写入buffer*/
    memcpy(audioBuffer.i8, buffer, toWrite);
    /*释放buffer*/
    releaseBuffer(&audioBuffer);
现在我们来看看Tracks.cpp中的是怎么使用obtainBuffer读取数据的:
       status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
    status_t status = mServerProxy->obtainBuffer(&buf);
可以看到其调用mServerProxy的obtainBuffer函数。其中ServerProxy* mServerProxy。mServerProxy是什么呢?我们看看上小节我们分析的结果:
AudioFlinger::PlaybackThread::Track::Track(
    if (sharedBuffer == 0) {//MODE_STREAM:共享内存由PlaybackThread提供
        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize, !isExternalTrack(), sampleRate);
    } else {//MODE_STATIC:共享内存由应用程序提供
        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
    }
    mServerProxy = mAudioTrackServerProxy;
与之对应,在客户端一侧,AudioTrack.cpp 的 AudioTrack::set 中也创建了相应的代理对象:
status_t AudioTrack::set(
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
对应的mProxy 中的obtainBuffer与releaseBuffer被调用,mProxy 是给应用程序管理共享内存的,mServerProxy是给playbackThread管理共享内存的。

下面我们做一下总结:

1.APP创建AudioTrack,然后playbackThread创建对应的Track。他们之间通过共享内存传递数据。

2.APP有两种共享内存的方式
MODE_STATIC:APP创建共享内存,APP一次性填充数据
MODE_STREAM:APP使用obtainBuffer获得空白内存,填充数据。然后使用releaseBuffer释放。

3.playbackThread使用obtainBuffer获得含有数据的内存,消耗数据之后使用releaseBuffer释放。

d.AudioTrack中含有mProxy,用来管理内存,其中包含了obtainBuffer, releaseBuffer函数。
Track中含有mServerProxy, 它被用来管理共享内存, 里面含有obtainBuffer, releaseBuffer函数。
对于不同的MODE(MODE_STATIC或者MODE_STREAM),这些Proxy指向不同的对象

播放:

在frameworks\av\services\audioflinger\Threads.cpp文件中,其存在threadLoop函数,平时处于休眠状态,当接收到应用程序发送数据的时候,其就会被唤醒:

bool AudioFlinger::PlaybackThread::threadLoop()
    /*处理配置信息*/
    processConfigEvents_l();
    /*当前激活的,有数据的Track*/
    sp<Track> t = mActiveTracks[i].promote();
    /*如果数据为0,则让声卡进入休眠状态*/
    if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||isSuspended()) {
                // put audio hardware into standby after short delay
                if (shouldStandby_l()) {
                    /*声卡休眠*/
                    threadLoop_standby();
                    mStandby = true;
                }
            /*线程休眠*/
            mWaitWorkCV.wait(mLock);
            
a. prepareTracks_l : 
   确定enabled track, disabled track
   对于enabled track, 设置mState.tracks[x]中的参数
b. threadLoop_mix : 处理数据(比如重采样)、混音
   确定hook:
   逐个分析mState.tracks[x]的数据, 根据它的格式确定tracks[x].hook
   再确定总的mState.hook

   调用hook:
   调用总的mState.hook即可, 它会再去调用每一个mState.tracks[x].hook
   
   混音后的数据会放在mState.outputTemp临时BUFFER中
   然后转换格式后存入 thread.mMixerBuffer
c. memcpy_by_audio_format : 
   把数据从thread.mMixerBuffer或thread.mEffectBuffer复制到thread.mSinkBuffer
d. threadLoop_write: 
   把thread.mSinkBuffer写到声卡上
e. threadLoop_exit


上一篇下一篇

猜你喜欢

热点阅读