赞
踩
提示:文章写完后,目录可以自动生成,如何生成可参考右边的帮助文档
主要参考的博客:
https://blog.csdn.net/weixin_55053963/article/details/122596520
https://blog.csdn.net/weixin_43013761/article/details/89471523?spm=1001.2014.3001.5502
https://blog.csdn.net/l328873524/article/details/105374196/
https://blog.csdn.net/qq_38091632/article/details/118254286
等会将会根据代码与这张图进行讲解
AudioPolicyService启动时执行这个函数onFirstRef
//启动时执行这个函数 void AudioPolicyService::onFirstRef() { { Mutex::Autolock _l(mLock); // 这个两个指令线程后续关注一下,有一些指令是会由它们接收的,然后去执行一些相对应的方法 // start audio commands thread mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this); // start output activity command thread mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this); //AudioPolicyClientImpl是AudioPolicyClient的实现类 //AudioPolicyClientImpl是AudioPolicyManager调用AudioFinger和AudioPolicyService的桥梁 //相当于AudioPolicyManager可以通过AudioPolicyClientImpl去调用一些AudioPolicyManager到AudioFinger的方法 mAudioPolicyClient = new AudioPolicyClient(this); //createAudioPolicyManager这个方法的具体实现在AudioPolicyFactory里面,可以在Factory里面修改使用的AudioPolicyManager //并且会执行AudioPolicyManager的初始化方法 mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient); } // load audio processing modules // 音效相关的类(这个不在关注的范围内,只关注上述的代码) sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects(); sp<UidPolicy> uidPolicy = new UidPolicy(this); sp<SensorPrivacyPolicy> sensorPrivacyPolicy = new SensorPrivacyPolicy(this); { Mutex::Autolock _l(mLock); mAudioPolicyEffects = audioPolicyEffects; mUidPolicy = uidPolicy; mSensorPrivacyPolicy = sensorPrivacyPolicy; } uidPolicy->registerSelf(); sensorPrivacyPolicy->registerSelf(); }
AudioCommandThread这个类是APS(AudioPolicyService)的内部类,继承了Thread,主要处理一些有关Audio的一些行为,下面将会对这些行为进行分析,具体声明看APS的头文件
bool AudioPolicyService::AudioCommandThread::threadLoop() { nsecs_t waitTime = -1; mLock.lock(); while (!exitPending()) { sp<AudioPolicyService> svc; while (!mAudioCommands.isEmpty() && !exitPending()) { nsecs_t curTime = systemTime(); // commands are sorted by increasing time stamp: execute them from index 0 and up if (mAudioCommands[0]->mTime <= curTime) { sp<AudioCommand> command = mAudioCommands[0]; mAudioCommands.removeAt(0); mLastCommand = command; switch (command->mCommand) { case SET_VOLUME: case SET_PARAMETERS: { ParametersData *data = (ParametersData *)command->mParam.get(); ALOGV("AudioCommandThread() processing set parameters string %s, io %d", data->mKeyValuePairs.string(), data->mIO); mLock.unlock(); command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs); mLock.lock(); }break; case SET_VOICE_VOLUME: case STOP_OUTPUT: { StopOutputData *data = (StopOutputData *)command->mParam.get(); ALOGV("AudioCommandThread() processing stop output portId %d", data->mPortId); svc = mService.promote(); if (svc == 0) { break; } mLock.unlock(); svc->doStopOutput(data->mPortId); mLock.lock(); }break; case RELEASE_OUTPUT: case CREATE_AUDIO_PATCH: { CreateAudioPatchData *data = (CreateAudioPatchData *)command->mParam.get(); MTK_ALOGV("AudioCommandThread() processing create audio patch"); sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); if (af == 0) { command->mStatus = PERMISSION_DENIED; } else { mLock.unlock(); command->mStatus = af->createAudioPatch(&data->mPatch, &data->mHandle); mLock.lock(); } } break; case RELEASE_AUDIO_PATCH: case UPDATE_AUDIOPORT_LIST: case UPDATE_AUDIOPATCH_LIST: case CHANGED_AUDIOVOLUMEGROUP: case SET_AUDIOPORT_CONFIG: case DYN_POLICY_MIX_STATE_UPDATE: case RECORDING_CONFIGURATION_UPDATE: case GET_CUSTOM_AUDIO_VOLUME: { if (FeatureOption::MTK_AUDIO_GAIN_NVRAM) { GetCustomAudioVolumeData *data = (GetCustomAudioVolumeData *)command->mParam.get(); ALOGV("AudioCommandThread() processing GET_CUSTOM_AUDIO_VOLUME"); 
command->mStatus = AudioPolicyServiceCustomImpl::get()->gainTable_getAudioData(GET_AUDIO_POLICY_VOL_FROM_VER1_DATA, sizeof(AUDIO_CUSTOM_VOLUME_STRUCT), &(data->mVolConfig)); } } break; case STOP_OUTPUT_SAMPLERATE: case SET_EFFECT_SUSPENDED: case AUDIO_MODULES_UPDATE: default: } else { waitTime = mAudioCommands[0]->mTime - curTime; break; } } // release delayed commands wake lock if the queue is empty if (mAudioCommands.isEmpty()) { release_wake_lock(mName.string()); } // At this stage we have either an empty command queue or the first command in the queue // has a finite delay. So unless we are exiting it is safe to wait. if (!exitPending()) { MTK_ALOGS_IF(MTK_VERBOSE_LOG_VALUE, mLastCommand != NULL, "AudioCommandThread() going to sleep [0x%x]", mLastCommand->mCommand); if (waitTime == -1) { mWaitWorkCV.wait(mLock); } else { mWaitWorkCV.waitRelative(mLock, waitTime); } } } // release delayed commands wake lock before quitting if (!mAudioCommands.isEmpty()) { release_wake_lock(mName.string()); } mLock.unlock(); return false; }
上述代码删除了大部分的实现,只保留了几个典型的例子,可以看出,有通过AudioSystem去调用方法的,也有通过获取mService.promote()的代理去调用方法,或者通过AudioSystem去获取audio_flinger去使用AF的方法,还有使用AudioPolicyServiceCustomImpl的方法。
总而言之,AudioCommandThread这个类中的线程可以处理大部分audio相关的操作。不过需要注意的是,这是一个线程,它会判断一个命令list是否为空,如果不为空,说明有一些跟audio相关的指令需要执行。这些指令存放在一个list中,具体是怎么放入这些指令的,可以查看void AudioPolicyService::AudioCommandThread::insertCommand_l(sp<AudioCommand>& command, int delayMs)这个方法,该方法也是在APS中实现的。下面我们从AudioPolicyClient(即AudioPolicyClientImpl)继续分析。
AudioPolicyClientImpl里面大部分的代码都是如下形式,注意在本章的1.中,mAudioPolicyClient是作为参数传入createAudioPolicyManager(mAudioPolicyClient)中的,而createAudioPolicyManager又将这个参数传入APM的带参构造函数里,在后续的AudioPolicyFactory会有详细讲解,AudioPolicyClient的具体作用就是让AudioPolicyManager(APM)可以执行AudioFlinger(AF)和AudioPolicyService(APS)的方法,下面拿出两个具体的例子看一下,如下所示:
// Asks AudioFlinger to load the named audio HAL module and returns the handle
// it assigned, or AUDIO_MODULE_HANDLE_NONE if AudioFlinger is unreachable.
audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
{
    // Grab the AudioFlinger binder proxy and delegate the actual load to it.
    const sp<IAudioFlinger> audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger != 0) {
        return audioFlinger->loadHwModule(name);
    }
    ALOGW("%s: could not get AudioFlinger", __func__);
    return AUDIO_MODULE_HANDLE_NONE;
}
// Thin forwarding shim: routes the stream-volume request straight to the
// owning AudioPolicyService (which applies it, possibly after delay_ms,
// through its command thread) and propagates its status.
status_t AudioPolicyService::AudioPolicyClient::setStreamVolume(audio_stream_type_t stream,
                                                                float volume,
                                                                audio_io_handle_t output,
                                                                int delay_ms)
{
    const status_t result =
            mAudioPolicyService->setStreamVolume(stream, volume, output, delay_ms);
    return result;
}
下面看一下createAudioPolicyManager这个方法,在上面的注释中说明了createAudioPolicyManager是在AudioPolicyFactory.cpp里面的,AudioPolicyFactory具体如下
#include <AudioPolicyManager.h>

namespace android {

// Vendor (MTK) extension object handed to the manager; file-scope so the
// matching destroy entry point can free it.
AudioPolicyManagerCustomInterface* audiopolicymanagerMTK = NULL; // MTK_AUDIO

// Factory entry point loaded by AudioPolicyService. Swap the implementation
// here to substitute a custom AudioPolicyManager.
extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    // Vendor-added functionality (not analyzed further here).
    audiopolicymanagerMTK =
            (AudioPolicyManagerCustomInterface*) new AudioPolicyManagerCustomImpl(); // MTK_AUDIO
    // Default AudioPolicyManager; the extra argument lets the vendor layer
    // extend the stock behavior.
    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface, audiopolicymanagerMTK);
    // After construction, run the manager's initialize(); on failure the
    // caller receives nullptr.
    status_t status = apm->initialize();
    if (status != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}

extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
{
    if (audiopolicymanagerMTK != NULL) { // MTK_AUDIO
        delete audiopolicymanagerMTK;
        // FIX: reset the global so a second destroy (or a later create/destroy
        // cycle) cannot double-delete a dangling pointer.
        audiopolicymanagerMTK = NULL;
    }
    delete interface;
}

} // namespace android
上述的方法创建了AudioPolicyManager类的实现,从这里开始,1.中的一些分析已经到了最后了,现在开始从APM开始分析了,先从构造函数分析,后续再看initialize,其构造方法主要是加载了audio_policy_configuration.xml这个配置到AudioPolicyConfig中,即mConfig中
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface, bool /*forTesting*/) : mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running. mpClientInterface(clientInterface), mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f), mA2dpSuspended(false), mConfig(mHwModulesAll, mOutputDevicesAll, mInputDevicesAll, mDefaultOutputDevice), mAudioPortGeneration(1), mBeaconMuteRefCount(0), mBeaconPlayingRefCount(0), mBeaconMuted(false), mTtsOutputAvailable(false), mMasterMono(false), mMusicEffectOutput(AUDIO_IO_HANDLE_NONE) { } AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface, AudioPolicyManagerCustomInterface *customInterface) : AudioPolicyManager(clientInterface, false /*forTesting*/) { mpAudioPolicyMTKInterface = customInterface; mpAudioPolicyMTKInterface->common_set(this); InitializeMTKLogLevel("vendor.af.policy.debug"); //加载配置 loadConfig(); } void AudioPolicyManager::loadConfig() { if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) { ALOGE("could not load audio policy configuration file, setting defaults"); //最后设置默认配置 getConfig().setDefault(); } } //这个方法主要是对配置进行解析 static status_t deserializeAudioPolicyXmlConfig(AudioPolicyConfig &config) { char audioPolicyXmlConfigFile[AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH]; std::vector<const char*> fileNames; status_t ret; if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) { if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false) && property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) { // Both BluetoothAudio@2.0 and BluetoothA2dp@1.0 (Offlaod) are disabled, and uses // the legacy hardware module for A2DP and hearing aid. 
fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME); } else if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) { // A2DP offload supported but disabled: try to use special XML file fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME); } } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false)) { fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME); } //主要是对这个文件进行加载、解析 //AUDIO_POLICY_XML_CONFIG_FILE_NAME = "audio_policy_configuration.xml"; fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME); for (const char* fileName : fileNames) { for (const auto& path : audio_get_configuration_paths()) { snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile), "%s/%s", path.c_str(), fileName); //deserializeAudioPolicyFile对这个xml文件进行解析 //具体的类是Serializer.cpp //但需要知道config是传地址,在这个方法里,config拿到了所有配置的信息,并存放于HwModules中 //我们需要知道的是config这个变量,包含了几个关键的变量 /* 1.HwModuleCollection &mHwModules; .xml中所有module模块的集合 2.DeviceVector &mAvailableOutputDevices; .xml中所有output devices模块的集合 3.DeviceVector &mAvailableInputDevices; .xml中所有input devices模块的集合 4.sp<DeviceDescriptor> &mDefaultOutputDevices; .xml中默认output device 5.VolumeCurvesCollection *mVolumeCurves; .xml中音量集合 */ ret = deserializeAudioPolicyFile(audioPolicyXmlConfigFile, &config); if (ret == NO_ERROR) { //记录这个xml文件 config.setSource(audioPolicyXmlConfigFile); return ret; } } } return ret; }
在解析这个方法前,先分析一下配置文件audio_policy_configuration.xml
<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude"> <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” --> <!-- Global configuration Decalaration --> <globalConfiguration speaker_drc_enabled="true"/> <!-- Modules section: There is one section per audio HW module present on the platform. Each module section will contains two mandatory tags for audio HAL “halVersion” and “name”. The module names are the same as in current .conf file: “primary”, “A2DP”, “remote_submix”, “USB” Each module will contain the following sections: “devicePorts”: a list of device descriptors for all input and output devices accessible via this module. This contains both permanently attached devices and removable devices. “mixPorts”: listing all output and input streams exposed by the audio HAL “routes”: list of possible connections between input and output devices or between stream and devices. "route": is defined by an attribute: -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix) -"sink": the sink involved in this route -"sources": all the sources than can be connected to the sink via vis route “attachedDevices”: permanently attached devices. The attachedDevices section is a list of devices names. The names correspond to device names defined in <devicePorts> section. 
“defaultOutputDevice”: device to be used by default when no policy rule applies --> <modules> //顶层结构中包含与各个音频 HAL 硬件模块对应的模块,其中每个模块都有一系列混合端口、设备端口和导向: <!-- Primary Audio HAL --> <module name="primary" halVersion="3.0"> <attachedDevices> <item>Speaker</item> <item>Built-In Mic</item> <item>Built-In Back Mic</item> </attachedDevices> <defaultOutputDevice>Speaker</defaultOutputDevice> //混合端口描述了可以在音频 HAL 处打开以供播放和捕获的流的可能配置文件。 <mixPorts> <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/> </mixPort> <mixPort name="deep_buffer" role="source" flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/> </mixPort> <mixPort name="compressed_offload" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING"> <profile name="" format="AUDIO_FORMAT_MP3" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/> <profile name="" format="AUDIO_FORMAT_AAC" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/> <profile name="" format="AUDIO_FORMAT_AAC_LC" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/> </mixPort> <mixPort name="voice_tx" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/> </mixPort> <mixPort name="primary input" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/> </mixPort> <mixPort name="voice_rx" 
role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/> </mixPort> </mixPorts> //设备端口描述了可以附上其类型(以及(可选)地址和音频属性,如果相关)的设备。 <devicePorts> <!-- Output devices declaration, i.e. Sink DEVICE PORT --> <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/> </devicePort> <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address=""> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/> <gains> <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT" minValueMB="-8400" maxValueMB="4000" defaultValueMB="0" stepValueMB="100"/> </gains> </devicePort> <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/> </devicePort> <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/> </devicePort> <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/> </devicePort> <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/> </devicePort> <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/> </devicePort> <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" 
role="sink"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/> </devicePort> <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/> </devicePort> <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/> </devicePort> <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000" channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/> </devicePort> <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/> </devicePort> <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source"> <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/> </devicePort> </devicePorts> <!-- route declaration, i.e. 
list all available sources for a given sink --> //导向(新)现在已从混合端口描述符中分离出来,支持描述从设备到设备或从流到设备的导向 <routes> <route type="mix" sink="Earpiece" sources="primary output,deep_buffer,BT SCO Headset Mic"/> <route type="mix" sink="Speaker" sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/> <route type="mix" sink="Wired Headset" sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/> <route type="mix" sink="Wired Headphones" sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/> <route type="mix" sink="primary input" sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/> <route type="mix" sink="Telephony Tx" sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/> <route type="mix" sink="voice_rx" sources="Telephony Rx"/> </routes> </module> <!-- A2dp Input Audio HAL --> <xi:include href="a2dp_in_audio_policy_configuration.xml"/> <!-- Usb Audio HAL --> <xi:include href="usb_audio_policy_configuration.xml"/> <!-- Remote Submix Audio HAL --> <xi:include href="r_submix_audio_policy_configuration.xml"/> <!-- Bluetooth Audio HAL --> <xi:include href="bluetooth_audio_policy_configuration.xml"/> <!-- MSD Audio HAL (optional) --> <xi:include href="msd_audio_policy_configuration.xml"/> </modules> <!-- End of Modules section --> <!-- Volume section: IMPORTANT NOTE: Volume tables have been moved to engine configuration. Keep it here for legacy. Engine will fallback on these files if none are provided by engine. --> <xi:include href="audio_policy_volumes.xml"/> <xi:include href="default_volume_tables.xml"/> <!-- End of Volume section --> <!-- Surround Sound configuration --> <xi:include href="surround_sound_configuration_5_0.xml"/> <!-- End of Surround Sound configuration --> </audioPolicyConfiguration>
下面我们分析Serializer.cpp这个类,看一下配置是怎么解析的,并保存在config里面,详细见Serializer.cpp里的如下方法deserialize,configFile是文件名,config是传入传出参数,是保存配置的一个变量
//下面的代码分析,有点像一个金字塔模型,通过不断的挖掘,使得整个配置信息被获取
status_t PolicySerializer::deserialize(const char *configFile, AudioPolicyConfig *config) { auto doc = make_xmlUnique(xmlParseFile(configFile)); if (doc == nullptr) { ALOGE("%s: Could not parse %s document.", __func__, configFile); return BAD_VALUE; } xmlNodePtr root = xmlDocGetRootElement(doc.get()); if (root == NULL) { ALOGE("%s: Could not parse %s document: empty.", __func__, configFile); return BAD_VALUE; } if (xmlXIncludeProcess(doc.get()) < 0) { ALOGE("%s: libxml failed to resolve XIncludes on %s document.", __func__, configFile); } //rootName = "audioPolicyConfiguration"这个可以在上述文件知道是根节点的名字 //这个配置文件包含了很多子文件,那些子文件也包含在里面,但根节点不为audioPolicyConfiguration if (xmlStrcmp(root->name, reinterpret_cast<const xmlChar*>(rootName))) { ALOGE("%s: No %s root element found in xml data %s.", __func__, rootName, reinterpret_cast<const char*>(root->name)); return BAD_VALUE; } //获取配置文件的版本号,可知为1.0 std::string version = getXmlAttribute(root, versionAttribute); if (version.empty()) { ALOGE("%s: No version found in root node %s", __func__, rootName); return BAD_VALUE; } //分析Serializer中的mVersion,可知也是1.0 if (version != mVersion) { ALOGE("%s: Version does not match; expect %s got %s", __func__, mVersion.c_str(), version.c_str()); return BAD_VALUE; } // Lets deserialize children // Modules //主要的参数解析工作是在deserializeCollection这个方法里 //注意这个config,它是AudioPolicyConfig这个类型,存储了大量的信息在APM里 //AudioPolicyConfig这个类包含了以下变量: /*1.HwModuleCollection &mHwModules; .xml中所有module模块的集合 2.DeviceVector &mAvailableOutputDevices; .xml中所有output devices模块的集合 3.DeviceVector &mAvailableInputDevices; .xml中所有input devices模块的集合 4.sp<DeviceDescriptor> &mDefaultOutputDevices; .xml中默认output device 5.VolumeCurvesCollection *mVolumeCurves; .xml中音量集合 */ ModuleTraits::Collection modules; //注意<ModuleTraits>,这个是对模板ModuleTraits进行解析 // /*template <class Trait> status_t deserializeCollection(const xmlNode *cur, typename Trait::Collection *collection, typename Trait::PtrSerializingCtx serializingContext) */ 
//下面会重点分析deserializeCollection这个方法 status_t status = deserializeCollection<ModuleTraits>(root, &modules, config); if (status != NO_ERROR) { return status; } config->setHwModules(modules); // Global Configuration GlobalConfigTraits::deserialize(root, config); // Surround configuration SurroundSoundTraits::deserialize(root, config); return android::OK; } --------------------------下面会重点分析deserializeCollection这个方法-------------------------- template <class Trait> status_t deserializeCollection(const xmlNode *cur, typename Trait::Collection *collection, typename Trait::PtrSerializingCtx serializingContext) { //从ModuleTraits::Collection modules模块这个例子来分析,是先解析模块 for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) { const xmlNode *child = NULL; if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(Trait::collectionTag))) { child = cur->xmlChildrenNode; } else if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(Trait::tag))) { child = cur; } for (; child != NULL; child = child->next) { if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(Trait::tag))) { //Trait::deserialize这个方法也很重要,后续分析 auto element = Trait::deserialize(child, serializingContext); if (element.isOk()) { status_t status = Trait::addElementToCollection(element, collection); if (status != NO_ERROR) { ALOGE("%s: could not add element to %s collection", __func__, Trait::collectionTag); return status; } } else { return BAD_VALUE; } } } if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(Trait::tag))) { return NO_ERROR; } } return NO_ERROR; } ----------------ModuleTraits::deserialize----------------- Return<ModuleTraits::Element> ModuleTraits::deserialize(const xmlNode *cur, PtrSerializingCtx ctx) { std::string name = getXmlAttribute(cur, Attributes::name); if (name.empty()) { ALOGE("%s: No %s found", __func__, Attributes::name); return Status::fromStatusT(BAD_VALUE); } uint32_t versionMajor = 0, versionMinor = 0; std::string versionLiteral = getXmlAttribute(cur, 
Attributes::version); if (!versionLiteral.empty()) { sscanf(versionLiteral.c_str(), "%u.%u", &versionMajor, &versionMinor); ALOGV("%s: mHalVersion = major %u minor %u", __func__, versionMajor, versionMajor); } ALOGV("%s: %s %s=%s", __func__, tag, Attributes::name, name.c_str()); Element module = new HwModule(name.c_str(), versionMajor, versionMinor); // Deserialize childrens: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes //mixPorts这个就是上述配置中的混合端口, MixPortTraits::Collection mixPorts; //deserializeCollection<MixPortTraits>(cur, &mixPorts, NULL);这个方法就是上面的那个模板方法 //同理,在这个模板方法里,不断的循环找出子节点,然后将这些节点信息写入到配置中 status_t status = deserializeCollection<MixPortTraits>(cur, &mixPorts, NULL); if (status != NO_ERROR) { return Status::fromStatusT(status); } module->setProfiles(mixPorts); //devicePorts这个就是上述配置中的设备端口, DevicePortTraits::Collection devicePorts; status = deserializeCollection<DevicePortTraits>(cur, &devicePorts, NULL); if (status != NO_ERROR) { return Status::fromStatusT(status); } module->setDeclaredDevices(devicePorts); //devicePorts这个就是上述配置中的routes, RouteTraits::Collection routes; status = deserializeCollection<RouteTraits>(cur, &routes, module.get()); if (status != NO_ERROR) { return Status::fromStatusT(status); } module->setRoutes(routes); //继续循环,寻找下一个子节点RouteTraits DevicePortTraits MixPortTraits for (const xmlNode *children = cur->xmlChildrenNode; children != NULL; children = children->next) { if (!xmlStrcmp(children->name, reinterpret_cast<const xmlChar*>(childAttachedDevicesTag))) { ALOGV("%s: %s %s found", __func__, tag, childAttachedDevicesTag); for (const xmlNode *child = children->xmlChildrenNode; child != NULL; child = child->next) { if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(childAttachedDeviceTag))) { auto attachedDevice = make_xmlUnique(xmlNodeListGetString( child->doc, child->xmlChildrenNode, 1)); if (attachedDevice != nullptr) { ALOGV("%s: %s %s=%s", __func__, tag, childAttachedDeviceTag, reinterpret_cast<const 
char*>(attachedDevice.get())); sp<DeviceDescriptor> device = module->getDeclaredDevices(). getDeviceFromTagName(std::string(reinterpret_cast<const char*>( attachedDevice.get()))); ctx->addDevice(device); } } } } if (!xmlStrcmp(children->name, reinterpret_cast<const xmlChar*>(childDefaultOutputDeviceTag))) { auto defaultOutputDevice = make_xmlUnique(xmlNodeListGetString( children->doc, children->xmlChildrenNode, 1)); if (defaultOutputDevice != nullptr) { ALOGV("%s: %s %s=%s", __func__, tag, childDefaultOutputDeviceTag, reinterpret_cast<const char*>(defaultOutputDevice.get())); sp<DeviceDescriptor> device = module->getDeclaredDevices().getDeviceFromTagName( std::string(reinterpret_cast<const char*>(defaultOutputDevice.get()))); if (device != 0 && ctx->getDefaultOutputDevice() == 0) { ctx->setDefaultOutputDevice(device); ALOGV("%s: default is %08x", __func__, ctx->getDefaultOutputDevice()->type()); } } } } return module; }
上面mConfig/config这个变量就存储了大量的配置信息,后续的分析会经常用到它,需要特别注意!
然后执行initialize这个方法,现在开始对APM的initialize进行分析
// Second phase of APM construction (called from createAudioPolicyManager after
// the constructor has loaded the XML config): loads the policy engine, opens
// the outputs for all attached devices, and validates the defaults.
status_t AudioPolicyManager::initialize() {
    {
        // Load the audio policy engine library.
        auto engLib = EngineLibrary::load(
                        "libaudiopolicyengine" + getConfig().getEngineLibraryNameSuffix() + ".so");
        if (!engLib) {
            ALOGE("%s: Failed to load the engine library", __FUNCTION__);
            return NO_INIT;
        }
        // Create the engine object.
        mEngine = engLib->createEngine();
        if (mEngine == nullptr) {
            ALOGE("%s: Failed to instantiate the APM engine", __FUNCTION__);
            return NO_INIT;
        }
    }
    // Register APM as the engine's observer so engine-side changes reach APM.
    mEngine->setObserver(this);
    // Engine self-check.
    status_t status = mEngine->initCheck();
    if (status != NO_ERROR) {
        LOG_FATAL("Policy engine not initialized(err=%d)", status);
        return status;
    }

    // After config parsing, mOutputDevicesAll / mInputDevicesAll hold every
    // known device. Opening all output streams requires access to the attached
    // devices. This call is the heart of initialize() (analyzed separately).
    onNewAudioModulesAvailableInt(nullptr /*newDevices*/);

    // make sure default device is reachable
    if (mDefaultOutputDevice == 0 || !mAvailableOutputDevices.contains(mDefaultOutputDevice)) {
        ALOGE_IF(mDefaultOutputDevice != 0, "Default device %s is unreachable",
                 mDefaultOutputDevice->toString().c_str());
        status = NO_INIT;
    }

    // If microphones address is empty, set it according to device type
    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
        if (mAvailableInputDevices[i]->address().empty()) {
            if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
                mAvailableInputDevices[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
            } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
                mAvailableInputDevices[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
            }
        }
    }

    // onNewAudioModulesAvailableInt() should have set mPrimaryOutput.
    if (mPrimaryOutput == 0) {
        ALOGE("Failed to open primary output");
        status = NO_INIT;
    }

    // Silence ALOGV statements
    // MTK builds tune the log level via the log.tag property: verbose on
    // engineering builds, debug otherwise.
#if defined(MTK_AUDIO)
#if defined(CONFIG_MT_ENG_BUILD)
    property_set("log.tag." LOG_TAG, "V");
#else
    char property_value[PROPERTY_VALUE_MAX];
    if (property_get("log.tag." LOG_TAG, property_value, "D") > 0) {
        if (property_value[0] != 'V') {
            property_set("log.tag." LOG_TAG, "D");
            ALOGD("log.tag.APM_AudioPolicyManager D");
        } else {
            ALOGD("log.tag.APM_AudioPolicyManager V");
        }
    } else {
        ALOGD("Autoset log.tag.APM_AudioPolicyManager D");
        property_set("log.tag." LOG_TAG, "D");
    }
#endif
#else
    // Silence ALOGV statements
    property_set("log.tag." LOG_TAG, "D");
#endif

    updateDevicesAndOutputs();
    return status;
}
从initialize中,我们得知,主要的一些工作都在onNewAudioModulesAvailableInt中,所以需要对其进行分析(重点)
这里主要是输入输出设备的创建和打开
// Loads every not-yet-loaded HW module from the parsed configuration, then opens
// the output and input streams needed to reach the attached devices, populating
// mAvailableOutputDevices / mAvailableInputDevices along the way.
// @param newDevices optional out-vector; when non-null, every device that becomes
//                   available during this call is also appended to it.
void AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices) {
    // mHwModulesAll is the full set of HwModule entries parsed from the audio
    // policy .xml (primary, a2dp, usb, ...). See HwModule.cpp for the type.
    for (const auto& hwModule : mHwModulesAll) {
        // Skip modules that were already loaded in a previous call.
        if (std::find(mHwModules.begin(), mHwModules.end(), hwModule) != mHwModules.end()) {
            continue;
        }
        // mpClientInterface bridges to AudioFlinger: loadHwModule() asks AF to load
        // the HAL module by name and returns an audio_module_handle_t for it.
        hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));
        if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
            ALOGW("could not open HW module %s", hwModule->getName());
            continue;
        }
        // Record the module as loaded (it now has a valid handle).
        mHwModules.push_back(hwModule);
        // open all output streams needed to access attached devices
        // except for direct output streams that are only opened when they are actually
        // required by an app.
        // This also validates mAvailableOutputDevices list
        for (const auto& outProfile : hwModule->getOutputProfiles()) {
            // Profile already at its max simultaneous open count — cannot open another IO.
            if (!outProfile->canOpenNewIo()) {
                ALOGE("Invalid Output profile max open count %u for profile %s",
                      outProfile->maxOpenCount, outProfile->getTagName().c_str());
                continue;
            }
            if (!outProfile->hasSupportedDevices()) {
                ALOGW("Output profile contains no device on module %s", hwModule->getName());
                continue;
            }
            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
                mTtsOutputAvailable = true;
            }
            // MTK vendor hook (speaker-protection check for low-latency profiles).
            mpAudioPolicyMTKInterface->lowLatency_CheckSpeakerProtectionDevice(outProfile);

            // Devices this output profile supports, intersected with all output
            // devices declared in the .xml, giving the usable subset.
            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
            DeviceVector availProfileDevices = supportedDevices.filter(mOutputDevicesAll);
            sp<DeviceDescriptor> supportedDevice = 0;
            // Prefer the default output device when the profile supports it.
            if (supportedDevices.contains(mDefaultOutputDevice)) {
                supportedDevice = mDefaultOutputDevice;
            } else {
                // choose first device present in profile's SupportedDevices also part of
                // mAvailableOutputDevices.
                if (availProfileDevices.isEmpty()) {
                    continue;
                }
                supportedDevice = availProfileDevices.itemAt(0);
            }
            if (!mOutputDevicesAll.contains(supportedDevice)) {
                continue;
            }
            // Descriptor modeling one software output stream for this profile;
            // it holds the client interface used to reach AudioFlinger.
            sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                 mpClientInterface);
            // In/out parameter: filled with a unique audio_io_handle_t on success.
            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
            // Open the stream on the chosen device (ends up in AudioFlinger::openOutput,
            // analyzed below). On success output != AUDIO_IO_HANDLE_NONE.
            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
                                               AUDIO_STREAM_DEFAULT,
                                               AUDIO_OUTPUT_FLAG_NONE, &output);
            if (status != NO_ERROR) {
                ALOGW("Cannot open output stream for devices %s on hw module %s",
                      supportedDevice->toString().c_str(), hwModule->getName());
                continue;
            }
            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    // Publish the device as an available output device.
                    mAvailableOutputDevices.add(device);
                    device->setEncapsulationInfoFromHal(mpClientInterface);
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }
            // The first opened output flagged PRIMARY becomes mPrimaryOutput
            // (MTK: also loads the gain-table XML and custom volume data).
            if (mPrimaryOutput == 0 && outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
                mpAudioPolicyMTKInterface->gainTable_initXML();
                mPrimaryOutput = outputDesc;
                mpAudioPolicyMTKInterface->gainTable_getCustomAudioVolume();
            }
            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                // Direct outputs are only opened on demand by apps: close it again.
                outputDesc->close();
            } else {
                // Register the open output in mOutputs (SwAudioOutputCollection)...
                addOutput(output, outputDesc);
                // ...and route it to the selected device (force=true).
                setOutputDevices(outputDesc, DeviceVector(supportedDevice), true, 0, NULL);
            }
        }
        // open input streams needed to access attached devices to validate
        // mAvailableInputDevices list
        for (const auto& inProfile : hwModule->getInputProfiles()) {
            // Profile already at its max simultaneous open count.
            if (!inProfile->canOpenNewIo()) {
                ALOGE("Invalid Input profile max open count %u for profile %s",
                      inProfile->maxOpenCount, inProfile->getTagName().c_str());
                continue;
            }
            if (!inProfile->hasSupportedDevices()) {
                ALOGW("Input profile contains no device on module %s", hwModule->getName());
                continue;
            }
            // chose first device present in profile's SupportedDevices also part of
            // available input devices
            const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
            DeviceVector availProfileDevices = supportedDevices.filter(mInputDevicesAll);
            if (availProfileDevices.isEmpty()) {
                ALOGE("%s: Input device list is empty!", __FUNCTION__);
                continue;
            }
            // Descriptor modeling one input stream for this profile.
            sp<AudioInputDescriptor> inputDesc =
                    new AudioInputDescriptor(inProfile, mpClientInterface);
            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
            // Ultimately calls AudioFlinger::openInput through mpClientInterface;
            // on success input != AUDIO_IO_HANDLE_NONE.
            status_t status = inputDesc->open(nullptr,
                                              availProfileDevices.itemAt(0),
                                              AUDIO_SOURCE_MIC,
                                              AUDIO_INPUT_FLAG_NONE,
                                              &input);
            if (status != NO_ERROR) {
                ALOGW("Cannot open input stream for device %s on hw module %s",
                      availProfileDevices.toString().c_str(),
                      hwModule->getName());
                continue;
            }
            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    device->importAudioPortAndPickAudioProfile(inProfile, true);
                    mAvailableInputDevices.add(device);
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }
            // Input was opened only to validate reachability — close it again.
            inputDesc->close();
        }
    }
    // MTK vendor hooks run once after all modules are processed.
    mpAudioPolicyMTKInterface->fm_initOutputIdForApp();
    mpAudioPolicyMTKInterface->hifiAudio_initSetting();
}
对SwAudioOutputDescriptor这个类进行分析,这个类实现在AudioOutputDescriptor.cpp这个文件中,下面对AudioOutputDescriptor.cpp进行分析
//SwAudioOutputDescriptor的构造,其中重点关注AudioOutputDescriptor //new AudioInputDescriptor(inProfile, mpClientInterface) SwAudioOutputDescriptor::SwAudioOutputDescriptor(const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface) : AudioOutputDescriptor(profile, clientInterface), mProfile(profile), mIoHandle(AUDIO_IO_HANDLE_NONE), mLatency(0), mFlags((audio_output_flags_t)0), mOutput1(0), mOutput2(0), mDirectOpenCount(0), mDirectClientSession(AUDIO_SESSION_NONE) { if (profile != NULL) { mFlags = (audio_output_flags_t)profile->getFlags(); } } //AudioOutputDescriptor的构造 //其实从声明,就可以知道profile,是某个HAL模块的一个具体设备 //AudioOutputDescriptor(profile, clientInterface) AudioOutputDescriptor::AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort, AudioPolicyClientInterface *clientInterface) : mOutputFirstActive(false) , mPolicyAudioPort(policyAudioPort) , mClientInterface(clientInterface) //<<MTK added ,mIsDuplicated(false) //MTK added>> { if (mPolicyAudioPort.get() != nullptr) { mPolicyAudioPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat); if (mPolicyAudioPort->asAudioPort()->getGains().size() > 0) { mPolicyAudioPort->asAudioPort()->getGains()[0]->getDefaultConfig(&mGain); } } InitializeMTKLogLevel("vendor.af.policy.debug"); } //最后再分析open这个方法,这个方法很重要,注意!!! 
//outputDesc->open(nullptr,DeviceVector(supportedDevice),AUDIO_STREAM_DEFAULT,AUDIO_OUTPUT_FLAG_NONE,&output) //supportedDevice是一个具体的设备,output很重要,是一个传入传出参数,传入空值,传出一个具体的io_handle,这个io_handle是独一无二的 //注意一下这个DeviceVector,它里面不是仅包含了一个speaker,还包含了很多不同配置的speaker,像采样率,单双声道这些(这个不确定) status_t SwAudioOutputDescriptor::open(const audio_config_t *config, const DeviceVector &devices, audio_stream_type_t stream, audio_output_flags_t flags, audio_io_handle_t *output) { mDevices = devices; sp<DeviceDescriptor> device = devices.getDeviceForOpening(); LOG_ALWAYS_FATAL_IF(device == nullptr, "%s failed to get device descriptor for opening " "with the requested devices, all device types: %s", __func__, dumpDeviceTypes(devices.types()).c_str()); audio_config_t lConfig; //设置单个设备的某个具体的配置参数 if (config == nullptr) { lConfig = AUDIO_CONFIG_INITIALIZER; lConfig.sample_rate = mSamplingRate; lConfig.channel_mask = mChannelMask; lConfig.format = mFormat; } else { lConfig = *config; } // if the selected profile is offloaded and no offload info was specified, // create a default one 如果所选配置文件已卸载且未指定卸载信息,请创建默认配置文件 //流的类型这里也指定了 if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) { flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD); lConfig.offload_info = AUDIO_INFO_INITIALIZER; lConfig.offload_info.sample_rate = lConfig.sample_rate; lConfig.offload_info.channel_mask = lConfig.channel_mask; lConfig.offload_info.format = lConfig.format; lConfig.offload_info.stream_type = stream; lConfig.offload_info.duration_us = -1; lConfig.offload_info.has_video = true; // conservative lConfig.offload_info.is_streaming = true; // likely } mFlags = (audio_output_flags_t)(mFlags | flags); ALOGV("opening output for device %s profile %p name %s", mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str()); //注意这个方法mClientInterface,是AudioPolicyClientInterface这个类型 //它有跟AudioPolicyClient相关,在本文开头的第一张图里,已经说明了这个类的主要作用 
//找到这个类,可以知道openOutput实际是调用AudioFlinger这个类的openOutput //而AF这个类,实际上是通过调用到HAL层的方法(这个将在另一篇文章里讨论) //挑AF中的openOutput进行分析 status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(), output, &lConfig, device, &mLatency, mFlags); if (status == NO_ERROR) { LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE, "%s openOutput returned output handle %d for device %s, " "selected device %s for opening", __FUNCTION__, *output, devices.toString().c_str(), device->toString().c_str()); mSamplingRate = lConfig.sample_rate; mChannelMask = lConfig.channel_mask; mFormat = lConfig.format; mId = PolicyAudioPort::getNextUniqueId(); mIoHandle = *output; mProfile->curOpenCount++; } return status; }
AudioFlinger.cpp中的openOutput方法
// AudioFlinger entry point for opening an output stream on a HW module.
// Delegates the actual open to openOutput_l() (below), then publishes the new
// output, designates the primary HW device if applicable, and applies MTK
// loudness/DRC settings.
// Returns NO_ERROR on success, BAD_VALUE for a NONE device, NO_INIT on failure.
status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                  audio_io_handle_t *output,
                                  audio_config_t *config,
                                  const sp<DeviceDescriptorBase>& device,
                                  uint32_t *latencyMs,
                                  audio_output_flags_t flags)
{
    ALOGI("openOutput() this %p, module %d Device %s, SamplingRate %d, Format %#08x, "
          "Channels %#x, flags %#x",
          this, module, device->toString().c_str(), config->sample_rate,
          config->format, config->channel_mask, flags);

    audio_devices_t deviceType = device->type();
    const String8 address = String8(device->address().c_str());

    if (deviceType == AUDIO_DEVICE_NONE) {
        return BAD_VALUE;
    }

#if defined(MTK_AUDIO)
    FeatureOption::getValues();
#endif
    Mutex::Autolock _l(mLock);

    // Core of the open: returns the playback thread created for this output.
    sp<ThreadBase> thread = openOutput_l(module, output, config, deviceType, address, flags);
    if (thread != 0) {
        if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            *latencyMs = playbackThread->latency();

            // notify client processes of the new output creation
            playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);

            // the first primary output opened designates the primary hw device if no HW module
            // named "primary" was already loaded.
            AutoMutex lock(mHardwareLock);
            if ((mPrimaryHardwareDev == nullptr) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
                ALOGI("Using module %d as the primary audio interface", module);
                mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;

                mHardwareStatus = AUDIO_HW_SET_MODE;
                mPrimaryHardwareDev->hwDevice()->setMode(mMode);
                mHardwareStatus = AUDIO_HW_IDLE;
            }
            // ALPS04408933 low latency support drc
            if (FeatureOption::MTK_AUDIOMIXER_ENABLE_DRC) {
                String8 s;
                int value;
                status_t result;
                sp<DeviceHalInterface> dev = playbackThread->getOutput()->audioHwDev->hwDevice();
                result = dev->getParameters(String8("GetBesLoudnessStatus"), &s);
                if (result == OK) {
                    AudioParameter param = AudioParameter(s);
                    if (param.getInt(String8("GetBesLoudnessStatus"), value) == NO_ERROR) {
                        playbackThread->setParameters(String8::format("SetBesLoudnessStatus=%d",value));
                    }
                }
                updateDrcParamCache();
            }   // MTK_AUDIOMIXER_ENABLE_DRC
        } else {
            MmapThread *mmapThread = (MmapThread *)thread.get();
            mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
        }
        return NO_ERROR;
    }
    return NO_INIT;
}

------------------------------------------openOutput_l----------------------------------------------------

// Locked helper: finds a suitable HW device, allocates the unique output
// io_handle, opens the HAL output stream, and wraps it in the right playback
// thread type (Mmap / Offload / Direct / Mixer). Returns 0 on any failure.
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                        audio_io_handle_t *output,
                                                        audio_config_t *config,
                                                        audio_devices_t deviceType,
                                                        const String8& address,
                                                        audio_output_flags_t flags)
{
    // Find a hardware device matching the module/device type.
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType);
    if (outHwDev == NULL) {
        return 0;
    }

    // *output is in/out: here the unique io_handle is allocated. Audio Policy
    // always passes AUDIO_IO_HANDLE_NONE in.
    if (*output == AUDIO_IO_HANDLE_NONE) {
        *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
    } else {
        // Audio Policy does not currently request a specific output handle.
        // If this is ever needed, see openInput_l() for example code.
        ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
        return 0;
    }

    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;

    // FOR TESTING ONLY:
    // This if statement allows overriding the audio policy settings
    // and forcing a specific format or channel mask to the HAL/Sink device for testing.
    if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
        // Check only for Normal Mixing mode
        if (kEnableExtendedPrecision) {
            // Specify format (uncomment one below to choose)
            //config->format = AUDIO_FORMAT_PCM_FLOAT;
            //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
            //config->format = AUDIO_FORMAT_PCM_32_BIT;
            //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
            // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
        }
        if (kEnableExtendedChannels) {
            // Specify channel mask (uncomment one below to choose)
            //config->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
            //config->channel_mask = audio_channel_mask_from_representation_and_bits(
            //        AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1);  // another 4ch example
        }
    }

    // Out-parameter filled by the HAL open below.
    AudioStreamOut *outputStream = NULL;
    // Open the actual HAL output stream on the chosen device.
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            deviceType,
            flags,
            config,
            address.string());

    mHardwareStatus = AUDIO_HW_IDLE;

    if (status == NO_ERROR) {
        if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
            // Wrap the stream + io_handle in an mmap playback thread.
            sp<MmapPlaybackThread> thread =
                    new MmapPlaybackThread(this, *output, outHwDev, outputStream, mSystemReady);
            mMmapThreads.add(*output, thread);
            ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
                  *output, thread.get());
            return thread;
        } else {
            sp<PlaybackThread> thread;
            if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
                // Compressed offload output.
                thread = new OffloadThread(this, outputStream, *output, mSystemReady);
                ALOGV("openOutput_l() created offload output: ID %d thread %p",
                      *output, thread.get());
            } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                    || !isValidPcmSinkFormat(config->format)
                    || !isValidPcmSinkChannelMask(config->channel_mask)) {
                // Direct output (also used when the PCM format/mask can't be mixed).
                thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
                ALOGV("openOutput_l() created direct output: ID %d thread %p",
                      *output, thread.get());
            } else {
#if defined(MTK_AUDIO_FIX_DEFAULT_DEFECT)   // ALPS05144075: a2dp playback noise
                thread = new MixerThread(this, outputStream, *output, mSystemReady,
                                         ThreadBase::MIXER, deviceType);
#else
                thread = new MixerThread(this, outputStream, *output, mSystemReady);
#endif
                ALOGV("openOutput_l() created mixer output: ID %d thread %p",
                      *output, thread.get());
            }
            mPlaybackThreads.add(*output, thread);
            mPatchPanel.notifyStreamOpened(outHwDev, *output);
            return thread;
        }
    }
    return 0;
}
这篇是启动时的初始化,只对代码进行分析,后续再根据设备实际连接时的操作(主要是AudioService和AudioFlinger的分析),根据产生的Log,进行分析。
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。