赞
踩
上一篇介绍了AudioFlinger的初始化,接下来对音频路由进行介绍。注意,本文主要介绍动态路由,即汽车音频路由的常规方式。针对Android原生路由策略不做深入分析。
什么是音频路由?如何实现音频路由?如何定制音频路由?这是本文要解决的三大问题。
什么是音频路由?音频无非就是把音频数据流放到指定的声卡上,然后进行播放。那么,音频路由就是要解决把某种类型的音频流放到哪种声卡进行播放的策略。
为什么要这样做?因为现在的Android支持多声卡,并且Android本质有各种类型的音频流。这么多的音频流和声卡需要被有效管理起来,所以需要音频路由来实现。什么是音频流?这是对上层应用场景的概念,诸如铃声,多媒体音乐,TTS播报等。某些音频流需要在不同的声卡设备上播放,从而满足不同的需求。你可能会问,手机不就一个喇叭吗,还需要这么复杂。下面我们来看一个场景,小明插着耳机听着音乐,此时他女朋友小华给他打了个电话。这个时候,铃声声音会同时从耳机和手机喇叭出来,以保证小明能晓得有来电。如果单单是从耳机输出,当声音很小的时候那可能会被忽略掉(那可能是要被扒成皮的๑乛◡乛๑ )。这只是很简单的小场景。
**怎么实现音频路由?**这是本文着重介绍的。下面开始进行介绍。
开始介绍之前,还需重点介绍一下动态路由的初始化,它是汽车路由的核心,所以有必要在开始进行介绍。
CarAudioService的init开始:
//packages/services/Car/service/src/com/android/car/CarAudioService.java
public void init() {
synchronized (mImplLock) {
//mUseDynamicRouting = mContext.getResources().getBoolean(R.bool.audioUseDynamicRouting);
//packages/services/Car/service/res/values/config.xml
//<bool name="audioUseDynamicRouting">true</bool>
if (!mUseDynamicRouting) {
Log.i(CarLog.TAG_AUDIO, "Audio dynamic routing not configured, run in legacy mode"
setupLegacyVolumeChangedListener();
} else {
setupDynamicRouting();
setupVolumeGroups();
setNaviMixChangedListener();
}
}
mUseDynamicRouting变量一般定制会在device overlay目录进行覆盖定制,默认为false,不启用动态路由。我们要使用动态路由,所以这个变量要打开。
/**
 * Builds the car routing policy and hands it to AudioPolicyManager.
 * Routing only takes effect once registration succeeds.
 */
private void setupDynamicRouting() {
    final IAudioControl control = getAudioControl();
    // Step 1: translate the HAL's context->bus mapping into an AudioPolicy.
    final AudioPolicy policy = getDynamicAudioPolicy(control);
    // Step 2: register the policy with AudioPolicyManager.
    final int status = mAudioManager.registerAudioPolicy(policy);
    if (status != AudioManager.SUCCESS) {
        throw new RuntimeException("registerAudioPolicy failed " + status);
    }
    mAudioPolicy = policy;
}
//packages/services/Car/service/src/com/android/car/CarAudioService.java
/**
 * Builds the dynamic AudioPolicy: for every output bus, creates an AudioMix
 * whose mixing rules match the audio usages that map (via their context) to
 * that bus. The returned policy is inert until registered with
 * AudioPolicyManager.
 */
private AudioPolicy getDynamicAudioPolicy(@NonNull IAudioControl audioControl) {
    AudioPolicy.Builder builder = new AudioPolicy.Builder(mContext);
    builder.setLooper(Looper.getMainLooper());
    // 1st, enumerate all output bus device ports (all output devices)
    AudioDeviceInfo[] deviceInfos = mAudioManager.getDevices(AudioManager.GET_DEVICES_OUTPUTS);
    // NOTE(review): the article elides a chunk of code here ("a bunch omitted").
    // Key part begins: matching of context -> bus.
    // 2nd, map context to physical bus
    try {
        for (int contextNumber : CONTEXT_NUMBERS) {
            // the AudioControl HAL decides which bus serves each context
            int busNumber = audioControl.getBusForContext(contextNumber);
            mContextToBus.put(contextNumber, busNumber);
            CarAudioDeviceInfo info = mCarAudioDeviceInfos.get(busNumber);
            if (info == null) {
                Log.w(CarLog.TAG_AUDIO, "No bus configured for context: " + contextNumber);
            }
        }
    } catch (RemoteException e) {
        Log.e(CarLog.TAG_AUDIO, "Error mapping context to physical bus", e);
    }
    // Build the routing: usage -> bus, via context. This is the core step!
    // 3rd, enumerate all physical buses and build the routing policy.
    // Note that one can not register audio mix for same bus more than once.
    for (int i = 0; i < mCarAudioDeviceInfos.size(); i++) {
        int busNumber = mCarAudioDeviceInfos.keyAt(i);
        boolean hasContext = false;
        CarAudioDeviceInfo info = mCarAudioDeviceInfos.valueAt(i);
        // NOTE(review): setChannelMask() is fed a channel *count* here; this
        // matches the AOSP code of that era — verify against your platform.
        AudioFormat mixFormat = new AudioFormat.Builder()
                .setSampleRate(info.getSampleRate())
                .setEncoding(info.getEncodingFormat())
                .setChannelMask(info.getChannelCount())
                .build();
        AudioMixingRule.Builder mixingRuleBuilder = new AudioMixingRule.Builder();
        for (int j = 0; j < mContextToBus.size(); j++) {
            if (mContextToBus.valueAt(j) == busNumber) {
                hasContext = true;
                int contextNumber = mContextToBus.keyAt(j);
                // resolve the set of usages belonging to this context
                int[] usages = getUsagesForContext(contextNumber);
                for (int usage : usages) {
                    mixingRuleBuilder.addRule(
                            new AudioAttributes.Builder().setUsage(usage).build(),
                            AudioMixingRule.RULE_MATCH_ATTRIBUTE_USAGE);
                }
                Log.e(CarLog.TAG_AUDIO, "Bus number: " + busNumber
                        + " contextNumber: " + contextNumber
                        + " sampleRate: " + info.getSampleRate()
                        + " channels: " + info.getChannelCount()
                        + " usages: " + Arrays.toString(usages));
            }
        }
        if (hasContext) {
            // It's a valid case that an audio output bus is defined in
            // audio_policy_configuration and no context is assigned to it.
            // In such case, do not build a policy mix with zero rules.
            // The AudioMix bundles the usage rules together with the target
            // device info — one routing rule per bus. It takes effect only
            // after the policy is registered with AudioPolicyManager.
            AudioMix audioMix = new AudioMix.Builder(mixingRuleBuilder.build())
                    .setFormat(mixFormat)
                    .setDevice(info.getAudioDeviceInfo())
                    .setRouteFlags(AudioMix.ROUTE_FLAG_RENDER)
                    .build();
            builder.addMix(audioMix);
        }
    }
    // 4th, attach the {@link AudioPolicyVolumeCallback}
    builder.setAudioPolicyVolumeCallback(mAudioPolicyVolumeCallback);
    return builder.build();
}
通过CarAudioService、AudioControl、AudioPolicy(audio_policy_configuration.xml)三者相互配合,最终构成从usage到bus的映射关系。
这里需要说明的是,usage到context是多对一关系(多个usage可以对应一个context类型,但是反过来不行,一个context不能对应多个usage),context到bus也是多对一关系,bus到output还是多对一关系。如下图所示:
这里我就省略了一堆中间的过程调用了,要想了解其中调用过程请看我上一篇文章的框架分析,这里就不过多展开了。
//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp status_t AudioPolicyManager::registerPolicyMixes(const Vector<AudioMix>& mixes) { ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size()); status_t res = NO_ERROR; sp<HwModule> rSubmixModule; // examine each mix's route type for (size_t i = 0; i < mixes.size(); i++) { AudioMix mix = mixes[i]; // Only capture of playback is allowed in LOOP_BACK & RENDER mode if (is_mix_loopback_render(mix.mRouteFlags) && mix.mMixType != MIX_TYPE_PLAYERS) { ALOGE("Unsupported Policy Mix %zu of %zu: " "Only capture of playback is allowed in LOOP_BACK & RENDER mode", i, mixes.size()); res = INVALID_OPERATION; break; } //没错,省略一大堆,PolicyMixes.registerMix()这个才是真正起作用的注册,注册到哪? if (mPolicyMixes.registerMix(mix, 0 /*output desc*/) != NO_ERROR) { ALOGE("Error registering mix %zu for address %s", i, address.string()); res = INVALID_OPERATION; break; } } }
//frameworks/av/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp status_t AudioPolicyMixCollection::registerMix(const String8& address, AudioMix mix, sp<SwAudioOutputDescriptor> desc) { ssize_t index = indexOfKey(address); if (index >= 0) { ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string()); return BAD_VALUE; } for (size_t j = 0; j < mix. mCriteria.size(); j++) { ALOGE("add by Mr.H: "+ "registerPolicyMixes(): mix for address %s AudioMix.mUsage %d registered", address.string(), mix.mCriteria[j].mValue.mUsage); } sp<AudioPolicyMix> policyMix = new AudioPolicyMix(); policyMix->setMix(mix); add(address, policyMix); if (desc != 0) { desc->mPolicyMix = policyMix->getMix(); policyMix->setOutput(desc); } return NO_ERROR; }
上面的打印是我加的,源码中没有。mix.mCriteria这个变量,就是通过它来进行策略的匹配,因为它保存着对应的usage值的集合。
到这里,初始化的前序工作已经就绪。接下来看路由在实际播放时是如何生效的。
以AudioTrack创建为例:
过程如下:
1、先获取getMinBufferSize,这个挺有意思的,它最终是到audiohal获取得到一个驱动支持的size(LONG_PERIOD_SIZE,PLAYBACK_LONG_PERIOD_COUNT)大小,再经过一系列计算得到的,有兴趣的可以深入了解一下,这两个参数很重要,有的卡顿也是因为它造成的。
2、 new AudioTrack(创建AudioTrack的路由选择),这部分是我们要重点介绍的。
3、AudioTrack.play(),播放。
4、AudioTrack.write(),需要源源不断的往buffer写入数据。
/**
 * Demo: plays test.wav from external storage through an AudioTrack tagged
 * USAGE_MEDIA, so the dynamic routing policy sends it to the media bus.
 * Steps: query min buffer, create track (routing decision), play, then feed
 * PCM from a worker thread.
 */
public void startMedia() {
    // 1. Minimum buffer the platform requires for this sample rate / format.
    int minBufferSize = AudioTrack.getMinBufferSize(48000,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    // USAGE_MEDIA is what the routing policy's mixing rules match on.
    AudioAttributes music = new AudioAttributes.Builder()
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build();
    // 2. Creating the track triggers the routing selection (usage -> bus).
    mAudioTrack = new AudioTrack(music,
            new AudioFormat.Builder()
                    .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                    .setSampleRate(48000)
                    .build(),
            minBufferSize * 2, AudioTrack.MODE_STREAM, 0);
    // 3. Start rendering.
    mAudioTrack.play();
    audioFile = new File(Environment.getExternalStorageDirectory(), "test.wav");
    Log.d("test", "" + Environment.getExternalStorageDirectory());
    try {
        fileInputStream = new FileInputStream(audioFile);
    } catch (Exception e) {
        Log.e(TAG, e.toString());
        // FIX: bail out on open failure instead of letting the writer
        // thread NPE on a null stream.
        return;
    }
    // 4. Feed PCM continuously from a worker thread until EOF.
    final byte buffer[] = new byte[minBufferSize * 2];
    writePCMThread = new Thread(new Runnable() {
        public void run() {
            try {
                while (fileInputStream.read(buffer) >= 0) {
                    mAudioTrack.write(buffer, 0, buffer.length);
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // FIX: release the stream when playback data is exhausted.
                try {
                    fileInputStream.close();
                } catch (Exception ignored) {
                }
            }
        }
    });
    writePCMThread.start();
}
创建AudioTrack时序图如下:
通过上图黄色部分获取到I/O的handle之后就好办了,真正的创建AudioTrack是在PlaybackThread(这个是在初始化AudioFlinger的时候创建的,如果没有对应的PlaybackThread则会创建一个新的)中建立的。一个PlaybackThread可以放多个特定类型的AudioTrack,AudioFlinger通过它进行混音后写到PCM设备上去,最终实现音频播放。
定制音频路由只需两步:一是配置audio_policy_configuration.xml,二是配置AudioControl。
<!-- device/google/coral/audio/audio_policy_configuration.xml
     A customized copy normally lives under the device/ tree.
     FIX: the original excerpt used C-style // comments (invalid in XML) and
     contained a duplicated </devicePort> after the Speaker port. -->
<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
    <globalConfiguration speaker_drc_enabled="true"/>
    <modules>
        <module name="primary" halVersion="2.0">
            <!-- devices that are always attached -->
            <attachedDevices>
                <item>Speaker</item>
                <item>Built-In Mic</item>
            </attachedDevices>
            <defaultOutputDevice>Speaker</defaultOutputDevice>
            <!-- mix ports (mixer threads) -->
            <mixPorts>
                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="primary input" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000,11025,16000,22050,24000,32000,44100,48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
                </mixPort>
            </mixPorts>
            <!-- device ports -->
            <devicePorts>
                <devicePort tagName="Music" type="AUDIO_DEVICE_OUT_BUS" role="sink" address="bus0">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"
                             channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
                    <gains>
                        <gain name="" mode="AUDIO_GAIN_MODE_JOINT" minValueMB="0"
                              maxValueMB="4000" defaultValueMB="2000" stepValueMB="100"/>
                    </gains>
                </devicePort>
                <!-- Stock Android ports carry no address attribute; dynamic routing
                     only takes effect for AUDIO_DEVICE_*_BUS type ports. -->
                <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" address="bus0">
                </devicePort>
                <devicePort tagName="HDMI Out" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink" address="bus0">
                </devicePort>
                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
                </devicePort>
                <devicePort tagName="FM Tuner" type="AUDIO_DEVICE_IN_BUS" role="source" address="tuner0">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
                    <gains>
                        <gain name="" mode="AUDIO_GAIN_MODE_JOINT" minValueMB="0"
                              maxValueMB="4000" defaultValueMB="1000" stepValueMB="100"/>
                    </gains>
                </devicePort>
                <devicePort tagName="Voice Call" type="AUDIO_DEVICE_IN_BUS" role="source" address="voice_call">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"
                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
                    <gains>
                        <gain name="" mode="AUDIO_GAIN_MODE_JOINT" minValueMB="0"
                              maxValueMB="4000" defaultValueMB="1000" stepValueMB="100"/>
                    </gains>
                </devicePort>
            </devicePorts>
            <!-- routes. NOTE(review): several endpoints referenced below (esai output,
                 Wired Headphones, Wired Headset Mic, Spdif-In) are not declared in
                 this excerpt; they belong to elided parts of the full file. -->
            <routes>
                <route type="mix" sink="Speaker" sources="esai output,primary output"/>
                <route type="mix" sink="Wired Headphones" sources="primary output"/>
                <route type="mix" sink="HDMI Out" sources="primary output"/>
                <route type="mix" sink="primary input" sources="Built-In Mic,Wired Headset Mic,Spdif-In"/>
            </routes>
        </module>
    </modules>
</audioPolicyConfiguration>
一般我们只需改xml和audiocontrol的contextToBusMap就可以了。
sContextToBusMap的值代表了bus类型,另外需要注意的是顺序与context类型一一对应,如下:
//hardware/interfaces/automotive/audiocontrol/1.0/default/AudioControl.cpp
// Maps each car audio context to its output bus. The array is indexed by the
// context's ordinal in the HAL enum, so the ORDER of entries is significant
// and must stay in sync with the context enum definition.
static int sContextToBusMap[] = {
    -1,  // INVALID
     0,  // MUSIC_CONTEXT
     2,  // NAVIGATION_CONTEXT
     1,  // VOICE_COMMAND_CONTEXT
     1,  // CALL_RING_CONTEXT
     3,  // CALL_CONTEXT
     1,  // ALARM_CONTEXT
     1,  // NOTIFICATION_CONTEXT
     1,  // SYSTEM_SOUND_CONTEXT
};
好了,到这里Auto Audio路由模块大概流程框架基本介绍完毕。音频路由是Auto Android 音频系统的一个很核心的内容。auto音频的三大核心,一个是音频管理(声音焦点),一个是音频流管理(音频路由),一个是audiohal实现。三大部分相互影响相互配合。后续将继续介绍audiohal部分内容、音频常见问题处理以及音频模块的整体复盘,关于音频系统估计还会写三到五篇内容。
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。