当前位置:   article > 正文

Android音频驱动学习(一) Audio HAL

audio hal

Hal加载过程

Hal加载过程
加载audio hal需要分三步
1、hw_get_module_by_class :加载hal module
2、audio_hw_device_open:调用audio device open
3、open_output_stream:打开output

DevicesFactory::loadAudioInterface(const char *if_name, audio_hw_device_t **dev) 
  rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
    if (rc) {
        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7

实际调用到了audio_hw.c中的adev_open():硬件模块hw_module_methods_t里的函数指针open()被赋值为adev_open,整个加载过程中它只会被调用一次。

    rc = audio_hw_device_open(mod, dev);
    if (rc) {
        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
        rc = -EINVAL;
        audio_hw_device_close(*dev);
        goto out;
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

获取到dev设备之后,会调用openOutputStream来打开所有支持的Output,最终调用到Device.cpp:

 // HIDL entry point for opening a playback (output) stream on this audio device.
 //
 // Converts the HIDL AudioConfig to the legacy audio_config_t, forwards the
 // request to the loaded audio HAL via mDevice->open_output_stream(), and on
 // success wraps the returned audio_stream_out_t in a StreamOut object.
 // The result (status, stream, suggested config) is delivered through _hidl_cb.
 Return<void> Device::openOutputStream(int32_t ioHandle, const DeviceAddress &device,
                                       const AudioConfig &config, AudioOutputFlagBitfield flags,
                                       const SourceMetadata & /* sourceMetadata */,
                                       openOutputStream_cb _hidl_cb) {
     audio_config_t halConfig;
     HidlUtils::audioConfigToHal(config, &halConfig);
     audio_stream_out_t *halStream;
     ALOGV(
         "open_output_stream handle: %d devices: %x flags: %#x "
         "srate: %d format %#x channels %x address %s",
         ioHandle, static_cast<audio_devices_t>(device.device),
         static_cast<audio_output_flags_t>(flags), halConfig.sample_rate, halConfig.format,
         halConfig.channel_mask, deviceAddressToHal(device).c_str());
     // Call into the vendor HAL; on success halStream points at the opened stream.
     int status =
         mDevice->open_output_stream(mDevice, ioHandle, static_cast<audio_devices_t>(device.device),
                                     static_cast<audio_output_flags_t>(flags), &halConfig,
                                     &halStream, deviceAddressToHal(device).c_str());
     ALOGV("open_output_stream status %d stream %p", status, halStream);
     sp<IStreamOut> streamOut;
     if (status == OK) {
         streamOut = new StreamOut(this, halStream);
     }
     // The HAL may rewrite halConfig with the configuration it actually
     // supports; convert it back and report it as the suggested config.
     AudioConfig suggestedConfig;
     HidlUtils::audioConfigFromHal(halConfig, &suggestedConfig);
     _hidl_cb(analyzeStatus("open_output_stream", status), streamOut, suggestedConfig);
     return Void();
 }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27

以上基本加载完hal层,其实最终获取到两个对象:dev,stream。对于Hal层所有的操作都是基于这两个句柄,这点可以对照看下audio hal接口定义的地方:audio.h

static inline int audio_hw_device_open(const struct hw_module_t* module,
                                       struct audio_hw_device** device)
{
    return module->methods->open(module, AUDIO_HARDWARE_INTERFACE,
                                 TO_HW_DEVICE_T_OPEN(device));
} 
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6

之前调用的audio_hw_device_open,就是调用audio.h这个地方,具体实现就是在定义结构体audio_module的地方,不同平台不一样,最终一般都是调用到audio hal的adev_open之类的函数。看下这里的传参:struct audio_hw_device** device,这个结构体就是最终需要open的device。一般的厂商都会封装一层audio_hw_device,因为audio_hw_device都是原生的接口,厂商需要自己添加一定接口。
audio_hw_device结构体提供的接口一般都为对device直接操作的,如get_supported_devices、set_mode、set_mic_mute、setParameter之类,其中有两个重要接口:open_output_stream(播放output)、open_input_stream(录音input)
这就是之前提到的第三步,厂商实现这两个函数,最终返回结构体:audio_stream_in、audio_stream_out。
这两个结构体提供的接口一般都是对于流进行的,如read、write、start、stop。Flinger线程对于hal层操作一般都是最终调用这两个结构体。
所以之前说到的两个对象dev,stream,dev就是audio_hw_device,stream就是audio_stream_in、audio_stream_out。

MTK Audio Hal

初始化

  • Module定义
    如之前所说,平台会定义audio_module,然后policy再根据xml去loadmodule。
    Mtk上audio_module定义:
 struct legacy_audio_module HAL_MODULE_INFO_SYM = {
1284          .module = {
1285              .common = {
1286                  .tag = HARDWARE_MODULE_TAG,
1287                  .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
1288                  .hal_api_version = HARDWARE_HAL_API_VERSION,
1289                  .id = AUDIO_HARDWARE_MODULE_ID,
1290                  .name = "MTK Audio HW HAL",
1291                  .author = "MTK",
1292                  .methods = &legacy_audio_module_methods,
1293                  .dso = NULL,
1294                  .reserved = {0},
1295              },
1296          },
1297      };

1279      static struct hw_module_methods_t legacy_audio_module_methods = {
1280          .open = legacy_adev_open
1281      };
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19

所以1.1上面提到的module->methods->open就是调用到 legacy_adev_open。

  • legacy_adev_open
    可以看到主要做的就是创建了一个legacy_audio_device,
    这里先看下这个结构体定义,可以看出来legacy_audio_device就是之前提过的MTK对于audio_hw_device的封装
103      struct legacy_audio_device {
104          struct audio_hw_device_mtk device;
105  
106          AudioMTKHardwareInterface *hwif;
107      };

struct audio_hw_device_mtk: audio_hw_device {
57  
58      int (*xway_play_start)(struct audio_hw_device *dev, int sample_rate);
59      int (*xway_play_stop)(struct audio_hw_device *dev);
60      int (*xway_play_write)(struct audio_hw_device *dev, void *buffer, int size_bytes);
61      int (*xway_getfreebuffercount)(struct audio_hw_device *dev);
62      int (*xway_rec_start)(struct audio_hw_device *dev, int smple_rate);
63      int (*xway_rec_stop)(struct audio_hw_device *dev);
64      int (*xway_rec_read)(struct audio_hw_device *dev, void *buffer, int size_bytes);
65  
66      int (*setup_parameters_callback)(struct audio_hw_device *dev, device_parameters_callback_t callback, void *cookie);
67      int (*set_audio_parameter_changed_callback)(struct audio_hw_device *dev, device_audio_parameter_changed_callback_t callback, void *cookie);
68      int (*clear_audio_parameter_changed_callback)(struct audio_hw_device *dev, void *cookie);
69  };
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20

legacy_adev_open给各个函数指针接口赋值。
这些函数就是音频对于设备操作主要的接口,基本上每一个都很重要,后续会一一说明用处。

1183      static int legacy_adev_open(const hw_module_t *module, const char *name,
......
1191          struct legacy_audio_device *ladev;
1198          ladev = (struct legacy_audio_device *)calloc(1, sizeof(*ladev));
1203           ladev->device.common.tag = HARDWARE_DEVICE_TAG;
1204  #ifdef MTK_SUPPORT_AUDIO_DEVICE_API3
1205          ladev->device.common.version = AUDIO_DEVICE_API_VERSION_3_0;
1206  #else
1207          ladev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
1208  #endif
1209          ladev->device.common.module = const_cast<hw_module_t *>(module);
1210          ladev->device.common.close = legacy_adev_close;
1211  
1212          ladev->device.get_supported_devices = adev_get_supported_devices;
1213          ladev->device.init_check = adev_init_check;
1214          ladev->device.set_voice_volume = adev_set_voice_volume;
1215          ladev->device.set_master_volume = adev_set_master_volume;
1216          ladev->device.get_master_volume = adev_get_master_volume;
1217          ladev->device.set_mode = adev_set_mode;
1218          ladev->device.set_mic_mute = adev_set_mic_mute;
1219          ladev->device.get_mic_mute = adev_get_mic_mute;
1220          ladev->device.set_parameters = adev_set_parameters;
1221          ladev->device.get_parameters = adev_get_parameters;
1222          ladev->device.get_input_buffer_size = adev_get_input_buffer_size;
1223          ladev->device.open_output_stream = adev_open_output_stream;
1224          ladev->device.close_output_stream = adev_close_output_stream;
1225          ladev->device.open_input_stream = adev_open_input_stream;
1226          ladev->device.close_input_stream = adev_close_input_stream;
1227  
1228          ladev->device.get_microphones = adev_get_microphones;
1229  
1230          ladev->device.dump = adev_dump;
1231  
1232          ladev->device.create_audio_patch = adev_create_audio_patch;
1233          ladev->device.release_audio_patch = adev_release_audio_patch;
1234          ladev->device.get_audio_port = adev_get_audio_port;
1235          ladev->device.set_audio_port_config = adev_set_audio_port_config;
......
1248          ladev->device.xway_play_start = adev_xway_play_start;
1249          ladev->device.xway_play_stop = adev_xway_play_stop;
1250          ladev->device.xway_play_write = adev_xway_play_write;
1251          ladev->device.xway_getfreebuffercount = adev_xway_getfreebuffercount;
1252          ladev->device.xway_rec_start = adev_xway_rec_start;
1253          ladev->device.xway_rec_stop = adev_xway_rec_stop;
1254          ladev->device.xway_rec_read = adev_xway_rec_read;
1255  
1256          // added for HIDL extend
1257          ladev->device.setup_parameters_callback = adev_setup_parameters_callback;
1258          ladev->device.set_audio_parameter_changed_callback = adev_set_audio_parameters_changed_callback;
1259          ladev->device.clear_audio_parameter_changed_callback = adev_clear_audio_parameters_changed_callback;
1261          pthread_mutex_lock(&gHwInstanceLock);
1262          ladev->hwif = createMTKAudioHardware();
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52

函数的后面可以看到这里还创建了ladev->hwif = createMTKAudioHardware();这是legacy_audio_device的另外一个成员变量AudioMTKHardwareInterface ,如名字定义就是mtk底层的接口。跟踪这个调用,其实返回的就是AudioALSAHardware的单例,回过头看框架图,
AudioALSAHardware就是MTK V3 Audio hal逻辑代码的起始点。

5657  AudioMTKHardwareInterface *AudioMTKHardwareInterface::create() {
5658      /*
5659       * FIXME: This code needs to instantiate the correct audio device
5660       * interface. For now - we use compile-time switches.
5661       */
5662      AudioMTKHardwareInterface *hw = 0;
5663      char value[PROPERTY_VALUE_MAX];
5664  
5665      ALOGV("Creating MTK AudioHardware");
5666      //hw = new android::AudioALSAHardware();
5667      hw = android::AudioALSAHardware::GetInstance();
5668  
5669      return hw;
5670  
5671  }
5672  
5673  extern "C" AudioMTKHardwareInterface *createMTKAudioHardware() {
5674      /*
5675       * FIXME: This code needs to instantiate the correct audio device
5676       * interface. For now - we use compile-time switches.
5677       */
5678      return AudioMTKHardwareInterface::create();
5679  
5680  }
5681  
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • V3重要的类
    以上可以看到走进V3目录下的AudioALSAHardware,MTk Audio Hal主要的逻辑代码都在V3目录下,上面的legacy相关流程主要是将V3封装一层,迎合上层接口定义,看一下V3目录重要的类:
    AudioALSAStreamManager是入口管理下面的AudioALSAStreamIn和AudioALSAStreamOut
    AudioALSAStreamOut管理着AudioALSAPlaybackXXXX
    AudioALSAStreamIn管理着AudioALSACaptureXXXX,
    AudioALSAPlaybackXXXX、AudioALSACaptureXXXX这两个类里面的主要函数是open()、read()和write(),主要负责把PCM buffer读写到Linux的ALSA驱动里面。
    AudioALSASpeechXXXX类是Audio的一个算法处理类。
    AudioALSAHardwareResourceManager这个类主要用于打开和关闭硬件设备,如MIC,喇叭等
    AudioALSAVolumeController,这个类在下面的框架图没有体现,但是也很常用,主要用于Audio系统的音量控制,音量补偿,音频参数也在此得到应用。

  • V3框架图
    在这里插入图片描述

  • V3初始化
    接下来就以AudioALSAHardware为入口看下Mtk hal具体如何实现。
    构造函数:

812  AudioALSAHardware::AudioALSAHardware() :
814   mAudioMessengerIPI(AudioMessengerIPI::getInstance()),
819   mAudioSpeechEnhanceInfoInstance(AudioSpeechEnhanceInfo::getInstance()),
823   mAudioAlsaDeviceInstance(AudioALSADeviceParser::getInstance()),
825   mANCController(AudioALSAANCController::getInstance()),
843   mStreamManager = AudioALSAStreamManager::getInstance(); 
844   mSpeechPhoneCallController = AudioALSASpeechPhoneCallController::getInstance(); 
845   mAudioALSAParamTunerInstance = AudioALSAParamTuner::getInstance();
889      mAudioHalBtscoWB = (bool)get_uint32_from_mixctrl(PROPERTY_KEY_BTSCO_WB_ON);
890      ALOGD("%s(), mAudioHalBtscoWB = %d", __FUNCTION__, mAudioHalBtscoWB);
891      if (mAudioHalBtscoWB == true) {
892          WCNChipController::GetInstance()->SetBTCurrentSamplingRateNumber(16000);
893          AudioBTCVSDControl::getInstance()->BT_SCO_SetMode(true);
894          mSpeechPhoneCallController->setBTMode(true);
895      } else {
896          WCNChipController::GetInstance()->SetBTCurrentSamplingRateNumber(8000);
897          AudioBTCVSDControl::getInstance()->BT_SCO_SetMode(false);
898          mSpeechPhoneCallController->setBTMode(false);
899      }
910      if (mixer_ctl_set_value(mixer_get_ctl_by_name(AudioALSADriverUtility::getInstance()->getMixer(),
911                                                    "aaudio_ion"), 0, 1)) {
912          ALOGW("%s(), aaudio_ion enable fail", __FUNCTION__);
913      }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23

这里构造函数初始化了很多东西,缩写的名字看不出实际意义,暂时先不看了,用到的时候再看其用处。之前说过adev_open之后的流程就是打开output/input,这里以播放为例子来梳理流程:adev_open_output_stream

790          out->legacy_out = ladev->hwif->openOutputStreamWithFlags(devices, flags,
791                                                                   (int *) &config->format,
792                                                                   &config->channel_mask,
793                                                                   &config->sample_rate, &status);
  • 1
  • 2
  • 3
  • 4

之前就看到赋值给hwif的就是这个AudioALSAHardware,所以这里的openOutputStreamWithFlags调用的就是AudioALSAHardware的函数:

5641  AudioMTKStreamOutInterface *AudioALSAHardware::openOutputStreamWithFlags(uint32_t devices,
5642                                                                           audio_output_flags_t flags,
5643                                                                           int *format,
5644                                                                           uint32_t *channels,
5645                                                                           uint32_t *sampleRate,
5646                                                                           status_t *status) {
5647      return mStreamManager->openOutputStream(devices, format, channels, sampleRate, status, flags);
5648  }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8

这个mStreamManager就是构造函数里初始化的AudioALSAStreamManager。

323  AudioMTKStreamOutInterface *AudioALSAStreamManager::openOutputStream(
324      uint32_t devices,
325      int *format,
326      uint32_t *channels,
327      uint32_t *sampleRate,
328      status_t *status,
329      uint32_t output_flag) {

350      AudioALSAStreamOut *pAudioALSAStreamOut = new AudioALSAStreamOut();
351      pAudioALSAStreamOut->set(devices, format, channels, sampleRate, status, output_flag);
365      pAudioALSAStreamOut->setIdentity(mStreamOutIndex);
366      mStreamOutVector.add(mStreamOutIndex, pAudioALSAStreamOut);
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

可以看到主要做的就是new了AudioALSAStreamOut,并将这些参数传入:

324      uint32_t devices,//设备
325      int *format,//传入的数据类型,如PCM_32BIT/PCM_16BIT/AAC/MP3,总之正常传入pcm,offload模式传入mp3或者aac文件

326      uint32_t *channels,//声道数
327      uint32_t *sampleRate,//采样率
328      status_t *status,
329      uint32_t output_flag//这个就是output类型,一般有DIRECT、fast、deep_buffer、compress_offload等
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7

Flag表:

AUDIO_OUTPUT_FLAGDescription
AUDIO_OUTPUT_FLAG_DIRECT表示音频流直接输出到音频设备,不需要软件混音,一般用于 HDMI 设备声音输出
AUDIO_OUTPUT_FLAG_PRIMARY表示音频流需要输出到主输出设备,一般用于铃声类声音
AUDIO_OUTPUT_FLAG_FAST表示音频流需要快速输出到音频设备,一般用于按键音、游戏背景音等对时延要求高的场景
AUDIO_OUTPUT_FLAG_DEEP_BUFFER表示音频流输出可以接受较大的时延,一般用于音乐、视频播放等对时延要求不高的场景
AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD表示音频流没有经过软件解码,需要输出到硬件解码器,由硬件解码器进行解码

AudioALSAStreamOut.cpp:

146  status_t AudioALSAStreamOut::set(
160      // device
161      mStreamAttributeSource.output_devices = static_cast<audio_devices_t>(devices);
162      mStreamAttributeSource.policyDevice = mStreamAttributeSource.output_devices;
163  
164      // check format
165      if (*format == AUDIO_FORMAT_PCM_16_BIT ||
166          *format == AUDIO_FORMAT_PCM_8_24_BIT ||
167          *format == AUDIO_FORMAT_PCM_32_BIT) {
168          mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
169      } else if (*format == AUDIO_FORMAT_MP3) {
170          ALOGD("%s(), format mp3", __FUNCTION__);
171          mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
172          mStreamAttributeSource.audio_offload_format = *format;
173      } else if (*format == AUDIO_FORMAT_AAC_LC) {
174          ALOGD("%s(), format aac", __FUNCTION__);
175          mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
176          mStreamAttributeSource.audio_offload_format = *format;
177      } else {
178          ALOGE("%s(), wrong format 0x%x, use 0x%x instead.", __FUNCTION__, *format, kDefaultOutputSourceFormat);
179  
180          *format = kDefaultOutputSourceFormat;
181          *status = BAD_VALUE;
182      }
183  
184      // check channel mask
185      if (mStreamAttributeSource.output_devices == AUDIO_DEVICE_OUT_AUX_DIGITAL) { // HDMI
186          if (*channels == AUDIO_CHANNEL_OUT_STEREO) {
187              mStreamOutType = STREAM_OUT_HDMI_STEREO;
188  
189              mStreamAttributeSource.audio_channel_mask = *channels;
190              mStreamAttributeSource.num_channels = popcount(*channels);
191  
192              mStreamOutHDMIStereo = this;
193              mStreamOutHDMIStereoCount++;
194              ALOGD("%s(), mStreamOutHDMIStereoCount =%d", __FUNCTION__, mStreamOutHDMIStereoCount);
195          } else if (*channels == AUDIO_CHANNEL_OUT_5POINT1 ||
196                     *channels == AUDIO_CHANNEL_OUT_7POINT1) {
197              mStreamOutType = STREAM_OUT_HDMI_MULTI_CHANNEL;
198  
199              mStreamAttributeSource.audio_channel_mask = *channels;
200              mStreamAttributeSource.num_channels = popcount(*channels);
201          } else {
202              ALOGE("%s(), wrong channels 0x%x, use 0x%x instead.", __FUNCTION__, *channels, kDefaultOutputSourceChannelMask);
203  
204              *channels = kDefaultOutputSourceChannelMask;
205              *status = BAD_VALUE;
206          }
207      } else if (devices == AUDIO_DEVICE_OUT_SPEAKER_SAFE) { // Primary
208          mStreamOutType = STREAM_OUT_VOICE_DL;
209          mStreamAttributeSource.audio_channel_mask = *channels;
210          mStreamAttributeSource.num_channels = popcount(*channels);
211      } else if (*channels == kDefaultOutputSourceChannelMask || *channels == AUDIO_CHANNEL_OUT_MONO) { // Primary
212          mStreamAttributeSource.audio_channel_mask = *channels;
213          mStreamAttributeSource.num_channels = popcount(*channels);
214      } else {
215          ALOGE("%s(), wrong channels 0x%x, use 0x%x instead.", __FUNCTION__, *channels, kDefaultOutputSourceChannelMask);
216  
217          *channels = kDefaultOutputSourceChannelMask;
218          *status = BAD_VALUE;
219      }
220  
221      // check sample rate
222      if (SampleRateSupport(*sampleRate) == true) {
223          if ((mStreamAttributeSource.num_channels == 2) && (mStreamAttributeSource.output_devices == AUDIO_DEVICE_OUT_AUX_DIGITAL)) {
224              mStreamAttributeSource.sample_rate = 44100;
225          } else {
226              mStreamAttributeSource.sample_rate = *sampleRate;
227          }
228          if ((mStreamOutType == STREAM_OUT_PRIMARY || mStreamOutType == STREAM_OUT_VOICE_DL) && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
229              AudioALSASampleRateController::getInstance()->setPrimaryStreamOutSampleRate(*sampleRate);
230          }
231      } else {
232          ALOGE("%s(), wrong sampleRate %d, use %d instead.", __FUNCTION__, *sampleRate, kDefaultOutputSourceSampleRate);
233  
234          *sampleRate = kDefaultOutputSourceSampleRate;
235          *status = BAD_VALUE;
236      }
239  
240      mStreamAttributeSource.mAudioOutputFlags = (audio_output_flags_t)flags;
241      collectPlatformOutputFlags(mStreamAttributeSource.mAudioOutputFlags);
242  
243      if (mStreamAttributeSource.mAudioOutputFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
244          mStreamAttributeSource.usePolicyDevice = true;
245          char result[PROPERTY_VALUE_MAX];
246          property_get(allow_offload_propty, result, "1");
247          offloadflag = atoi(result);
248          mStreamAttributeSource.offload_codec_info.disable_codec = offloadflag ? 0 : 1;
249          ALOGD("%s(),mStreamAttributeSource.offload_codec_info.disable_codec =%d ", __FUNCTION__, mStreamAttributeSource.offload_codec_info.disable_codec);
250      }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90

AudioALSAStreamOut的初始化主要就是将之前传进来的参数经过处理后设置给mStreamAttributeSource对象。至此,从AudioPolicy loadModule并openOutput的流程在hal层所做的工作就完成了。这里纠正以前的一个观念:看起来开机时就加载并打开了所有output,而output对应着设备,这样不会产生功耗问题吗?看完对应的hal层流程可以发现,这其实只是初始化——实际操作设备需要调用tinyAlsa的接口,而以上流程并没有调用,所以FW层的openOutput只是初始化hal层而已,并不会实际操作硬件。

播放

  • 流程图
    在这里插入图片描述
  • 代码跟踪
    FW层来说播放的话一般就是
    AudioTrack:start
    AudioFlinger->addTrack
    AudioPolicyManager->startOutput
    Threads:NormalSink->write
    Policy的startOutput其实并没有干啥实际的事,所以对于hal播放的启动就是最后这个NormalSink->write,跟过FW的流程就可以知道这里的NormalSink就是Hidl的StreamOutHal,Hidl的StreamOutHal就是之前adev_open_output_stream返回的audio_stream_out结构体变量,看下之前的定义:
813          out->stream.write = out_write;

249      static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
250                               size_t bytes) {
251  #ifdef AUDIO_HAL_PROFILE_ENTRY_FUNCTION
252          AudioAutoTimeProfile _p(__func__, AUDIO_HAL_FUNCTION_WRITE_NS);
253  #endif
254          struct legacy_stream_out *out =
255                  reinterpret_cast<struct legacy_stream_out *>(stream);
256          return out->legacy_out->write(buffer, bytes);
257      }
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11

之前跟踪过这个legacy_out就是 ladev->hwif->openOutputStreamWithFlags的返回值,也就是上面提到的AudioALSAStreamOut。所以最后调用的就是AudioALSAStreamOut的write函数:

ssize_t AudioALSAStreamOut::write(const void *buffer, size_t bytes) {
......
  if (mStandby == true) {
          status = open();
          mPlaybackHandler->setFirstDataWriteFlag(true);
......
514      mPlaybackHandler->preWriteOperation(buffer, bytes);
515      outputSize = mPlaybackHandler->write(buffer, bytes);
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8

AudioALSAStreamOut::open

1103  status_t AudioALSAStreamOut::open() {
 mPlaybackHandler = mStreamManager->createPlaybackHandler(&mStreamAttributeSource);
1142          if (mPlaybackHandler) {
1143              // open audio hardware
1144              status = mPlaybackHandler->open();
1145  
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6

AudioALSAStreamManager::createPlaybackHandler

626   if (isPhoneCallOpen() == true) {
case AUDIO_DEVICE_OUT_SPEAKER_SAFE: {
pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtection(stream_attribute_source);
......
644          case AUDIO_DEVICE_OUT_AUX_DIGITAL:
645   pPlaybackHandler = new AudioALSAPlaybackHandlerHDMI(stream_attribute_source);
} else {

652          switch (stream_attribute_source->output_devices) {
653          case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
654          case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
655          case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT: {
656              if (!stream_attribute_source->isMixerOut) {
657                  pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
658              } else {
659                  if (WCNChipController::GetInstance()->IsBTMergeInterfaceSupported() == true) {
660                      pPlaybackHandler = new AudioALSAPlaybackHandlerBTSCO(stream_attribute_source);
661                  } else {
662                      pPlaybackHandler = new AudioALSAPlaybackHandlerBTCVSD(stream_attribute_source);
663                  }
664              }
665              break;
666          }
667          case AUDIO_DEVICE_OUT_AUX_DIGITAL: {
668              pPlaybackHandler = new AudioALSAPlaybackHandlerHDMI(stream_attribute_source);
669              break;
670          }
671          case AUDIO_DEVICE_OUT_FM: {
672              pPlaybackHandler = new AudioALSAPlaybackHandlerFMTransmitter(stream_attribute_source);
673              break;
674          }
675          case AUDIO_DEVICE_OUT_EARPIECE:
676          case AUDIO_DEVICE_OUT_WIRED_HEADSET:
677          case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
678          case AUDIO_DEVICE_OUT_SPEAKER:
679          default: {
680              if (isBtSpkDevice(stream_attribute_source->output_devices)) {
681                  if (!stream_attribute_source->isMixerOut) {
682                      pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
683                      break;
684                  }
685              }
686  
687  #if !defined(MTK_BASIC_PACKAGE)
688              if (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD & stream_attribute_source->mAudioOutputFlags) {
689                  pPlaybackHandler = new AudioALSAPlaybackHandlerOffload(stream_attribute_source);
690                  break;
691              } else
692  #endif
693              {
694  #if defined(MTK_MAXIM_SPEAKER_SUPPORT) || (MTK_AUDIO_SMARTPASCP_SUPPORT)
695                  if (AudioSmartPaController::getInstance()->isSwDspSpkProtect(stream_attribute_source->output_devices)) {
696                      if (!stream_attribute_source->isMixerOut) {
697                          pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
698                          break;
699                      }
700  #if defined(MTK_MAXIM_SPEAKER_SUPPORT)
701                      if (AudioSmartPaController::getInstance()->getSpkProtectType() == SPK_AP_DSP) {
702  pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtection(stream_attribute_source);
703                          break;
704                      }
705  #elif defined(MTK_AUDIO_SMARTPASCP_SUPPORT)
706                      if (AudioSmartPaController::getInstance()->getSpkProtectType() == SPK_APSCP_DSP) {
707  pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtectionDsp(stream_attribute_source);
708                          break;
709                      }
710  #endif
715                  } else
716  #endif // end of #if defined(MTK_MAXIM_SPEAKER_SUPPORT) || (MTK_AUDIO_SMARTPASCP_SUPPORT)
717                  {
718  #ifdef DOWNLINK_LOW_LATENCY
719                      if (AUDIO_OUTPUT_FLAG_FAST & stream_attribute_source->mAudioOutputFlags &&
720                          !(AUDIO_OUTPUT_FLAG_PRIMARY & stream_attribute_source->mAudioOutputFlags)) {
721                          pPlaybackHandler = new AudioALSAPlaybackHandlerFast(stream_attribute_source);
722                          break;
723                      }
724  #if defined(MTK_AUDIO_AAUDIO_SUPPORT)
725        else if (AUDIO_OUTPUT_FLAG_MMAP_NOIRQ & stream_attribute_source->mAudioOutputFlags) {
726                          pPlaybackHandler = new AudioALSAPlaybackHandlerAAudio(stream_attribute_source);
727                          break;
728                      }
729  #endif
730                      else
731  #endif
732                      {
733                          if (AudioSmartPaController::getInstance()->isInCalibration()) {
734  pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
735                              break;
736                          }
737  #if defined(MTK_AUDIODSP_SUPPORT)
738                          if (AudioDspStreamManager::getInstance()->getDspOutHandlerEnable(stream_attribute_source->mAudioOutputFlags)) {
739  pPlaybackHandler = new AudioALSAPlaybackHandlerDsp(stream_attribute_source);
740                          } else {
741  pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
742                          }
743                          break;
744  #else
745  pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
746                          break;
747  #endif
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100

这个函数根据不同的device创建不同的AudioALSAPlaybackHandler,这里以normal为例继续跟踪,AudioALSAPlaybackHandlerNormal构造函数就是初始化一系列参数,没啥需要看的,所以可以直接看之前AudioALSAStreamOut调用的mPlaybackHandler->open();

220  status_t AudioALSAPlaybackHandlerNormal::open() {
//可以看到这里根据不同的flag选择不同pcmindex、cardindex、playbackSeq 
238      if (isIsolatedDeepBuffer(mStreamAttributeSource->mAudioOutputFlags)) {
239  
240          ALOGD("%s(), isolated deep buffer keypcmDeepBuffer = %s", __FUNCTION__, keypcmDeepBuffer.string());
241  
242          pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmDeepBuffer);
243          cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmDeepBuffer);
244  
245          // use playback 2
246          if (keypcmDeepBuffer.compare(keypcmPlayback2) == 0) {
247              playbackSeq = String8(AUDIO_CTL_PLAYBACK2);
248          } else {
249              playbackSeq = String8(AUDIO_CTL_PLAYBACK3);
250          }
251  
252          if (mixer_ctl_set_value(mixer_get_ctl_by_name(mMixer, "deep_buffer_scenario"), 0, 1)) {
253              ALOGW("%s(), deep_buffer_scenario enable fail", __FUNCTION__);
254          }
255      } else if (mStreamAttributeSource->mAudioOutputFlags & AUDIO_OUTPUT_FLAG_VOIP_RX) {
256          pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmPlayback12);
257          cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmPlayback12);
258          playbackSeq = String8(AUDIO_CTL_PLAYBACK12);
259      } else {
260          pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmPlayback1);
261          cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmPlayback1);
262          playbackSeq = String8(AUDIO_CTL_PLAYBACK1);
263      }
264  
265      mApTurnOnSequence = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_1, playbackSeq);
266      mApTurnOnSequence2 = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_2, playbackSeq);
267  #if defined(MTK_AUDIODSP_SUPPORT)
268      mApTurnOnSequence3 = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_3, playbackSeq);
269      mApTurnOnSequenceDsp = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_DSP, playbackSeq);
270  #endif
271      mHardwareResourceManager->setCustOutputDevTurnOnSeq(mStreamAttributeSource->output_devices,
272                                                          mTurnOnSeqCustDev1, mTurnOnSeqCustDev2);
273  
274      mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence);
275      mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence2);
276      mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequenceDsp);
277      mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence3);
278      mHardwareResourceManager->enableTurnOnSequence(mTurnOnSeqCustDev1);
279      mHardwareResourceManager->enableTurnOnSequence(mTurnOnSeqCustDev2);
289      //ListPcmDriver(cardindex, pcmindex);
290  
291      struct pcm_params *params;
292      params = pcm_params_get(cardindex, pcmindex,  PCM_OUT);

我知道的是一个flag对应上层的一个通路,而这里根据flag来选择pcm以及声卡id,可以看一个例子: keypcmPlayback1,定义在AudioALSADeviceString.h中:
static String8 keypcmPlayback1 = String8(“Playback_1”);
Driver中有dai_link的结构体数组,其中就定了一个pcm Playback_1,所以这就是一个FE PCM的名字,所以这里可以理解为一个flag对应着一个FE PCM。

364  	/* Front End DAI links */
365  	{
366  		.name = "Playback_1",
367  		.stream_name = "Playback_1",
368  		.cpu_dai_name = "DL1",
369  		.codec_name = "snd-soc-dummy",
370  		.codec_dai_name = "snd-soc-dummy-dai",
371  		.trigger = {SND_SOC_DPCM_TRIGGER_PRE,
372  			    SND_SOC_DPCM_TRIGGER_PRE},
373  		.dynamic = 1,
374  		.dpcm_playback = 1,
375  	},

这两个函数:GetCardIndexByString、GetPcmIndexByString,就是根据这个字符串取到pcm id以及card id。对应关系的话在AudioALSADeviceParser初始化的时候读取/proc/asound/pcm并存入数组。最终通过pcm_params_get根据card 和pcm id获取pcm_params,这是TinyAlsa的一个接口。

每一个flag判断还会给Sequence赋值, 比如:

1、playbackSeq = String8(AUDIO_CTL_PLAYBACK1);
2、mApTurnOnSequence = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_1, playbackSeq);
3、mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence);

enableTurnOnSequence:

AudioALSAHardwareResourceManager::enableTurnOnSequence
ret = mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(sequence);
AudioALSADeviceConfigManager::ApplyDeviceTurnonSequenceByName
DeviceCtlDescriptor *descriptor = GetDeviceDescriptorbyname(DeviceName);
String8 cltname = descriptor->mDeviceCltonVector.itemAt(count);
String8 cltvalue = descriptor->mDeviceCltonVector.itemAt(count + 1);
  if (setMixerCtl(cltname, cltvalue)) {

setMixerCtl是封装的
mixer_get_ctl_by_name、mixer_ctl_get_type、mixer_get_ctl、mixer_get_ctl_by_name、mixer_ctl_set_value
等一系列mixer操作,最终是通过mixer_ctl_set_value来打开通路,以上这些均为TinyMix的接口,主要是用于通路设备操作。在手机目录下运行tinymix,可以获取到所有的ctl列表,就是代表所有设备。

接着看open函数后面最主要的

405      unsigned int flag = PCM_MMAP | PCM_OUT | PCM_MONOTONIC;
406      openPcmDriverWithFlag(pcmindex, flag);

status_t AudioALSAPlaybackHandlerBase::openPcmDriverWithFlag(const unsigned int device, unsigned int flag) {
244      mPcmflag = flag;
245      mPcm = pcm_open(AudioALSADeviceParser::getInstance()->GetCardIndex(),
246                      device, flag, &mConfig
......
259      if (mPcmflag & PCM_MMAP) {
260          audio_pcm_write_wrapper_fp = pcm_mmap_write;
261      } else {
262          audio_pcm_write_wrapper_fp = pcm_write;
263      }

这就是这一步的关键了,通过 pcm_open打开pcm设备,并且定义好pcm_write相关函数指针。至此open的过程就结束了。
回头看之前AudioALSAStreamOut write流程:

515      outputSize = mPlaybackHandler->write(buffer, bytes);

ssize_t AudioALSAPlaybackHandlerNormal::write(const void *buffer, size_t bytes) {
721      void *pBufferAfterDcRemoval = NULL;
722      uint32_t bytesAfterDcRemoval = 0;
723      // DC removal before DRC
724      doDcRemoval(pBuffer, bytes, &pBufferAfterDcRemoval, &bytesAfterDcRemoval);
725  
726  
727      // stereo to mono for speaker
728      doStereoToMonoConversionIfNeed(pBufferAfterDcRemoval, bytesAfterDcRemoval);
729  
771          // post processing (can handle both Q1P16 and Q1P31 by audio_format_t)
772          void *pBufferAfterPostProcessing = NULL;
773          uint32_t bytesAfterPostProcessing = 0;
774          doPostProcessing(pBufferAfterDcRemoval, bytesAfterDcRemoval, &pBufferAfterPostProcessing, &bytesAfterPostProcessing);
775  
776          // SRC
777          void *pBufferAfterBliSrc = NULL;
778          uint32_t bytesAfterBliSrc = 0;
779          doBliSrc(pBufferAfterPostProcessing, bytesAfterPostProcessing, &pBufferAfterBliSrc, &bytesAfterBliSrc);
780  
781          // bit conversion
782          void *pBufferAfterBitConvertion = NULL;
783          uint32_t bytesAfterBitConvertion = 0;
784          doBitConversion(pBufferAfterBliSrc, bytesAfterBliSrc, &pBufferAfterBitConvertion, &bytesAfterBitConvertion);
785  
786          // data pending
787          pBufferAfterPending = NULL;
788          bytesAfterpending = 0;
789         dodataPending(pBufferAfterBitConvertion, bytesAfterBitConvertion, &pBufferAfterPending, &bytesAfterpending);

794      // pcm dump
795      WritePcmDumpData(pBufferAfterPending, bytesAfterpending);

809      // write data to pcm driver
810      int retval = pcmWrite(mPcm, pBufferAfterPending, bytesAfterpending);

以上可以看到在pcmWrite之前对buffer进行了一系列处理:

doDcRemoval 去除频谱的直流分量
doStereoToMonoConversionIfNeed 立体声转换为单声道
doPostProcessing
doBliSrc 重采样
doBitConversion 位宽转换
dodataPending

doDcRemoval、doBliSrc、doBitConversion 这三个定义在AudioALSAPlaybackHandlerBase.cpp中,AudioALSAPlaybackHandlerBase在初始化的时候会加载/vendor/lib64/libaudiocomponentengine_vendor.so,并且通过dlsym返回相应句柄createMtkDcRemove、createMtkAudioSrc、createMtkAudioBitConverter。在调用doDcRemoval、doBliSrc、doBitConversion的时候,就会调用*->process来进行相应操作。
doPostProcessing看起来像是进行音效处理,doStereoToMonoConversionIfNeed就是将立体声转化为单声道,这两个需要后续遇到的时候再行debug分析。考虑src之后可能会影响对齐,dodataPending中做了个64位对齐。以上操作,如果音乐在其间出了问题,每一步之间都可以手动添加dump,来定位错误点后具体分析原因。
最后一系列操作结束后再调用pcmWrite将buffer写入driver。pcmWrite就是调用之前赋值的audio_pcm_write_wrapper_fp ,也就是TinyAlsa的接口:pcm_write或 pcm_mmap_write,mmap就是低时延类型的接口。

设备通路

在播放的流程中提到了设备的打开,其中粗略的带过了enableTurnOnSequence的流程,这个需要好好看下,首先在AudioALSADeviceConfigManager初始化的时候会调用

int ret = LoadAudioConfig(AUDIO_DEVICE_EXT_CONFIG_FILE);
#define AUDIO_DEVICE_EXT_CONFIG_FILE "/vendor/etc/audio_device.xml"

随便看一下这个文件的几个节点:

  <path name="speaker_output" value="turnoff">
        <kctl name="Speaker_Amp_Switch" value="Off" />
    </path>
    <path name="builtin_Mic_Mic2" value="turnon">
        <kctl name="Audio_MicSource1_Setting" value="ADC1" />
        <kctl name="Audio_ADC_1_Switch" value="On" />
        <kctl name="Audio_ADC_2_Switch" value="On" />
        <kctl name="Audio_Preamp1_Switch" value="IN_ADC3" />
        <kctl name="Audio_Preamp2_Switch" value="IN_ADC3" />
</path>

Kctl就是tinymix可以直接操作的kcontrol,path就是实际设备通路。再回到LoadAudioConfig,这个函数就是解析这个xml,每个path都存入 mDeviceVector,其子节点kctl存入mDeviceVector->path的mDeviceCltonVector或者mDeviceCltoffVector或者mDeviceCltsettingVector。字面意思,开、关、值设置。

再回头看enableTurnOnSequence会调用ApplyDeviceTurnonSequenceByName

458  status_t AudioALSADeviceConfigManager::ApplyDeviceTurnonSequenceByName(const char *DeviceName) {
//GetDeviceDescriptorbyname就是根据DeviceName选出mDeviceVector中相同名字的path
459      DeviceCtlDescriptor *descriptor = GetDeviceDescriptorbyname(DeviceName);
460      if (descriptor == NULL) {
461          ALOGE("%s  DeviceName = %s descriptor == NULL", __FUNCTION__, DeviceName);
462          return INVALID_OPERATION;
463      }
464      ALOGD("%s() DeviceName = %s descriptor->DeviceStatusCounte = %d", __FUNCTION__, DeviceName, descriptor->DeviceStatusCounter);
465      if (descriptor->DeviceStatusCounter == 0) {
466          for (size_t count = 0; count < descriptor->mDeviceCltonVector.size(); count += 2) {
//mDeviceVector就是path,由于这里是开,所以这里mDeviceVector->mDeviceCltonVector就是xml中的kctl
467              String8 cltname = descriptor->mDeviceCltonVector.itemAt(count);
468              String8 cltvalue = descriptor->mDeviceCltonVector.itemAt(count + 1);
469              ALOGV("cltname = %s cltvalue = %s", cltname.string(), cltvalue.string());
470  #if defined(CUSTOM_AUDIO_SPEAKER_SEQ_SUPPORT)
471              if ((strcmp(cltname.c_str(), "Receiver_Speaker_Switch") == 0)
472                  && (strcmp(DeviceName, AUDIO_DEVICE_EXT_SPEAKER) == 0)
473                  && (INTERVAL_EXTSPEAKER_AMP_SW > 0)) {
474                  ALOGD_IF(mLogEnable, "%s(), ext speaker on, AMP to Analog SW interval[%d]",
475                      __FUNCTION__, INTERVAL_EXTSPEAKER_AMP_SW);
476                  usleep(INTERVAL_EXTSPEAKER_AMP_SW);
477              }
478  #endif//CUSTOM_AUDIO_SPEAKER_SEQ_SUPPORT
//传入kctl的键值对,调用setMixerCtl,也就是mixer_ctl_set_value
479              if (setMixerCtl(cltname, cltvalue)) {
480                  ALOGE("Error: %s  cltname.string () = %s cltvalue.string () = %s", __FUNCTION__, cltname.string(), cltvalue.string());
481                  ASSERT(false);
482              }
483          }
484      }

以上可以看出你调用ApplyDeviceTurnonSequenceByName,就是传入path名字,这个函数会打开该path所有kctl子节点,同样的ApplyDeviceTurnoffSequenceByName就是关闭所有kctl。

那么可以看一下哪边用到ApplyDeviceTurnoffSequenceByName比较多:
AudioALSAHardwareResourceManager.cpp

668         ret = mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(sequence);  in enableTurnOnSequence()
944 …      mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC1_INVERSE);  in startInputDevice()
946 …              mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC1);  in startInputDevice()
950 …      mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC2_INVERSE);  in startInputDevice()
952 …              mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC2);  in startInputDevice()
957             mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC3);  in startInputDevice()
961             mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC4);  in startInputDevice()
965             mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC5);  in startInputDevice()
969 …            mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_TRIPLE_MIC);  in startInputDevice()
972 …            mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(getStartInputDeviceForDualMic());  in startInputDevice()
975 …            mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_SINGLE_MIC);  in startInputDevice()
992 …      mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_BACK_MIC_INVERSE);  in startInputDevice()
994 …              mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_BACK_MIC);  in startInputDevice()
1002         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADSET_MIC);  in startInputDevice()
1327         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_SIDETONE);  in EnableSideToneFilter()
1644         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_2IN1_SPEAKER);  in OpenReceiverPath()
1646         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_RECEIVER);  in OpenReceiverPath()
1691         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HIFI_DAC);  in OpenHeadphonePath()
1693         mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADPHONE);  in OpenHeadphonePath()
.....省略

可以看到主要都是在AudioALSAHardwareResourceManager.cpp中,AudioALSAHardwareResourceManager.cpp就是用来控制设备通路的打开和关闭,还有turnoff的就不列出来了。
有一些漏音问题或者杂音问题就可以通过这里通路打开关闭时序调整来解决。
涉及到通路切换pop音,也可以考虑在调用这些函数之后对音量进行一个淡入淡出的操作。

音量增益

MTk平台音量增益控制都在AudioALSAVolumeController.cpp中,可以看到比较熟悉的setMasterVolume。

status_t AudioALSAVolumeController::setMasterVolume(float v, audio_mode_t mode, uint32_t devices)
int MapVolume = AudioALSAVolumeController::logToLinear(v);

setMasterVolume中根据mode、devices来选择应用不同的增益配置。

1382              case (AUDIO_DEVICE_OUT_EARPIECE): {
1383                  ApplyAudioGain(MapVolume,  mode, Audio_Earpiece);
1384                  break;
1385              }

void AudioALSAVolumeController::ApplyAudioGain(int Gain, uint32_t mode, uint32_t device) {

1278      int DegradedBGain = mVolumeRange[device];
1279      DegradedBGain = DegradedBGain + (DEVICE_VOLUME_RANGE - DegradedBGain) * ((VOLUME_MAPPING_STEP - Gain) / VOLUME_MAPPING_STEP);
1280      ALOGD("ApplyAudioGain  DegradedBGain = %d mVolumeRange[mode] = %d ", DegradedBGain, mVolumeRange[device]);
1281      if (device  ==  Audio_Earpiece || device == Audio_DualMode_Earpiece || device == Sipcall_Earpiece) {
1282          SetReceiverGain(DegradedBGain);
1283      } else if ((device  == Audio_Headset) || (device == Audio_Headphone) || (device == Sipcall_Headset) || (device == Sipcall_Headphone)) {
1284          ALOGD("ApplyAudioGain Audio_Headset\n");
1285  #ifdef USE_PREV_DESINGED    //no headphone impedance
1286          if (GetHeadPhoneImpedanceEnable() == true) {
1287              DegradedBGain += MapHeadPhoneImpedance();
1288              ALOGD("GetHeadPhoneImpedanceEnable DegradedBGain = %d ", DegradedBGain);
1289  
1290              SetHeadPhoneLGain(DegradedBGain);
1291              SetHeadPhoneRGain(DegradedBGain);
1292          } else
1293  #endif
1294          {
1295              SetHeadPhoneLGain(DegradedBGain);
1296              SetHeadPhoneRGain(DegradedBGain);
1297          }
1298      } else if ((device  == Audio_DualMode_Headset) || (device == Audio_DualMode_Headphone)) {
1299          SetHeadPhoneLGain(DegradedBGain);
1300          SetHeadPhoneRGain(DegradedBGain);
1301      } else if (device == Audio_Speaker) {
1302          ALOGD("ApplyAudioGain Audio_Speaker\n");
1303          if (DegradedBGain >= (_countof(DL_PGA_LINEOUT_GAIN) - 1)) {
1304              DegradedBGain = _countof(DL_PGA_LINEOUT_GAIN) - 1;
1305          }
1306          SetLinoutLGain(DegradedBGain);
1307          SetLinoutRGain(DegradedBGain);
1308      }

计算出增益之后,通过SetHeadPhoneLGain、SetReceiverGain、SetLinoutRGain设置。

void AudioALSAVolumeController::SetReceiverGain(int DegradedBGain) {
937      enum mixer_ctl_type type;
938      ctl = mixer_get_ctl_by_name(mMixer, "Handset_PGA_GAIN");
939      type = mixer_ctl_get_type(ctl);
940      if (mixer_ctl_set_enum_by_string(ctl, DL_PGA_Handset_GAIN[index])) {

可以看见是通过tinymix设置下去。Speaker用的是ApplyExtAmpHeadPhoneGain,但最终看到也还是通过tinymix设置。

void AudioALSAVolumeController::ApplyExtAmpHeadPhoneGain(int Gain, uint32_t mode, uint32_t device) {
1343      SetLinoutLGain(DegradedBGain);
1344      SetLinoutRGain(DegradedBGain);

QCOM Audio Hal

音频框图

在这里插入图片描述

概念

  • Front End PCMs:音频前端,一个前端对应着一个 PCM 设备
    FE PCMs:
    deep_buffer
    low_latency
    mutil_channel
    compress_offload
    audio_record
    usb_audio
    a2dp_audio
    voice_call

  • Back End DAIs:音频后端,一个后端对应着一个 DAI 接口,一个 FE PCM 能够连接到一个或多个 BE DAI
    BE DAI:
    SLIM_BUS
    Aux_PCM
    Primary_MI2S
    Secondary_MI2S
    Tertiary_MI2S
    Quatermary_MI2S

  • Audio Device:有 headset、speaker、earpiece、mic、bt、modem 等;不同的设备可能与不同的 DAI 接口连接,也可能与同一个 DAI 接口连接(如上图,Speaker 和 Earpiece 都连接到 DAI1)

  • Usecase:
    ·usecase 通俗表示音频场景,对应着音频前端,比如:
    ·low_latency:按键音、触摸音、游戏背景音等低延时的放音场景
    ·deep_buffer:音乐、视频等对时延要求不高的放音场景
    ·compress_offload:mp3、flac、aac等格式的音源播放场景,这种音源不需要软件解 码,直接把数据送到硬件解码器(aDSP),由硬件解码器(aDSP)进行解码
    ·record:普通录音场景
    ·record_low_latency:低延时的录音场景
    ·voice_call:语音通话场景
    ·voip_call:网络通话场景

音频通路连接

通路链接流程:
FE_PCMs <=> BE_DAIs <=> Devices

  • 打开FE pcm
int start_output_stream(struct stream_out *out)
{
    int ret = 0;
    struct audio_usecase *uc_info;
    struct audio_device *adev = out->dev;
    // 根据 usecase 找到对应 FE PCM id
    out->pcm_device_id = platform_get_pcm_device_id(out->usecase, PCM_PLAYBACK);
    if (out->pcm_device_id < 0) {
        ALOGE("%s: Invalid PCM device id(%d) for the usecase(%d)",
              __func__, out->pcm_device_id, out->usecase);
        ret = -EINVAL;
        goto error_open;
    }
    // 为这个音频流新建一个 usecase 实例
    uc_info = (struct audio_usecase *)calloc(1, sizeof(struct audio_usecase));

    if (!uc_info) {
        ret = -ENOMEM;
        goto error_config;
    }

    uc_info->id = out->usecase; // 音频流对应的 usecase
    uc_info->type = PCM_PLAYBACK; // 音频流的流向
    uc_info->stream.out = out;
    uc_info->devices = out->devices; // 音频流的初始设备
    uc_info->in_snd_device = SND_DEVICE_NONE;
    uc_info->out_snd_device = SND_DEVICE_NONE;
    list_add_tail(&adev->usecase_list, &uc_info->list); // 把新建的 usecase 实例添加到链表中

    // 根据 usecase、out->devices,为音频流选择相应的音频设备
    select_devices(adev, out->usecase);

    ALOGV("%s: Opening PCM device card_id(%d) device_id(%d) format(%#x)",
          __func__, adev->snd_card, out->pcm_device_id, out->config.format);
    if (!is_offload_usecase(out->usecase)) {
        unsigned int flags = PCM_OUT;
        unsigned int pcm_open_retry_count = 0;
        if (out->usecase == USECASE_AUDIO_PLAYBACK_AFE_PROXY) {
            flags |= PCM_MMAP | PCM_NOIRQ;
            pcm_open_retry_count = PROXY_OPEN_RETRY_COUNT;
        } else if (out->realtime) {
            flags |= PCM_MMAP | PCM_NOIRQ;
        } else
            flags |= PCM_MONOTONIC;

        while (1) {
            // 打开 FE PCM
            out->pcm = pcm_open(adev->snd_card, out->pcm_device_id,
                               flags, &out->config);
            if (out->pcm == NULL || !pcm_is_ready(out->pcm)) {
                ALOGE("%s: %s", __func__, pcm_get_error(out->pcm));
                if (out->pcm != NULL) {
                    pcm_close(out->pcm);
                    out->pcm = NULL;
                }
                if (pcm_open_retry_count-- == 0) {
                    ret = -EIO;
                    goto error_open;
                }
                usleep(PROXY_OPEN_WAIT_TIME * 1000);
                continue;
            }
            break;
        }
  • BE_DAIs
    mixer_pahts.xml 中看到 usecase 相关的通路:
  <path name="deep-buffer-playback speaker">
        <ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
    </path>
    <path name="deep-buffer-playback headphones">
        <ctl name="TERT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
    </path>
    <path name="deep-buffer-playback earphones">
        <ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
</path>

这些通路其实就是连接 usecase、device 之间的路由。比如 “deep-buffer-playback speaker” 是连接 deep-buffer-playback FE PCM、speaker Device 之间的路由,打开 “deep-buffer-playback speaker”,则把 deep-buffer-playback FE PCM 和 speaker Device 连接起来;关闭 “deep-buffer-playback speaker”,则断开 deep-buffer-playback FE PCM 和 speaker Device 的连接。
之前提到"device 连接着唯一的 BE DAI,确定了 device 也就能确定所连接的 BE DAI",因此这些路由通路其实都隐含着 BE DAI 的连接:FE PCM 并非直接到 device 的,而是 FE PCM 先连接到 BE DAI,BE DAI 再连接到 device。这点有助于理解路由控件,路由控件面向的是 FE PCM 和 BE DAI 之间的连接,回放类型的路由控件名称一般是:$BE_DAI Audio Mixer $FE_PCM,录制类型的路由控件名称一般是:$FE_PCM Audio Mixer $BE_DAI,这很容易分辨。
例如 “deep-buffer-playback speaker” 通路中的路由控件:

<ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />

MultiMedia1:deep_buffer usacase 对应的 FE PCM
QUAT_MI2S_RX:speaker device 所连接的 BE DAI
Audio Mixer:表示 DSP 路由功能
value:1 表示连接,0 表示断开连接

这个ctl的意思是:把 MultiMedia1 PCM 与 QUAT_MI2S_RX DAI 连接起来。并没有指明 QUAT_MI2S_RX DAI 与 speaker device 之间的连接,因为 BE DAIs 与 Devices 之间并不需要路由控件,如之前所强调”device 连接着唯一的 BE DAI,确定了 device 也就能确定所连接的 BE DAI“。
路由操作函数是 enable_audio_route()/disable_audio_route(),这两个函数名称很贴合,控制 FE PCMs 与 BE DAIs 的连接或断开。

代码流程很简单,把 usecase 和 device 拼接起来就是路由的 path name 了,然后再调用 audio_route_apply_and_update_path() 来设置路由通路:

const char * const use_case_table[AUDIO_USECASE_MAX] = {
    [USECASE_AUDIO_PLAYBACK_DEEP_BUFFER] = "deep-buffer-playback",
    [USECASE_AUDIO_PLAYBACK_LOW_LATENCY] = "low-latency-playback",
    //...
};
const char * const backend_tag_table[SND_DEVICE_MAX] = {
    [SND_DEVICE_OUT_HANDSET] = "earphones",
    [SND_DEVICE_OUT_SPEAKER] = "speaker",
    [SND_DEVICE_OUT_HEADPHONES] = "headphones",
    //...
};
void platform_add_backend_name(char *mixer_path, snd_device_t snd_device,
                               struct audio_usecase *usecase)
{
    if ((snd_device < SND_DEVICE_MIN) || (snd_device >= SND_DEVICE_MAX)) {
        ALOGE("%s: Invalid snd_device = %d", __func__, snd_device);
        return;
    }
    const char * suffix = backend_tag_table[snd_device];

    if (suffix != NULL) {
        strlcat(mixer_path, " ", MIXER_PATH_MAX_LENGTH);
        strlcat(mixer_path, suffix, MIXER_PATH_MAX_LENGTH);
    }
}
int enable_audio_route(struct audio_device *adev,
                       struct audio_usecase *usecase)
{
    snd_device_t snd_device;
    char mixer_path[MIXER_PATH_MAX_LENGTH];

    if (usecase == NULL)
        return -EINVAL;
    ALOGV("%s: enter: usecase(%d)", __func__, usecase->id);
    if (usecase->type == PCM_CAPTURE)
        snd_device = usecase->in_snd_device;
    else
        snd_device = usecase->out_snd_device;
    strlcpy(mixer_path, use_case_table[usecase->id], MIXER_PATH_MAX_LENGTH);
    platform_add_backend_name(mixer_path, snd_device, usecase);
    ALOGD("%s: apply mixer and update path: %s", __func__, mixer_path);
    audio_route_apply_and_update_path(adev->audio_route, mixer_path);

TinyAlsa接口调用

HAL与ALSA对接使用了TinyALSA库,这个很重要。TinyALSA是一个轻量级的封装库,对ALSA接口进行了二次封装,简化了对ALSA的操作,具体源码目录在/external/tinyalsa。这个库衔接了Hal与Linux,这个是连接驱动的关键。

编译tinyalsa配套工具

代码路径:external/tinyalsa/
编译完后会产生tinyplay/tinymix/tinycap等等工具。
tinymix: 查看配置混音器
tinyplay: 播放音频
tinycap: 录音

tinyalsa命令

  • Tinymix:查看和更改ctl
    tinymix不加任何参数-显示当前配置情况
    tinymix [ctl][value] 设置ctl值

  • Tinyplay:播放音乐
    tinyplay /sdcard/0_16.wav

  • Tinycap:录音
    tinycap /sdcard/test.wav

API

主要api:
Pcm:

struct pcm *pcm_open(unsigned int card, unsigned int device, unsigned int flags, struct pcm_config *config); 
int pcm_write(struct pcm *pcm, const void *data, unsigned int count); //返回0表示成功 
int pcm_read(struct pcm *pcm, void *data, unsigned int count);//返回0表示成功 
int pcm_close(struct pcm *pcm);
Mixer:
int mixer_ctl_set_value(struct mixer_ctl *ctl, unsigned int id, int value)
void mixer_ctl_get(struct mixer_ctl *ctl, unsigned *value)
void mixer_close(struct mixer *mixer)
int mixer_ctl_set(struct mixer_ctl *ctl, unsigned percent)
struct mixer *mixer_open(const char *device)
int mixer_ctl_select(struct mixer_ctl *ctl, const char *value)

在这里插入图片描述

本文内容由网友自发贡献,转载请注明出处:【wpsshop博客】
推荐阅读
相关标签
  

闽ICP备14008679号