
Android multi-channel audio: supporting multi-channel recording


Stock Android only supports 2-channel recording. Yet multi-mic requirements keep coming up, for example in voice recognition. It is already known that the TDM protocol can carry multi-mic data from the kernel into the HAL, i.e. move it from kernel space into user space. The stock AudioRecord API, however, does not support capturing multi-channel recording data at all. So what has to be modified to make the stock framework support it?

Let's start from the AudioRecord constructor and work our way down. Whether it turns out to be feasible or not, we should at least understand why. A typical recording app uses AudioRecord roughly like this:

int sampleRateInHz = 8000;
int channelConfiguration = AudioFormat.CHANNEL_IN_STEREO;  // channel mask, e.g. stereo
int audioEncodingBits = AudioFormat.ENCODING_PCM_16BIT;
int recordBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz,
        channelConfiguration, audioEncodingBits);
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
        sampleRateInHz, channelConfiguration, audioEncodingBits,
        recordBufferSize);
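Since the whole point is to get more than 2 channels, it is worth sketching what a multi-channel request would look like through the very same API. The snippet below is only an illustration: it assumes the 6-channel input mask AudioFormat.CHANNEL_IN_5POINT1 (which getMinBufferSize already handles, see below) is visible to the app, even though it is @hide on some releases; on a stock build the request is expected to be rejected somewhere along the path we are about to trace.

int sampleRateInHz = 16000;
int channelConfiguration = AudioFormat.CHANNEL_IN_5POINT1;  // 6-channel input mask; @hide on some releases
int audioEncodingBits = AudioFormat.ENCODING_PCM_16BIT;
int bufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfiguration, audioEncodingBits);
if (bufferSize <= 0) {
    // ERROR_BAD_VALUE: the framework rejected the mask before an AudioRecord was even built
    Log.w("MultiMic", "6-channel capture rejected, getMinBufferSize = " + bufferSize);
} else {
    AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRateInHz, channelConfiguration, audioEncodingBits, bufferSize);
    // even if the size query succeeds, a stock build may still refuse to initialize
    boolean supported = (record.getState() == AudioRecord.STATE_INITIALIZED);
}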

Let's start with the last parameter of the AudioRecord constructor, recordBufferSize, which comes from getMinBufferSize:

//AudioRecord.java
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    ...
    // channelCount is derived from the channel mask
    // interestingly there is even a 6-channel case here, which looks like a useful reference
    case AudioFormat.CHANNEL_IN_5POINT1:
        channelCount = 6;
    ...
    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    ...
}
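Incidentally, getMinBufferSize can itself be used to probe which channel masks a given build accepts, because it returns ERROR_BAD_VALUE for a configuration the platform rejects. A minimal sketch (the mask list is just for illustration):

int[] masks = {
        AudioFormat.CHANNEL_IN_MONO,     // 1 channel
        AudioFormat.CHANNEL_IN_STEREO,   // 2 channels
        AudioFormat.CHANNEL_IN_5POINT1,  // 6 channels; @hide on some releases
};
for (int mask : masks) {
    int size = AudioRecord.getMinBufferSize(8000, mask, AudioFormat.ENCODING_PCM_16BIT);
    // a positive value means the framework (and ultimately the HAL) accepted this mask
    Log.d("MultiMic", "mask=0x" + Integer.toHexString(mask) + " minBufferSize=" + size);
}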

native_get_min_buff_size maps to android_media_AudioRecord_get_min_buff_size on the JNI side:

//android_media_AudioRecord.cpp
static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env, jobject thiz,
        jint sampleRateInHertz, jint channelCount, jint audioFormat) {
    size_t frameCount = 0;
    audio_format_t format = audioFormatToNative(audioFormat);
    status_t result = AudioRecord::getMinFrameCount(&frameCount,
            sampleRateInHertz,
            format,
            audio_channel_in_mask_from_count(channelCount));
    return frameCount * channelCount * audio_bytes_per_sample(format);
}

The format passed in here is AudioFormat.ENCODING_PCM_16BIT; looking at audio_bytes_per_sample:

//audio.h
static inline size_t audio_bytes_per_sample(audio_format_t format)
{
    ...
    case AUDIO_FORMAT_PCM_16_BIT:
    case AUDIO_FORMAT_IEC61937:
        size = sizeof(int16_t);
    ...
}

So for 16-bit PCM, audio_bytes_per_sample returns sizeof(int16_t) = 2 bytes per sample.

//AudioRecord.cpp
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    size_t size;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    ...
    // note that the buffer size gets doubled here
    // We double the size of input buffer for ping pong use of record buffer.
    // Assumes audio_is_linear_pcm(format)
    if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
            audio_bytes_per_sample(format))) == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
                sampleRate, format, channelMask);
        return BAD_VALUE;
    }
}

For getInputBufferSize, let's jump straight to the HAL layer:

//audio_hw.c
static size_t get_input_buffer_size(uint32_t sample_rate,
                                    audio_format_t format,
                                    int channel_count,
                                    bool is_low_latency)
{
    ...
    // with our parameters this is (8000 * 20) / 1000 = 160 frames
    size = (sample_rate * AUDIO_CAPTURE_PERIOD_DURATION_MSEC) / 1000;
    size *= sizeof(short) * channel_count;
    ...
}
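Putting the pieces together, here is a rough back-of-the-envelope version of the whole chain for 16-bit PCM, assuming a 20 ms capture period (AUDIO_CAPTURE_PERIOD_DURATION_MSEC, as the comment above implies) and ignoring any rounding or alignment the real HAL applies afterwards:

// hypothetical walk-through of the buffer-size math; not the framework code itself
static int minBufferSizeBytes(int sampleRate, int channelCount) {
    final int bytesPerSample = 2;                    // AUDIO_FORMAT_PCM_16_BIT
    final int periodMs = 20;                         // AUDIO_CAPTURE_PERIOD_DURATION_MSEC (assumed)
    int halFrames = (sampleRate * periodMs) / 1000;  // audio_hw.c: 8000 * 20 / 1000 = 160
    int halBytes = halFrames * bytesPerSample * channelCount;
    int frameCount = (halBytes * 2) / (channelCount * bytesPerSample);  // getMinFrameCount: ping-pong doubling
    return frameCount * channelCount * bytesPerSample;                  // JNI: frames * channels * bytes per sample
}
// e.g. minBufferSizeBytes(8000, 2) == 1280 bytes, and for a 6-mic setup minBufferSizeBytes(8000, 6) == 3840 bytes.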
