
Android-NDK-audio-echo: android audio echo.so

Project structure (figure)

Running UI (figure)

UI analysis

setContentView(R.layout.activity_main);   // set the layout
controlButton = (Button)findViewById((R.id.capture_control_button));
statusView = (TextView)findViewById(R.id.statusView);

queryNativeAudioParameters() checks whether recording is supported and queries the device's native audio parameters. The AudioRecord parameters involved are:

  • Audio source: the microphone is used as the capture source.

  • Sample rate: how many times per second the signal is sampled; the higher the rate, the better the quality.

  • Channels: mono, stereo, and so on.

  • Format: normally PCM, i.e. raw audio samples.

  • Buffer size: the total amount of captured audio written into the buffer; AudioRecord.getMinBufferSize returns the minimum usable size (audio is captured into the buffer and then read back out of it).

private void queryNativeAudioParameters() {
    supportRecording = true;
    AudioManager myAudioMgr = (AudioManager) getSystemService(Context.AUDIO_SERVICE);  // fetch the AudioManager service
    if (myAudioMgr == null) {
        supportRecording = false;
        return;
    }
    nativeSampleRate = myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);          // native sample rate
    nativeSampleBufSize = myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER); // frames per buffer
    // hardcoded channel to mono: both sides -- C++ and Java sides
    int recBufSize = AudioRecord.getMinBufferSize(
            Integer.parseInt(nativeSampleRate),
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (recBufSize == AudioRecord.ERROR ||
        recBufSize == AudioRecord.ERROR_BAD_VALUE) {
        supportRecording = false;
    }
}

queryNativeAudioParameters();

delaySeekBar = (SeekBar)findViewById(R.id.delaySeekBar);
curDelayTV = (TextView)findViewById(R.id.curDelay);
echoDelayProgress = delaySeekBar.getProgress() * 1000 / delaySeekBar.getMax();

Set the listener on the delay SeekBar:

delaySeekBar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
    @Override
    public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
        float curVal = (float)progress / delaySeekBar.getMax();
        curDelayTV.setText(String.format("%s", curVal));
        setSeekBarPromptPosition(delaySeekBar, curDelayTV);
        if (!fromUser) return;
        echoDelayProgress = progress * 1000 / delaySeekBar.getMax();
        configureEcho(echoDelayProgress, echoDecayProgress);
    }
    @Override
    public void onStartTrackingTouch(SeekBar seekBar) {}
    @Override
    public void onStopTrackingTouch(SeekBar seekBar) {}
});

// Position the prompt label above the SeekBar thumb
private void setSeekBarPromptPosition(SeekBar seekBar, TextView label) {
    float thumbX = (float)seekBar.getProgress() / seekBar.getMax() *
                   seekBar.getWidth() + seekBar.getX();
    label.setX(thumbX - label.getWidth() / 2.0f);
}

configureEcho() is the JNI call that pushes the new delay to the native engine:

static native boolean configureEcho(int delayInMs, float decay);

// The view's width is only valid after it has been attached to the window,
// so the initial prompt position is set from a posted Runnable
delaySeekBar.post(new Runnable() {
    @Override
    public void run() {
        setSeekBarPromptPosition(delaySeekBar, curDelayTV);
    }
});
private void startEcho() {
    if (!supportRecording) {
        return;
    }
    if (!isPlaying) {
        if (!createSLBufferQueueAudioPlayer()) {
            statusView.setText(getString(R.string.player_error_msg));
            return;
        }
        if (!createAudioRecorder()) {
            deleteSLBufferQueueAudioPlayer();
            statusView.setText(getString(R.string.recorder_error_msg));
            return;
        }
        startPlay();   // startPlay() triggers startRecording()
        statusView.setText(getString(R.string.echoing_status_msg));
    } else {
        stopPlay();    // stopPlay() triggers stopRecording()
        updateNativeAudioUI();
        deleteAudioRecorder();
        deleteSLBufferQueueAudioPlayer();
    }
    isPlaying = !isPlaying;
    controlButton.setText(getString(isPlaying ?
            R.string.cmd_stop_echo : R.string.cmd_start_echo));
}

JNI function declarations

/*
 * jni function declarations
 */
static native void createSLEngine(int rate, int framesPerBuf,
                                  long delayInMs, float decay);
static native void deleteSLEngine();
static native boolean configureEcho(int delayInMs, float decay);
static native boolean createSLBufferQueueAudioPlayer();
static native void deleteSLBufferQueueAudioPlayer();
static native boolean createAudioRecorder();
static native void deleteAudioRecorder();
static native void startPlay();
static native void stopPlay();
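Each of these declarations has to be matched by a native function whose exported symbol encodes the Java package and class. Below is a minimal sketch of one such counterpart; the package name com.example.echo and the stub body are placeholders, not the sample's actual implementation.

#include <jni.h>

// Placeholder native counterpart of `static native boolean configureEcho(int, float)`.
// The symbol must follow Java_<package>_<Class>_<method>; com.example.echo is assumed here.
extern "C" JNIEXPORT jboolean JNICALL
Java_com_example_echo_MainActivity_configureEcho(JNIEnv *env, jclass clazz,
                                                 jint delayInMs, jfloat decay) {
    // A real implementation would forward delayInMs/decay to the engine's
    // AudioDelay effect; this stub only shows the bridging signature.
    (void)env; (void)clazz; (void)delayInMs; (void)decay;
    return JNI_TRUE;  // placeholder return value
}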

OpenSL ES

(Diagram from: https://www.jianshu.com/p/82da5f87314f)

OpenSL ES is a royalty-free, cross-platform, hardware-accelerated audio API carefully optimized for embedded systems.

Objects and interfaces:

Object: an abstraction over a set of resources and their state.

Interface: an abstraction over a set of methods that provide a specific capability.

Relationship between objects and interfaces:

The interfaces an object exposes are determined by three things:

1) the object's type;

2) the interfaces the application requested when the object was created;

3) interfaces added or removed during the object's lifetime.

An object's type implies a set of implicit interfaces: they are exposed by the object whether or not the application requests them.

Interfaces that are exposed only if the application requests them at object-creation time are called explicit interfaces.

Interfaces that an object allows to be added and removed dynamically are called dynamic interfaces; they are managed through SLDynamicInterfaceManagementItf.
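A minimal sketch of that object/interface life cycle, using only the engine object (standard OpenSL ES calls; error handling reduced to asserts):

#include <SLES/OpenSLES.h>
#include <cassert>

void objectLifecycleSketch() {
    SLObjectItf engineObj = NULL;
    SLEngineItf engineItf = NULL;

    // Create: every creation call returns an SLObjectItf (the implicit object interface).
    // The last three arguments are where explicit interfaces would be requested; none here.
    SLresult r = slCreateEngine(&engineObj, 0, NULL, 0, NULL, NULL);
    assert(r == SL_RESULT_SUCCESS);

    // Realize: allocate the object's resources (SL_BOOLEAN_FALSE = synchronous).
    r = (*engineObj)->Realize(engineObj, SL_BOOLEAN_FALSE);
    assert(r == SL_RESULT_SUCCESS);

    // GetInterface: fetch further interfaces by type ID once the object is realized.
    r = (*engineObj)->GetInterface(engineObj, SL_IID_ENGINE, &engineItf);
    assert(r == SL_RESULT_SUCCESS);

    // Destroy: release the object and invalidate every interface obtained from it.
    (*engineObj)->Destroy(engineObj);
}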

EchoAudioEngine

struct EchoAudioEngine {
  SLmilliHertz fastPathSampleRate_;
  uint32_t fastPathFramesPerBuf_;
  uint16_t sampleChannels_;
  uint16_t bitsPerSample_;
  SLObjectItf slEngineObj_;
  SLEngineItf slEngineItf_;
  AudioRecorder *recorder_;
  AudioPlayer *player_;
  AudioQueue *freeBufQueue_;  // Owner of the queue
  AudioQueue *recBufQueue_;   // Owner of the queue
  sample_buf *bufs_;
  uint32_t bufCount_;
  uint32_t frameCount_;
  int64_t echoDelay_;
  float echoDecay_;
  AudioDelay *delayEffect_;
};
static EchoAudioEngine engine;

SLObjectItf: every object exposes this interface. Each object-creation method returns an SLObjectItf; the object is destroyed with Destroy(), the application obtains the object's other interfaces by type ID via GetInterface(), and the object's state is driven through the SLObjectItf methods Realize() and Resume().

SLEngineItf: an application starts a session by creating an engine object. The engine object is created with slCreateEngine(), which returns an SLObjectItf.

Once the engine object has been created (and realized), its SLEngineItf can be obtained from it.

SLBufferQueueItf: used to stream audio data through the buffer queue of a player object or a recorder object.

1) For a recorder object, enqueuing buffers while the recorder is in the SL_RECORDSTATE_RECORDING state (controlled through the SLRecordItf interface) implicitly starts the filling process. If too few buffers are queued, filling stops and the audio that should have been captured is lost, while the recorder stays in SL_RECORDSTATE_RECORDING. Once additional buffers are enqueued, filling resumes with the audio being captured at that moment, not from the point of starvation. If the recorder is not in the recording state, enqueuing extra buffers does not fill any of them.

2) Buffers enqueued on a player object are used in place and are not copied by the device. Application developers should be aware that modifying the contents of a buffer after it has been enqueued is undefined behaviour and can corrupt the audio data.

3) Once an enqueued buffer has finished playing or being filled, a callback notifies the application; it is then safe to delete the buffer, to refill it with new data and re-enqueue it on a player object, or to re-enqueue it on a recorder object (see the sketch after this list).

4) A transition to SL_PLAYSTATE_STOPPED flushes the queue by releasing all buffers and resets the cursor to 0. Each released buffer triggers a callback carrying the SL_BUFFERQUEUEEVENT_STOPPED flag.

5) After a transition to SL_RECORDSTATE_STOPPED, the application should keep enqueuing buffers to retrieve the audio still buffered in the system. Retrieval is complete when a callback arrives with the SL_BUFFERQUEUEEVENT_CONTENT_END event flag; the emptied buffers can then be reused for the next recording session, and the record cursor is reset to 0.

6) After a transition to SL_PLAYSTATE_PAUSED or SL_RECORDSTATE_PAUSED, the cursor keeps its current position.
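A minimal sketch of the pattern in point 3, using Android's SLAndroidSimpleBufferQueueItf. Buffers complete in FIFO order, so the application tracks the outstanding ones itself (this sample uses devShadowQueue_ for that). The names gBufs, refillPcm and primeQueue are placeholders, and the sketch omits the locking real code needs:

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <cstddef>
#include <cstring>
#include <deque>

constexpr size_t kBufBytes = 480;           // matches the 480-byte buffers used later in this article
static uint8_t gBufs[2][kBufBytes];
static std::deque<uint8_t *> gOutstanding;  // buffers currently owned by the device

static void refillPcm(uint8_t *buf) {       // placeholder "produce the next audio block" step
    std::memset(buf, 0, kBufBytes);
}

// Registered with (*bq)->RegisterCallback(bq, bqCallback, NULL); called each
// time the device finishes consuming one enqueued buffer.
static void bqCallback(SLAndroidSimpleBufferQueueItf bq, void * /*ctx*/) {
    uint8_t *done = gOutstanding.front();   // the oldest outstanding buffer just completed
    gOutstanding.pop_front();
    refillPcm(done);                        // now safe to touch it again (point 3)
    (*bq)->Enqueue(bq, done, kBufBytes);    // hand it back so the stream never starves
    gOutstanding.push_back(done);
}

// Prime the queue before (or right after) switching the player/recorder on.
static void primeQueue(SLAndroidSimpleBufferQueueItf bq) {
    for (auto &b : gBufs) {
        refillPcm(b);
        (*bq)->Enqueue(bq, b, kBufBytes);
        gOutstanding.push_back(b);
    }
}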


 

createSLEngine

engine.fastPathSampleRate_ = static_cast<SLmilliHertz>(sampleRate) * 1000;
engine.fastPathFramesPerBuf_ = static_cast<uint32_t>(framesPerBuf);
engine.sampleChannels_ = AUDIO_SAMPLE_CHANNELS;
engine.bitsPerSample_ = SL_PCMSAMPLEFORMAT_FIXED_16;
/*
  SL_API SLresult SLAPIENTRY slCreateEngine(
      SLObjectItf          *pEngine,
      SLuint32              numOptions,
      const SLEngineOption *pEngineOptions,
      SLuint32              numInterfaces,
      const SLInterfaceID  *pInterfaceIds,
      const SLboolean      *pInterfaceRequired
  );
*/
result = slCreateEngine(&engine.slEngineObj_, 0, NULL, 0, NULL, NULL);
SLASSERT(result);
// Realize the object in synchronous mode
result =
    (*engine.slEngineObj_)->Realize(engine.slEngineObj_, SL_BOOLEAN_FALSE);
SLASSERT(result);

// Obtain slEngineItf_

result = (*engine.slEngineObj_)
             ->GetInterface(engine.slEngineObj_, SL_IID_ENGINE,
                            &engine.slEngineItf_);

// compute the RECOMMENDED fast audio buffer size:
//   the lower the latency required
//     *) the smaller the buffer should be (adjust it here) AND
//     *) the less buffering should be before starting player AFTER
//        receiving the recorder buffer
//   Adjust the bufSize here to fit your bill [before it busts]
uint32_t bufSize = engine.fastPathFramesPerBuf_ * engine.sampleChannels_ *
                   engine.bitsPerSample_;
bufSize = (bufSize + 7) >> 3;  // bits --> bytes
engine.bufCount_ = BUF_COUNT;
engine.bufs_ = allocateSampleBufs(engine.bufCount_, bufSize);
assert(engine.bufs_);
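As a worked example (assuming the device reports 240 frames per buffer): with mono, 16-bit samples, bufSize = 240 * 1 * 16 = 3840 bits, and (3840 + 7) >> 3 = 480 bytes, which matches the 16 × 480-byte buffers described next.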

// Create the buffer queues. freeBufQueue_ holds idle buffers, i.e. empty sample arrays waiting to be used. recBufQueue_ is the receive queue: it stores captured audio data and is also the source of the playback data. After the engine is initialized, freeBufQueue_ is filled with 16 (BUF_COUNT) empty 480-byte buffers; engine initialization is then complete.

engine.freeBufQueue_ = new AudioQueue(engine.bufCount_);
engine.recBufQueue_ = new AudioQueue(engine.bufCount_);
assert(engine.freeBufQueue_ && engine.recBufQueue_);
for (uint32_t i = 0; i < engine.bufCount_; i++) {
  engine.freeBufQueue_->push(&engine.bufs_[i]);
}

// Create the AudioDelay effect

engine.echoDelay_ = delayInMs;
engine.echoDecay_ = decay;
engine.delayEffect_ = new AudioDelay(
    engine.fastPathSampleRate_, engine.sampleChannels_, engine.bitsPerSample_,
    engine.echoDelay_, engine.echoDecay_);
assert(engine.delayEffect_);

AudioPlayer

Constructor

SLresult result;
assert(sampleFormat);
sampleInfo_ = *sampleFormat;
/*
  SLresult (*CreateOutputMix) (
      SLEngineItf self,
      SLObjectItf *pMix,
      SLuint32 numInterfaces,
      const SLInterfaceID *pInterfaceIds,
      const SLboolean *pInterfaceRequired
  );
  Creates the output mix object.
*/
result = (*slEngine)
             ->CreateOutputMix(slEngine, &outputMixObjectItf_, 0, NULL, NULL);
SLASSERT(result);
// realize the output mix
result =
    (*outputMixObjectItf_)->Realize(outputMixObjectItf_, SL_BOOLEAN_FALSE);
SLASSERT(result);
// configure the audio source
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, DEVICE_SHADOW_BUFFER_QUEUE_LEN};
SLAndroidDataFormat_PCM_EX format_pcm;
ConvertToSLSampleFormat(&format_pcm, &sampleInfo_);
SLDataSource audioSrc = {&loc_bufq, &format_pcm};

// configure the audio sink (output)
SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX,
                                      outputMixObjectItf_};
SLDataSink audioSnk = {&loc_outmix, NULL};
/*
 * create fast path audio player: SL_IID_BUFFERQUEUE and SL_IID_VOLUME
 * and other non-signal processing interfaces are ok.
 */
SLInterfaceID ids[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
/*
  SLresult (*CreateAudioPlayer) (
      SLEngineItf self,
      SLObjectItf *pPlayer,
      const SLDataSource *pAudioSrc,
      const SLDataSink *pAudioSnk,
      SLuint32 numInterfaces,
      const SLInterfaceID *pInterfaceIds,
      const SLboolean *pInterfaceRequired
  );
  Creates the audio player.
*/
result = (*slEngine)->CreateAudioPlayer(
    slEngine, &playerObjectItf_, &audioSrc, &audioSnk,
    sizeof(ids) / sizeof(ids[0]), ids, req);
SLASSERT(result);

// realize the player
result = (*playerObjectItf_)->Realize(playerObjectItf_, SL_BOOLEAN_FALSE);
SLASSERT(result);

// get the play interface
result = (*playerObjectItf_)
             ->GetInterface(playerObjectItf_, SL_IID_PLAY, &playItf_);
SLASSERT(result);

// get the buffer queue interface
result = (*playerObjectItf_)
             ->GetInterface(playerObjectItf_, SL_IID_BUFFERQUEUE,
                            &playBufferQueueItf_);
SLASSERT(result);

// register a callback on the buffer queue
result = (*playBufferQueueItf_)
             ->RegisterCallback(playBufferQueueItf_, bqPlayerCallback, this);
SLASSERT(result);

// Set the initial play state

result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED);
SLASSERT(result);

// create an empty queue to track deviceQueue
devShadowQueue_ = new AudioQueue(DEVICE_SHADOW_BUFFER_QUEUE_LEN);
assert(devShadowQueue_);
silentBuf_.cap_ = (format_pcm.containerSize >> 3) * format_pcm.numChannels *
                  sampleInfo_.framesPerBuf_;
silentBuf_.buf_ = new uint8_t[silentBuf_.cap_];
memset(silentBuf_.buf_, 0, silentBuf_.cap_);
silentBuf_.size_ = silentBuf_.cap_;

AudioPlayer::Start

// First query the current play state

SLuint32 state;
SLresult result = (*playItf_)->GetPlayState(playItf_, &state);
if (result != SL_RESULT_SUCCESS) {
  return SL_BOOLEAN_FALSE;
}
if (state == SL_PLAYSTATE_PLAYING) {
  return SL_BOOLEAN_TRUE;
}
// first force the player into the STOPPED state
result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED);
SLASSERT(result);
// then enqueue a (silent) buffer to prime the queue
result =
    (*playBufferQueueItf_)
        ->Enqueue(playBufferQueueItf_, silentBuf_.buf_, silentBuf_.size_);
SLASSERT(result);
devShadowQueue_->push(&silentBuf_);
// finally switch the play state to PLAYING
result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_PLAYING);
SLASSERT(result);
return SL_BOOLEAN_TRUE;

ProcessSLCallback

The registered callback:

void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *ctx) {
  (static_cast<AudioPlayer *>(ctx))->ProcessSLCallback(bq);
}

void AudioPlayer::ProcessSLCallback(SLAndroidSimpleBufferQueueItf bq) {
#ifdef ENABLE_LOG
  logFile_->logTime();
#endif
  std::lock_guard<std::mutex> lock(stopMutex_);
  // retrieve the finished device buf and put it onto the free queue
  // so the recorder can re-use it
  sample_buf *buf;
  devShadowQueue_->front(&buf);  // fetch the buffer that just finished playing
  devShadowQueue_->pop();
  if (buf != &silentBuf_) {
    buf->size_ = 0;
    freeQueue_->push(buf);
    if (!playQueue_->front(&buf)) {
#ifdef ENABLE_LOG
      logFile_->log("%s", "====Warning: running out of the Audio buffers");
#endif
      return;
    }
    devShadowQueue_->push(buf);
    (*bq)->Enqueue(bq, buf->buf_, buf->size_);  // take the next buffer from the play queue and enqueue it
    playQueue_->pop();
    return;
  }

// If the play queue holds fewer buffers than the kickstart count, enqueue silent data.

playQueue_ is the playback queue. If it is empty there is no buffered data, and the callback reports the problem back to the caller for error handling. If a buffer is fetched successfully, it is first pushed onto the transit (shadow) queue, then enqueued on the device for playback, and finally removed from the play queue. When it finishes playing, the player's buffer-queue callback fires again.

if (playQueue_->size() < PLAY_KICKSTART_BUFFER_COUNT) {
  (*bq)->Enqueue(bq, buf->buf_, buf->size_);
  devShadowQueue_->push(&silentBuf_);
  return;
}

// Prime the device with the buffers to be played.

for (int32_t idx = 0; idx < PLAY_KICKSTART_BUFFER_COUNT; idx++) {
  playQueue_->front(&buf);
  playQueue_->pop();
  devShadowQueue_->push(buf);  // devShadowQueue_ acts as the transit (shadow) queue
  (*bq)->Enqueue(bq, buf->buf_, buf->size_);
}

AudioPlayer::Stop

SLuint32 state;
SLresult result = (*playItf_)->GetPlayState(playItf_, &state);
SLASSERT(result);
if (state == SL_PLAYSTATE_STOPPED) return;
std::lock_guard<std::mutex> lock(stopMutex_);
result = (*playItf_)->SetPlayState(playItf_, SL_PLAYSTATE_STOPPED);
SLASSERT(result);
(*playBufferQueueItf_)->Clear(playBufferQueueItf_);
#ifdef ENABLE_LOG
if (logFile_) {
  delete logFile_;
  logFile_ = nullptr;
}
#endif

AudioPlayer::~AudioPlayer

AudioPlayer::~AudioPlayer() {
  std::lock_guard<std::mutex> lock(stopMutex_);
  // destroy buffer queue audio player object, and invalidate all associated
  // interfaces
  if (playerObjectItf_ != NULL) {
    (*playerObjectItf_)->Destroy(playerObjectItf_);
  }
  // Consume all non-completed audio buffers
  sample_buf *buf = NULL;
  while (devShadowQueue_->front(&buf)) {
    buf->size_ = 0;
    devShadowQueue_->pop();
    if (buf != &silentBuf_) {
      freeQueue_->push(buf);
    }
  }
  delete devShadowQueue_;
  // move the buffers still sitting in the play queue back to freeQueue_
  while (playQueue_->front(&buf)) {
    buf->size_ = 0;
    playQueue_->pop();
    freeQueue_->push(buf);
  }
  // destroy output mix object, and invalidate all associated interfaces
  if (outputMixObjectItf_) {
    (*outputMixObjectItf_)->Destroy(outputMixObjectItf_);
  }
  delete[] silentBuf_.buf_;
}

AudioRecorder

AudioRecorder::AudioRecorder

// Convert the sample format to its SL representation

SLresult result;
sampleInfo_ = *sampleFormat;
SLAndroidDataFormat_PCM_EX format_pcm;
ConvertToSLSampleFormat(&format_pcm, &sampleInfo_);

// configure the audio source.
// SL_DATALOCATOR_IODEVICE: data will be generated or consumed by the
// specified IO device (note: for audio output use the output mix).
// SL_DEFAULTDEVICEID_AUDIOINPUT: identifier denoting the set of input
// devices from which the implementation receives audio by default.
SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE,
                                  SL_IODEVICE_AUDIOINPUT,
                                  SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
SLDataSource audioSrc = {&loc_dev, NULL};

// configure the audio sink
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, DEVICE_SHADOW_BUFFER_QUEUE_LEN};
SLDataSink audioSnk = {&loc_bq, &format_pcm};

// Create the AudioRecorder object

// create audio recorder
// (requires the RECORD_AUDIO permission)
const SLInterfaceID id[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                             SL_IID_ANDROIDCONFIGURATION};
const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
result = (*slEngine)->CreateAudioRecorder(
    slEngine, &recObjectItf_, &audioSrc, &audioSnk,
    sizeof(id) / sizeof(id[0]), id, req);
SLASSERT(result);
// Configure the voice recognition preset which has no
// signal processing for lower latency.
// For reference, the relevant keys and values from <SLES/OpenSLES_AndroidConfiguration.h>:
/*---------------------------------------------------------------------------*/
/* Android AudioRecorder configuration                                       */
/*---------------------------------------------------------------------------*/

/** Audio recording preset */
/** Audio recording preset key */
#define SL_ANDROID_KEY_RECORDING_PRESET ((const SLchar*) "androidRecordingPreset")
/** Audio recording preset values */
/**   preset "none" cannot be set, it is used to indicate the current settings
 *     do not match any of the presets. */
#define SL_ANDROID_RECORDING_PRESET_NONE                ((SLuint32) 0x00000000)
/**   generic recording configuration on the platform */
#define SL_ANDROID_RECORDING_PRESET_GENERIC             ((SLuint32) 0x00000001)
/**   uses the microphone audio source with the same orientation as the camera
 *     if available, the main device microphone otherwise */
#define SL_ANDROID_RECORDING_PRESET_CAMCORDER           ((SLuint32) 0x00000002)
/**   uses the main microphone tuned for voice recognition */
#define SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION   ((SLuint32) 0x00000003)
/**   uses the main microphone tuned for audio communications */
#define SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION ((SLuint32) 0x00000004)
/**   uses the main microphone unprocessed */
#define SL_ANDROID_RECORDING_PRESET_UNPROCESSED         ((SLuint32) 0x00000005)

/*---------------------------------------------------------------------------*/
/* Android AudioPlayer configuration                                         */
/*---------------------------------------------------------------------------*/

/** Audio playback stream type */
/** Audio playback stream type key */
#define SL_ANDROID_KEY_STREAM_TYPE ((const SLchar*) "androidPlaybackStreamType")

/** Audio playback stream type values */
/*      same as android.media.AudioManager.STREAM_VOICE_CALL */
#define SL_ANDROID_STREAM_VOICE        ((SLint32) 0x00000000)
/*      same as android.media.AudioManager.STREAM_SYSTEM */
#define SL_ANDROID_STREAM_SYSTEM       ((SLint32) 0x00000001)
/*      same as android.media.AudioManager.STREAM_RING */
#define SL_ANDROID_STREAM_RING         ((SLint32) 0x00000002)
/*      same as android.media.AudioManager.STREAM_MUSIC */
#define SL_ANDROID_STREAM_MEDIA        ((SLint32) 0x00000003)
/*      same as android.media.AudioManager.STREAM_ALARM */
#define SL_ANDROID_STREAM_ALARM        ((SLint32) 0x00000004)
/*      same as android.media.AudioManager.STREAM_NOTIFICATION */
#define SL_ANDROID_STREAM_NOTIFICATION ((SLint32) 0x00000005)

/*---------------------------------------------------------------------------*/
/* Android AudioPlayer and AudioRecorder configuration                       */
/*---------------------------------------------------------------------------*/

/** Audio Performance mode.
 * Performance mode tells the framework how to configure the audio path
 * for a player or recorder according to application performance and
 * functional requirements.
 * It affects the output or input latency based on acceptable tradeoffs on
 * battery drain and use of pre or post processing effects.
 * Performance mode should be set before realizing the object and should be
 * read after realizing the object to check if the requested mode could be
 * granted or not.
 */
/** Audio Performance mode key */
#define SL_ANDROID_KEY_PERFORMANCE_MODE ((const SLchar*) "androidPerformanceMode")

/** Audio performance values */
/*      No specific performance requirement. Allows HW and SW pre/post processing. */
#define SL_ANDROID_PERFORMANCE_NONE ((SLuint32) 0x00000000)
/*      Priority given to latency. No HW or software pre/post processing.
 *      This is the default if no performance mode is specified. */
#define SL_ANDROID_PERFORMANCE_LATENCY ((SLuint32) 0x00000001)
/*      Priority given to latency while still allowing HW pre and post processing. */
#define SL_ANDROID_PERFORMANCE_LATENCY_EFFECTS ((SLuint32) 0x00000002)
/*      Priority given to power saving if latency is not a concern.
 *      Allows HW and SW pre/post processing. */
#define SL_ANDROID_PERFORMANCE_POWER_SAVING ((SLuint32) 0x00000003)
The recording presets are:
  xxx_RECORDING_PRESET_GENERIC (generic platform configuration)
  xxx_RECORDING_PRESET_CAMCORDER (prefers a microphone facing the same way as the camera; falls back to the main mic)
  xxx_RECORDING_PRESET_VOICE_RECOGNITION (tuned for speech recognition, may use a noise-cancelling mic)
  xxx_RECORDING_PRESET_VOICE_COMMUNICATION (tuned for phone/VoIP calls, may enable hardware AEC, NS and AGC)
  xxx_RECORDING_PRESET_UNPROCESSED (main mic, no processing applied)
The playback stream types are:
  xxx_STREAM_VOICE (VoIP or telephony, adjusted via the call volume)
  xxx_STREAM_SYSTEM (system volume)
  xxx_STREAM_RING (ringtone volume)
  xxx_STREAM_MEDIA (media volume)
  xxx_STREAM_ALARM (alarm volume)
(The explanation above is quoted from: https://blog.csdn.net/qq_29621351/article/details/94562600)
SLAndroidConfigurationItf inputConfig;
result = (*recObjectItf_)
             ->GetInterface(recObjectItf_, SL_IID_ANDROIDCONFIGURATION,
                            &inputConfig);
if (SL_RESULT_SUCCESS == result) {
  SLuint32 presetValue = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;
  (*inputConfig)
      ->SetConfiguration(inputConfig, SL_ANDROID_KEY_RECORDING_PRESET,
                         &presetValue, sizeof(SLuint32));
}
result = (*recObjectItf_)->Realize(recObjectItf_, SL_BOOLEAN_FALSE);
SLASSERT(result);

// Obtain the record interface (recItf_)

result =
    (*recObjectItf_)->GetInterface(recObjectItf_, SL_IID_RECORD, &recItf_);
SLASSERT(result);

// Obtain the object's buffer queue interface

result = (*recObjectItf_)
             ->GetInterface(recObjectItf_, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                            &recBufQueueItf_);
SLASSERT(result);

// Register the callback on the buffer queue

result = (*recBufQueueItf_)
             ->RegisterCallback(recBufQueueItf_, bqRecorderCallback, this);
SLASSERT(result);

// Create the transit (shadow) queue

devShadowQueue_ = new AudioQueue(DEVICE_SHADOW_BUFFER_QUEUE_LEN);
assert(devShadowQueue_);

AudioRecorder::Start

// in case we are already recording, stop recording and clear the buffer queue
result = (*recItf_)->SetRecordState(recItf_, SL_RECORDSTATE_STOPPED);
SLASSERT(result);
result = (*recBufQueueItf_)->Clear(recBufQueueItf_);
SLASSERT(result);

for (int i = 0; i < RECORD_DEVICE_KICKSTART_BUF_COUNT; i++) {
  sample_buf *buf = NULL;
  if (!freeQueue_->front(&buf)) {
    LOGE("=====OutOfFreeBuffers @ startingRecording @ (%d)", i);
    break;
  }
  freeQueue_->pop();
  assert(buf->buf_ && buf->cap_ && !buf->size_);
  result = (*recBufQueueItf_)->Enqueue(recBufQueueItf_, buf->buf_, buf->cap_);  // hand the empty buffer to the device
  SLASSERT(result);
  devShadowQueue_->push(buf);  // and track it in the transit queue
}

// Switch the record state to RECORDING

result = (*recItf_)->SetRecordState(recItf_, SL_RECORDSTATE_RECORDING);
SLASSERT(result);

AudioRecorder::ProcessSLCallback

assert(bq == recBufQueueItf_);
sample_buf *dataBuf = NULL;
devShadowQueue_->front(&dataBuf);
devShadowQueue_->pop();
dataBuf->size_ = dataBuf->cap_;  // device only calls us when it is really full
callback_(ctx_, ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE, dataBuf);  // notify: recorded audio is available
recQueue_->push(dataBuf);

// refill the device queue with free buffers
sample_buf *freeBuf;
while (freeQueue_->front(&freeBuf) && devShadowQueue_->push(freeBuf)) {
  freeQueue_->pop();
  SLresult result = (*bq)->Enqueue(bq, freeBuf->buf_, freeBuf->cap_);
  SLASSERT(result);
}

Audio Delay

When a signal comes in, the delay effect makes the output waveform lag behind the input by a configured amount of time, for example 100 ms.

Treat the continuous input as a time-ordered data stream and allocate a buffer sized for the maximum delay. For example, with a maximum delay of 2000 ms at a 96 kHz sample rate, Buffer = 96 * 2000 = 192000 samples; the DSP block size (for example 128 samples) may be added on top, giving a final Buffer of 192128 samples.
Define a structure holding the buffer's input (write) pointer *inP, its output (read) pointer *outP, and the delay setting Dly.
*outP is determined by the difference between *inP and Dly. Note the case where *inP is smaller than Dly: *outP must then read from the previous pass through the buffer, i.e. *outP lies in the buffer filled on the previous pass rather than the same one as *inP.
(Quoted from: https://blog.csdn.net/bentengdema/article/details/102495512)
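A minimal sketch of the circular delay line described above (assumed names; this is not the sample's AudioDelay class, which follows below):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct DelayLine {
    std::vector<int16_t> buf;    // sized for the maximum delay, in samples
    size_t writePos = 0;         // the text's *inP
    size_t delaySamples;         // the text's Dly

    DelayLine(size_t maxDelaySamples, size_t delay)
        : buf(maxDelaySamples, 0), delaySamples(delay) {
        assert(delay <= maxDelaySamples);
    }

    // Writes the new input sample and returns the one that entered
    // delaySamples ago; the modulo handles the "read from the previous
    // buffer" case where the read index would otherwise go negative.
    int16_t process(int16_t in) {
        size_t readPos = (writePos + buf.size() - delaySamples) % buf.size();
        int16_t out = buf[readPos];   // the text's *outP
        buf[writePos] = in;
        writePos = (writePos + 1) % buf.size();
        return out;
    }
};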

AudioDelay::AudioDelay

feedbackFactor_ = static_cast<int32_t>(decayWeight_ * kFloatToIntMapFactor);
liveAudioFactor_ = kFloatToIntMapFactor - feedbackFactor_;
allocateBuffer();
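With the decay of 0.5 that this sample hardcodes (see the process() comment below), feedbackFactor_ and liveAudioFactor_ each end up as half of kFloatToIntMapFactor, so process() mixes the delayed sample and the live sample with equal weight.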

allocateBuffer

// Work out the buffer size from the delay time and the sample format

float floatDelayTime = (float)delayTime_ / kMsPerSec;   // delay time in seconds
// frames covered by the delay (sampleRate_ is in milliHz, hence the extra /1000)
float fNumFrames = floatDelayTime * (float)sampleRate_ / kMsPerSec;
// total samples = frames * channels
size_t sampleCount = static_cast<uint32_t>(fNumFrames + 0.5f) * channelCount_;

// bytes per sample

uint32_t bytePerSample = format_ / 8;
assert(bytePerSample <= 4 && bytePerSample);

// bytes per frame
uint32_t bytePerFrame = channelCount_ * bytePerSample;

// get bufCapacity in bytes
bufCapacity_ = sampleCount * bytePerSample;
bufCapacity_ =
    ((bufCapacity_ + bytePerFrame - 1) / bytePerFrame) * bytePerFrame;  // round up to whole frames
buffer_ = new uint8_t[bufCapacity_];
assert(buffer_);
memset(buffer_, 0, bufCapacity_);
curPos_ = 0;
// bufSize_ is in Frames ( not samples, not bytes )
bufSize_ = bufCapacity_ / bytePerFrame;

AudioDelay::process

/*
 * process() filters live audio with the "echo" effect:
 *   - delay time is run-time adjustable
 *   - decay time could also be adjustable, but is not used
 *     in this sample; it is hardcoded to .5
 *
 * @param liveAudio    is the recorded audio stream
 * @param channelCount for liveAudio, must be 2 for stereo
 * @param numFrames    is the length of liveAudio in frames (not in bytes)
 */
// process every sample
int32_t sampleCount = channelCount_ * numFrames;
int16_t* samples = &static_cast<int16_t*>(buffer_)[curPos_ * channelCount_];
for (size_t idx = 0; idx < sampleCount; idx++) {
#if 1
  int32_t curSample =
      (samples[idx] * feedbackFactor_ + liveAudio[idx] * liveAudioFactor_) /
      kFloatToIntMapFactor;
  // weighted mix of the delayed sample and the current live sample
  if (curSample > SHRT_MAX)
    curSample = SHRT_MAX;
  else if (curSample < SHRT_MIN)
    curSample = SHRT_MIN;
  liveAudio[idx] = samples[idx];
  samples[idx] = static_cast<int16_t>(curSample);
#else
  // Pure delay
  int16_t tmp = liveAudio[idx];
  liveAudio[idx] = samples[idx];
  samples[idx] = tmp;
#endif
}
curPos_ += numFrames;
lock_.unlock();

Audio Common

void ConvertToSLSampleFormat(SLAndroidDataFormat_PCM_EX* pFormat,
                             SampleFormat* pSampleInfo_) {

  // Only support 2 channels: pick the channel count and channel mask.
  // For channelMask, refer to wilhelm/src/android/channels.c for details
  if (pSampleInfo_->channels_ <= 1) {
    pFormat->numChannels = 1;
    pFormat->channelMask = SL_SPEAKER_FRONT_LEFT;
  } else {
    pFormat->numChannels = 2;
    pFormat->channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
  }

  // (excerpt) the float-PCM branch of the format selection:
  case SL_ANDROID_PCM_REPRESENTATION_FLOAT:
    pFormat->bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_32;  // bits per sample
    pFormat->containerSize = SL_PCMSAMPLEFORMAT_FIXED_32;  // container size
    pFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX;    // data format type
    break;

audio_main

Creates the AudioPlayer and the AudioRecorder, and registers the buffer queues and the EngineService callback on both:

engine.player_->SetBufQueue(engine.recBufQueue_, engine.freeBufQueue_);
engine.player_->RegisterCallback(EngineService, (void *)&engine);
engine.recorder_->SetBufQueues(engine.freeBufQueue_, engine.recBufQueue_);
engine.recorder_->RegisterCallback(EngineService, (void *)&engine);
bool EngineService(void *ctx, uint32_t msg, void *data) {
  assert(ctx == &engine);
  switch (msg) {
    case ENGINE_SERVICE_MSG_RETRIEVE_DUMP_BUFS: {
      *(static_cast<uint32_t *>(data)) = dbgEngineGetBufCount();
      break;
    }
    case ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE: {  // recorded audio data is available
      // adding audio delay effect
      sample_buf *buf = static_cast<sample_buf *>(data);
      assert(engine.fastPathFramesPerBuf_ ==
             buf->size_ / engine.sampleChannels_ / (engine.bitsPerSample_ / 8));
      engine.delayEffect_->process(reinterpret_cast<int16_t *>(buf->buf_),
                                   engine.fastPathFramesPerBuf_);  // apply the echo/delay effect in place
      break;
    }
    default:
      assert(false);
      return false;
  }

  return true;
}
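For example, with the mono 16-bit, 480-byte buffers described earlier (assuming the device reported 240 frames per buffer), buf->size_ / 1 channel / 2 bytes per sample = 240 frames, which must equal engine.fastPathFramesPerBuf_ for the assert to pass.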

 

 
