当前位置:   article > 正文

IJKPLAYER源码分析-AudioTrack播放

IJKPLAYER源码分析-AudioTrack播放

1 前言

    AudioTrack是Android SDK所提供的播放PCM音频的技术。与MediaCodec类似,IJKPLAYER使用的是在native层通过JNI反射调用Java层AudioTrack的播放能力。

    关于AudioTrack的官方文档,请参考AudioTrack官方文档

2 接口

2.1 pipeline

  • IJKFF_Pipeline结构体是对Android和iOS平台video的软硬解以及audio的播放操作的抽象,两端遵循共同的接口规范,包括各自平台的硬解和ffmpeg软解;

    IJKFF_Pipeline结构体定义

  1. typedef struct IJKFF_Pipeline_Opaque IJKFF_Pipeline_Opaque;
  2. typedef struct IJKFF_Pipeline IJKFF_Pipeline;
  3. struct IJKFF_Pipeline {
  4. SDL_Class *opaque_class;
  5. IJKFF_Pipeline_Opaque *opaque;
  6. void (*func_destroy) (IJKFF_Pipeline *pipeline);
  7. IJKFF_Pipenode *(*func_open_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  8. SDL_Aout *(*func_open_audio_output) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  9. IJKFF_Pipenode *(*func_init_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  10. int (*func_config_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  11. };
  •  IJKFF_Pipeline_Opaque结构体则是IJKFF_Pipeline结构体相关数据的封装;

    展开IJKFF_Pipeline_Opaque结构体: 

  1. typedef struct IJKFF_Pipeline_Opaque {
  2. FFPlayer *ffp;
  3. SDL_mutex *surface_mutex;
  4. jobject jsurface;
  5. volatile bool is_surface_need_reconfigure;
  6. bool (*mediacodec_select_callback)(void *opaque, ijkmp_mediacodecinfo_context *mcc);
  7. void *mediacodec_select_callback_opaque;
  8. SDL_Vout *weak_vout;
  9. float left_volume;
  10. float right_volume;
  11. } IJKFF_Pipeline_Opaque;

   创建pipeline对象调用链:

Java native_setup() => IjkMediaPlayer_native_setup() => ijkmp_android_create() => ffpipeline_create_from_android()

    展开ijkmp_android_create方法:

  1. IjkMediaPlayer *ijkmp_android_create(int(*msg_loop)(void*))
  2. {
  3. IjkMediaPlayer *mp = ijkmp_create(msg_loop);
  4. if (!mp)
  5. goto fail;
  6. mp->ffplayer->vout = SDL_VoutAndroid_CreateForAndroidSurface();
  7. if (!mp->ffplayer->vout)
  8. goto fail;
  9. mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);
  10. if (!mp->ffplayer->pipeline)
  11. goto fail;
  12. ffpipeline_set_vout(mp->ffplayer->pipeline, mp->ffplayer->vout);
  13. return mp;
  14. fail:
  15. ijkmp_dec_ref_p(&mp);
  16. return NULL;
  17. }

     再展开ffpipeline_create_from_android方法:

  1. IJKFF_Pipeline *ffpipeline_create_from_android(FFPlayer *ffp)
  2. {
  3. ALOGD("ffpipeline_create_from_android()\n");
  4. IJKFF_Pipeline *pipeline = ffpipeline_alloc(&g_pipeline_class, sizeof(IJKFF_Pipeline_Opaque));
  5. if (!pipeline)
  6. return pipeline;
  7. IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
  8. opaque->ffp = ffp;
  9. opaque->surface_mutex = SDL_CreateMutex();
  10. opaque->left_volume = 1.0f;
  11. opaque->right_volume = 1.0f;
  12. if (!opaque->surface_mutex) {
  13. ALOGE("ffpipeline-android:create SDL_CreateMutex failed\n");
  14. goto fail;
  15. }
  16. pipeline->func_destroy = func_destroy;
  17. pipeline->func_open_video_decoder = func_open_video_decoder;
  18. pipeline->func_open_audio_output = func_open_audio_output;
  19. pipeline->func_init_video_decoder = func_init_video_decoder;
  20. pipeline->func_config_video_decoder = func_config_video_decoder;
  21. return pipeline;
  22. fail:
  23. ffpipeline_free_p(&pipeline);
  24. return NULL;
  25. }

2.2 SDL_Aout

  • 该结构体抽象了Android和iOS端对声音硬件的所有操作,两端几乎遵循共同的接口规范;
  1. struct SDL_Aout {
  2. SDL_mutex *mutex;
  3. double minimal_latency_seconds;
  4. SDL_Class *opaque_class;
  5. SDL_Aout_Opaque *opaque;
  6. void (*free_l)(SDL_Aout *vout);
  7. int (*open_audio)(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained);
  8. void (*pause_audio)(SDL_Aout *aout, int pause_on);
  9. void (*flush_audio)(SDL_Aout *aout);
  10. void (*set_volume)(SDL_Aout *aout, float left, float right);
  11. void (*close_audio)(SDL_Aout *aout);
  12. double (*func_get_latency_seconds)(SDL_Aout *aout);
  13. void (*func_set_default_latency_seconds)(SDL_Aout *aout, double latency);
  14. // optional
  15. void (*func_set_playback_rate)(SDL_Aout *aout, float playbackRate);
  16. void (*func_set_playback_volume)(SDL_Aout *aout, float playbackVolume);
  17. int (*func_get_audio_persecond_callbacks)(SDL_Aout *aout);
  18. // Android only
  19. int (*func_get_audio_session_id)(SDL_Aout *aout);
  20. };

2.3 SDL_Aout_Opaque

  • 该结构体是SDL_Aout结构的内部数据成员,封装了对音频操作相关的参数、数据与mutex和cond等;
  1. typedef struct SDL_Aout_Opaque {
  2. SDL_cond *wakeup_cond;
  3. SDL_mutex *wakeup_mutex;
  4. SDL_AudioSpec spec;
  5. SDL_Android_AudioTrack* atrack;
  6. uint8_t *buffer;
  7. int buffer_size;
  8. volatile bool need_flush;
  9. volatile bool pause_on;
  10. volatile bool abort_request;
  11. volatile bool need_set_volume;
  12. volatile float left_volume;
  13. volatile float right_volume;
  14. SDL_Thread *audio_tid;
  15. SDL_Thread _audio_tid;
  16. int audio_session_id;
  17. volatile float speed;
  18. volatile bool speed_changed;
  19. } SDL_Aout_Opaque;

3 创建SDL_Aout

ffp_prepare_async_l() => ffpipeline_open_audio_output() => func_open_audio_output() => SDL_AoutAndroid_CreateForAudioTrack()

    最后,走到此处创建SDL_Aout对象: 

  1. SDL_Aout *SDL_AoutAndroid_CreateForAudioTrack()
  2. {
  3. SDL_Aout *aout = SDL_Aout_CreateInternal(sizeof(SDL_Aout_Opaque));
  4. if (!aout)
  5. return NULL;
  6. SDL_Aout_Opaque *opaque = aout->opaque;
  7. opaque->wakeup_cond = SDL_CreateCond();
  8. opaque->wakeup_mutex = SDL_CreateMutex();
  9. opaque->speed = 1.0f;
  10. aout->opaque_class = &g_audiotrack_class;
  11. aout->free_l = aout_free_l;
  12. aout->open_audio = aout_open_audio;
  13. aout->pause_audio = aout_pause_audio;
  14. aout->flush_audio = aout_flush_audio;
  15. aout->set_volume = aout_set_volume;
  16. aout->close_audio = aout_close_audio;
  17. aout->func_get_audio_session_id = aout_get_audio_session_id;
  18. aout->func_set_playback_rate = func_set_playback_rate;
  19. return aout;
  20. }

4 open_audio 

    主要做以下事情:

  •  打开audio,其实是分配一个jobject类型的AudioTrack对象,并把音频源的参数传递给它,后续对AudioTrack的操作均使用该对象;
  • 值得一提的是,IJKPLAYER对音频源的channel和pcm格式以及采样率有限制,比如只允许播放单双通道、16bit或8bit的pcm格式、采样率也必须在4000~48000之间;
  • 保存AudioTrack所能播放的音频参数在is->audio_tgt中,后续音频参数变更重采样之用;
  • 开启audio_thread线程异步处理对AudioTrack的操作;
  • 设置AudioTrack的缺省时延,用于音视频同步时纠正音频的时钟;
  1. // 设置缺省时延,若有func_set_default_latency_seconds回调则通过回调更新,没有则设置变量minimal_latency_seconds的值
  2. SDL_AoutSetDefaultLatencySeconds(ffp->aout, ((double)(2 * spec.size)) / audio_hw_params->bytes_per_sec);

    完整调用链: 

read_thread() => stream_component_open() => audio_open() => SDL_AoutOpenAudio() => aout_open_audio() => aout_open_audio_n()

    我们主要来看看aout_open_audio_n函数:

  • 将音频源的采样参数告知给AudioTrack,分配一个jobject类型的AudioTrack对象,后续对AudioTrack的操作均是由此对象发起;
  • 按getMinBufferSize() * 2分配一个用于缓存PCM数据的buffer,其大小一定大于单次写入的256byte;
  • 开启一个audio_thread线程,用以异步执行对AudioTrack的操作,诸如setVolume() / pause() / flush() / close_audio() / setPlaybackRate()等;
  • 将audio硬件PCM播放的参数保存在全局is->audio_tgt变量中,供后续音频参数变更时重采样使用;
  • 设置播放的初始音量;
  1. static int aout_open_audio_n(JNIEnv *env, SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
  2. {
  3. assert(desired);
  4. SDL_Aout_Opaque *opaque = aout->opaque;
  5. opaque->spec = *desired;
  6. // 将pcm采样参数告知audiotrack
  7. opaque->atrack = SDL_Android_AudioTrack_new_from_sdl_spec(env, desired);
  8. if (!opaque->atrack) {
  9. ALOGE("aout_open_audio_n: failed to new AudioTrcak()");
  10. return -1;
  11. }
  12. opaque->buffer_size = SDL_Android_AudioTrack_get_min_buffer_size(opaque->atrack);
  13. if (opaque->buffer_size <= 0) {
  14. ALOGE("aout_open_audio_n: failed to getMinBufferSize()");
  15. SDL_Android_AudioTrack_free(env, opaque->atrack);
  16. opaque->atrack = NULL;
  17. return -1;
  18. }
  19. opaque->buffer = malloc(opaque->buffer_size);
  20. if (!opaque->buffer) {
  21. ALOGE("aout_open_audio_n: failed to allocate buffer");
  22. SDL_Android_AudioTrack_free(env, opaque->atrack);
  23. opaque->atrack = NULL;
  24. return -1;
  25. }
  26. if (obtained) {
  27. SDL_Android_AudioTrack_get_target_spec(opaque->atrack, obtained);
  28. SDLTRACE("audio target format fmt:0x%x, channel:0x%x", (int)obtained->format, (int)obtained->channels);
  29. }
  30. opaque->audio_session_id = SDL_Android_AudioTrack_getAudioSessionId(env, opaque->atrack);
  31. ALOGI("audio_session_id = %d\n", opaque->audio_session_id);
  32. opaque->pause_on = 1;
  33. opaque->abort_request = 0;
  34. opaque->audio_tid = SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_android");
  35. if (!opaque->audio_tid) {
  36. ALOGE("aout_open_audio_n: failed to create audio thread");
  37. SDL_Android_AudioTrack_free(env, opaque->atrack);
  38. opaque->atrack = NULL;
  39. return -1;
  40. }
  41. return 0;
  42. }

     此外,这里介绍一下getMinBufferSize函数:

  • getMinBufferSize会综合考虑硬件情况(诸如是否支持采样率,硬件本身的延迟情况等)后,得出一个最小缓冲区的大小。一般我们分配的缓冲大小会是它的整数倍。 

5 audio_thread

  • 对Android SDK的AudioTrack异步执行操作,如pause()/play()/setVolume()/flush()/setSpeed();
  • 通过sdl_audio_callback回调copy固定256byte的PCM数据,然后喂给AudioTrack播放;

5.1 执行操作

    所有对AudioTrack的操作,都是在此线程里异步执行:

  • 值得一提的是,若播放器处于pause状态,该线程会一直条件等待,直到opaque->pause_on变为false(也即可播状态)或程序退出为止,期间线程空转;
  1. static int aout_thread_n(JNIEnv *env, SDL_Aout *aout)
  2. {
  3. SDL_Aout_Opaque *opaque = aout->opaque;
  4. SDL_Android_AudioTrack *atrack = opaque->atrack;
  5. SDL_AudioCallback audio_cblk = opaque->spec.callback;
  6. void *userdata = opaque->spec.userdata;
  7. uint8_t *buffer = opaque->buffer;
  8. // 单次喂给AudioTrack的PCM的bytes,不宜喂得太少,也不宜太多,单次应能播一会儿,5ms
  9. int copy_size = 256;
  10. assert(atrack);
  11. assert(buffer);
  12. SDL_SetThreadPriority(SDL_THREAD_PRIORITY_HIGH);
  13. if (!opaque->abort_request && !opaque->pause_on)
  14. SDL_Android_AudioTrack_play(env, atrack);
  15. while (!opaque->abort_request) {
  16. SDL_LockMutex(opaque->wakeup_mutex);
  17. if (!opaque->abort_request && opaque->pause_on) {
  18. SDL_Android_AudioTrack_pause(env, atrack);
  19. // 若暂停了,当前线程一直在此条件等待播放
  20. while (!opaque->abort_request && opaque->pause_on) {
  21. SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000);
  22. }
  23. if (!opaque->abort_request && !opaque->pause_on) {
  24. if (opaque->need_flush) {
  25. opaque->need_flush = 0;
  26. SDL_Android_AudioTrack_flush(env, atrack);
  27. }
  28. SDL_Android_AudioTrack_play(env, atrack);
  29. }
  30. }
  31. if (opaque->need_flush) {
  32. opaque->need_flush = 0;
  33. SDL_Android_AudioTrack_flush(env, atrack);
  34. }
  35. if (opaque->need_set_volume) {
  36. opaque->need_set_volume = 0;
  37. SDL_Android_AudioTrack_set_volume(env, atrack, opaque->left_volume, opaque->right_volume);
  38. }
  39. if (opaque->speed_changed) {
  40. opaque->speed_changed = 0;
  41. SDL_Android_AudioTrack_setSpeed(env, atrack, opaque->speed);
  42. }
  43. SDL_UnlockMutex(opaque->wakeup_mutex);
  44. // copy解码后的pcm数据,每次固定256byte
  45. audio_cblk(userdata, buffer, copy_size);
  46. if (opaque->need_flush) {
  47. SDL_Android_AudioTrack_flush(env, atrack);
  48. opaque->need_flush = false;
  49. }
  50. if (opaque->need_flush) {
  51. opaque->need_flush = 0;
  52. SDL_Android_AudioTrack_flush(env, atrack);
  53. } else {
  54. // 将pcm数据喂给AudioTrack播放
  55. int written = SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size);
  56. if (written != copy_size) {
  57. ALOGW("AudioTrack: not all data copied %d/%d", (int)written, (int)copy_size);
  58. }
  59. }
  60. // TODO: 1 if callback return -1 or 0
  61. }
  62. SDL_Android_AudioTrack_free(env, atrack);
  63. return 0;
  64. }

5.2 sdl_audio_callback

  • audio_thread通过callback的方式从解码后的音频队列FrameQueue拷贝走固定长度256byte的pcm数据;
  1. /* prepare a new audio buffer */
  2. static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
  3. {
  4. FFPlayer *ffp = opaque;
  5. VideoState *is = ffp->is;
  6. int audio_size, len1;
  7. if (!ffp || !is) {
  8. memset(stream, 0, len);
  9. return;
  10. }
  11. ffp->audio_callback_time = av_gettime_relative();
  12. if (ffp->pf_playback_rate_changed) {
  13. ffp->pf_playback_rate_changed = 0;
  14. #if defined(__ANDROID__)
  15. if (!ffp->soundtouch_enable) {
  16. SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
  17. }
  18. #else
  19. SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
  20. #endif
  21. }
  22. if (ffp->pf_playback_volume_changed) {
  23. ffp->pf_playback_volume_changed = 0;
  24. SDL_AoutSetPlaybackVolume(ffp->aout, ffp->pf_playback_volume);
  25. }
  26. // 循环是确保copy走len字节的pcm数据
  27. while (len > 0) {
  28. if (is->audio_buf_index >= is->audio_buf_size) {
  29. audio_size = audio_decode_frame(ffp);
  30. if (audio_size < 0) {
  31. /* if error, just output silence */
  32. is->audio_buf = NULL;
  33. is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
  34. } else {
  35. if (is->show_mode != SHOW_MODE_VIDEO)
  36. update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
  37. is->audio_buf_size = audio_size;
  38. }
  39. is->audio_buf_index = 0;
  40. }
  41. if (is->auddec.pkt_serial != is->audioq.serial) {
  42. is->audio_buf_index = is->audio_buf_size;
  43. // 静音播放
  44. memset(stream, 0, len);
  45. // flush掉seek前后的pcm数据
  46. SDL_AoutFlushAudio(ffp->aout);
  47. break;
  48. }
  49. len1 = is->audio_buf_size - is->audio_buf_index;
  50. if (len1 > len)
  51. len1 = len;
  52. if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
  53. // 在此copy走pcm数据
  54. memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
  55. else {
  56. memset(stream, 0, len1);
  57. if (!is->muted && is->audio_buf)
  58. SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
  59. }
  60. len -= len1;
  61. stream += len1;
  62. is->audio_buf_index += len1;
  63. }
  64. is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
  65. /* Let's assume the audio driver that is used by SDL has two periods. */
  66. if (!isnan(is->audio_clock)) {
  67. // 计算Audio参考时钟时应将硬件里的PCM样本缓存考虑进去(opensl es and audiounit),以及is->audio_write_buf_size
  68. set_clock_at(&is->audclk, is->audio_clock - (double)(is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec - SDL_AoutGetLatencySeconds(ffp->aout), is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
  69. sync_clock_to_slave(&is->extclk, &is->audclk);
  70. }
  71. if (!ffp->first_audio_frame_rendered) {
  72. ffp->first_audio_frame_rendered = 1;
  73. ffp_notify_msg1(ffp, FFP_MSG_AUDIO_RENDERING_START);
  74. }
  75. if (is->latest_audio_seek_load_serial == is->audio_clock_serial) {
  76. int latest_audio_seek_load_serial = __atomic_exchange_n(&(is->latest_audio_seek_load_serial), -1, memory_order_seq_cst);
  77. if (latest_audio_seek_load_serial == is->audio_clock_serial) {
  78. if (ffp->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  79. ffp_notify_msg2(ffp, FFP_MSG_AUDIO_SEEK_RENDERING_START, 1);
  80. } else {
  81. ffp_notify_msg2(ffp, FFP_MSG_AUDIO_SEEK_RENDERING_START, 0);
  82. }
  83. }
  84. }
  85. if (ffp->render_wait_start && !ffp->start_on_prepared && is->pause_req) {
  86. while (is->pause_req && !is->abort_request) {
  87. SDL_Delay(20);
  88. }
  89. }
  90. }

5.3 喂PCM数据

  1. ......
  2. // 从FrameQueue队列里取走256个byte的pcm数据
  3. audio_cblk(userdata, buffer, copy_size);
  4. if (opaque->need_flush) {
  5. SDL_Android_AudioTrack_flush(env, atrack);
  6. opaque->need_flush = false;
  7. }
  8. if (opaque->need_flush) {
  9. opaque->need_flush = 0;
  10. SDL_Android_AudioTrack_flush(env, atrack);
  11. } else {
  12. // 将从FrameQueue队列里copy过来的pcm数据喂给AudioTrack播放
  13. int written = SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size);
  14. if (written != copy_size) {
  15. ALOGW("AudioTrack: not all data copied %d/%d", (int)written, (int)copy_size);
  16. }
  17. }
  18. ......
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小小林熬夜学编程/article/detail/401559
推荐阅读
相关标签
  

闽ICP备14008679号