
Merging raw H.264 + G.711A audio/video streams into an MP4 file with FFmpeg (G.711A to AAC)

Merging a raw video stream and a raw audio stream into a single file in C

        FFmpeg's MP4 muxer only supports H.264 + AAC; it does not accept H.264 + G.711 in an MP4 container. So the G.711A audio has to be transcoded to AAC first, and the result is then muxed into the MP4 file. There are posts online claiming that you can patch movenc.c and rebuild FFmpeg to get H.264 + G.711A support in MP4, but my attempt at that build did not succeed. If you manage to build it, please share how.

Reference article: C++编程-7:ffmpeg支持G711音频和H.264视频数据同步封装进MP4文件_椰果奶茶加冰的博客-CSDN博客_ffmpeg g711

Code:

1. First, transcode the G.711A stream to AAC, using this library: https://github.com/EasyDarwin/EasyAACEncoder

    InitParam initParam;
    initParam.u32AudioSamplerate = 8000;
    initParam.ucAudioChannel    = 1;
    initParam.u32PCMBitSize     = 16;
    initParam.ucAudioCodec      = Law_ALaw;   // G.711 A-law input
    //initParam.ucAudioCodec    = Law_ULaw;   // use this for G.711 u-law instead
    EasyAACEncoder_Handle handle = Easy_AACEncoder_Init(initParam);

    const char* infilename = "src.g711a";     // raw G.711A input
    const char* outAacname = "g711.aac";      // AAC (ADTS) output
    FILE* fpIn = fopen(infilename, "rb");
    if (NULL == fpIn)
    {
        printf("%s:[%d] open %s file failed\n", __FUNCTION__, __LINE__, infilename);
        return -1;
    }
    FILE* fpOut = fopen(outAacname, "wb");
    if (NULL == fpOut)
    {
        printf("%s:[%d] open %s file failed\n", __FUNCTION__, __LINE__, outAacname);
        fclose(fpIn);
        return -1;
    }

    int gBytesRead = 0;
    int bG711ABufferSize = 500;
    int bAACBufferSize = 4 * bG711ABufferSize;   // output buffer with plenty of headroom
    unsigned char* pbG711ABuffer = (unsigned char*)malloc(bG711ABufferSize * sizeof(unsigned char));
    unsigned char* pbAACBuffer   = (unsigned char*)malloc(bAACBufferSize * sizeof(unsigned char));
    unsigned int out_len = 0;

    while ((gBytesRead = fread(pbG711ABuffer, 1, bG711ABufferSize, fpIn)) > 0)
    {
        // out_len is the number of AAC bytes produced for this chunk
        if (Easy_AACEncoder_Encode(handle, pbG711ABuffer, gBytesRead, pbAACBuffer, &out_len) > 0)
        {
            fwrite(pbAACBuffer, 1, out_len, fpOut);
        }
    }

    Easy_AACEncoder_Release(handle);
    free(pbG711ABuffer);
    free(pbAACBuffer);
    fclose(fpIn);
    fclose(fpOut);

2. Mux the H.264 + AAC streams into an MP4 file. Reference: ffmpeg h264文件和裸流 封装mp4 - 简书

   1) Create the file

    // Returns 0 on success, 1 on failure.
    int FFmpegTool::CreateMp4(const char* filename)
    {
        int ret;
        const char* pszFileName = filename;
        AVOutputFormat* fmt;
        AVCodec* video_codec   = NULL;
        AVStream* m_pVideoSt   = NULL;
        AVCodec* audio_codec   = NULL;
        AVStream* m_pAudioSt   = NULL;

        av_register_all();
        avformat_alloc_output_context2(&m_pOc, NULL, NULL, pszFileName);
        if (!m_pOc)
        {
            printf("Could not deduce output format from file extension: using MPEG.\n");
            avformat_alloc_output_context2(&m_pOc, NULL, "mpeg", pszFileName);
        }
        if (!m_pOc)
        {
            return 1;
        }

        fmt = m_pOc->oformat;
        if (fmt->video_codec != AV_CODEC_ID_NONE)
        {
            // Add the video and audio streams
            m_pVideoSt = add_stream(m_pOc, &video_codec, AV_CODEC_ID_H264, 0);
            m_pAudioSt = add_stream(m_pOc, &audio_codec, AV_CODEC_ID_AAC, 1);
        }

        // Open the audio and video encoders
        if (m_pAudioSt)
        {
            open_audio(m_pOc, audio_codec, m_pAudioSt);
        }
        if (m_pVideoSt)
        {
            open_video(m_pOc, video_codec, m_pVideoSt);
        }

        printf("==========Output Information==========\n");
        av_dump_format(m_pOc, 0, pszFileName, 1);
        printf("======================================\n");

        /* Open the output file, if needed */
        if (!(fmt->flags & AVFMT_NOFILE))
        {
            ret = avio_open(&m_pOc->pb, pszFileName, AVIO_FLAG_WRITE);
            if (ret < 0)
            {
                printf("could not open %s\n", pszFileName);
                return 1;
            }
        }

        /* Write the container header */
        ret = avformat_write_header(m_pOc, NULL);
        if (ret < 0)
        {
            printf("Error occurred when opening output file\n");
            return 1;
        }
        return 0;
    }

   

    // Return true if this NAL unit is an SPS, PPS, or IDR slice.
    bool isIdrFrame(uint8_t* buf, int len)
    {
        switch (buf[0] & 0x1f)   // NAL unit type
        {
        case 7:  // SPS
            return true;
        case 8:  // PPS
            return true;
        case 5:  // IDR slice
            return true;
        case 1:  // non-IDR slice
            return false;
        default:
            return false;
        }
        return false;
    }

    // Decide whether an Annex-B access unit is a key frame by scanning the
    // NAL units (mainly the SPS/PPS in front of the IDR slice).
    bool FFmpegTool::judgeKeyFrame(uint8_t* buf, int size)
    {
        int last = 0;
        for (int i = 2; i <= size; ++i)
        {
            if (i == size)
            {
                if (last)
                {
                    bool ret = isIdrFrame(buf + last, i - last);
                    if (ret)
                        return true;
                }
            }
            else if (buf[i - 2] == 0x00 && buf[i - 1] == 0x00 && buf[i] == 0x01)
            {
                if (last)
                {
                    int nal_size = i - last - 3;
                    if (buf[i - 3]) ++nal_size;   // 3-byte start code instead of 4-byte
                    bool ret = isIdrFrame(buf + last, nal_size);
                    if (ret)
                        return true;
                }
                last = i + 1;
            }
        }
        return false;
    }

    // Add a video or audio stream to the output context (type 0 = video, 1 = audio)
    AVStream* FFmpegTool::add_stream(AVFormatContext* oc, AVCodec** codec, enum AVCodecID codec_id, int type)
    {
        AVCodecContext* c;
        AVStream* st;

        /* find the encoder */
        *codec = avcodec_find_encoder(codec_id);
        if (!*codec)
        {
            printf("could not find encoder for '%s'\n", avcodec_get_name(codec_id));
            exit(1);
        }
        st = avformat_new_stream(oc, *codec);
        if (!st)
        {
            printf("could not allocate stream\n");
            exit(1);
        }
        st->id = oc->nb_streams - 1;
        c = st->codec;

        // Remember the stream index so WriteVideo can route packets
        if (type == 0)
            m_vi_nstream = st->index;
        else
            m_ai_nstream = st->index;

        AVRational time_base;
        switch ((*codec)->type)
        {
        case AVMEDIA_TYPE_AUDIO:
            c->sample_fmt     = AV_SAMPLE_FMT_S16P;
            //c->bit_rate     = 128000;
            c->sample_rate    = m_sample_rate;
            c->channels       = m_channel;
            c->codec_id       = AV_CODEC_ID_AAC;
            c->channel_layout = AV_CH_LAYOUT_STEREO;
            time_base = { 1, c->sample_rate };
            st->time_base = time_base;
            break;
        case AVMEDIA_TYPE_VIDEO:
            c->codec_id      = AV_CODEC_ID_H264;
            c->bit_rate      = m_bit_rate;
            c->width         = m_width;
            c->height        = m_height;
            c->time_base.den = m_fps;
            c->time_base.num = 1;
            c->gop_size      = 1;
            c->pix_fmt       = AV_PIX_FMT_YUV420P;
            time_base = { 1, m_fps };
            st->time_base = time_base;
            if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
            {
                c->max_b_frames = 2;
            }
            if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            {
                c->mb_decision = 2;
            }
            break;
        default:
            break;
        }

        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        {
            c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
        return st;
    }

    // Open the video encoder
    void FFmpegTool::open_video(AVFormatContext* oc, AVCodec* codec, AVStream* st)
    {
        int ret;
        AVCodecContext* c = st->codec;
        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0)
        {
            printf("could not open video codec:%d\n", ret);
        }
    }

    // Open the audio encoder
    void FFmpegTool::open_audio(AVFormatContext* oc, AVCodec* codec, AVStream* st)
    {
        int ret;
        AVCodecContext* c = st->codec;
        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0)
        {
            printf("could not open audio codec:%d\n", ret);
            //exit(1);
        }
    }
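
Note that the code above uses the legacy FFmpeg API (av_register_all, st->codec), which matches FFmpeg 3.x and earlier; st->codec is deprecated in later releases and removed in FFmpeg 5. A rough sketch of how the video branch of add_stream could be ported to the newer API (my assumption, not part of the original code) is:

    /* Sketch only: on newer FFmpeg, keep a separate AVCodecContext and copy
     * its parameters into the stream instead of writing st->codec directly. */
    AVCodecContext* enc = avcodec_alloc_context3(*codec);
    enc->codec_id      = AV_CODEC_ID_H264;
    enc->bit_rate      = m_bit_rate;
    enc->width         = m_width;
    enc->height        = m_height;
    enc->time_base.num = 1;
    enc->time_base.den = m_fps;
    enc->gop_size      = 1;
    enc->pix_fmt       = AV_PIX_FMT_YUV420P;
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(enc, *codec, NULL) < 0)
        printf("could not open video codec\n");
    avcodec_parameters_from_context(st->codecpar, enc);   /* replaces writing st->codec */
    st->time_base.num = 1;
    st->time_base.den = m_fps;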

   2) Write video and audio frames

    // Write one video or audio frame (type 0 = video, 1 = audio)
    void FFmpegTool::WriteVideo(void* data, int nLen, int type)
    {
        AVStream* pst;
        // Pick the video or audio stream
        if (type == 0)
            pst = m_pOc->streams[m_vi_nstream];
        else
            pst = m_pOc->streams[m_ai_nstream];

        // Init packet
        AVPacket pkt;
        av_init_packet(&pkt);
        int isI = judgeKeyFrame((uint8_t*)data, nLen);
        pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
        pkt.stream_index = pst->index;   // index of the stream this packet belongs to
        pkt.data = (uint8_t*)data;
        pkt.size = nLen;

        // Wait for a key frame before writing the first packet
        if (m_waitkey)
        {
            if (0 == (pkt.flags & AV_PKT_FLAG_KEY))
                return;
            else
                m_waitkey = 0;
        }

        // Duration of one frame in AV_TIME_BASE (microsecond) units
        int64_t calc_duration = (double)AV_TIME_BASE / m_fps;
        // Presentation timestamp of this frame, converted into the stream time base
        pkt.pts = (double)(m_frame_index * calc_duration) / (double)(av_q2d(pst->time_base) * AV_TIME_BASE);
        // DTS equals PTS because the stream has no B-frames
        pkt.dts = pkt.pts;
        // Frame duration in the stream time base
        pkt.duration = (double)calc_duration / (double)(av_q2d(pst->time_base) * AV_TIME_BASE);

        if (type == 0)
        {
            // advance one video frame
            cur_pts_v = pkt.pts;
            m_frame_index++;
        }
        else
        {
            // keep the audio timestamp in step with the video
            cur_pts_a = pkt.pts;
        }

        // Rescale into the output stream time base (pts/dts/duration are already
        // expressed in pst->time_base here, so this is effectively a no-op kept
        // from the reference code)
        pkt.pts = av_rescale_q_rnd(pkt.pts, pst->time_base, pst->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, pst->time_base, pst->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, pst->time_base, pst->time_base);
        pkt.pos = -1;
        pkt.stream_index = pst->index;

        if (av_interleaved_write_frame(m_pOc, &pkt) < 0)
        {
            printf("cannot write frame\n");
        }
        av_free_packet(&pkt);
    }
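
One caveat: calc_duration above is derived from the video frame rate and is applied to audio packets as well. An AAC frame produced by the transcoding step normally carries 1024 samples, so if the audio drifts out of sync, computing a per-stream duration is one possible fix (a sketch under that assumption, not part of the original code; m_sample_rate is the member already used in add_stream):

    // Duration of one packet in AV_TIME_BASE (microsecond) units
    int64_t calc_duration;
    if (type == 0)
        calc_duration = (int64_t)((double)AV_TIME_BASE / m_fps);                  // one video frame
    else
        calc_duration = (int64_t)((double)AV_TIME_BASE * 1024 / m_sample_rate);   // one AAC frame = 1024 samples

A separate frame counter per stream would then be needed as well.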

   3) Close the file

    void FFmpegTool::CloseMp4()
    {
        m_waitkey = -1;
        m_vi_nstream = -1;
        m_ai_nstream = -1;
        if (m_pOc)
            av_write_trailer(m_pOc);
        if (m_pOc && !(m_pOc->oformat->flags & AVFMT_NOFILE))
            avio_close(m_pOc->pb);
        if (m_pOc)
        {
            avformat_free_context(m_pOc);
            m_pOc = NULL;
        }
    }

The above is the core of the implementation, offered as a reference. Several parameters still need to be set for your own input, such as the audio sample rate, number of channels, video frame rate, and resolution. I have commented the parts as I understand them; corrections are welcome.
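
To tie the pieces together, a minimal driver might look like the sketch below. read_one_h264_frame and read_one_aac_frame are hypothetical placeholders for whatever code hands out one complete Annex-B access unit or one complete ADTS frame at a time:

    FFmpegTool tool;                             // member variables (m_fps, m_width, ...) set beforehand
    if (tool.CreateMp4("out.mp4") != 0)
        return -1;

    unsigned char buf[512 * 1024];
    int len = 0;
    // Interleave writes: type 0 = video, type 1 = audio
    while (read_one_h264_frame(buf, &len))       // hypothetical helper
    {
        tool.WriteVideo(buf, len, 0);
        if (read_one_aac_frame(buf, &len))       // hypothetical helper
            tool.WriteVideo(buf, len, 1);
    }
    tool.CloseMp4();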

Note: because this is company project code, I cannot upload everything; only the main implementation is posted here.
