当前位置:   article > 正文

QT 使用ffmpeg播放音视频文件/网络资源

qt通过ffmpeg 获取音视频的时长

1、将播放功能进行封装。

extern "C" {
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>   // SwrContext / swr_* (used below but was not included)
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>              // av_opt_set_int / av_opt_set_sample_fmt
}
#include <QApplication>
#include <QLabel>
#include <QTimer>
#include <QAudioOutput>
#include <QAudioDeviceInfo>             // defaultOutputDevice() / nearestFormat()
#include <QIODevice>
#include <QEventLoop>
#include <QImage>
#include <QPixmap>
#include <QDebug>
  10. // 音视频播放器类
  11. class MediaPlayer : public QObject {
  12. Q_OBJECT
  13. public:
  14. MediaPlayer(const QString& filename, QLabel* videoWidget) :
  15. pFormatContext(nullptr), videoStream(-1), audioStream(-1),
  16. pVideoFrame(nullptr), pFrameRGB(nullptr), swsContext(nullptr),
  17. videoWidget(videoWidget), timer(nullptr) {
  18. // 打开音视频文件
  19. if (!openFile(filename.toUtf8().constData())) {
  20. qDebug() << "Failed to open file: " << filename;
  21. }
  22. }
  23. ~MediaPlayer() {
  24. // 释放资源
  25. closeFile();
  26. if (timer) {
  27. delete timer;
  28. }
  29. }
  30. // 打开音视频文件
  31. bool openFile(const char* filename) {
  32. // 打开音视频文件
  33. if (avformat_open_input(&pFormatContext, filename, nullptr, nullptr) != 0) {
  34. return false;
  35. }
  36. // 获取音视频流信息
  37. if (avformat_find_stream_info(pFormatContext, nullptr) < 0) {
  38. return false;
  39. }
  40. // 查找音视频流
  41. for (int i = 0; i < pFormatContext->nb_streams; i++) {
  42. if (pFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
  43. videoStream = i;
  44. }
  45. if (pFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
  46. audioStream = i;
  47. }
  48. }
  49. // 打开视频解码器
  50. AVCodec* pVideoCodec = avcodec_find_decoder(pFormatContext->streams[videoStream]->codecpar->codec_id);
  51. AVCodecContext* pVideoCodecContext = avcodec_alloc_context3(pVideoCodec);
  52. avcodec_parameters_to_context(pVideoCodecContext, pFormatContext->streams[videoStream]->codecpar);
  53. if (avcodec_open2(pVideoCodecContext, pVideoCodec, nullptr) < 0) {
  54. return false;
  55. }
  56. // 创建视频帧
  57. pVideoFrame = av_frame_alloc();
  58. // 创建RGB帧
  59. pFrameRGB = av_frame_alloc();
  60. int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, pVideoCodecContext->width, pVideoCodecContext->height, 1);
  61. uint8_t* buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
  62. av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_RGB24, pVideoCodecContext->width,
  63. pVideoCodecContext->height, 1);
  64. // 创建图像转换上下文
  65. videoSwsContext= sws_getContext(pVideoCodecContext->width, pVideoCodecContext->height, pVideoCodecContext->pix_fmt,
  66. pVideoCodecContext->width, pVideoCodecContext->height, AV_PIX_FMT_RGB24,
  67. SWS_BILINEAR, nullptr, nullptr, nullptr);
  68. // 创建音频帧
  69. pAudioFrame = av_frame_alloc();
  70. //打开音频解码器
  71. AVCodec* pAudioCodec= avcodec_find_decoder(pFormatContext->streams[audioStream]->codecpar->codec_id);
  72. //获取音频流解码器,或者指定解码器
  73. AVCodecContext* pAudioCodecContext = avcodec_find_decoder(pAudioCodec->codec_id);
  74. if (pAudioCodecContext == NULL) {
  75. return false;
  76. }
  77. //打开音频解码器
  78. result = avcodec_open2(pAudioCodec, pAudioCodecContext, NULL);
  79. if (result < 0) {
  80. return false;
  81. }
  82. // 创建重采样上下文并设置参数
  83. audioSwsContext= swr_alloc();
  84. av_opt_set_int(audioSwsContext, "in_channel_layout", pAudioCodec->channel_layout, 0);
  85. av_opt_set_int(audioSwsContext, "in_sample_rate", pAudioCodec->sample_rate, 0);
  86. av_opt_set_sample_fmt(audioSwsContext, "in_sample_fmt", pAudioCodec->sample_fmt, 0);
  87. av_opt_set_int(audioSwsContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); // 目标音频格式为立体声
  88. av_opt_set_int(audioSwsContext, "out_sample_rate", 44100, 0); // 目标音频采样率为44.1kHz
  89. av_opt_set_sample_fmt(audioSwsContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); // 目标音频采样格式为16位有符号整数
  90. // audioSwsContext= swr_alloc_set_opts(nullptr,
  91. // AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
  92. // pAudioCodec->channel_layout,
  93. // pAudioCodec->sample_fmt,
  94. // pAudioCodec->sample_rate,
  95. // 0, nullptr);
  96. swr_init(audioSwsContext);
  97. // 创建QAudioFormat对象
  98. QAudioFormat audioFormat;
  99. audioFormat.setSampleRate(44100/*audioCodec->sample_rate*/);
  100. audioFormat.setChannelCount(2/*audioCodec->channels*/);
  101. audioFormat.setSampleSize(16);
  102. audioFormat.setCodec("audio/pcm");
  103. audioFormat.setByteOrder(QAudioFormat::LittleEndian);
  104. audioFormat.setSampleType(QAudioFormat::SignedInt);
  105. QAudioDeviceInfo audioDeviceInfo = QAudioDeviceInfo::defaultOutputDevice();
  106. if (!audioDeviceInfo.isFormatSupported(audioFormat)){
  107. audioFormat = audioDeviceInfo.nearestFormat(audioFormat);
  108. }
  109. // 创建QAudioOutput对象
  110. audioOutput = new QAudioOutput(audioFormat);
  111. audioOutput->setVolume(100);
  112. // 打开音频输出设备
  113. outputDevice = audioOutput->start();
  114. if (!outputDevice)
  115. return false;
  116. return true;
  117. }
  118. // 关闭音视频文件
  119. void closeFile() {
  120. if (pFormatContext) {
  121. avformat_close_input(&pFormatContext);
  122. pFormatContext = nullptr;
  123. }
  124. if (pVideoFrame) {
  125. av_frame_free(&pVideoFrame);
  126. pVideoFrame = nullptr;
  127. }
  128. if (pFrameRGB) {
  129. av_frame_free(&pFrameRGB);
  130. pFrameRGB = nullptr;
  131. }
  132. if (videoSwsContext) {
  133. sws_freeContext(videoSwsContext);
  134. videoSwsContext= nullptr;
  135. }
  136. if (pAudioFrame) {
  137. av_frame_free(&pAudioFrame);
  138. pAudioFrame= nullptr;
  139. }
  140. if (audioCodec != NULL) {
  141. avcodec_close(audioCodec);
  142. audioCodec = NULL;
  143. }
  144. if (audioSwsContext) {
  145. sws_freeContext(audioSwsContext);
  146. audioSwsContext= nullptr;
  147. }
  148. // 停止并释放音频输出设备
  149. if (outputDevice)
  150. outputDevice->close();
  151. if (audioOutput){
  152. audioOutput->stop();
  153. delete audioOutput;
  154. audioOutput = nullptr;
  155. }
  156. }
  157. // 播放音视频
  158. void play() {
  159. AVPacket packet;
  160. int frameFinished;
  161. // 循环读取音视频帧
  162. while (av_read_frame(pFormatContext, &packet) >= 0) {
  163. // 播放视频帧
  164. if (packet.stream_index == videoStream) {
  165. frameFinish = avcodec_send_packet(videoCodec, avPacket);
  166. if (frameFinish < 0) {
  167. continue;
  168. }
  169. frameFinished= avcodec_receive_frame(videoCodec, pVideoFrame);
  170. if (frameFinished< 0) {
  171. continue;
  172. }
  173. if (frameFinished>= 0) {
  174. //将数据转成一张图片
  175. sws_scale(videoSwsContext, (const uint8_t *const *)pVideoFrame->data, pVideoFrame->linesize, 0, videoHeight, pFrameRGB->data, pFrameRGB->linesize);
  176. // 显示图像
  177. QImage image(pFrameRGB->data[0], pVideoFrame->width, pVideoFrame->height, QImage::Format_RGB888);
  178. videoWidget->setPixmap(QPixmap::fromImage(image));
  179. // 计算视频帧显示时间间隔
  180. int64_t pts = av_frame_get_best_effort_timestamp(pVideoFrame);
  181. int64_t timeBase = pFormatContext->streams[videoStream]->time_base.den;
  182. double frameDelay = av_q2d(pFormatContext->streams[videoStream]->r_frame_rate);
  183. double time = (pts * frameDelay) / timeBase;
  184. // 延时显示下一帧
  185. QEventLoop loop;
  186. QTimer::singleShot(time * 1000, &loop, [&]() { loop.quit(); });
  187. loop.exec();
  188. // 释放内存
  189. av_packet_unref(&packet);
  190. }
  191. }
  192. // 播放音频帧
  193. if (packet.stream_index == audioStream) {
  194. // 解码音频帧
  195. AVCodecContext* pAudioCodecContext = pFormatContext->streams[audioStream]->codec;
  196. frameFinished= avcodec_send_packet(pAudioCodecContext , packet);
  197. if (frameFinished< 0) {
  198. continue;
  199. }
  200. frameFinished= avcodec_receive_frame(pAudioCodecContext , pAudioFrame);
  201. if (frameFinished < 0) {
  202. continue;
  203. }
  204. if (frameFinish >= 0) {
  205. // 播放音频帧(你可以在此处使用Qt的音频播放功能)
  206. // 在这里输出音频数据,你可以根据需要进行相应处理
  207. qDebug() << "音频帧大小:" << aacFrame->pkt_size;
  208. // 转码音频帧
  209. // 计算转码后的音频数据大小
  210. int dstNbSamples = av_rescale_rnd(swr_get_delay(swrCtx, 44100/*pAudioFrame->sample_rate*/) + aacFrame->nb_samples, 44100, 44100/*pAudioFrame->sample_rate*/, AV_ROUND_UP);
  211. int dstBufferSize = av_samples_get_buffer_size(nullptr, 2, dstNbSamples, AV_SAMPLE_FMT_S16, 0);
  212. // 分配转码后的音频数据缓冲区
  213. uint8_t *dstBuffer = static_cast<uint8_t *>(av_malloc(dstBufferSize));
  214. // 进行音频转码
  215. int numSamples = swr_convert(audioSwsContext, &dstBuffer, dstNbSamples, const_cast<const uint8_t **>(pAudioFrame->data), pAudioFrame->nb_samples);
  216. if (numSamples < 0) {
  217. qDebug() << "音频转码失败";
  218. av_freep(&dstBuffer);
  219. }
  220. else{
  221. // 释放资源
  222. // 将音频帧数据写入音频输出设备
  223. outputDevice->write((const char *)dstBuffer, dstBufferSize);
  224. }
  225. // 计算音频帧播放时长
  226. AVRational timeBase = pFormatContext->streams[audioStream]->time_base;
  227. int64_t pts = av_frame_get_best_effort_timestamp(pAudioFrame);
  228. double time = av_q2d(timeBase) * pts;
  229. // 延时播放下一帧
  230. QEventLoop loop;
  231. QTimer::singleShot(time * 1000, &loop, [&]() { loop.quit(); });
  232. loop.exec();
  233. }
  234. // 释放内存
  235. av_frame_free(&pAudioFrame);
  236. av_packet_unref(&packet);
  237. }
  238. }
  239. }
  240. private:
  241. AVFormatContext* pFormatContext; // 音视频格式上下文
  242. int videoStream; // 视频流索引
  243. int audioStream; // 音频流索引
  244. AVFrame* pVideoFrame; // 视频帧
  245. AVFrame* pFrameRGB; // RGB帧
  246. SwsContext* videoSwsContext; // 图像转换上下文
  247. QLabel* videoWidget; // 用于显示视频的QWidget
  248. QTimer* timer; // 定时器,用于延时显示
  249. QAudioOutput *audioOutput;
  250. QIODevice *outputDevice;
  251. SwrContext *audioSwsContext;
  252. };

2、调用示例。

  1. int main(int argc, char *argv[])
  2. {
  3. QApplication app(argc, argv);
  4. QLabel videoWidget;
  5. videoWidget.show();
  6. MediaPlayer player("path/to/video_file.mp4", &videoWidget);
  7. player.play();
  8. return app.exec();
  9. }

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/AllinToyou/article/detail/354315
推荐阅读
相关标签
  

闽ICP备14008679号