当前位置:   article > 正文

FFmpeg获取视频关键帧并保存成jpg图像_ffmpeg 抽帧 成图片

ffmpeg 抽帧 成图片

1、命令行方式

1秒取1帧 r:rate

ffmpeg -i input.mp4 -f image2 -r 1  dstPath/image-%03d.jpg

提取I帧

ffmpeg -i input.mp4 -an -vf select='eq(pict_type\,I)' -vsync 2 -s 720x480 -f image2  dstPath/image-%03d.jpg

本文福利, 免费领取C++音视频学习资料包+学习路线大纲、技术视频/代码,内容包括(音视频开发,面试题,FFmpeg ,webRTC ,rtmp ,hls ,rtsp ,ffplay ,编解码,推拉流,srs)↓↓↓↓↓↓见下面↓↓文章底部点击免费领取↓↓

2、代码方式

提取I帧

  1. //source: keyframe.cpp
  2. #include <iostream>
  3. #include <cstdio>
  4. #include <cstring>
  5. #define __STDC_CONSTANT_MACROS
  6. extern "C"
  7. {
  8. #include <libavutil/imgutils.h>
  9. #include <libavutil/samplefmt.h>
  10. #include <libavutil/timestamp.h>
  11. #include <libavutil/opt.h>
  12. #include <libavcodec/avcodec.h>
  13. #include <libavutil/channel_layout.h>
  14. #include <libavutil/common.h>
  15. #include <libavutil/imgutils.h>
  16. #include <libavutil/mathematics.h>
  17. #include <libavutil/samplefmt.h>
  18. #include <libavutil/pixfmt.h>
  19. #include <libavformat/avformat.h>
  20. #include <libswscale/swscale.h>
  21. #include <jpeglib.h>
  22. }
  23. using namespace std;
  24. char errbuf[256];
  25. char timebuf[256];
  26. static AVFormatContext *fmt_ctx = NULL;
  27. static AVCodecContext *video_dec_ctx = NULL;
  28. static int width, height;
  29. static enum AVPixelFormat pix_fmt;
  30. static AVStream *video_stream = NULL;
  31. static const char *src_filename = NULL;
  32. static const char *output_dir = NULL;
  33. static int video_stream_idx = -1;
  34. static AVFrame *frame = NULL;
  35. static AVFrame *pFrameRGB = NULL;
  36. static AVPacket pkt;
  37. static struct SwsContext *pSWSCtx = NULL;
  38. static int video_frame_count = 0;
  39. /* Enable or disable frame reference counting. You are not supposed to support
  40. * both paths in your application but pick the one most appropriate to your
  41. * needs. Look for the use of refcount in this example to see what are the
  42. * differences of API usage between them. */
  43. static int refcount = 0;
  44. static void jpg_save(uint8_t *pRGBBuffer, int iFrame, int width, int height);
  45. static int decode_packet(int *got_frame, int cached)
  46. {
  47. int ret = 0;
  48. int decoded = pkt.size;
  49. *got_frame = 0;
  50. if (pkt.stream_index == video_stream_idx)
  51. {
  52. /* decode video frame */
  53. ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
  54. if (ret < 0)
  55. {
  56. fprintf(stderr, "Error decoding video frame (%s)\n", av_make_error_string(errbuf, sizeof(errbuf), ret));
  57. return ret;
  58. }
  59. if (*got_frame)
  60. {
  61. if (frame->width != width || frame->height != height ||
  62. frame->format != pix_fmt)
  63. {
  64. /* To handle this change, one could call av_image_alloc again and
  65. * decode the following frames into another rawvideo file. */
  66. fprintf(stderr, "Error: Width, height and pixel format have to be "
  67. "constant in a rawvideo file, but the width, height or "
  68. "pixel format of the input video changed:\n"
  69. "old: width = %d, height = %d, format = %s\n"
  70. "new: width = %d, height = %d, format = %s\n",
  71. width, height, av_get_pix_fmt_name(pix_fmt),
  72. frame->width, frame->height,
  73. av_get_pix_fmt_name(frame->format));
  74. return -1;
  75. }
  76. video_frame_count++;
  77. static int iFrame = 0;
  78. if (frame->key_frame == 1) //如果是关键帧
  79. {
  80. sws_scale(pSWSCtx, frame->data, frame->linesize, 0,
  81. video_dec_ctx->height,
  82. pFrameRGB->data, pFrameRGB->linesize);
  83. // 保存到磁盘
  84. iFrame++;
  85. jpg_save(pFrameRGB->data[0], iFrame, width, height);
  86. }
  87. }
  88. }
  89. /* If we use frame reference counting, we own the data and need
  90. * to de-reference it when we don't use it anymore */
  91. if (*got_frame && refcount)
  92. av_frame_unref(frame);
  93. return decoded;
  94. }
  95. static int open_codec_context(int *stream_idx,
  96. AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
  97. {
  98. int ret, stream_index;
  99. AVStream *st;
  100. AVCodec *dec = NULL;
  101. AVDictionary *opts = NULL;
  102. ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
  103. if (ret < 0)
  104. {
  105. fprintf(stderr, "Could not find %s stream in input file '%s'\n",
  106. av_get_media_type_string(type), src_filename);
  107. return ret;
  108. }
  109. else
  110. {
  111. stream_index = ret;
  112. st = fmt_ctx->streams[stream_index];
  113. /* find decoder for the stream */
  114. dec = avcodec_find_decoder(st->codecpar->codec_id);
  115. if (!dec)
  116. {
  117. fprintf(stderr, "Failed to find %s codec\n",
  118. av_get_media_type_string(type));
  119. return AVERROR(EINVAL);
  120. }
  121. /* Allocate a codec context for the decoder */
  122. *dec_ctx = avcodec_alloc_context3(dec);
  123. if (!*dec_ctx)
  124. {
  125. fprintf(stderr, "Failed to allocate the %s codec context\n",
  126. av_get_media_type_string(type));
  127. return AVERROR(ENOMEM);
  128. }
  129. /* Copy codec parameters from input stream to output codec context */
  130. if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0)
  131. {
  132. fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
  133. av_get_media_type_string(type));
  134. return ret;
  135. }
  136. /* Init the decoders, with or without reference counting */
  137. av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
  138. if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0)
  139. {
  140. fprintf(stderr, "Failed to open %s codec\n",
  141. av_get_media_type_string(type));
  142. return ret;
  143. }
  144. *stream_idx = stream_index;
  145. }
  146. return 0;
  147. }
  148. static int get_format_from_sample_fmt(const char **fmt, enum AVSampleFormat sample_fmt)
  149. {
  150. int i;
  151. struct sample_fmt_entry
  152. {
  153. enum AVSampleFormat sample_fmt;
  154. const char *fmt_be, *fmt_le;
  155. } sample_fmt_entries[] = {
  156. {AV_SAMPLE_FMT_U8, "u8", "u8"},
  157. {AV_SAMPLE_FMT_S16, "s16be", "s16le"},
  158. {AV_SAMPLE_FMT_S32, "s32be", "s32le"},
  159. {AV_SAMPLE_FMT_FLT, "f32be", "f32le"},
  160. {AV_SAMPLE_FMT_DBL, "f64be", "f64le"},
  161. };
  162. *fmt = NULL;
  163. for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++)
  164. {
  165. struct sample_fmt_entry *entry = &sample_fmt_entries[i];
  166. if (sample_fmt == entry->sample_fmt)
  167. {
  168. *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
  169. return 0;
  170. }
  171. }
  172. fprintf(stderr,
  173. "sample format %s is not supported as output format\n",
  174. av_get_sample_fmt_name(sample_fmt));
  175. return -1;
  176. }
  177. int main(int argc, char **argv)
  178. {
  179. int ret = 0, got_frame;
  180. int numBytes = 0;
  181. uint8_t *buffer;
  182. if (argc != 3 && argc != 4)
  183. {
  184. fprintf(stderr, "usage: %s [-refcount] input_file ouput_dir\n"
  185. "API example program to show how to read frames from an input file.\n"
  186. "This program reads frames from a file, decodes them, and writes bmp keyframes\n"
  187. "If the -refcount option is specified, the program use the\n"
  188. "reference counting frame system which allows keeping a copy of\n"
  189. "the data for longer than one decode call.\n"
  190. "\n",
  191. argv[0]);
  192. exit(1);
  193. }
  194. if (argc == 4 && !strcmp(argv[1], "-refcount"))
  195. {
  196. refcount = 1;
  197. argv++;
  198. }
  199. src_filename = argv[1];
  200. output_dir = argv[2];
  201. /* open input file, and allocate format context */
  202. if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0)
  203. {
  204. fprintf(stderr, "Could not open source file %s\n", src_filename);
  205. exit(1);
  206. }
  207. /* retrieve stream information */
  208. if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
  209. {
  210. fprintf(stderr, "Could not find stream information\n");
  211. exit(1);
  212. }
  213. if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0)
  214. {
  215. video_stream = fmt_ctx->streams[video_stream_idx];
  216. /* allocate image where the decoded image will be put */
  217. width = video_dec_ctx->width;
  218. height = video_dec_ctx->height;
  219. pix_fmt = video_dec_ctx->pix_fmt;
  220. }
  221. else
  222. {
  223. goto end;
  224. }
  225. /* dump input information to stderr */
  226. av_dump_format(fmt_ctx, 0, src_filename, 0);
  227. if (!video_stream)
  228. {
  229. fprintf(stderr, "Could not find video stream in the input, aborting\n");
  230. ret = 1;
  231. goto end;
  232. }
  233. pFrameRGB = av_frame_alloc();
  234. numBytes = avpicture_get_size(AV_PIX_FMT_BGR24, width, height);
  235. buffer = av_malloc(numBytes);
  236. avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, width, height);
  237. pSWSCtx = sws_getContext(width, height, pix_fmt, width, height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
  238. frame = av_frame_alloc();
  239. if (!frame)
  240. {
  241. fprintf(stderr, "Could not allocate frame\n");
  242. ret = AVERROR(ENOMEM);
  243. goto end;
  244. }
  245. /* initialize packet, set data to NULL, let the demuxer fill it */
  246. av_init_packet(&pkt);
  247. pkt.data = NULL;
  248. pkt.size = 0;
  249. if (video_stream)
  250. printf("Demuxing video from file '%s' to dir: %s\n", src_filename, output_dir);
  251. /* read frames from the file */
  252. while (av_read_frame(fmt_ctx, &pkt) >= 0)
  253. {
  254. AVPacket orig_pkt = pkt;
  255. do
  256. {
  257. ret = decode_packet(&got_frame, 0);
  258. if (ret < 0)
  259. break;
  260. pkt.data += ret;
  261. pkt.size -= ret;
  262. } while (pkt.size > 0);
  263. av_packet_unref(&orig_pkt);
  264. }
  265. /* flush cached frames */
  266. pkt.data = NULL;
  267. pkt.size = 0;
  268. end:
  269. if (video_dec_ctx)
  270. avcodec_free_context(&video_dec_ctx);
  271. if (fmt_ctx)
  272. avformat_close_input(&fmt_ctx);
  273. if (buffer)
  274. av_free(buffer);
  275. if (pFrameRGB)
  276. av_frame_free(&pFrameRGB);
  277. if (frame)
  278. av_frame_free(&frame);
  279. return ret < 0;
  280. }
  281. static void jpg_save(uint8_t *pRGBBuffer, int iFrame, int width, int height)
  282. {
  283. struct jpeg_compress_struct cinfo;
  284. struct jpeg_error_mgr jerr;
  285. char szFilename[1024];
  286. int row_stride;
  287. FILE *fp;
  288. JSAMPROW row_pointer[1]; // 一行位图
  289. cinfo.err = jpeg_std_error(&jerr);
  290. jpeg_create_compress(&cinfo);
  291. sprintf(szFilename, "%s/image-%03d.jpg", output_dir, iFrame); //图片名字为视频名+号码
  292. fp = fopen(szFilename, "wb");
  293. if (fp == NULL)
  294. return;
  295. jpeg_stdio_dest(&cinfo, fp);
  296. cinfo.image_width = width; // 为图的宽和高,单位为像素
  297. cinfo.image_height = height;
  298. cinfo.input_components = 3; // 在此为1,表示灰度图, 如果是彩色位图,则为3
  299. cinfo.in_color_space = JCS_RGB; //JCS_GRAYSCALE表示灰度图,JCS_RGB表示彩色图像
  300. jpeg_set_defaults(&cinfo);
  301. jpeg_set_quality(&cinfo, 80, 1);
  302. jpeg_start_compress(&cinfo, TRUE);
  303. row_stride = cinfo.image_width * 3; //每一行的字节数,如果不是索引图,此处需要乘以3
  304. // 对每一行进行压缩
  305. while (cinfo.next_scanline < cinfo.image_height)
  306. {
  307. row_pointer[0] = &(pRGBBuffer[cinfo.next_scanline * row_stride]);
  308. jpeg_write_scanlines(&cinfo, row_pointer, 1);
  309. }
  310. jpeg_finish_compress(&cinfo);
  311. jpeg_destroy_compress(&cinfo);
  312. fclose(fp);
  313. }
$ cat Makefile
keyframe: keyframe.cpp
	g++ $< -o $@ `pkg-config --cflags --libs libavcodec libavformat libswscale libavutil` -ljpeg -fpermissive

本文福利, 免费领取C++音视频学习资料包+学习路线大纲、技术视频/代码,内容包括(音视频开发,面试题,FFmpeg ,webRTC ,rtmp ,hls ,rtsp ,ffplay ,编解码,推拉流,srs)↓↓↓↓↓↓见下面↓↓文章底部点击免费领取↓↓

声明:本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:【wpsshop博客】
推荐阅读
相关标签
  

闽ICP备14008679号