赞
踩
最近搞了一个ffmpeg相关的视频解码项目,很久以前又搞过一个桌面管理项目。于是,一个动态桌面背景项目就此诞生。
动态桌面背景这个东西我很久以前就很好奇,然而市面上的相关软件又不太想用。所以,现在有能力了就自己动手搞一个。
很久以前的代码
// Legacy lookup (kept for reference): walks Progman -> SHELLDLL_DefView ->
// SysListView32. As noted below, the handle this yields is the desktop ICON
// window (the folder list view), not the wallpaper window, and its
// applicability is limited.
HWND hwndParent = ::FindWindow(L"Progman", L"Program Manager");
if (hwndParent == NULL)
{
return NULL;
}
// Child window that hosts the desktop icons.
HWND hwndSHELLDLL_DefView = ::FindWindowEx(hwndParent, NULL, L"SHELLDLL_DefView", NULL);
// The icon list view itself (caption "FolderView").
HWND hwndSysListView32 = ::FindWindowEx(hwndSHELLDLL_DefView, NULL, L"SysListView32", L"FolderView");
注意:以上代码适用范围有限,而且获取到的窗口句柄是桌面图标窗口。而非桌面背景窗口。
经过各种搜索最终代码如下
BOOL CALLBACK EnumWindowsProc(HWND hwnd, LPARAM lParam) { HWND p = FindWindowEx(hwnd, NULL, "SHELLDLL_DefView", NULL); HWND* ret = (HWND*)lParam; if (p) { // Gets the WorkerW Window after the current one. *ret = FindWindowEx(NULL, hwnd, "WorkerW", NULL); } return true; } HWND CXWallPaperDlg::findDesktopWnd() { // Fetch the Progman window HWND progman = ::FindWindow("ProgMan", NULL); // Send 0x052C to Progman. This message directs Progman to spawn a // WorkerW behind the desktop icons. If it is already there, nothing // happens. SendMessageTimeout(progman, 0x052C, 0, 0, SMTO_NORMAL, 1000, nullptr); // We enumerate all Windows, until we find one, that has the SHELLDLL_DefView // as a child. // If we found that window, we take its next sibling and assign it to workerw. HWND wallpaper_hwnd = nullptr; EnumWindows(EnumWindowsProc, (LPARAM)&wallpaper_hwnd); // Return the handle you're looking for. return wallpaper_hwnd; }
以上代码是从国外的论坛直接复制过来的,目前所知Win10适用,其他系统未测试。
注意代码中的英文注释,已经将功能讲的很详细了。
需要注意的一点是:桌面背景窗口(WorkerW)在 Z 序中位于桌面图标窗口的下面,即紧随其后的那个窗口。
// Playback thread body (the enclosing thread-procedure signature is not part
// of this excerpt; `lpParameter` carries the owning XPlayWnd*).
// Opens dlg->m_VideoFile with FFmpeg, decodes the first video stream, scales
// each frame to RGB32 and hands it to dlg->ShowInDlg(), pacing frames by the
// packet duration (falling back to a delay derived from the average frame
// rate). The audio path is deliberately commented out: wallpaper plays muted.
XPlayWnd* dlg = static_cast<XPlayWnd*>(lpParameter);
dlg->FillBluck();
CRect r;
dlg->GetWindowRect(r);
// Target render size, chosen below from dlg->m_DrawType.
int dw, dh;
AVFormatContext *pFormatCtx = nullptr;
//int audioindex = -1;
int videoindex = -1;
AVCodecContext *pCodecCtx = nullptr;
AVCodec *pCodec = nullptr;
AVFrame *pFrame = nullptr;          // decoded frame in the codec's native format
AVFrame *pFrameRGB = nullptr;       // RGB32 frame handed to the window
unsigned char *out_buffer = nullptr;
AVPacket *packet = nullptr;
int ret, got_picture;               // NOTE(review): got_picture is never used
struct SwsContext *img_convert_ctx = nullptr;
char filepath[_MAX_PATH] = { 0 };   // NOTE(review): unused leftover
INT64 dieTime = GetTickCount64();   // tick count at which the current frame expires
int LastFreamDely = 0;              // duration of the previous frame, ms
int defaultDely = 40;               // fallback per-frame delay, ms
int AudioRate = 0;                  // NOTE(review): audio path disabled; unused
int AudioChannel = 0;               // NOTE(review): audio path disabled; unused
//AVCodecContext *AudioCodec = nullptr;
double time_base = 0;               // stream time base converted to milliseconds
//SwrContext *m_SWRtx = nullptr;
// Register all codecs/formats (a deprecated no-op since FFmpeg 4.0).
av_register_all();
// Allocate the AVFormatContext describing the container/stream.
pFormatCtx = avformat_alloc_context();
// Open the input file and fill pFormatCtx.
// NOTE(review): GetBuffer() has no matching ReleaseBuffer() — confirm against
// the CString usage conventions elsewhere in the project.
if (avformat_open_input(&pFormatCtx, dlg->m_VideoFile.GetBuffer(), NULL, NULL) != 0)
{
	OutputDebugString("Couldn't open input stream.\n");
	goto end;
}
// Probe the streams so stream/codec parameters are populated.
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
	OutputDebugString("Couldn't find stream information.\n");
	goto end;
}
// nb_streams is the number of A/V streams; stop at the first video stream.
// NOTE(review): streams[i]->codec is the deprecated AVCodecContext field;
// newer FFmpeg expects streams[i]->codecpar here.
for (int i = 0; i < pFormatCtx->nb_streams; i++)
{
	if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
	{
		videoindex = i;
		break;
	}
	//else if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
	//{
	//	audioindex = i;
	//}
}
if (videoindex == -1)
{
	OutputDebugString("Didn't find a video stream.\n");
	goto end;
}
/*if (audioindex == -1)
{
	OutputDebugString("Didn't find a Audio stream.\n");
	goto end;
}*/
// ---- Audio stream setup (disabled) ----
//AudioCodec = pFormatCtx->streams[audioindex]->codec;
// Look up the audio decoder.
//AVCodec *codec = avcodec_find_decoder(AudioCodec->codec_id);
//if (codec == nullptr)
//	goto end;
// Open the audio decoder.
//if (avcodec_open2(AudioCodec, codec, nullptr) != 0)
//	goto end;
//AudioCodec->channel_layout = av_get_default_channel_layout(AudioCodec->channels);
//int rate = AudioCodec->sample_rate;
//int channel = AudioCodec->channels;
//g_AudioPlayThread->cleanAllAudioBuffer();
//g_AudioPlayThread->setCurrentSampleInfo(rate, 16, channel);
//g_AudioPlayThread->start();
// Grab the video stream's codec context (deprecated field, see above).
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
// Find the matching decoder.
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
	OutputDebugString("Codec not found.\n");
	goto end;
}
// Initialize pCodecCtx for decoding.
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
	OutputDebugString("Could not open codec.\n");
	goto end;
}
// Allocate frame headers only; the pixel buffer is attached separately below.
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
// Pick the output size: 0 = stretch to the window, 1 = native video size,
// 2 = fit inside the window while keeping the video's aspect ratio.
switch (dlg->m_DrawType)
{
case 0:
	dw = r.Width();
	dh = r.Height();
	break;
case 1:
	dw = pCodecCtx->width;
	dh = pCodecCtx->height;
	break;
case 2:
	if (1.0 * r.Width() / r.Height() >= 1.0 * pCodecCtx->width / pCodecCtx->height)
	{
		dh = r.Height();
		dw = pCodecCtx->width * (1.0 * r.Height() / pCodecCtx->height);
	}
	else
	{
		dw = r.Width();
		dh = pCodecCtx->height * (1.0 * r.Width() / pCodecCtx->width);
	}
	break;
default:
	break;
}
// Allocate the pixel buffer holding one RGB32 frame;
// av_image_get_buffer_size() returns the byte size one frame needs.
out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGB32, dw,dh, 1));
av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, out_buffer, AV_PIX_FMT_RGB32, dw, dh, 1);
// NOTE(review): packet comes from av_malloc without av_init_packet() /
// av_packet_alloc() and is never freed at `end:` — confirm and fix upstream.
packet = (AVPacket *)av_malloc(sizeof(AVPacket));
// Derive the default frame delay from the average frame rate, and convert
// the stream's time base to milliseconds for packet->duration below.
{
	int num = pFormatCtx->streams[videoindex]->avg_frame_rate.num;
	int den = pFormatCtx->streams[videoindex]->avg_frame_rate.den;
	defaultDely = 1000 * den / num;
	num = pFormatCtx->streams[videoindex]->time_base.num;
	den = pFormatCtx->streams[videoindex]->time_base.den;
	time_base = 1000.0 * num / den;
}
//m_SWRtx = swr_alloc();
//swr_alloc_set_opts(m_SWRtx, AudioCodec->channel_layout, AV_SAMPLE_FMT_S16, \
//	AudioCodec->sample_rate, AudioCodec->channels, AudioCodec->sample_fmt, \
//	AudioCodec->sample_rate, 0, 0);
//swr_init(m_SWRtx);
// Build the scaler: native size/pixel format -> dw x dh RGB32.
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, dw, dh,
	AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
dlg->m_vWidth = pCodecCtx->width;
dlg->m_vHeight = pCodecCtx->height;
// av_read_frame pulls one undecoded packet at a time.
while (av_read_frame(pFormatCtx, packet) >= 0)
{
	// Video packets only; everything else is dropped.
	if (packet->stream_index == videoindex)
	{
		// Feed the packet to the decoder.
		ret = avcodec_send_packet(pCodecCtx, packet);
		if (ret != 0)
			break;
		// Fetch one decoded frame, if one is ready.
		ret = avcodec_receive_frame(pCodecCtx, pFrame);
		if (ret == 0)
		{
			// Flip the picture vertically by pointing each plane at its last
			// row and negating the stride; otherwise the output is upside down.
			pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
			pFrame->linesize[0] *= -1;
			pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
			pFrame->linesize[1] *= -1;
			pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
			pFrame->linesize[2] *= -1;
			// Convert the decoded (e.g. YUV420P) frame to RGB32.
			ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
				pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
			// Wait out the previous frame's display time, polling the stop flag.
			while (GetTickCount64() < dieTime)
			{
				if (dlg->m_bStop)
				{
					goto end;
				}
				Sleep(1);
			}
			dlg->ShowInDlg(pFrameRGB, dw, dh,32);
		}
		// Schedule the next frame: packet duration in ms, or the default
		// delay when the container reports no duration.
		LastFreamDely = packet->duration * time_base;
		if (LastFreamDely == 0)
		{
			LastFreamDely = defaultDely;
		}
		dieTime += LastFreamDely;
	}
	//else if (packet->stream_index == audioindex)
	//{
	//	// Send the audio packet to the decoder.
	//	//ret = avcodec_send_packet(AudioCodec, packet);
	//	//if (ret != 0)
	//	//	break;
	//	// Receive one decoded audio frame.
	//	//ret = avcodec_receive_frame(AudioCodec, pFrame);
	//	//if (ret != 0)
	//	//	continue;
	//	//uint8_t *array[1];
	//	//uint8_t arrays[10000] = { 0 };
	//	//array[0] = arrays;
	//	//int len = swr_convert(m_SWRtx, array, 10000, (const uint8_t **)pFrame->data, pFrame->nb_samples);
	//	g_AudioPlayThread->addAudioBuffer((char*)arrays, m_AvFrame->linesize[0]);
	//	//g_AudioPlayThread->addAudioBuffer((char*)arrays, pFrame->linesize[0]);
	//}
	// Release the payload the packet references (deprecated; av_packet_unref
	// in newer FFmpeg).
	av_free_packet(packet);
}
end:
// Tear everything down in reverse order of creation; each guard makes the
// label safe to reach from any of the early `goto end` exits.
if (img_convert_ctx)
	sws_freeContext(img_convert_ctx);
if (pFrameRGB)
	av_frame_free(&pFrameRGB);
if (pFrame)
	av_frame_free(&pFrame);
if (pCodecCtx)
	avcodec_close(pCodecCtx);
if (out_buffer)
	av_free(out_buffer);
//if (AudioCodec)
//	avcodec_close(AudioCodec);
//if (m_SWRtx)
//	swr_free(&m_SWRtx);
if (pFormatCtx)
	avformat_close_input(&pFormatCtx);
// Notify the owner that the playback thread has finished.
dlg->m_finishCb();
return 0;
因为桌面背景不需要音频,这段代码中直接将音频流相关的给注释掉了。(另外,我这段代码中的音频播放相关的好像有问题,没有详细查过。)
int XPlayWnd::ShowInDlg(AVFrame * pFrameRGB, int width, int height, int bpp) { BITMAPFILEHEADER bmpheader; BITMAPINFOHEADER bmpinfo; FILE *fp; bmpheader.bfType = 0x4d42; bmpheader.bfReserved1 = 0; bmpheader.bfReserved2 = 0; bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER); bmpheader.bfSize = bmpheader.bfOffBits + width * height*bpp / 8; bmpinfo.biSize = sizeof(BITMAPINFOHEADER); bmpinfo.biWidth = width; bmpinfo.biHeight = height; bmpinfo.biPlanes = 1; bmpinfo.biBitCount = bpp; bmpinfo.biCompression = BI_RGB; bmpinfo.biSizeImage = (width*bpp + 31) / 32 * 4 * height; bmpinfo.biXPelsPerMeter = 100; bmpinfo.biYPelsPerMeter = 100; bmpinfo.biClrUsed = 0; bmpinfo.biClrImportant = 0; if (m_hbitmap) ::DeleteObject(m_hbitmap); CDC* dc = GetDC(); m_hbitmap = CreateDIBitmap(dc->GetSafeHdc(), //设备上下文的句柄 (LPBITMAPINFOHEADER)&bmpinfo, //位图信息头指针 (long)CBM_INIT, //初始化标志 pFrameRGB->data[0], //初始化数据指针 (LPBITMAPINFO)&bmpinfo, //位图信息指针 DIB_RGB_COLORS); CDC memDc; memDc.CreateCompatibleDC(dc); CRect rcClient; GetWindowRect(rcClient); memDc.SelectObject(m_hbitmap); int nleft = (rcClient.Width() - width) / 2; int top = (rcClient.Height() - height) / 2; dc->StretchBlt(nleft, top, width, height, &memDc, 0, 0, width, height, SRCCOPY); memDc.DeleteDC(); ReleaseDC(dc); return 0; }
这里也没啥解释的,就是将ffmpeg解析得到的图片数据转换为HBITMAP然后绘制到窗口上。
最后创建一个dialog和任务托盘图标以供操作。这些东西只要搞过MFC的人应该都没什么问题。
我的显卡是1060,如果使用集显的话CPU占用率大概在4%左右。主要还是看电脑性能。
我手上的视频解码播放相关的项目有两个,一个用的是ffmpeg,另一个用的是VLC。然而VLC相对这么简单的功能来说太过庞大(大概有100M+),因此就用了相对简单的MFC+ffmpeg。
另外:程序还有很多BUG,不过自己将就着用足够了。
程序:https://download.csdn.net/download/u014410266/12508118
源码:https://download.csdn.net/download/u014410266/12508114
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。