Here is a summary of the tracking-algorithm demos I have written recently. They are all based on classical algorithms; there are simply too many model-based trackers to cover! All of the source code below has actually been run successfully and needs no changes, so feel free to use it as a reference.
Header file for the tracking demos: tracking.h
#pragma once
#ifndef _TRACK_H_
#define _TRACK_H_
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <opencv2/tracking.hpp>
// Tracking application demos
class TRACK
{
public:
/** Background subtraction: filter out the background so the foreground emerges (background modeling) */
static void backgroundsubtractor();
// Moving-object detection
static void obj_detected();
// Single-object tracking
static void tracker();
// Sparse optical flow (KLT)
static void klTrack();
// Dense optical flow (Farneback)
static void denseTrack();
// Three-frame difference tracking
static void ThreeFrameSubtract();
// test
void print_f();
};
#endif // !_TRACK_H_
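The header only declares the demos; for reference, here is a minimal sketch of a driver that calls them (the file name main.cpp and the choice of which demo to run are my own assumptions, not part of the original post):
#include "tracking.h"
int main()
{
TRACK demo;
demo.print_f(); // sanity check
TRACK::backgroundsubtractor(); // swap in any of the other static demos:
//TRACK::obj_detected();
//TRACK::tracker();
//TRACK::klTrack();
//TRACK::denseTrack();
//TRACK::ThreeFrameSubtract();
return 0;
}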
Main implementation file for the tracking demos: tracking.cpp
#include "tracking.h"
void TRACK::backgroundsubtractor()
{
cv::VideoCapture capture;
capture.open("D:/Images/video01.avi");
//VideoCapture capture(0);
if (!capture.isOpened())
{
std::cout << "could not load video data..." << std::endl;
return;
}
cv::Mat backmog2, backknn;
cv::Ptr<cv::BackgroundSubtractor> MOG2 = cv::createBackgroundSubtractorMOG2();
cv::Ptr<cv::BackgroundSubtractor> KNN = cv::createBackgroundSubtractorKNN();
double fps = capture.get(CV_CAP_PROP_FPS);
cv::Size size = cv::Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
double width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
double height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
std::cout << "width=" << width << std::endl << "height=" << height << std::endl;
std::cout << "FPS:" << fps << std::endl;
//VideoWriter write("E:/Images/vidoe.mp4", CV_FOURCC('D', 'I', 'V', 'X'), 15.0, size, true);
cv::Mat frame, gray, binary;
cv::namedWindow("video-demo", CV_WINDOW_AUTOSIZE);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(-1, -1));
std::vector<cv::Mat> bgr;
while (capture.read(frame))
{
resize(frame, frame, cv::Size(), 0.25, 0.25);
//inRange(frame, Scalar(0, 127, 0), Scalar(127, 255, 127), gray); //pixels outside the range become 0, inside become 255
cvtColor(frame, gray, CV_BGR2GRAY);
threshold(gray, binary, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
//bitwise_not(frame, frame);
//flip(frame, frame, 1); //horizontal flip
MOG2->apply(frame, backmog2); //apply() updates the model and returns the foreground mask, effectively filtering out the background
//morphologyEx(backmog2, backmog2, MORPH_OPEN, kernel, Point(-1, -1));
//dilate(backmog2, backmog2, kernel, Point(-1, -1), 3);
KNN->apply(frame, backknn);
cv::imshow("KNN-apply", backknn);
morphologyEx(backknn, backknn, cv::MORPH_OPEN, kernel, cv::Point(-1, -1));
erode(backknn, backknn, kernel, cv::Point(-1, -1));
//dilate(backknn, backknn, kernel, Point(-1, -1), 3);
std::vector<std::vector<cv::Point>> contours;
cv::Rect rect;
findContours(backknn, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point());
double areamax = 0;
for (size_t i = 0; i < contours.size(); i++)
{
double area = contourArea(contours[i]);
std::cout << "area=" << area << std::endl;
if (area > areamax)
{
areamax = area; //keep the largest contour seen so far
rect = boundingRect(contours[i]);
}
}
rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 8, 0);
cv::imshow("video-demo", frame);
cv::imshow("backmog2", backmog2);
cv::imshow("backnn", backknn);
//write.write(frame);
char c = cv::waitKey(100);
if (c == 27)
break;
}
cv::waitKey(0);
return;
}
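A quick note on the two factory calls above: both accept tuning parameters, and the demo relies on the defaults. A hedged sketch of the knobs (the numbers below are the documented OpenCV defaults, shown for illustration only):
cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
cv::createBackgroundSubtractorMOG2(500, 16.0, true); //history, varThreshold, detectShadows
cv::Ptr<cv::BackgroundSubtractorKNN> knn =
cv::createBackgroundSubtractorKNN(500, 400.0, true); //history, dist2Threshold, detectShadows
mog2->apply(frame, backmog2, 0.005); //optional third argument: fixed learning rate in [0,1]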
void TRACK::print_f()
{
std::cout << "hello OpenCV... coming...!" << std::endl;
}
//***********************************************Moving-object detection***********************************************
//Detect and count moving objects
void TRACK::obj_detected()
{
cv::VideoCapture capture;
capture.open("D:/Images/video01.avi");
if (!capture.isOpened()) {
printf("could not load video data...\n");
return;
}
cv::namedWindow("input video", CV_WINDOW_AUTOSIZE);
cv::namedWindow("motion objects", CV_WINDOW_AUTOSIZE);
// Initialize the background-subtraction (BS) model: background modeling yields the background or foreground image
//cv::Ptr<cv::BackgroundSubtractor> pMOG2 = cv::createBackgroundSubtractorMOG2();
//Ptr<BackgroundSubtractorMOG2> pMOG2 = createBackgroundSubtractorMOG2();
cv::Ptr<cv::BackgroundSubtractorKNN> pMOG2 = cv::createBackgroundSubtractorKNN();
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(-1, -1));
std::vector<std::vector<cv::Point>> contours;
//vector<Vec4i> hireachy;
int count = 0;
int maxcount = 0;
int mincount = 100;
cv::Mat frame, gray, mogMask, backImage;
while (capture.read(frame)) {
imshow("input video", frame);
pMOG2->apply(frame, mogMask); //foreground mask from background modeling
pMOG2->getBackgroundImage(backImage); //extract the modeled background image
GaussianBlur(mogMask, mogMask, cv::Size(3, 3), 0);
threshold(mogMask, mogMask, 100, 255, cv::THRESH_BINARY); //adding THRESH_OTSU would pick the threshold adaptively instead
//threshold(mogMask, mogMask, 100, 255, THRESH_BINARY | THRESH_OTSU);
morphologyEx(mogMask, mogMask, cv::MORPH_OPEN, kernel, cv::Point(-1, -1));
imshow("mogMask", mogMask);
imshow("backImage", backImage);
findContours(mogMask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
count = 0;
char numText[8];
for (size_t t = 0; t < contours.size(); t++) {
double area = contourArea(contours[t]);
if (area < 40) continue;
cv::Rect selection = boundingRect(contours[t]);
if (selection.width < 5 || selection.height < 8) continue;
count++;
rectangle(frame, selection, cv::Scalar(0, 0, 255), 2, 8);
sprintf(numText, "%d", count);
putText(frame, numText, cv::Point(selection.x, selection.y), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0), 1, 8);
if (count > maxcount)
{
maxcount = count;
std::cout << "maxcount=" << maxcount << std::endl;
}
if (count < mincount)
{
mincount = count;
std::cout << "mincount=" << mincount << std::endl;
}
}
imshow("motion objects", frame);
char c = cv::waitKey(50);
if (c == 27) {// ESC
break;
}
}
capture.release();
cv::waitKey(0);
return;
}
// Track a moving object
void TRACK::tracker()
{
cv::Mat frame;
cv::VideoCapture capture("D:/Images/video01.avi");
//Ptr<Tracker> tracker = Tracker::create("KCF"); //OpenCV <= 3.2 string API; available trackers include BOOSTING, MIL, KCF, TLD, MEDIANFLOW
//Ptr<Tracker> tracker = Tracker::create("TLD");
cv::Ptr<cv::Tracker> tracker = cv::TrackerBoosting::create();
cv::Rect2d rect;
if (!capture.read(frame)) return; //bail out if the video could not be read
resize(frame, frame, cv::Size(), 1.25, 1.25);
imshow("frame", frame);
rect = selectROI("frame", frame, false, false);
tracker->init(frame, rect);
std::cout << "starting tracking!" << std::endl;
while (capture.read(frame))
{
resize(frame, frame, cv::Size(), 1.25, 1.25);
tracker->update(frame, rect);
rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
imshow("TRACK", frame);
char c = cv::waitKey(50);
if (c == 27)
break;
}
cv::waitKey(0);
return;
}
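One caveat: TrackerBoosting::create() is the OpenCV 3.x opencv_contrib API. In OpenCV 4.x the old trackers moved into the cv::legacy namespace, and Tracker::init/update take cv::Rect rather than cv::Rect2d. A minimal sketch of the 4.x equivalent, assuming the opencv_contrib tracking module is available (KCF is swapped in here as the drop-in tracker):
#include <opencv2/tracking.hpp>
cv::Ptr<cv::Tracker> tracker4 = cv::TrackerKCF::create();
cv::Rect box = cv::selectROI("frame", frame, false, false);
tracker4->init(frame, box);
bool ok = tracker4->update(frame, box); //returns false once the target is lost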
using namespace std;
using namespace cv;
//Sparse optical flow && moving-target detection
cv::Mat frame, gray;
cv::Mat prev_frame, prev_gray;
std::vector<cv::Point2f> features; //Shi-Tomasi corner detection - feature data
std::vector<cv::Point2f> iniPoints; //initial feature positions
std::vector<cv::Point2f> fpts[2]; //feature point locations in the previous and current frame
std::vector<uchar> status; //per-point flag: tracking succeeded
std::vector<float> errors; //per-point tracking error
void drawTrackLines();
void detectFeatures(cv::Mat& ingray) //feature (corner) detection
{
//up to 5000 corners, quality level 0.01, minimum distance 10 px, block size 3, Harris detector disabled
goodFeaturesToTrack(ingray, features, 5000, 0.01, 10, Mat(), 3, false, 0.04);
cout << "detect features=" << features.size() << endl;
}
void drawFeature(Mat &inFrame) //draw the feature points
{
for (int t = 0; t < fpts[0].size(); ++t)
{
circle(inFrame, fpts[0][t], 2, Scalar(0, 0, 255), 2, 8, 0);
}
}
void klTrackFeature() //compute the optical flow and filter the tracked points
{
calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors); //KLT: track fpts[0] from prev_gray into gray; results land in fpts[1]
int k = 0;
//filter the feature points: keep only those that were tracked successfully and actually moved
for (int i = 0; i < fpts[1].size(); i++)
{
double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);
if (dist > 2 && status[i])
{
iniPoints[k] = iniPoints[i];
fpts[1][k++] = fpts[1][i];
}
}
//keep the surviving feature points and draw the track lines
iniPoints.resize(k); //discard the filtered-out points by shrinking the containers
fpts[1].resize(k);
drawTrackLines();
swap(fpts[1], fpts[0]); //the current points become the previous points for the next frame
}
void drawTrackLines() //draw the trails from the initial to the current point positions
{
for (int i = 0; i < fpts[1].size(); i++)
{
line(frame, iniPoints[i], fpts[1][i], Scalar(0, 255, 0), 1, 8, 0);
circle(frame, fpts[1][i], 2, Scalar(0, 0, 255), 2, 8, 0);
}
}
// Sparse optical flow (KLT)
void TRACK::klTrack()
{
VideoCapture capture("D:/Images/video01.avi");
while (capture.read(frame))
{
//resize(frame, frame, Size(), 0.25, 0.25);
cvtColor(frame, gray, CV_BGR2GRAY);
if (fpts[0].size() < 40)
{
detectFeatures(gray);
fpts[0].insert(fpts[0].end(), features.begin(), features.end());
iniPoints.insert(iniPoints.end(), features.begin(), features.end());
}
else
{
cout << "enough features, skipping re-detection" << endl;
}
if (prev_gray.empty())
{
gray.copyTo(prev_gray);
}
klTrackFeature();
drawFeature(frame);
//update the previous-frame data
gray.copyTo(prev_gray);
frame.copyTo(prev_frame);
imshow("frame", frame);
char c = waitKey(50);
if (c == 27)
{
break;
}
}
waitKey(0);
return;
}
//Dense optical flow detection
void drawlines(Mat& flowdata, Mat& Image)
{
for (int i = 0; i < Image.rows; i++)
{
for (int j = 0; j < Image.cols; j++)
{
const Point2f fxy = flowdata.at<Point2f>(i, j);
if (fabs(fxy.x) > 1 || fabs(fxy.y) > 1) //use the absolute value so leftward/upward motion is drawn too
{
line(Image, Point(j, i), Point((j + fxy.x), (i + fxy.y)), Scalar(0, 0, 255), 1, 8, 0);
circle(Image, Point(j, i), 2, Scalar(0, 255, 0), 2, 8, 0);
}
}
}
}
void TRACK::denseTrack()
{
Mat frame, gray, prev_gray, flowdata, flowresult;
VideoCapture capture("D:/Images/video01.avi");
capture.read(frame);
//resize(frame, frame, Size(), 0.25, 0.25);
cvtColor(frame, prev_gray, CV_BGR2GRAY);
//start processing from the second frame
while (capture.read(frame))
{
//resize(frame, frame, Size(), 0.25, 0.25);
cvtColor(frame, gray, CV_BGR2GRAY);
if (!prev_gray.empty())
{
calcOpticalFlowFarneback(prev_gray, gray, flowdata, 0.5, 3, 15, 3, 5, 1.2, 0); //Farneback dense optical flow
cvtColor(prev_gray, flowresult, CV_GRAY2BGR); //convert the grayscale frame to BGR so colored lines can be drawn on it
drawlines(flowdata, flowresult);
imshow("flowresult", flowresult);
imshow("frame", frame);
}
char c = waitKey(50);
if (c == 27)
break;
}
waitKey(0);
return;
}
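Drawing a line at every moving pixel becomes unreadable for fast motion. A common alternative visualization, sketched here after the standard OpenCV HSV flow recipe (not part of the original demo), encodes flow direction as hue and magnitude as brightness:
//assumes flowdata is the CV_32FC2 output of calcOpticalFlowFarneback
Mat flow_parts[2], magnitude, angle, magn_norm;
split(flowdata, flow_parts);
cartToPolar(flow_parts[0], flow_parts[1], magnitude, angle, true); //angle in degrees
normalize(magnitude, magn_norm, 0.0, 1.0, NORM_MINMAX);
angle *= (1.0f / 360.0f) * (180.0f / 255.0f); //scale so hue lands in [0,180] after the *255 below
Mat hsv_parts[3] = { angle, Mat::ones(angle.size(), CV_32F), magn_norm };
Mat hsv, hsv8, bgrFlow;
merge(hsv_parts, 3, hsv);
hsv.convertTo(hsv8, CV_8U, 255.0);
cvtColor(hsv8, bgrFlow, COLOR_HSV2BGR);
imshow("flow-hsv", bgrFlow);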
int FORE_GROUND = 255; int thresh = 10;
void TRACK::ThreeFrameSubtract()
{
VideoCapture video("D:/Images/video01.avi"); //check whether the video can be opened
if (!video.isOpened())
return;
//holds the current color frame
Mat currentBGRFrame;
//grayscale versions of the two previous frames and the current frame
Mat previousSecondGrayFrame; Mat previousFirstGrayFrame; Mat currentGrayFrame;
//the two frame differences
Mat previousFrameDifference; //previousFrameFirst - previousFrameSecond
Mat currentFrameDifference; //currentFrame - previousFrameFirst
//absolute value of a frame difference
Mat absFrameDifference;
//foreground masks
Mat previousSegmentation; Mat currentSegmentation; Mat segmentation;
//show the foreground
namedWindow("segmentation", 1);
createTrackbar("Threshold:", "segmentation", &thresh, FORE_GROUND, NULL);
//frame counter
int numberFrame = 0;
//structuring element for the morphology step
Mat morphologyKernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
for (;;)
{
int64 tStart = getTickCount();
//read the current frame
//video >> currentBGRFrame;
video.read(currentBGRFrame);
//stop when no frame is available
if (!currentBGRFrame.data)
break;
numberFrame++;
//cout << "numberFrame=" << numberFrame << endl;
//color space conversion
cvtColor(currentBGRFrame, currentGrayFrame, COLOR_BGR2GRAY);
if (numberFrame == 1) {
//save the grayscale version of the current frame
previousSecondGrayFrame = currentGrayFrame.clone();
//show the video
imshow("video", currentBGRFrame); continue;
}
else if (numberFrame == 2)
{
//save the grayscale version of the current frame
previousFirstGrayFrame = currentGrayFrame.clone();
//previousFirst - previousSecond
subtract(previousFirstGrayFrame, previousSecondGrayFrame, previousFrameDifference, Mat(), CV_16SC1);
//absolute value
absFrameDifference = abs(previousFrameDifference);
//convert back to 8-bit depth
absFrameDifference.convertTo(absFrameDifference, CV_8UC1, 1, 0);
//threshold
threshold(absFrameDifference, previousSegmentation, double(thresh), double(FORE_GROUND), THRESH_BINARY);
//show the video
imshow("video", currentBGRFrame);
continue;
}
else
{
//src1 - src2
subtract(currentGrayFrame, previousFirstGrayFrame, currentFrameDifference, Mat(), CV_16SC1);
//absolute value
absFrameDifference = abs(currentFrameDifference);
//convert back to 8-bit depth
absFrameDifference.convertTo(absFrameDifference, CV_8UC1, 1, 0);
//threshold
threshold(absFrameDifference, currentSegmentation, double(thresh), double(FORE_GROUND), THRESH_BINARY);
//AND the two difference masks
bitwise_and(previousSegmentation, currentSegmentation, segmentation);
//median filter
medianBlur(segmentation, segmentation, 3);
//morphology (closing)
//morphologyEx(segmentation,segmentation,MORPH_OPEN,morphologyKernel,Point(-1,-1),1,BORDER_REPLICATE);
morphologyEx(segmentation, segmentation, MORPH_CLOSE, morphologyKernel, Point(-1, -1), 2, BORDER_REPLICATE);
//find contours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//work on a copy: findContours may modify its input, and segmentation is displayed later
Mat tempSegmentation = segmentation.clone();
findContours(tempSegmentation, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); //CV_RETR_TREE
vector< vector<Point> > contours_poly(contours.size());
/* bounding boxes of the moving objects */
vector<Rect> boundRect;
boundRect.clear();
//draw the moving objects
for (size_t index = 0; index < contours.size(); index++)
{
approxPolyDP(Mat(contours[index]), contours_poly[index], 3, true);
Rect rect = boundingRect(Mat(contours_poly[index]));
rectangle(currentBGRFrame, rect, Scalar(0, 255, 255), 2);
}
//show the video
imshow("video", currentBGRFrame);
//foreground detection
imshow("segmentation", segmentation);
//save the grayscale version of the current frame
previousFirstGrayFrame = currentGrayFrame.clone();
//save the current foreground mask
previousSegmentation = currentSegmentation.clone();
}
int64 tEnd = getTickCount() - tStart;
double elapsedMs = tEnd / getTickFrequency() * 1000.0;
printf("Processing time for this frame: %.2f ms\n", elapsedMs);
if (waitKey(33) == 'q')
break;
}
}
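To recap the core idea in a few lines (a sketch, assuming curr, prev1 and prev2 are three consecutive grayscale frames): a pixel counts as foreground only if it changed in both of the last two intervals, which suppresses the ghosting a plain two-frame difference leaves behind. absdiff is used here to avoid the signed 16-bit round trip in the demo above:
Mat d1, d2, mask;
absdiff(prev1, prev2, d1); // |I(t-1) - I(t-2)|
absdiff(curr, prev1, d2); // |I(t) - I(t-1)|
threshold(d1, d1, thresh, 255, THRESH_BINARY);
threshold(d2, d2, thresh, 255, THRESH_BINARY);
bitwise_and(d1, d2, mask); //moving in both intervals => genuine motion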
I'll leave it at that; you will have to run the experiments yourself!