当前位置:   article > 正文

图像跟踪算法demo的总结附源码_多物体追踪demo

多物体追踪demo

图像跟踪算法demo的总结附源码

将最近写过的一些跟踪算法的demo做个总结吧,目前都是基于传统算法(基于深度学习模型的方法实在太多,暂不展开)。下面给出的都是源码,且已实际运行成功,无需更改,可作为参考使用!

跟踪算法demo头文件:tracking.h

#pragma once
#ifndef _TRACK_H_
#define _TRACK_H_

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <opencv2/tracking.hpp>

// Collection of classic (non-deep-learning) object tracking demos.
// Each static method is a self-contained demo: it opens its own video
// source, runs one algorithm, and shows the result in HighGUI windows.
class TRACK
{
public:
	/** Background-subtraction demo (MOG2 / KNN): suppresses the static
	 *  background so the moving foreground stands out, then boxes the
	 *  largest moving contour. */
	static void backgroundsubtractor();
	// Moving-object detection with per-frame object counting.
	static void obj_detected();
	// Single-object tracking via the OpenCV Tracker API (Boosting).
	static void tracker();
	// Sparse optical flow (Shi-Tomasi corners + pyramidal Lucas-Kanade).
	static void klTrack();
	// Dense optical flow (Farneback).
	static void denseTrack();
	// Three-frame difference motion segmentation.
	static void ThreeFrameSubtract();
	// Smoke-test helper: prints a greeting to verify linking.
	void print_f();
};

#endif // !_TRACK_H_

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31

跟踪算法demo的主要算法文件.cpp:tracking.cpp

#include "tracking.h"

void TRACK::backgroundsubtractor()
{
	cv::VideoCapture capture;
	capture.open("D:/Images/video01.avi");
	//VideoCapture capture(0);
	if (!capture.isOpened())
	{
		std::cout << "could not load video data..." << std::endl;
		return;
	}
	cv::Mat backmog2, backknn;
	cv::Ptr<cv::BackgroundSubtractor> MOG2 = cv::createBackgroundSubtractorMOG2();
	cv::Ptr<cv::BackgroundSubtractor> KNN = cv::createBackgroundSubtractorKNN();
	double fps = capture.get(CV_CAP_PROP_FPS);
	cv::Size size = cv::Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
	double width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	double height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
	std::cout << "width=" << width << std::endl << "height=" << height << std::endl;
	std::cout << "FPS:" << fps << std::endl;
	//VideoWriter write("E:/Images/vidoe.mp4", CV_FOURCC('D', 'I', 'V', 'X'), 15.0, size, true);
	cv::Mat frame, gray, binary;
	cv::namedWindow("video-demo", CV_WINDOW_AUTOSIZE);
	cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(-1, -1));
	std::vector<cv::Mat> bgr;
	while (capture.read(frame))
	{
		resize(frame, frame, cv::Size(), 0.25, 0.25);
		//inRange(frame, Scalar(0, 127, 0), Scalar(127, 255, 127), gray);   //大于和小于的阈值为零,在范围内的为255
		cvtColor(frame, gray, CV_BGR2GRAY);
		threshold(gray, binary, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
		//bitwise_not(frame, frame);
		//flip(frame, frame, 1);  //水平翻转
		MOG2->apply(frame, backmog2);  //有点滤除背景的意思  apply()函数
									   //morphologyEx(backmog2, backmog2, MORPH_OPEN, kernel, Point(-1, -1));
									   //dilate(backmog2, backmog2, kernel, Point(-1, -1), 3);
		KNN->apply(frame, backknn);
		cv::imshow("KNN-apply", backknn);
		morphologyEx(backknn, backknn, cv::MORPH_OPEN, kernel, cv::Point(-1, -1));
		erode(backknn, backknn, kernel, cv::Point(-1, -1));
		//dilate(backknn, backknn, kernel, Point(-1, -1), 3);
		std::vector<std::vector<cv::Point>> contours;
		cv::Rect rect;
		findContours(backknn, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point());
		double areamax = 0;
		for (int i = 0; i < contours.size(); i++)
		{
			double area = contourArea(contours[i]);
			std::cout << "area=" << area << std::endl;
			if (area > areamax)
			{
				rect = boundingRect(contours[i]);
			}
		}
		rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 8, 0);
		cv::imshow("video-demo", frame);
		cv::imshow("backmog2", backmog2);
		cv::imshow("backnn", backknn);
		//write.write(frame);
		char c = cv::waitKey(100);
		if (c == 27)
			break;
	}
	cv::waitKey(0);
	return;
}

void TRACK::print_f()
{
	// Simple smoke test: prints a greeting to confirm the class links/runs.
	const std::string msg = "hello OpenCV... coming...!";
	std::cout << msg << std::endl;
}


//***********************************************移动侦测物体***********************************************
//移动物体的侦测与统计

void TRACK::obj_detected()
{
	// Moving-object detection demo: KNN background subtraction, mask cleanup,
	// then a numbered bounding box per detected object. Tracks the running
	// max/min object count across frames.
	cv::VideoCapture capture;
	capture.open("D:/Images/video01.avi");
	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return;
	}

	cv::namedWindow("input video", CV_WINDOW_AUTOSIZE);
	cv::namedWindow("motion objects", CV_WINDOW_AUTOSIZE);

	// KNN background model: apply() yields the foreground mask,
	// getBackgroundImage() yields the learned background.
	cv::Ptr<cv::BackgroundSubtractorKNN> pMOG2 = cv::createBackgroundSubtractorKNN();
	cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3), cv::Point(-1, -1));

	std::vector<std::vector<cv::Point>> contours;
	int count = 0;
	int maxcount = 0;
	int mincount = 100;   // sentinel: any real per-frame count will be lower

	cv::Mat frame, gray, mogMask, backImage;
	while (capture.read(frame)) {
		imshow("input video", frame);
		pMOG2->apply(frame, mogMask);            // foreground mask
		pMOG2->getBackgroundImage(backImage);    // learned background image
		GaussianBlur(mogMask, mogMask, cv::Size(3, 3), 0);
		threshold(mogMask, mogMask, 100, 255, cv::THRESH_BINARY);
		morphologyEx(mogMask, mogMask, cv::MORPH_OPEN, kernel, cv::Point(-1, -1));
		imshow("mogMask", mogMask);
		imshow("backImage", backImage);

		findContours(mogMask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
		count = 0;
		char numText[8];
		for (size_t t = 0; t < contours.size(); t++) {
			double area = contourArea(contours[t]);
			if (area < 40) continue;   // drop tiny noise blobs
			cv::Rect selection = boundingRect(contours[t]);
			if (selection.width < 5 || selection.height < 8) continue;
			count++;
			rectangle(frame, selection, cv::Scalar(0, 0, 255), 2, 8);

			// BUG FIX: snprintf bounds the write; the original putText passed
			// CV_FONT_NORMAL as fontFace and the enumerator FONT_HERSHEY_PLAIN
			// (value 1) accidentally as the fontScale argument.
			snprintf(numText, sizeof(numText), "%d", count);
			putText(frame, numText, cv::Point(selection.x, selection.y), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0), 1, 8);
			if (count > maxcount)
			{
				maxcount = count;
				std::cout << "maxcount=" << maxcount << std::endl;
			}
			if (count < mincount)
			{
				mincount = count;
				std::cout << "mincount=" << mincount << std::endl;
			}
		}

		imshow("motion objects", frame);
		char c = cv::waitKey(50);
		if (c == 27) {   // ESC quits
			break;
		}
	}

	capture.release();
	cv::waitKey(0);
	return;
}

// Single-object tracking demo: the user selects a ROI on the first frame,
// then an OpenCV Boosting tracker follows it through the video.
void TRACK::tracker()
{
	cv::Mat frame;
	cv::VideoCapture capture("D:/Images/video01.avi");
	// BUG FIX: the original never checked that the video opened or that the
	// first frame was readable before using it.
	if (!capture.isOpened())
	{
		std::cout << "could not load video data..." << std::endl;
		return;
	}
	// Other Tracker implementations (MIL, KCF, TLD, MedianFlow) are drop-in
	// replacements for TrackerBoosting here.
	cv::Ptr<cv::Tracker> tracker = cv::TrackerBoosting::create();
	cv::Rect2d rect;
	if (!capture.read(frame))
		return;
	resize(frame, frame, cv::Size(), 1.25, 1.25);
	imshow("frame", frame);
	rect = selectROI("frame", frame, false, false);
	if (rect.width <= 0 || rect.height <= 0)
		return;   // selection cancelled — nothing to track
	tracker->init(frame, rect);
	std::cout << "starting tracking!" << std::endl;
	while (capture.read(frame))
	{
		resize(frame, frame, cv::Size(), 1.25, 1.25);
		// BUG FIX: draw the box only when update() succeeds; the original
		// drew a stale rectangle after tracking failure.
		if (tracker->update(frame, rect))
			rectangle(frame, rect, cv::Scalar(0, 0, 255), 2, 1);
		imshow("TRACK", frame);
		char c = cv::waitKey(50);
		if (c == 27)   // ESC quits
			break;
	}
	cv::waitKey(0);
	return;
}


using namespace std;
using namespace cv;
// Shared state for the sparse-optical-flow (KLT) demo below.
// NOTE(review): file-scope `using namespace` and mutable globals are kept
// as-is because the sibling helper functions depend on them.
cv::Mat frame, gray;                 // current frame (BGR) and its grayscale version
cv::Mat prev_frame, prev_gray;       // previous frame and its grayscale version
std::vector<cv::Point2f> features;   // Shi-Tomasi corner detection output
std::vector<cv::Point2f> iniPoints;  // initial positions of the tracked points
std::vector<cv::Point2f> fpts[2];    // point positions in the previous [0] and current [1] frame
std::vector<uchar> status;           // per-point success flags from calcOpticalFlowPyrLK
std::vector<float> errors;           // per-point tracking error from calcOpticalFlowPyrLK
void drawTrackLines();
// Detect Shi-Tomasi corners in `ingray` and store them in the global
// `features` buffer.
void detectFeatures(cv::Mat& ingray)
{
	const int maxCorners = 5000;
	const double qualityLevel = 0.01;
	const double minDistance = 10;
	goodFeaturesToTrack(ingray, features, maxCorners, qualityLevel, minDistance, Mat(), 3, false, 0.04);
	cout << "detect features=" << features.size() << endl;
}

// Render every currently-tracked point (fpts[0]) as a small red dot.
void drawFeature(Mat &inFrame)
{
	for (size_t idx = 0; idx < fpts[0].size(); ++idx)
	{
		const Point2f& pt = fpts[0][idx];
		circle(inFrame, pt, 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}
void klTrackFeature()    // run pyramidal Lucas-Kanade and keep only the surviving points
{
	// Track fpts[0] (previous frame) into fpts[1] (current frame).
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);  // results land in fpts[1]/status/errors
	int k = 0;
	// In-place compaction: keep a point only if LK succeeded AND it moved
	// more than 2 px (Manhattan distance) — stationary points are dropped.
	// Order matters: k <= i always, so iniPoints[k] = iniPoints[i] is safe.
	for (int i = 0; i < fpts[1].size(); i++)
	{
		double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);
		if (dist > 2 && status[i])
		{
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];
		}
	}
	// Shrink both vectors to the surviving count, then draw the tracks.
	iniPoints.resize(k);
	fpts[1].resize(k);
	drawTrackLines();
	swap(fpts[1], fpts[0]);   // current points become "previous" for the next frame
}
void drawTrackLines()   //画光流形成的前后的过程
{
	for (int i = 0; i < fpts[1].size(); i++)
	{
		line(frame, iniPoints[i], fpts[1][i], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][i], 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}
// Sparse optical flow demo: Shi-Tomasi corners tracked frame-to-frame with
// pyramidal Lucas-Kanade; corners are re-detected when too many are lost.
void TRACK::klTrack()
{
	VideoCapture capture("D:/Images/video01.avi");
	// BUG FIX: the original never verified the video opened.
	if (!capture.isOpened())
	{
		cout << "could not load video data..." << endl;
		return;
	}
	while (capture.read(frame))
	{
		cvtColor(frame, gray, CV_BGR2GRAY);
		// Replenish the point set when too many points have been filtered out.
		if (fpts[0].size() < 40)
		{
			detectFeatures(gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}
		else
		{
			cout << "NO detect!" << endl;
		}
		// First frame: no previous image yet — LK runs gray-vs-gray once.
		if (prev_gray.empty())
		{
			gray.copyTo(prev_gray);
		}
		klTrackFeature();
		drawFeature(frame);
		// Advance the "previous frame" state for the next iteration.
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("frame", frame);
		char c = waitKey(50);
		if (c == 27)   // ESC quits
		{
			break;
		}
	}
	waitKey(0);
	return;
}


//稠密光流的检测
// Visualize a dense flow field: for every pixel whose flow component exceeds
// 1 px, draw a red line from the pixel to its displaced position and a green
// dot at the pixel.
void drawlines(Mat& flowdata, Mat& Image)
{
	for (int i = 0; i < Image.rows; i++)
	{
		for (int j = 0; j < Image.cols; j++)
		{
			const Point2f fxy = flowdata.at<Point2f>(i, j);   // flow vector at (row i, col j)
			// NOTE(review): only positive displacements are drawn — motion toward
			// the top/left (negative flow) is silently skipped; abs() on both
			// components is probably intended. Confirm before changing.
			if (fxy.x > 1 || fxy.y > 1)
			{
				line(Image, Point(j, i), Point((j + fxy.x), (i + fxy.y)), Scalar(0, 0, 255), 1, 8, 0);
				circle(Image, Point(j, i), 2, Scalar(0, 255, 0), 2, 8, 0);
			}
		}
	}
}
void TRACK::denseTrack()
{
	Mat frame, gray, prev_gray, flowdata, flowresult;
	VideoCapture capture("D:/Images/video01.avi");
	capture.read(frame);
	//resize(frame, frame, Size(), 0.25, 0.25);
	cvtColor(frame, prev_gray, CV_BGR2GRAY);
	//从第二帧数据开始
	while (capture.read(frame))
	{
		//resize(frame, frame, Size(), 0.25, 0.25);
		cvtColor(frame, gray, CV_BGR2GRAY);
		if (!prev_gray.empty())
		{
			calcOpticalFlowFarneback(prev_gray, gray, flowdata, 0.5, 3, 15, 3, 5, 1.2, 0);   //稠密光流的检测函数
			cvtColor(prev_gray, flowresult, CV_GRAY2BGR);   //灰度图,转不过来
			drawlines(flowdata, flowresult);
			imshow("flowresult", flowresult);
			imshow("frame", frame);
		}
		char c = waitKey(50);
		if (c == 27)
			break;
	}
	waitKey(0);
	return;
}


// Trackbar-adjustable parameters for the three-frame-difference demo:
// FORE_GROUD is the foreground value written by threshold() (name typo kept —
// renaming would break the function below), thresh is the difference threshold.
int FORE_GROUD = 255; int thresh = 10;
// Three-frame difference motion segmentation: AND the binarized differences
// (f[n-1]-f[n-2]) and (f[n]-f[n-1]), clean the mask, and box the motion blobs.
void TRACK::ThreeFrameSubtract()
{
	VideoCapture video("D:/Images/video01.avi");
	if (!video.isOpened())
		return;

	// Current BGR frame.
	Mat currentBGRFrame;
	// Grayscale versions of the two previous frames and the current frame.
	Mat previousSecondGrayFrame; Mat previousFirstGrayFrame; Mat currentGaryFrame;
	// Signed frame differences (CV_16S so negatives are not clipped).
	Mat previousFrameDifference; // previousFirst - previousSecond
	Mat currentFrameDifference;  // current - previousFirst
	Mat absFrameDifferece;       // |difference|, converted back to 8-bit
	// Binary foreground masks.
	Mat previousSegmentation; Mat currentSegmentation; Mat segmentation;

	namedWindow("segmentation", 1);
	// Let the user tune the difference threshold live.
	createTrackbar("阈值:", "segmentation", &thresh, FORE_GROUD, NULL);

	int numberFrame = 0;
	Mat morphologyKernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	for (;;)
	{
		// NOTE(review): getTickCount() returns int64; `long` may truncate on
		// platforms with 32-bit long. Kept for minimal churn.
		long tStart = getTickCount();
		video.read(currentBGRFrame);
		if (!currentBGRFrame.data)
			break;   // end of stream
		numberFrame++;
		cvtColor(currentBGRFrame, currentGaryFrame, COLOR_BGR2GRAY);
		if (numberFrame == 1) {
			// Frame 1: just remember it as the oldest reference.
			previousSecondGrayFrame = currentGaryFrame.clone();
			imshow("video", currentBGRFrame);   continue;
		}
		else if (numberFrame == 2)
		{
			// Frame 2: build the first difference mask.
			previousFirstGrayFrame = currentGaryFrame.clone();
			subtract(previousFirstGrayFrame, previousSecondGrayFrame, previousFrameDifference, Mat(), CV_16SC1);
			absFrameDifferece = abs(previousFrameDifference);
			absFrameDifferece.convertTo(absFrameDifferece, CV_8UC1, 1, 0);
			threshold(absFrameDifferece, previousSegmentation, double(thresh), double(FORE_GROUD), THRESH_BINARY);
			imshow("video", currentBGRFrame);
			continue;
		}
		else
		{
			// Frame >= 3: second difference mask, then AND with the previous one.
			subtract(currentGaryFrame, previousFirstGrayFrame, currentFrameDifference, Mat(), CV_16SC1);
			absFrameDifferece = abs(currentFrameDifference);
			absFrameDifferece.convertTo(absFrameDifferece, CV_8UC1, 1, 0);
			threshold(absFrameDifferece, currentSegmentation, double(thresh), double(FORE_GROUD), THRESH_BINARY);
			bitwise_and(previousSegmentation, currentSegmentation, segmentation);
			// Median blur kills salt-and-pepper noise; closing fills small holes.
			medianBlur(segmentation, segmentation, 3);
			morphologyEx(segmentation, segmentation, MORPH_CLOSE, morphologyKernel, Point(-1, -1), 2, BORDER_REPLICATE);
			vector< vector<Point> > contours;
			vector<Vec4i> hierarchy;
			// BUG FIX: the clone existed but was never used — findContours on
			// OpenCV <= 3.1 modifies its input, which would corrupt the mask
			// shown below. Run contour extraction on the copy.
			Mat tempSegmentation = segmentation.clone();
			findContours(tempSegmentation, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
			vector< vector<Point> > contours_poly(contours.size());
			// Box every motion blob on the display frame.
			for (size_t index = 0; index < contours.size(); index++)
			{
				approxPolyDP(Mat(contours[index]), contours_poly[index], 3, true);
				Rect rect = boundingRect(Mat(contours_poly[index]));
				rectangle(currentBGRFrame, rect, Scalar(0, 255, 255), 2);
			}
			imshow("video", currentBGRFrame);
			imshow("segmentation", segmentation);
			// Slide the frame/mask history forward.
			previousFirstGrayFrame = currentGaryFrame.clone();
			previousSegmentation = currentSegmentation.clone();
		}
		long tEnd = getTickCount() - tStart;
		// BUG FIX: the original printed a `long` with %.2f — undefined behavior.
		double elapsedMs = (tEnd / getTickFrequency()) * 1000.0;
		printf("Running time once again:%.2f\n", elapsedMs);

		if (waitKey(33) == 'q')
			break;
	}
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186
  • 187
  • 188
  • 189
  • 190
  • 191
  • 192
  • 193
  • 194
  • 195
  • 196
  • 197
  • 198
  • 199
  • 200
  • 201
  • 202
  • 203
  • 204
  • 205
  • 206
  • 207
  • 208
  • 209
  • 210
  • 211
  • 212
  • 213
  • 214
  • 215
  • 216
  • 217
  • 218
  • 219
  • 220
  • 221
  • 222
  • 223
  • 224
  • 225
  • 226
  • 227
  • 228
  • 229
  • 230
  • 231
  • 232
  • 233
  • 234
  • 235
  • 236
  • 237
  • 238
  • 239
  • 240
  • 241
  • 242
  • 243
  • 244
  • 245
  • 246
  • 247
  • 248
  • 249
  • 250
  • 251
  • 252
  • 253
  • 254
  • 255
  • 256
  • 257
  • 258
  • 259
  • 260
  • 261
  • 262
  • 263
  • 264
  • 265
  • 266
  • 267
  • 268
  • 269
  • 270
  • 271
  • 272
  • 273
  • 274
  • 275
  • 276
  • 277
  • 278
  • 279
  • 280
  • 281
  • 282
  • 283
  • 284
  • 285
  • 286
  • 287
  • 288
  • 289
  • 290
  • 291
  • 292
  • 293
  • 294
  • 295
  • 296
  • 297
  • 298
  • 299
  • 300
  • 301
  • 302
  • 303
  • 304
  • 305
  • 306
  • 307
  • 308
  • 309
  • 310
  • 311
  • 312
  • 313
  • 314
  • 315
  • 316
  • 317
  • 318
  • 319
  • 320
  • 321
  • 322
  • 323
  • 324
  • 325
  • 326
  • 327
  • 328
  • 329
  • 330
  • 331
  • 332
  • 333
  • 334
  • 335
  • 336
  • 337
  • 338
  • 339
  • 340
  • 341
  • 342
  • 343
  • 344
  • 345
  • 346
  • 347
  • 348
  • 349
  • 350
  • 351
  • 352
  • 353
  • 354
  • 355
  • 356
  • 357
  • 358
  • 359
  • 360
  • 361
  • 362
  • 363
  • 364
  • 365
  • 366
  • 367
  • 368
  • 369
  • 370
  • 371
  • 372
  • 373
  • 374
  • 375
  • 376
  • 377
  • 378
  • 379
  • 380
  • 381
  • 382
  • 383
  • 384
  • 385
  • 386
  • 387
  • 388
  • 389
  • 390
  • 391
  • 392
  • 393
  • 394
  • 395
  • 396
  • 397
  • 398
  • 399
  • 400
  • 401
  • 402
  • 403
  • 404
  • 405
  • 406
  • 407
  • 408
  • 409
  • 410
  • 411
  • 412
  • 413
  • 414
  • 415
  • 416
  • 417
  • 418
  • 419
  • 420
  • 421
  • 422
  • 423
  • 424
  • 425

留在这里了,具体的实验就得自己亲自去测试了!!!

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/代码探险家/article/detail/739173
推荐阅读
相关标签
  

闽ICP备14008679号