#include<iostream>
#include <string>
#include <fstream>
#include <sstream>
#include <opencv2/core.hpp>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgproc/types_c.h>
#include <opencv2/imgproc/imgproc_c.h>

#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/videoio/videoio_c.h>

#include <opencv2/highgui.hpp>
#include <opencv2/highgui/highgui_c.h>

#include <opencv2/objdetect.hpp>
#include <opencv2/dnn.hpp>

#include "common.hpp"
using std::cin;
using std::cout;
using std::endl;
using namespace std;
using namespace cv;
using namespace dnn;
// Demo payload type for the FileStorage (YAML/XML) serialization sample below.
// The free functions write()/read() near the end of this file delegate to the
// member write()/read() so that `fs << "MyData" << m` / `fs["MyData"] >> m` work.
class MyData
{
public:
	// Default state: zeroed numbers, empty id (used when a node is missing).
	MyData() : A(0), X(0), id()
	{}
	// Tagged constructor producing fixed demo values.
	explicit MyData(int) : A(97), X(CV_PI), id("mydata1234") // explicit to avoid implicit conversion
	{}
	void write(FileStorage& fs) const                        //Write serialization for this class
	{
		// Emits a mapping: { A: <int>, X: <double>, id: <string> }
		fs << "{" << "A" << A << "X" << X << "id" << id << "}";
	}
	void read(const FileNode& node)                          //Read serialization for this class
	{
		A = (int)node["A"];
		X = (double)node["X"];
		id = (string)node["id"];
	}
public:   // Data Members
	int A;      // demo integer field
	double X;   // demo floating-point field
	string id;  // demo string field
};
namespace
{
	/** Global trackbar state for the contrast/brightness/gamma demo (100 = neutral). */
	int alpha = 100;
	int beta = 100;
	int gamma_cor = 100;
	Mat img_original, img_corrected, img_gamma_corrected;

	// Linear transform res = alpha_*img + beta_, shown side by side with the original.
	void basicLinearTransform(const Mat &img, const double alpha_, const int beta_)
	{
		Mat res;
		img.convertTo(res, -1, alpha_, beta_);   // -1 keeps the source depth
		hconcat(img, res, img_corrected);
		imshow("Brightness and contrast adjustments", img_corrected);
	}

	// Gamma correction via a 256-entry lookup table; gamma_ must be non-negative.
	void gammaCorrection(const Mat &img, const double gamma_)
	{
		CV_Assert(gamma_ >= 0);
		//! [changing-contrast-brightness-gamma-correction]
		Mat lookUpTable(1, 256, CV_8U);
		uchar* p = lookUpTable.ptr();
		for (int i = 0; i < 256; ++i) {
			p[i] = saturate_cast<uchar>(pow(i / 255.0, gamma_) * 255.0);
		}
		// Fix: LUT allocates its output itself, so the previous img.clone() into
		// `res` was an immediately-overwritten (wasted) full-frame copy.
		Mat res;
		LUT(img, lookUpTable, res);
		hconcat(img, res, img_gamma_corrected);
		imshow("Gamma correction", img_gamma_corrected);
	}

	// Trackbar callbacks: map slider positions to alpha in [0,5] and beta in [-100,100].
	void on_linear_transform_alpha_trackbar(int, void *)
	{
		double alpha_value = alpha / 100.0;
		int beta_value = beta - 100;
		basicLinearTransform(img_original, alpha_value, beta_value);
	}
	void on_linear_transform_beta_trackbar(int, void *)
	{
		double alpha_value = alpha / 100.0;
		int beta_value = beta - 100;
		basicLinearTransform(img_original, alpha_value, beta_value);
	}
	// Slider position / 100 is the gamma exponent.
	void on_gamma_correction_trackbar(int, void *)
	{
		double gamma_value = gamma_cor / 100.0;
		gammaCorrection(img_original, gamma_value);
	}
}
// BGR color shortcuts used by the drawing helpers below.
#define mscolor_red	Scalar(0, 0, 255)
#define mscolor_blue	Scalar(255, 0, 0)
#define mscolor_green	Scalar(0, 255, 0)

// Feature switches (DZWL = region intrusion check, DZHX = line-crossing check).
#define CONFIG_ENABLE_DZWL						1
#define CONFIG_ENABLE_DZHX						1
#define CONFIG_ENABLE_DRAW_ALL					0
#define CONFIG_ENABLE_DRAW_LARGE_MINSIZE		0
#define CONFIG_ENABLE_DZHX_CHECKLINE			1
#define CONFIG_ENABLE_DZHX_CHECKFANGXIANG		0
#define CONFIG_INPUT_1080P						0
		
#if  CONFIG_INPUT_1080P
#define CONFIG_ENABLE_SHOW_NFO	1
#define CONFIG_URL				"rtsp://admin:dx123456@192.168.1.237:554/media/video1"
#define CONFIG_INPUT_WIDTH		1920
#define CONFIG_INPUT_HEIGHT		1080
#define CONFIG_DEBUG_IMG		0
#define CONFIG_DEBUG_IMG_Y		500
#define CONFIG_ENABLE_SHOW_ALLFRAME			1
#else
#define CONFIG_ENABLE_SHOW_NFO	0
#define CONFIG_URL				"rtsp://admin:dx123456@192.168.1.237:554/media/video3"
#define CONFIG_INPUT_WIDTH		352
#define CONFIG_INPUT_HEIGHT		288
#define CONFIG_DEBUG_IMG			1
#define CONFIG_DEBUG_IMG_Y		300
#define CONFIG_ENABLE_SHOW_ALLFRAME			0
#endif

// Scale coordinates given in 1080p reference space to the active capture size.
// Fix: the macro argument is now parenthesized so expression arguments such as
// CONFIG_STAD_X(a+b) evaluate correctly (previously a+b*WIDTH/1920 by precedence).
#define CONFIG_STAD_X(X)  	((X)*CONFIG_INPUT_WIDTH/1920)
#define CONFIG_STAD_Y(Y)  	((Y)*CONFIG_INPUT_HEIGHT/1080)

// Intrusion-region (DZWL) rectangle corners, 1080p reference coordinates.
#define DZWL_X1		CONFIG_STAD_X(500)
#define DZWL_Y1		CONFIG_STAD_Y(500)
#define DZWL_X2		CONFIG_STAD_X(1020)
#define DZWL_Y2		CONFIG_STAD_Y(740)

// Tripwire (DZHX) segment endpoints, 1080p reference coordinates.
#define DZHX_X1		CONFIG_STAD_X(850)
#define DZHX_Y1		CONFIG_STAD_Y(50)
#define DZHX_X2		CONFIG_STAD_X(900)
#define DZHX_Y2		CONFIG_STAD_Y(300)	

// Accepted target (blob) size bounds.
#define MIN_WIDTH	CONFIG_STAD_X(40)
#define MIN_HEIGHT	CONFIG_STAD_Y(40)
#define MAX_WIDTH	CONFIG_STAD_X(800)
#define MAX_HEIGHT	CONFIG_STAD_Y(800)
// Canvas size used by the drawing demos.
#define w 400
#define  path_sample_data	"../opencv/samples/data" 
#define  path_data			"../opencv/data" 
// Fix: removed a stray tab character that was embedded inside the path string.
#define  path_download		"../download" 

// Shared background-subtractor handle used by the intrusion-detection path.
Ptr<BackgroundSubtractor> pBackSub;

// HAAR cascades + their latest detection results, filled by msopencv_pepleapi_detect
// and consumed by msopencv_pepleapi_draw.
CascadeClassifier fullbody_cascade;
CascadeClassifier lowerbody_cascade;
CascadeClassifier upbody_cascade;
std::vector<Rect> fullbody;
std::vector<Rect> lowerbody;
std::vector<Rect> upbody;
	// Morphology demo state (NOTE(review): appears unused in this chunk — confirm against the rest of the file).
	int erosion_elem = 0;
	int erosion_size = 0;
	int dilation_elem = 0;
	int dilation_size = 0;
	Mat src;

// Command-line spec for cv::CommandLineParser (DNN object-detection sample style).
std::string keys =
    "{ help  h     | | Print help message. }"
    "{ @alias      | | An alias name of model to extract preprocessing parameters from models.yml file. }"
    "{ zoo         | models.yml | An optional path to file with preprocessing parameters }"
    "{ input i     | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
    "{ framework f | | Optional name of an origin framework of the model. Detect it automatically if it does not set. }"
    "{ classes     | | Optional path to a text file with names of classes. }"
    "{ backend     | 0 | Choose one of computation backends: "
                        "0: automatically (by default), "
                        "1: Halide language (http://halide-lang.org/), "
                        "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
                        "3: OpenCV implementation }"
    "{ target      | 0 | Choose one of target computation devices: "
                        "0: CPU target (by default), "
                        "1: OpenCL, "
                        "2: OpenCL fp16 (half-float precision), "
                        "3: VPU }";

// Class labels loaded for the DNN detector (empty until populated elsewhere).
std::vector<std::string> classes;
/*
Image coordinate convention used throughout this file:
0------------------>x
|
|
y
*/
// Draw `text` on img with its bottom-left corner at (x,y), font scale 1, thickness 2.
// Fix: removed a redundant C-style cast of `text` to its own declared type.
void msopencv_comapi_text(Mat img,const String& text,int x,int y,Scalar color)
{
	putText(img, text, Point(x, y), CV_FONT_HERSHEY_SIMPLEX, 1, color, 2, 8);
}
// Thick (8 px) segment from (x1,y1) to (x2,y2); line type 8 (= LINE_8), no fractional shift.
void msopencv_comapi_line(Mat img,int x1,int y1,int x2,int y2,Scalar color)
{
	const Point from(x1, y1);
	const Point to(x2, y2);
	line(img, from, to, color, 8, 8, 0);
}
// Overload: black 2 px LINE_8 segment between two points.
void msopencv_comapi_line( Mat img, Point start, Point end )
{
	const Scalar black(0, 0, 0);
	line(img, start, end, black, 2, LINE_8);
}
// Outline the axis-aligned rectangle spanned by the two corners (thickness 2, line type 8).
void msopencv_comapi_rectangle(Mat img,int x1,int y1,int x2,int y2,Scalar color)
{
	const Point top_left(x1, y1);
	const Point bottom_right(x2, y2);
	rectangle(img, top_left, bottom_right, color, 2, 8);
}
// Blue full ellipse centered on the w x w canvas, axes w/4 x w/16, rotated by `angle` degrees.
void msopencv_api_ellipse( Mat img, double angle )
{
	const Point center(w/2, w/2);
	const Size axes(w/4, w/16);
	ellipse(img, center, axes, angle, 0, 360, Scalar(255, 0, 0), 2, 8);
}
// Filled red dot of radius w/32 at `center`.
void msopencv_api_circle( Mat img, Point center )
{
	const int radius = w/32;
	circle(img, center, radius, Scalar(0, 0, 255), FILLED, LINE_8);
}
// Fill a white rook silhouette (20-vertex polygon) onto the w x w canvas.
void msopencv_api_poly( Mat img )
{
	static const Point rook_outline[20] = {
		Point(    w/4,   7*w/8 ),
		Point(  3*w/4,   7*w/8 ),
		Point(  3*w/4,  13*w/16 ),
		Point( 11*w/16, 13*w/16 ),
		Point( 19*w/32,  3*w/8 ),
		Point(  3*w/4,   3*w/8 ),
		Point(  3*w/4,     w/8 ),
		Point( 26*w/40,    w/8 ),
		Point( 26*w/40,    w/4 ),
		Point( 22*w/40,    w/4 ),
		Point( 22*w/40,    w/8 ),
		Point( 18*w/40,    w/8 ),
		Point( 18*w/40,    w/4 ),
		Point( 14*w/40,    w/4 ),
		Point( 14*w/40,    w/8 ),
		Point(    w/4,     w/8 ),
		Point(    w/4,   3*w/8 ),
		Point( 13*w/32,  3*w/8 ),
		Point(  5*w/16, 13*w/16 ),
		Point(    w/4,  13*w/16 )
	};
	const Point* polygons[1] = { rook_outline };
	int vertex_counts[] = { 20 };
	fillPoly( img, polygons, vertex_counts, 1, Scalar( 255, 255, 255 ), LINE_8 );
}
// Fit an ellipse to a contour, draw it in blue and mark its center in red.
// Fix: cv::fitEllipse requires at least 5 points and throws otherwise, so
// degenerate contours are now skipped instead of crashing the caller.
void msopencv_comapi_box(Mat img_out, vector<Point> contours)
{
	if (contours.size() < 5) {
		return;
	}
	RotatedRect box = fitEllipse(Mat(contours));
	ellipse(img_out, box, mscolor_blue, 2, 8);
	circle(img_out, box.center, 5, mscolor_red, -1, 8);
}
// Thin wrapper over cv::imshow, kept for naming symmetry with the other comapi helpers.
void msopencv_comapi_imshow(const String& title,Mat img)
{
	imshow(title, img);
}
// Factory for a background subtractor: "MOG2" selects MOG2, anything else falls back to KNN.
Ptr<BackgroundSubtractor> msopencv_api_background_init(const cv::String& algo)
{
	if (algo == "MOG2") {
		return createBackgroundSubtractorMOG2();
	}
	return createBackgroundSubtractorKNN();
}
// Feed one frame into the subtractor's model and return the resulting foreground mask.
Mat msopencv_api_background_done(Ptr<BackgroundSubtractor> pBackSub,Mat frame)
{
	Mat foreground_mask;
	pBackSub->apply(frame, foreground_mask);
	return foreground_mask;
}
// Background-subtraction demo loop: read `input`, show each frame with its
// foreground mask and the frame number, until 'q'/ESC or end of stream.
// Fix: the subtractor is now created from the caller-supplied `algo`
// ("MOG2"/"KNN"); previously the parameter was ignored and "MOG2" hardcoded.
int msopencv_api_background_sample(const cv::String& input,const cv::String& algo)
{
	Ptr<BackgroundSubtractor> pBackSub = msopencv_api_background_init(algo);
	VideoCapture capture( samples::findFile( input ) );
	if (!capture.isOpened()){
		//error in opening the video input
		cerr << "Unable to open: " << input << endl;
		return 0;
	}

	Mat frame, fgMask;
	while (true) {
		capture >> frame;
		if (frame.empty()){
			break;
		}
		fgMask = msopencv_api_background_done(pBackSub, frame);

		//get the frame number and write it on the current frame
		rectangle(frame, cv::Point(10, 2), cv::Point(100,20),cv::Scalar(255,255,255), -1);
		stringstream ss;
		ss << capture.get(CAP_PROP_POS_FRAMES);
		string frameNumberString = ss.str();
		putText(frame, frameNumberString.c_str(), cv::Point(15, 15),FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));

		//show the current frame and the fg masks
		imshow("Frame", frame);
		imshow("FG Mask", fgMask);
		//quit on 'q' or ESC
		int keyboard = waitKey(30);
		if (keyboard == 'q' || keyboard == 27){
			break;
		}
	}
	return 0;
}
// Motion segmentation: difference the current capture against the background
// (subtractor model if pBackSub is non-null, otherwise a reference frame),
// clean the mask (threshold -> median -> blur -> dilate -> erode) and return
// the external contours of the surviving blobs.
// NOTE(review): in the reference-frame path the frames are diffed in color —
// the grayscale variant is compiled out with "#if 0"; confirm that is intended.
vector<vector<Point>> msopencv_contoursapi_find(Ptr<BackgroundSubtractor> pBackSub,Mat img_background, Mat img_cap)
{
	Mat diff;
	//msopencv_comapi_imshow("img_cap", img_cap);
	if(NULL!=pBackSub){
		// Model-based path: the subtractor yields the foreground mask directly.
		diff=msopencv_api_background_done( pBackSub,img_cap);
		//msopencv_comapi_text(img_cap,"background",50,200, mscolor_red);
	}else{
		Mat gray_background, gray_img;
		//msopencv_comapi_imshow("img_background", img_background);
#if 		0
		cvtColor(img_background, gray_background, CV_BGR2GRAY);
		cvtColor(img_cap, gray_img, CV_BGR2GRAY);
		absdiff(gray_background, gray_img, diff);
#else
	absdiff(img_background, img_cap, diff);
#endif
		//msopencv_comapi_text(img_cap,"absdiff",50,200, mscolor_red);
	}
#if CONFIG_DEBUG_IMG		
	msopencv_comapi_imshow("diff", diff);
	moveWindow("diff",352,CONFIG_DEBUG_IMG_Y);
#endif
	// Binarize: any per-pixel difference above 45 counts as motion.
	threshold(diff, diff, 45, 255, CV_THRESH_BINARY);
#if CONFIG_DEBUG_IMG		
	msopencv_comapi_imshow("threshold", diff);
	moveWindow("threshold",704,CONFIG_DEBUG_IMG_Y);
#endif
	// Large median filter removes speckle noise.
	medianBlur(diff, diff, 11);
#if CONFIG_DEBUG_IMG		
	msopencv_comapi_imshow("medianBlur", diff);
	moveWindow("medianBlur",1056,CONFIG_DEBUG_IMG_Y);
#endif
#if 1
	// Smooth then dilate so nearby fragments merge into single blobs.
	blur(diff, diff, Size(10, 10));
#if CONFIG_DEBUG_IMG		
	msopencv_comapi_imshow("blur", diff);
	moveWindow("blur",1408,CONFIG_DEBUG_IMG_Y);
#endif
	Mat element2 = getStructuringElement(MORPH_RECT, Size(10,10));
	dilate(diff, diff, element2);
#if CONFIG_DEBUG_IMG		
	msopencv_comapi_imshow("dilate", diff);
	moveWindow("dilate",1650,CONFIG_DEBUG_IMG_Y);
#endif
#endif
	// Light erosion trims the dilated blob edges back.
	Mat element = getStructuringElement(MORPH_RECT, Size(2, 2));
	erode(diff, diff, element);
	vector<vector<Point>> contours;  
	vector<Vec4i> hierarcy;
	// Outer contours only; keep every boundary pixel (no approximation).
	findContours(diff, contours, hierarcy, CV_RETR_EXTERNAL, CHAIN_APPROX_NONE); 
	return contours;
}
// Draw every contour (index -1 = all) in green, thickness 5, line type 8.
void msopencv_contoursapi_draw(Mat img_out, vector<vector<Point>> contours)
{
	const Scalar green(0, 255, 0);
	drawContours(img_out, contours, -1, green, 5, 8);
}
// Draw a fitted ellipse ("box") for each contour via msopencv_comapi_box.
// Cleanup: removed the unused bounding-rect / rotated-rect / corner locals the
// loop computed but never used, and switched to a size_t index to avoid the
// signed/unsigned comparison.
void msopencv_contoursapi_drawbox(Mat img_out, vector<vector<Point>> contours)
{
	for (size_t i = 0; i < contours.size(); i++)
	{
		msopencv_comapi_box(img_out, contours[i]);
	}
}
// Outline each contour's upright bounding rectangle in red.
// Cleanup: removed the unused RotatedRect vector, corner-point array and x1/y1
// locals; size_t loop index avoids the signed/unsigned comparison.
void msopencv_contoursapi_drawrectangle(Mat img_out, vector<vector<Point>> contours)
{
	for (size_t i = 0; i < contours.size(); i++)
	{
		const Rect r = boundingRect((Mat)contours[i]);
		msopencv_comapi_rectangle(img_out, r.x, r.y, r.x + r.width, r.y + r.height, mscolor_red);
	}
}
// Run the three HAAR cascades on an equalized grayscale copy of the frame.
// Results are stored in the global fullbody/lowerbody/upbody vectors.
void msopencv_pepleapi_detect( Mat frame )
{
	Mat gray;
	cvtColor(frame, gray, COLOR_BGR2GRAY);
	equalizeHist(gray, gray);
	fullbody_cascade.detectMultiScale(gray, fullbody);
	lowerbody_cascade.detectMultiScale(gray, lowerbody);
	upbody_cascade.detectMultiScale(gray, upbody);
}
Mat msopencv_pepleapi_draw( Mat frame )
{
	for ( size_t i = 0; i < fullbody.size(); i++ ){
		Point center( fullbody[i].x + fullbody[i].width/2, fullbody[i].y + fullbody[i].height/2 );
		ellipse( frame, center, Size( fullbody[i].width/2, fullbody[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 0 ), 4 );
		putText(frame, "f", center, CV_FONT_HERSHEY_SIMPLEX, 1, mscolor_red, 2, 8);
	}
	for ( size_t i = 0; i < lowerbody.size(); i++ ){
		Point center( lowerbody[i].x + lowerbody[i].width/2, lowerbody[i].y + lowerbody[i].height/2 );
		ellipse( frame, center, Size( lowerbody[i].width/2, lowerbody[i].height/2 ), 0, 0, 360, Scalar( 0, 255, 0 ), 4 );
		putText(frame, "l", center, CV_FONT_HERSHEY_SIMPLEX, 1, mscolor_red, 2, 8);
		size_t  center_x=lowerbody[i].x + lowerbody[i].width/2;
		size_t  center_y=lowerbody[i].y + lowerbody[i].height/2 ;
		for ( size_t i = 0; i < fullbody.size(); i++ ){
			if(fullbody[i].x<center_x
				&&center_x<(fullbody[i].x+fullbody[i].width)
				&&fullbody[i].y<center_y
				&&center_y<(fullbody[i].y+fullbody[i].height)
			){
				Point center( fullbody[i].x + fullbody[i].width/2, fullbody[i].y + fullbody[i].height/2 );
				ellipse( frame, center, Size( fullbody[i].width/2, fullbody[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 0 ), 4 );
				putText(frame, "flbody", center, CV_FONT_HERSHEY_SIMPLEX, 1, mscolor_red, 2, 8);
				break;
			}
		}
	}
	for ( size_t i = 0; i < upbody.size(); i++ ){
		Point center( upbody[i].x + upbody[i].width/2, upbody[i].y + upbody[i].height/2 );
		ellipse( frame, center, Size( upbody[i].width/2, upbody[i].height/2 ), 0, 0, 360, Scalar( 0, 0, 255 ), 4 );
		putText(frame, "u", center, CV_FONT_HERSHEY_SIMPLEX, 1, mscolor_red, 2, 8);
		size_t  center_x=upbody[i].x + upbody[i].width/2;
		size_t  center_y=upbody[i].y + upbody[i].height/2 ;
		for ( size_t i = 0; i < fullbody.size(); i++ ){
			if(fullbody[i].x<center_x
				&&center_x<(fullbody[i].x+fullbody[i].width)
				&&fullbody[i].y<center_y
				&&center_y<(fullbody[i].y+fullbody[i].height)
			){
				Point center( fullbody[i].x + fullbody[i].width/2, fullbody[i].y + fullbody[i].height/2 );
				ellipse( frame, center, Size( fullbody[i].width/2, fullbody[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 0 ), 4 );
				putText(frame, "fubody", center, CV_FONT_HERSHEY_SIMPLEX, 1, mscolor_red, 2, 8);
				break;
			}
		}
	}	
	return frame;
}

/*
Returns 1 when (target_x1,target_y1) lies strictly inside the rectangle whose
opposite corners are (wl_x1,wl_y1) and (wl_x2,wl_y2). wl_x1 < wl_x2 is assumed;
the y corners may be given in either order:
1.--------------------------------------
(wl_x1,wl_y1)

			(target_x1,target_y1)

								(wl_x2,wl_y2)
2.--------------------------------------
								(wl_x2,wl_y2)
								
			(target_x1,target_y1)
			
(wl_x1,wl_y1)
*/
int msopencv_comapi_IsPointInRegion(int wl_x1,int wl_y1,int wl_x2,int wl_y2,int target_x1,int target_y1)
{
	// x must lie strictly between the two x corners.
	if (target_x1 <= wl_x1 || target_x1 >= wl_x2) {
		return 0;
	}
	// y must lie strictly between the two y corners, in either corner order.
	const int y_between_down = (target_y1 > wl_y1) && (target_y1 < wl_y2);
	const int y_between_up   = (target_y1 < wl_y1) && (target_y1 > wl_y2);
	return (y_between_down || y_between_up) ? 1 : 0;
}

// Returns 1 when the target point lies above (smaller y than) the reference
// line through (wl_x1,wl_y1) with slope |dy|/|dx| taken from the two endpoints.
// Fixes:
//  - the target_x1 > wl_x1 branch computed wl_y1-(-wl_x1-target_x1)*wl_y/wl_x,
//    which is discontinuous at target_x1 == wl_x1 (sign typo); both branches
//    now use the single consistent line equation the other branch already had;
//  - a vertical line (wl_x == 0) previously divided by zero; it now yields 0.
int msopencv_comapi_IsPointBelowStandard(int wl_x1,int wl_y1,int wl_x2,int wl_y2,int target_x1,int target_y1)
{
	int ret=0;
	// Absolute deltas of the reference segment.
	int wl_y=(wl_y2 > wl_y1) ? (wl_y2-wl_y1) : (wl_y1-wl_y2);
	int wl_x=(wl_x2 > wl_x1) ? (wl_x2-wl_x1) : (wl_x1-wl_x2);
	if (wl_x == 0) {
		return 0; // vertical reference line: no y threshold exists
	}
	int standard_y = wl_y1 + (target_x1-wl_x1)*wl_y/wl_x;
	if(standard_y>target_y1){
		ret=1;
	}
	//cout  <<"-------standard_y:"<<standard_y<< "-------target_x1:"<<target_x1<<",target_y1:"<<target_y1<<"\n";
	return ret;
}

// Returns 1 when the target rectangle overlaps the watch region AND its size
// is within (min, max) bounds in both dimensions.
// Fix: max_width/max_height were accepted but never checked, so arbitrarily
// large blobs (e.g. global lighting changes) passed the filter; they are now
// rejected symmetrically with the min check.
int msopencv_comapi_IsTargetInRegion(int wl_x1,int wl_y1,int wl_x2,int wl_y2,
	int target_x1,int target_y1,int target_width,int target_height,
	int min_width,int min_height,int max_width,int max_height)
{
	// Standard axis-aligned rectangle overlap test.
	int p4_x = target_x1 + target_width, p4_y = target_y1 + target_height;
	if (wl_x1 > p4_x || wl_x2 < target_x1 || wl_y1 > p4_y || wl_y2 < target_y1) {
		return 0;
	}
	// Size gate: strictly larger than min, strictly smaller than max.
	if (target_width > min_width && target_height > min_height
		&& target_width < max_width && target_height < max_height) {
		return 1;
	}
	return 0;
}
// Returns 1 when the target rectangle appears to straddle the reference line
// (wl_x1,wl_y1)-(wl_x2,wl_y2): the rect must first pass the region/size gate,
// then (when CONFIG_ENABLE_DZHX_CHECKLINE) opposite corners of the rect must
// fall on opposite sides of the line.
// `falg_jh` == 1 swaps the line's y endpoints, flipping its slope direction.
// NOTE(review): img_cap is only used by the commented-out debug rectangle.
int msopencv_comapi_IsTargetCrossLine(Mat img_cap,int wl_x1,int wl_y1,int wl_x2,int wl_y2,
	int target_x1,int target_y1,int target_width,int target_height,
	int min_width,int min_height,int max_width,int max_height,int falg_jh)
{
	int ret=0;
	if(msopencv_comapi_IsTargetInRegion( wl_x1, wl_y1, wl_x2, wl_y2,target_x1, 
		target_y1, target_width, target_height, min_width, min_height, max_width, max_height)
	){
		#if CONFIG_ENABLE_DZHX_CHECKLINE
			//msopencv_comapi_rectangle(img_cap,target_x1, target_y1,target_x1+target_width, target_y1+target_height,mscolor_green);
			int y1=wl_y1;
			int y2=wl_y2;
			if(1==falg_jh){
				y1=wl_y2;
				y2=wl_y1;
			}
			// p1..p4: above/below-line test for the rect's four corners
			// (top-left, bottom-right, bottom-left, top-right respectively).
			int target_p1=msopencv_comapi_IsPointBelowStandard(wl_x1, y1, wl_x2, y2, target_x1, target_y1);
			int target_p2=msopencv_comapi_IsPointBelowStandard(wl_x1, y1, wl_x2, y2, target_x1+target_width, target_y1+target_height);
			int target_p3=msopencv_comapi_IsPointBelowStandard(wl_x1, y1, wl_x2, y2, target_x1, target_y1+target_height);
			int target_p4=msopencv_comapi_IsPointBelowStandard(wl_x1, y1, wl_x2, y2, target_x1+target_width, target_y1);
			//cout  << "-------target_start:"<<target_start<<",target_end:"<<target_end<<"\n";
			// Crossing: one corner of a diagonal pair above the line, the other below.
			if((!target_p1&&target_p2)
				||(!target_p4&&target_p3)){
				ret=1;
			}
		#else
			ret=1;
		#endif
	}
	return ret;
}
// Draw an arrow from p toward q, with the shaft stretched by `scale` and two
// 45-degree hooks forming the arrow head (anti-aliased, thickness 1).
void msopencv_comapi_drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
    //! [visualization1]
    // Direction p->q (radians) and segment length.
    const double angle = atan2((double) p.y - q.y, (double) p.x - q.x);
    const double hypotenuse = sqrt((double) (p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));

    // Lengthen the arrow shaft by a factor of `scale`.
    q.x = (int) (p.x - scale * hypotenuse * cos(angle));
    q.y = (int) (p.y - scale * hypotenuse * sin(angle));
    line(img, p, q, colour, 1, LINE_AA);

    // Arrow hooks at +45 and -45 degrees off the shaft direction.
    for (int sign : {1, -1})
    {
        p.x = (int) (q.x + 9 * cos(angle + sign * CV_PI / 4));
        p.y = (int) (q.y + 9 * sin(angle + sign * CV_PI / 4));
        line(img, p, q, colour, 1, LINE_AA);
    }
    //! [visualization1]
}
// PCA-based orientation of a point set (OpenCV PCA tutorial pattern): draws the
// center and the two principal axes on img, and returns the orientation of the
// first principal component in radians.
double msopencv_comapi_getOrientation(vector<Point> &pts, Mat &img)
{
    //! [pca]
    //Construct a buffer used by the pca analysis: one row per point, columns x,y
    int sz = static_cast<int>(pts.size());
    Mat data_pts = Mat(sz, 2, CV_64F);
    for (int i = 0; i < data_pts.rows; i++)
    {
        data_pts.at<double>(i, 0) = pts[i].x;
        data_pts.at<double>(i, 1) = pts[i].y;
    }

    //Perform PCA analysis
    PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);

    //Store the center of the object (the mean of the point cloud)
    Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
                      static_cast<int>(pca_analysis.mean.at<double>(0, 1)));

    //Store the eigenvalues and eigenvectors of the two principal components
    vector<Point2d> eigen_vecs(2);
    vector<double> eigen_val(2);
    for (int i = 0; i < 2; i++)
    {
        eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                                pca_analysis.eigenvectors.at<double>(i, 1));

        eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
    }
    //! [pca]

    //! [visualization]
    // Draw the principal components; axis length is scaled by the eigenvalue.
    circle(img, cntr, 3, Scalar(255, 0, 255), 2);
    Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
    Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
    msopencv_comapi_drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
    msopencv_comapi_drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);

    double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
    //! [visualization]

    return angle;
}

// Main detection pass for one frame: find motion blobs, then test each blob's
// bounding rect against the DZWL intrusion region and the DZHX tripwire.
// Returns the LAST alarm seen: 0 = none, 1 = region intrusion, 2 = line
// crossing (3 = size-only hit when CONFIG_ENABLE_DRAW_LARGE_MINSIZE is on).
// Alarms are annotated directly onto img_cap and logged to stdout.
// NOTE(review): fitEllipse requires >= 5 contour points — a tiny contour that
// still passes the MIN_* gate would throw here; confirm the size gate covers it.
int msopencv_intrusionapi_detect(Ptr<BackgroundSubtractor> pBackSub,Mat img_background, Mat img_cap)
{
	//Mat img_out = img_cap.clone();
	int ret=0;
	int x0=0, y0=0,x1=0, y1=0;
	vector<vector<Point>> contours=msopencv_contoursapi_find(pBackSub, img_background,  img_cap);
	vector<Rect> boundRect(contours.size()); 

	// Despite the names, max_x/max_y track the MINIMUM alarm x0/y0 seen so far.
	int max_x=0;
	int max_y=0;
	int num_contours=0;
	for(int i=0; i<contours.size(); i++)
	{
		// Blob bounding box: (x0,y0) top-left, (x1,y1) bottom-right.
		boundRect[i] = boundingRect((Mat)contours[i]); 
		x0 = boundRect[i].x;  
		y0 = boundRect[i].y;
		x1 = boundRect[i].x+boundRect[i].width; 
		y1 = boundRect[i].y+boundRect[i].height; 
#if CONFIG_ENABLE_DRAW_ALL		
		msopencv_contoursapi_draw(img_cap,contours);
#endif
#if CONFIG_ENABLE_DRAW_LARGE_MINSIZE
		// Debug mode: outline every blob that merely passes the size gate.
		if(	(boundRect[i].width>MIN_WIDTH)&&(boundRect[i].height>MIN_HEIGHT)){
			msopencv_comapi_rectangle(img_cap,x0, y0,x1, y1,mscolor_green);
			ret=3;
		}
#endif
		// DZWL: blob overlaps the intrusion region -> alarm 1, draw + log.
		if(CONFIG_ENABLE_DZWL
			&&msopencv_comapi_IsTargetInRegion(DZWL_X1, DZWL_Y1, DZWL_X2, DZWL_Y2,
				 x0, y0, boundRect[i].width, boundRect[i].height,
				 MIN_WIDTH, MIN_HEIGHT, MAX_WIDTH, MAX_HEIGHT)){
			//msopencv_comapi_box( img_cap, contours[i]);
			msopencv_comapi_rectangle(img_cap,x0, y0,x1, y1,mscolor_red);
			ret=1;
#if 1
			// Timestamped on-frame annotation + console log with blob stats.
			char arlminfo[300]={0};
			time_t timep;
			time(&timep);
			char tmp[64];
			strftime(tmp,sizeof(tmp),"[%Y-%m-%d %H:%M:%S]",localtime(&timep));
			strcat(arlminfo,tmp);
			strcat(arlminfo,"DZWL");
			RotatedRect box = fitEllipse(Mat(contours[i]));
			double ddd=msopencv_comapi_getOrientation(contours[i], img_cap);
			double XXX=atan2((DZWL_Y2-DZWL_Y1), (DZWL_X2-DZWL_X1));
			msopencv_comapi_text(img_cap,arlminfo,x0 ,y0,mscolor_red);
			cout  <<arlminfo<< "-------target,width:"<<boundRect[i].width<<",height:"<<boundRect[i].height<<",getOrientation:"<<ddd<<",XXX:"<<XXX<<"\n";
#endif			
		}
		// DZHX: blob straddles the tripwire (falg_jh=1 flips it) -> alarm 2.
		if(CONFIG_ENABLE_DZHX
			&&msopencv_comapi_IsTargetCrossLine(img_cap,DZHX_X1, DZHX_Y1, DZHX_X2, DZHX_Y2,
				 x0, y0, boundRect[i].width, boundRect[i].height,
				 MIN_WIDTH, MIN_HEIGHT, MAX_WIDTH, MAX_HEIGHT,1)
		){
			//msopencv_comapi_box(img_cap, contours[i]);
			msopencv_comapi_rectangle(img_cap,x0, y0,x1, y1,mscolor_red);
			// Track the smallest alarm x0/y0 (0 means "not set yet").
			if((0==max_x)||max_x>x0){
				max_x=x0;
			}
			if((0==max_y)||max_y>y0){
				max_y=y0;
			}
			ret=2;
#if 1		
			char arlminfo[300]={0};
			time_t timep;
			time(&timep);
			char tmp[64];
			strftime(tmp,sizeof(tmp),"[%Y-%m-%d %H:%M:%S]",localtime(&timep));
			strcat(arlminfo,tmp);
			strcat(arlminfo,"DZHX");
			RotatedRect box = fitEllipse(Mat(contours[i]));
			double ddd=msopencv_comapi_getOrientation(contours[i], img_cap);
			double XXX=atan2((DZWL_Y2-DZWL_Y1), (DZWL_X2-DZWL_X1));
			msopencv_comapi_text(img_cap,arlminfo,x0 ,y0,mscolor_red);
			num_contours+=1;
			cout  <<arlminfo<<"-------target,"<<box.center<<",width:"<<boundRect[i].width<<",height:"<<boundRect[i].height<<",getOrientation:"<<ddd<<",XXX:"<<XXX<<",num_contours"<<num_contours<<"\n";
#endif
		}
	}
	return ret;
}

// Demo pipeline: load image -> grayscale -> Sobel x-gradient -> rescaled 8-bit view.
// Fix: added the missing empty-image check after imread; cvtColor would
// otherwise assert/crash on a failed load.
void msopencv_api_operatiimg( const cv::String& srcimg_up)
{
	Mat img = imread( samples::findFile(srcimg_up) );
	if( img.empty() ){
		cout << "Could not open or find the image!\n" << endl;
		return;
	}
	Mat grey;
	cvtColor(img, grey, COLOR_BGR2GRAY);
	msopencv_comapi_imshow("grey", grey);

	Mat sobelx;
	Sobel(grey, sobelx, CV_32F, 1, 0);
	msopencv_comapi_imshow("sobelx", sobelx);

	double minVal, maxVal;
	minMaxLoc(sobelx, &minVal, &maxVal);

	// Map the gradient range [minVal, maxVal] onto [0, 255] for display.
	Mat draw;
	sobelx.convertTo(draw, CV_8U, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
	msopencv_comapi_imshow("draw", draw);
}

// Blend two same-sized images: dst = alpha*src1 + (1-alpha)*src2.
// Fix: the alpha range guard used && (alpha < 0 && alpha > 1 is unsatisfiable),
// so out-of-range values were silently accepted; it now rejects alpha
// outside [0, 1] as the comment always intended.
int msopencv_api_addweighted( const cv::String& srcimg_up ,const cv::String& srcimg_low,double alpha)
{
	// We use the alpha provided by the user if it is between 0 and 1
	if( alpha < 0 || alpha > 1 ){
		return -1;
	}
	//srcimg_up and srcimg_low both have to be of the same size (width and height) and type
	Mat src1 = imread( samples::findFile(srcimg_up) );
	Mat src2 = imread( samples::findFile(srcimg_low) );
	if( src1.empty() ) { cout << "Error loading src1" << endl; return EXIT_FAILURE; }
	if( src2.empty() ) { cout << "Error loading src2" << endl; return EXIT_FAILURE; }

	double beta = ( 1.0 - alpha );
	Mat dst;
	addWeighted( src1, alpha, src2, beta, 0.0, dst);
	msopencv_comapi_imshow( "addweighted", dst );
	return 0;
}


// Per-pixel linear transform new = saturate(alpha*old + beta), shown next to the original.
// Fix: both validation guards used && and were therefore unsatisfiable
// (always false); they now reject alpha outside [1, 3] and beta outside [0, 100].
int msopencv_api_contrastAndbrightness( const cv::String& srcimg,double alpha,int beta  )
{
	if( alpha < 1.0 || alpha > 3.0 ){
		return -1;
	}
	if( beta < 0 || beta > 100 ){
		return -1;
	}
	Mat image = imread( samples::findFile( srcimg ) );
	if( image.empty() ){
		cout << "Could not open or find the image!\n" << endl;
		return -1;
	}

	// NOTE(review): Vec3b access assumes a 3-channel image; imread's default
	// color mode guarantees that here.
	Mat new_image = Mat::zeros( image.size(), image.type() );
	for( int y = 0; y < image.rows; y++ ) {
		for( int x = 0; x < image.cols; x++ ) {
			for( int c = 0; c < image.channels(); c++ ) {
				new_image.at<Vec3b>(y,x)[c] =saturate_cast<uchar>( alpha*image.at<Vec3b>(y,x)[c] + beta );
			}
		}
	}
	imshow("contrastAndbrightness,Original Image", image);
	imshow("contrastAndbrightness,New Image", new_image);
	return 0;
}


// Interactive contrast/brightness and gamma demo: loads the image into the
// anonymous-namespace globals and drives them via the trackbar callbacks above.
// Blocks in waitKey() until a key is pressed.
// Cleanup: the two Mat(rows, cols*2, type) pre-allocations were dead — hconcat
// (re)allocates its destination itself — so they have been removed.
int msopencv_api_contrastAndbrightness2( const cv::String& srcimg  )
{
	img_original = imread( samples::findFile( srcimg ) );
	if( img_original.empty() ){
		cout << "Could not open or find the image!\n" << endl;
		return -1;
	}

	// Seed both display buffers with original|original side by side.
	hconcat(img_original, img_original, img_corrected);
	hconcat(img_original, img_original, img_gamma_corrected);

	namedWindow("Brightness and contrast adjustments");
	namedWindow("Gamma correction");

	createTrackbar("Alpha gain (contrast)", "Brightness and contrast adjustments", &alpha, 500, on_linear_transform_alpha_trackbar);
	createTrackbar("Beta bias (brightness)", "Brightness and contrast adjustments", &beta, 200, on_linear_transform_beta_trackbar);
	createTrackbar("Gamma correction", "Gamma correction", &gamma_cor, 200, on_gamma_correction_trackbar);

	// Render once with the initial slider positions.
	on_linear_transform_alpha_trackbar(0, 0);
	on_gamma_correction_trackbar(0, 0);

	waitKey();

	//imwrite("linear_transform_correction.png", img_corrected);
	//imwrite("gamma_correction.png", img_gamma_corrected);
	return 0;
}

// DFT magnitude-spectrum demo (OpenCV discrete_fourier_transform tutorial):
// pad to an optimal size, run a complex DFT, show log-magnitude with the
// quadrants swapped so the origin sits at the image center.
int msopencv_api_dft( const cv::String& srcimg  )
{
	Mat I = imread( samples::findFile( srcimg ), IMREAD_GRAYSCALE);
	if( I.empty()){
		cout << "Error opening image" << endl;
		return EXIT_FAILURE;
	}
	// Pad to DFT-friendly dimensions (powers of small primes).
	Mat padded;                          
	int m = getOptimalDFTSize( I.rows );
	int n = getOptimalDFTSize( I.cols ); 
	copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
	
	Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
	Mat complexI;
	merge(planes, 2, complexI);         // Add to the expanded another plane with zeros
	dft(complexI, complexI);            // this way the result may fit in the source matrix
  	// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
	split(complexI, planes);                   // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
	magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
	Mat magI = planes[0];
	magI += Scalar::all(1);                    // switch to logarithmic scale
	log(magI, magI);

	// Crop to even dimensions so the quadrants split cleanly.
	magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
    	// rearrange the quadrants of Fourier image  so that the origin is at the image center
	int cx = magI.cols/2;
	int cy = magI.rows/2;
	Mat q0(magI, Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
	Mat q1(magI, Rect(cx, 0, cx, cy));  // Top-Right
	Mat q2(magI, Rect(0, cy, cx, cy));  // Bottom-Left
	Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
	Mat tmp;                           // swap quadrants (Top-Left with Bottom-Right)
	q0.copyTo(tmp);
	q3.copyTo(q0);
	tmp.copyTo(q3);
	q1.copyTo(tmp);                    // swap quadrant (Top-Right with Bottom-Left)
	q2.copyTo(q1);
	tmp.copyTo(q2);
	normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a
	imshow("Input Image"       , I   );    // Show the result
	imshow("spectrum magnitude", magI);
	//waitKey();
	return EXIT_SUCCESS;
}

//These write and read functions must be defined for the serialization in FileStorage to work
// FileStorage hook (found by OpenCV's persistence layer): delegates to MyData::write.
static void write(FileStorage& fs, const std::string&, const MyData& x)
{
	x.write(fs);
}
// FileStorage hook: a missing node yields the default value, otherwise parse it.
static void read(const FileNode& node, MyData& x, const MyData& default_value = MyData())
{
	if (node.empty())
		x = default_value;
	else
		x.read(node);
}
// This function will print our custom class to the console
// Stream MyData as "{ id = <id>, X = <X>, A = <A>}" (no space before the closing brace).
static ostream& operator<<(ostream& out, const MyData& m)
{
	out << "{ id = " << m.id << ", "
	    << "X = " << m.X << ", "
	    << "A = " << m.A << "}";
	return out;
}

//outputfile.yml.gz
// FileStorage round-trip demo: write ints, sequences, mappings, cv::Mat and a
// custom MyData to `filename` (YAML/XML chosen by extension), then read them back.
// Returns 0 on success, 1 on read failure.
// Fix: the read phase previously read fs["iterationNr"] BEFORE checking
// fs.isOpened(); the open check now comes first.
int msopencv_api_ymloOryaml(string filename)
{
	{ //write
		Mat R = Mat_<uchar>::eye(3, 3),T = Mat_<double>::zeros(3, 1);
		MyData m(1);
		FileStorage fs(filename, FileStorage::WRITE);// FileStorage fs; fs.open(filename, FileStorage::WRITE);
		fs << "iterationNr" << 100;
		fs << "strings" << "[";                              // text - string sequence
		fs << "image1.jpg" << "Awesomeness" << "../data/baboon.jpg";
		fs << "]";                                           // close sequence
		fs << "Mapping";                              // text - mapping
		fs << "{" << "One" << 1;
		fs <<        "Two" << 2 << "}";
		fs << "R" << R;                                      // cv::Mat
		fs << "T" << T;
		fs << "MyData" << m;                                // your own data structures
		fs.release();                                       // explicit close
		cout << "Write Done." << endl;
	}
    {//read
		cout << endl << "Reading: " << endl;
		FileStorage fs;
		fs.open(filename, FileStorage::READ);
		if (!fs.isOpened()){ 
			cerr << "Failed to open " << filename << endl;
			return 1;
		}
		int itNr;
		//fs["iterationNr"] >> itNr;
		itNr = (int) fs["iterationNr"];
		cout << itNr;
		FileNode n = fs["strings"];                         // Read string sequence - Get node
		if (n.type() != FileNode::SEQ){
			cerr << "strings is not a sequence! FAIL" << endl;
			return 1;
		}
		FileNodeIterator it = n.begin(), it_end = n.end(); // Go through the node
		for (; it != it_end; ++it){
			cout << (string)*it << endl;
		}
		n = fs["Mapping"];                                // Read mappings from a sequence
		cout << "Two  " << (int)(n["Two"]) << "; ";
		cout << "One  " << (int)(n["One"]) << endl << endl;
		MyData m;
		Mat R, T;
		fs["R"] >> R;                                      // Read cv::Mat
		fs["T"] >> T;
		fs["MyData"] >> m;                                 // Read your own structure_
		cout << endl
				<< "R = " << R << endl;
		cout << "T = " << T << endl << endl;
		cout << "MyData = " << endl << m << endl << endl;
		//Show default behavior for non existing nodes
		cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
		fs["NonExisting"] >> m;
		cout << endl << "NonExisting = " << endl << m << endl;
	}
		cout << endl
			<< "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;
	return 0;
}






int msopencv_api_draw( void ){
	// Draw two demo canvases: an "atom" built from rotated ellipses plus a
	// centre circle, and a "rook" built from a polygon, a red base rectangle
	// and separator lines. Both are shown in their own windows.
	// Always returns 0. Uses the file-global canvas size `w`.
	char atom_window[] = "Drawing 1: Atom";
	char rook_window[] = "Drawing 2: Rook";

	Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
	Mat rook_image = Mat::zeros( w, w, CV_8UC3 );

	// Atom: four ellipses at different rotation angles around the centre.
	const int angles[4] = { 90, 0, 45, -45 };
	for ( int angle : angles ){
		msopencv_api_ellipse( atom_image, angle );
	}
	msopencv_api_circle( atom_image, Point( w/2, w/2 ) );

	// Rook: body polygon, red base band, then the separating lines.
	msopencv_api_poly( rook_image );
	msopencv_comapi_rectangle( rook_image, 0, 7*w/8, w, w, mscolor_red );
	msopencv_comapi_line( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
	msopencv_comapi_line( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
	msopencv_comapi_line( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
	msopencv_comapi_line( rook_image, Point( 3*w/4, 7*w/8 ), Point( 3*w/4, w ) );

	imshow( atom_window, atom_image );
	moveWindow( atom_window, 0, 200 );
	imshow( rook_window, rook_image );
	moveWindow( rook_window, w, 200 );
	return 0;
}

int msopencv_api_randomdraw( void )
{
	// Random-drawing demo: 100 random coloured lines on a black canvas,
	// followed by a fade-to-dark banner animation. Each stage waits for a
	// key press before continuing. Always returns 0.
	const int window_width = 900;
	const int window_height = 600;
	// Endpoint bounds extend half a window beyond each edge so some lines
	// cross the borders.
	int x_1 = -window_width/2;
	int x_2 = window_width*3/2;
	// Fix: the vertical bounds were computed from window_width; the y range
	// must come from the window height.
	int y_1 = -window_height/2;
	int y_2 = window_height*3/2;
	// Start creating a window
	char window_name[] = "randomdraw";

	// Also create a random object (RNG); fixed seed keeps runs reproducible.
	RNG rng( 0xFFFFFFFF );

	/// Initialize a matrix filled with zeros
	Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
	/// Show it in a window and wait for a key press
	imshow( window_name, image );
	waitKey(0);
	/// Now, let's draw some lines
	Point pt1, pt2;
	for( int i = 0; i < 100; i++ ) {
		pt1.x = rng.uniform( x_1, x_2 );
		pt1.y = rng.uniform( y_1, y_2 );
		pt2.x = rng.uniform( x_1, x_2 );
		pt2.y = rng.uniform( y_1, y_2 );
		// Unpack one random 32-bit value into a BGR colour.
		int icolor = (unsigned) rng;
		line( image, pt1, pt2, Scalar( icolor&255, (icolor>>8)&255, (icolor>>16)&255 ), rng.uniform(1, 10), 8 );
		imshow( window_name, image );
	}
	waitKey(0);

	/// Displaying the big end!
	Size textsize = getTextSize("Mscore forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
	Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
	int lineType = 8;

	Mat image2;

	// Darken the background progressively while brightening the text colour.
	for( int i = 0; i < 255; i += 2 )
	{
		image2 = image - Scalar::all(i);
		putText( image2, "Mscore forever!", org, FONT_HERSHEY_COMPLEX, 3,
			Scalar(i, i, 255), 5, lineType );
		imshow( window_name, image2 );
	}
	waitKey(0);
	return 0;
}


int display_dst( Mat dst,int delay )
{
	// Show `dst` in the shared "Smoothing Demo" window and wait up to
	// `delay` ms. Returns -1 if the user pressed a key (caller should
	// abort the demo), 0 otherwise.
	imshow( "Smoothing Demo", dst );
	const int key = waitKey( delay );
	return ( key >= 0 ) ? -1 : 0;
}
int display_caption( Mat src,Mat dst,const char* caption )
{
	// Render `caption` in white on a black canvas sized like `src` and show
	// it for 1.5 s via display_dst(). `dst` is taken by value, so the
	// reassignment below stays local to this call. Returns display_dst()'s
	// result (-1 on key press, 0 otherwise).
	dst = Mat::zeros( src.size(), src.type() );
	const Point anchor( src.cols/4, src.rows/2 );
	putText( dst, caption, anchor,
		FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
	return display_dst( dst, 1500 );
}
int msopencv_api_smoothingImages ( const cv::String& srcimg  )
{
	// Demonstrate the four classic smoothing filters (box, Gaussian, median,
	// bilateral) on `srcimg`, sweeping the kernel size through odd values.
	// A key press at any point ends the demo early. Returns 0 normally and
	// EXIT_FAILURE when the image cannot be loaded.
	const int kMaxKernel = 31;     // exclusive upper bound for the kernel sweep
	const int kBlurDelayMs = 100;  // per-step display time

	namedWindow( "Smoothing Demo", WINDOW_AUTOSIZE );

	Mat src = imread( samples::findFile( srcimg ), IMREAD_COLOR );
	if ( src.empty() ){
		printf(" Error opening image\n");
		return EXIT_FAILURE;
	}
	Mat dst = src.clone();

	// Show the untouched source first.
	if ( display_caption( src, dst, "Original Image" ) != 0 ) return 0;
	if ( display_dst( dst, 1500 ) != 0 ) return 0;

	// Homogeneous (normalized box) blur.
	if ( display_caption( src, dst, "Homogeneous Blur" ) != 0 ) return 0;
	for ( int k = 1; k < kMaxKernel; k += 2 ){
		blur( src, dst, Size( k, k ), Point(-1,-1) );
		if ( display_dst( dst, kBlurDelayMs ) != 0 ) return 0;
	}

	// Gaussian blur (sigma derived from the kernel size).
	if ( display_caption( src, dst, "Gaussian Blur" ) != 0 ) return 0;
	for ( int k = 1; k < kMaxKernel; k += 2 ){
		GaussianBlur( src, dst, Size( k, k ), 0, 0 );
		if ( display_dst( dst, kBlurDelayMs ) != 0 ) return 0;
	}

	// Median blur (good against salt-and-pepper noise).
	if ( display_caption( src, dst, "Median Blur" ) != 0 ) return 0;
	for ( int k = 1; k < kMaxKernel; k += 2 ){
		medianBlur( src, dst, k );
		if ( display_dst( dst, kBlurDelayMs ) != 0 ) return 0;
	}

	// Bilateral filter (edge-preserving smoothing).
	if ( display_caption( src, dst, "Bilateral Blur" ) != 0 ) return 0;
	for ( int k = 1; k < kMaxKernel; k += 2 ){
		bilateralFilter( src, dst, k, k*2, k/2 );
		if ( display_dst( dst, kBlurDelayMs ) != 0 ) return 0;
	}

	display_caption( src, dst, "Done!" );
	return 0;
}



void msopencv_api_Erosion( int, void*  )
{
	int erosion_type = 0;
	if( erosion_elem == 0 ){ erosion_type = MORPH_RECT; }
	else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
	else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
	Mat element = getStructuringElement( erosion_type,Size( 2*erosion_size + 1, 2*erosion_size+1 ),Point( erosion_size, erosion_size ) );
	Mat erosion_dst;
	erode( src, erosion_dst, element );
	imshow( "Erosion Demo", erosion_dst );
}
void msopencv_api_Dilation( int, void*  )
{
	int dilation_type = 0;
	if( dilation_elem == 0 ){ dilation_type = MORPH_RECT; }
	else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; }
	else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; }
	Mat element = getStructuringElement( dilation_type,Size( 2*dilation_size + 1, 2*dilation_size+1 ),Point( dilation_size, dilation_size ) );
	Mat dilation_dst;
	dilate( src, dilation_dst, element );
	imshow( "Dilation Demo", dilation_dst );
}
int msopencv_api_DilationAndErosion( const cv::String& srcimg  )
{
	// Interactive erosion/dilation playground: loads `srcimg` into the
	// file-global `src`, opens one window per operation and wires trackbars
	// (element shape + kernel size) to the corresponding callbacks. Blocks
	// until a key is pressed. Returns 0 on success, -1 if the image cannot
	// be loaded.
	src = imread( samples::findFile( srcimg ), IMREAD_COLOR );
	if( src.empty() ){
		cout << "Could not open or find the image!\n" << endl;
		return -1;
	}

	namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
	namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
	// Place the dilation window right of the erosion one.
	moveWindow( "Dilation Demo", src.cols, 0 );

	const int max_elem = 2;          // 0: rect, 1: cross, 2: ellipse
	const int max_kernel_size = 21;  // slider value n -> kernel 2n+1

	createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
			&erosion_elem, max_elem, msopencv_api_Erosion );
	createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo",
			&erosion_size, max_kernel_size, msopencv_api_Erosion );
	createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo",
			&dilation_elem, max_elem, msopencv_api_Dilation );
	createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo",
			&dilation_size, max_kernel_size, msopencv_api_Dilation );

	// Render once with the initial slider values.
	msopencv_api_Erosion( 0, 0 );
	msopencv_api_Dilation( 0, 0 );
	waitKey(0);
	return 0;
}



int msopencv_api_meanshift( string filename )
{
	// Mean-shift tracking demo: build a hue histogram of a hard-coded ROI in
	// the first frame, then track it through the video by back-projection +
	// meanShift. Press 'q' or ESC to stop.
	// Returns 0 on setup failure, 1 otherwise (kept for caller compatibility).
	VideoCapture capture(filename);
	if (!capture.isOpened()){
		cerr << "Unable to open file!" << endl;
		return 0;
	}
	Mat frame, roi, hsv_roi, mask;
	// take first frame of the video
	capture >> frame;
	// Fix: guard against an unreadable stream — frame(track_window) on an
	// empty Mat would fail below.
	if (frame.empty()){
		cerr << "Unable to read first frame!" << endl;
		return 0;
	}
	// setup initial location of window
	Rect track_window(300, 200, 100, 50); // simply hardcoded the values
	// set up the ROI for tracking: hue histogram, masking out pixels whose
	// low saturation/value makes the hue unreliable
	roi = frame(track_window);
	cvtColor(roi, hsv_roi, COLOR_BGR2HSV);
	inRange(hsv_roi, Scalar(0, 60, 32), Scalar(180, 255, 255), mask);
	float range_[] = {0, 180};
	const float* range[] = {range_};
	Mat roi_hist;
	int histSize[] = {180};
	int channels[] = {0};
	calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
	normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
	// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
	TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
	while(true){
		Mat hsv, dst;
		capture >> frame;
		if (frame.empty())
			break;
		cvtColor(frame, hsv, COLOR_BGR2HSV);
		// back-project the ROI histogram and let mean-shift relocate the window
		calcBackProject(&hsv, 1, channels, roi_hist, dst, range);
		meanShift(dst, track_window, term_crit);
		// Draw the tracked window on the frame
		rectangle(frame, track_window, 255, 2);
		imshow("img2", frame);
		int keyboard = waitKey(30);
		if (keyboard == 'q' || keyboard == 27)
			break;
	}
	return 1;
}
int msopencv_api_camshift( string filename )
{
	// CAMShift tracking demo: like mean-shift, but the window adapts in size
	// and orientation; the resulting rotated rectangle is drawn edge by edge.
	// Press 'q' or ESC to stop.
	// Returns 0 on setup failure, 1 otherwise (kept for caller compatibility).
	VideoCapture capture(filename);
	if (!capture.isOpened()){
		//error in opening the video input
		cerr << "Unable to open file!" << endl;
		return 0;
	}
	Mat frame, roi, hsv_roi, mask;
	// take first frame of the video
	capture >> frame;
	// Fix: guard against an unreadable stream — frame(track_window) on an
	// empty Mat would fail below.
	if (frame.empty()){
		cerr << "Unable to read first frame!" << endl;
		return 0;
	}
	// setup initial location of window
	//Rect track_window(300, 200, 100, 50); // simply hardcoded the values
	Rect track_window(300, 100, 100, 50); // simply hardcoded the values
	// set up the ROI for tracking: hue histogram, masking out pixels whose
	// low saturation/value makes the hue unreliable
	roi = frame(track_window);
	cvtColor(roi, hsv_roi, COLOR_BGR2HSV);
	inRange(hsv_roi, Scalar(0, 60, 32), Scalar(180, 255, 255), mask);
	float range_[] = {0, 180};
	const float* range[] = {range_};
	Mat roi_hist;
	int histSize[] = {180};
	int channels[] = {0};
	calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
	normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
	// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
	TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
	while(true){
		Mat hsv, dst;
		capture >> frame;
		if (frame.empty()){
			break;
		}
		cvtColor(frame, hsv, COLOR_BGR2HSV);
		calcBackProject(&hsv, 1, channels, roi_hist, dst, range);
		// apply camshift to get the new (rotated, resized) location
		RotatedRect rot_rect = CamShift(dst, track_window, term_crit);
		// Draw the rotated rectangle edge by edge
		Point2f points[4];
		rot_rect.points(points);
		for (int i = 0; i < 4; i++){
			line(frame, points[i], points[(i+1)%4], 255, 2);
		}
		imshow("img2", frame);
		int keyboard = waitKey(30);
		if (keyboard == 'q' || keyboard == 27){
			break;
		}
	}
	return 1;
}
void msopencv_api_opticalflow (Mat frame  ,Mat mask,Mat bg_gray,vector<Scalar> colors,
	vector<Point2f> p0,vector<Point2f> p1)
{
	// Create a mask image for drawing purposes
	Mat frame_gray;
	cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
	// calculate optical flow
	vector<uchar> status;
	vector<float> err;
	TermCriteria criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
	calcOpticalFlowPyrLK(bg_gray, frame_gray, p0, p1, status, err, Size(15,15), 2, criteria);
	vector<Point2f> good_new;
	for(uint i = 0; i < p0.size(); i++){
		// Select good points
		if(status[i] == 1) {
			good_new.push_back(p1[i]);
			// draw the tracks
			line(mask,p1[i], p0[i], colors[i], 2);
			circle(frame, p1[i], 5, colors[i], -1);
		}
	}
	Mat img;
	add(frame, mask, img);
	imshow("img", img);
	// Now update the previous frame and previous points
	bg_gray = frame_gray.clone();
	p0 = good_new;
}

// Sparse Lucas-Kanade optical-flow demo: track Shi-Tomasi corners through a
// video stream and draw their trails. Two variants are kept here behind the
// preprocessor: the disabled (#if 0) branch delegates the per-frame work to
// msopencv_api_opticalflow(); the active (#else) branch inlines the same
// steps in the loop. Features are re-detected and the trail overlay cleared
// every 26 frames. Returns 0 when the input cannot be opened, 1 otherwise.
int msopencv_api_opticalflow_sample ( string filename )
#if 0
{
	VideoCapture capture(filename);
	if (!capture.isOpened()){
		cerr << "Unable to open file!" << endl;
		return 0;
	}
	// Create some random colors (one per tracked feature, up to 100)
	vector<Scalar> colors;
	RNG rng;
	for(int i = 0; i < 100; i++){
		int r = rng.uniform(0, 256);
		int g = rng.uniform(0, 256);
		int b = rng.uniform(0, 256);
		colors.push_back(Scalar(r,g,b));
	}
	Mat old_frame, old_gray;
	vector<Point2f> p0, p1;
	// Take first frame and find corners in it
	capture >> old_frame;
	cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
	imshow("old_gray", old_gray);
	goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
	imshow("goodFeaturesToTrack", old_gray);
	// Create a mask image for drawing purposes (trail overlay)
	Mat mask = Mat::zeros(old_frame.size(), old_frame.type());
	int num=0;
	while(true){
		Mat frame, frame_gray;
		capture >> frame;
		if (frame.empty()){
			break;
		}
		// Every 26 frames: re-detect corners and clear the trail overlay
		if(0==num){
			cvtColor(frame, old_gray, COLOR_BGR2GRAY);
			goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
			mask = Mat::zeros(frame.size(), frame.type());
		}
		// Per-frame tracking + drawing is delegated to the helper
		msopencv_api_opticalflow (frame,mask ,old_gray , colors,p0,p1);
		if(num++>25){
			num=0;
		}
	}
	return 1;
}
#else
{
	VideoCapture capture(filename);
	if (!capture.isOpened()){
		cerr << "Unable to open file!" << endl;
		return 0;
	}
	// Create some random colors (one per tracked feature, up to 100)
	vector<Scalar> colors;
	RNG rng;
	for(int i = 0; i < 100; i++){
		int r = rng.uniform(0, 256);
		int g = rng.uniform(0, 256);
		int b = rng.uniform(0, 256);
		colors.push_back(Scalar(r,g,b));
	}
	Mat old_frame, old_gray;
	vector<Point2f> p0, p1;
	// Take first frame and find corners in it
	capture >> old_frame;
	cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
	imshow("old_gray", old_gray);
	goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
	imshow("goodFeaturesToTrack", old_gray);
	// Create a mask image for drawing purposes (trail overlay)
	Mat mask = Mat::zeros(old_frame.size(), old_frame.type());
	int num=0;
	while(true){
		Mat frame, frame_gray;
		capture >> frame;
		if (frame.empty()){
			break;
		}
		// Every 26 frames: re-detect corners and clear the trail overlay
		if(0==num){
			cvtColor(frame, old_gray, COLOR_BGR2GRAY);
			goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
			mask = Mat::zeros(old_frame.size(), old_frame.type());
		}
		if(num++>25){
			num=0;
		}
		cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
		// calculate optical flow from the previous frame to this one
		vector<uchar> status;
		vector<float> err;
		TermCriteria criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
		calcOpticalFlowPyrLK(old_gray, frame_gray, p0, p1, status, err, Size(15,15), 2, criteria);
		vector<Point2f> good_new;
		for(uint i = 0; i < p0.size(); i++){
			// Select good points (status==1 means the tracker followed the point)
			if(status[i] == 1) {
				good_new.push_back(p1[i]);
				// draw the track segment and the current position
				line(mask,p1[i], p0[i], colors[i], 2);
				circle(frame, p1[i], 5, colors[i], -1);
			}
		}
		// Overlay the accumulated trails on the current frame
		Mat img;
		add(frame, mask, img);
		imshow("Frame", img);
		int keyboard = waitKey(30);
		if (keyboard == 'q' || keyboard == 27){
			break;
		}
		// Now update the previous frame and previous points
		old_gray = frame_gray.clone();
		p0 = good_new;
	}
	return 1;
}
#endif

int msopencv_api_Denseopticalflow ( string filename )
{
	// Dense optical flow (Farneback) demo: computes per-pixel flow between
	// consecutive frames and visualises it in HSV space (hue = direction,
	// value = normalised magnitude). Press 'q' or ESC to stop.
	// Returns 0 on setup failure, 1 otherwise (kept for caller compatibility).
	VideoCapture capture(filename);
	if (!capture.isOpened()){
		//error in opening the video input
		cerr << "Unable to open file!" << endl;
		return 0;
	}
	Mat frame1, prvs;
	capture >> frame1;
	// Fix: guard against an unreadable stream — cvtColor on an empty Mat
	// would fail below.
	if (frame1.empty()){
		cerr << "Unable to read first frame!" << endl;
		return 0;
	}
	cvtColor(frame1, prvs, COLOR_BGR2GRAY);
	while(true){
		Mat frame2, next;
		capture >> frame2;
		if (frame2.empty())
			break;
		cvtColor(frame2, next, COLOR_BGR2GRAY);
		Mat flow(prvs.size(), CV_32FC2);
		calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
		// visualization: split into dx/dy, convert to polar coordinates
		Mat flow_parts[2];
		split(flow, flow_parts);
		Mat magnitude, angle, magn_norm;
		cartToPolar(flow_parts[0], flow_parts[1], magnitude, angle, true);
		normalize(magnitude, magn_norm, 0.0f, 1.0f, NORM_MINMAX);
		// Scale so that after the *255 conversion below the hue ends up in
		// OpenCV's 8-bit hue range [0,180): angle(deg) * 180/360.
		angle *= ((1.f / 360.f) * (180.f / 255.f));
		// build hsv image: H = direction, S = 1, V = normalised magnitude
		Mat _hsv[3], hsv, hsv8, bgr;
		_hsv[0] = angle;
		_hsv[1] = Mat::ones(angle.size(), CV_32F);
		_hsv[2] = magn_norm;
		merge(_hsv, 3, hsv);
		hsv.convertTo(hsv8, CV_8U, 255.0);
		cvtColor(hsv8, bgr, COLOR_HSV2BGR);
		imshow("frame2", bgr);
		int keyboard = waitKey(30);
		if (keyboard == 'q' || keyboard == 27)
			break;
		prvs = next;
	}
	return 1;
}

// Run pedestrian detection on a video stream. Detection + annotation happen
// on every 3rd frame only (the other frames are displayed unmodified) to
// keep the display loop responsive. ESC quits.
// NOTE: the three cascade-name parameters are currently unused here — the
// cascades appear to be loaded globally elsewhere; the parameters are kept
// for interface compatibility.
int msopencv_api_HaarcascadeDetection(  string filename,String fullbody_cascade_name ,  String lowerbody_cascade_name,  String uperbody_cascade_name )
{
	VideoCapture capture;
	//-- 2. Read the video stream
	capture.open( filename );
	if ( ! capture.isOpened() )
	{
		cout << "--(!)Error opening video capture\n";
		return -1;
	}

	Mat frame;
	int img_cap_num=0;
	while ( capture.read(frame) )
	{
		if( frame.empty() )
		{
			cout << "--(!) No captured frame -- Break!\n";
			break;
		}
		//-- 3. Apply the classifier to every 3rd frame.
		if(0==(img_cap_num%3)){
			msopencv_pepleapi_detect(frame);
			frame=msopencv_pepleapi_draw(frame);
		}
		// Fix: the counter was never incremented, so the %3 frame-skip was
		// inert and detection ran on every single frame.
		img_cap_num = (img_cap_num + 1) % 3;

		imshow( "Capture - Face detection", frame );

		if( waitKey(10) == 27 )
		{
			break; // escape
		}
	}
	return 0;
}



int msopencv_api_loadcaffe(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);

    const std::string modelName = parser.get<String>("@alias");
    const std::string zooFile = parser.get<String>("zoo");

    keys += genPreprocArguments(modelName, zooFile);

    parser = CommandLineParser(argc, argv, keys);
    parser.about("Use this script to run classification deep learning networks using OpenCV.");
    if (argc == 1 || parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    float scale = parser.get<float>("scale");
    Scalar mean = parser.get<Scalar>("mean");
    bool swapRB = parser.get<bool>("rgb");
    int inpWidth = parser.get<int>("width");
    int inpHeight = parser.get<int>("height");
    String model = findFile(parser.get<String>("model"));
    String config = findFile(parser.get<String>("config"));
    String framework = parser.get<String>("framework");
    int backendId = parser.get<int>("backend");
    int targetId = parser.get<int>("target");

    // Open file with classes names.
    if (parser.has("classes"))
    {
        std::string file = parser.get<String>("classes");
        std::ifstream ifs(file.c_str());
        if (!ifs.is_open())
            CV_Error(Error::StsError, "File " + file + " not found");
        std::string line;
        while (std::getline(ifs, line))
        {
            classes.push_back(line);
        }
    }

    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }
    CV_Assert(!model.empty());

    //! [Read and initialize network]
    Net net = readNet(model, config, framework);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);
    //! [Read and initialize network]

    // Create a window
    static const std::string kWinName = "Deep learning image classification in OpenCV";
    namedWindow(kWinName, WINDOW_NORMAL);

    //! [Open a video file or an image file or a camera stream]
    VideoCapture cap;
    if (parser.has("input"))
        cap.open(parser.get<String>("input"));
    else
        cap.open(0);
    //! [Open a video file or an image file or a camera stream]

    // Process frames.
    Mat frame, blob;
    while (waitKey(1) < 0)
    {
        cap >> frame;
        if (frame.empty())
        {
            waitKey();
            break;
        }

        //! [Create a 4D blob from a frame]
        blobFromImage(frame, blob, scale, Size(inpWidth, inpHeight), mean, swapRB, false);
        //! [Create a 4D blob from a frame]

        //! [Set input blob]
        net.setInput(blob);
        //! [Set input blob]
        //! [Make forward pass]
        Mat prob = net.forward();
        //! [Make forward pass]

        //! [Get a class with a highest score]
        Point classIdPoint;
        double confidence;
        minMaxLoc(prob.reshape(1, 1), 0, &confidence, 0, &classIdPoint);
        int classId = classIdPoint.x;
        //! [Get a class with a highest score]

        // Put efficiency information.
        std::vector<double> layersTimes;
        double freq = getTickFrequency() / 1000;
        double t = net.getPerfProfile(layersTimes) / freq;
        std::string label = format("Inference time: %.2f ms", t);
        putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));

        // Print predicted class.
        label = format("%s: %.4f", (classes.empty() ? format("Class #%d", classId).c_str() :
                                                      classes[classId].c_str()),
                                   confidence);
        putText(frame, label, Point(0, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));

        imshow(kWinName, frame);
    }
    return 0;
}

int main(int argc, char** argv)
{	 
	if( !fullbody_cascade.load( "../opencv/data/haarcascades/haarcascade_fullbody.xml" ) ){
		cout << "--(!)Error loading face cascade\n";
		return -1;
	};
	if( !lowerbody_cascade.load(  "../opencv/data/haarcascades/haarcascade_lowerbody.xml" ) ){
		cout << "--(!)Error loading eyes cascade\n";
		return -1;
	};
	if( !upbody_cascade.load( "../opencv/data/haarcascades/haarcascade_upperbody.xml" ) ){
		cout << "--(!)Error loading eyes cascade\n";
		return -1;
	};
	//msopencv_api_operatiimg( "../opencv/samples/data/LinuxLogo.jpg");
	//msopencv_api_addweighted("../opencv/samples/data/LinuxLogo.jpg","../opencv/samples/data/WindowsLogo.jpg",0.8);
	//msopencv_api_contrastAndbrightness("../opencv/samples/data/lena.jpg",2.0,40);
	//msopencv_api_contrastAndbrightness2("../opencv/samples/data/lena.jpg");
	//msopencv_api_dft("../opencv/samples/data/lena.jpg");
	//msopencv_api_ymloOryaml("outputfile.yml.gz");
	//msopencv_api_draw();
	//msopencv_api_randomdraw();
	//msopencv_api_smoothingImages("../opencv/samples/data/lena.jpg");
	//msopencv_api_DilationAndErosion("../opencv/samples/data/lena.jpg");
	//msopencv_api_background("../opencv/samples/data/vtest.avi", "KNN");
	//msopencv_api_meanshift("download/slow_traffic_small.mp4");
	//msopencv_api_camshift("download/slow_traffic_small.mp4");
	//msopencv_api_opticalflow_sample("download/slow_traffic_small.mp4");
	//msopencv_api_Denseopticalflow("download/slow_traffic_small.mp4");
	//msopencv_api_HaarcascadeDetection( "../opencv/samples/data/vtest.avi","../opencv/data/haarcascades/haarcascade_fullbody.xml" ,  "../opencv/data/haarcascades/haarcascade_lowerbody.xml" ,  "../opencv/data/haarcascades/haarcascade_upperbody.xml" );
	//msopencv_api_loadcaffe( argc, argv);
	//full("rtsp://admin:dx123456@192.168.1.237:554/media/video1");
	//msopencv_api_opticalflow_sample("rtsp://admin:dx123456@192.168.1.237:554/media/video1");
	VideoCapture cap;
	 cap.open( CONFIG_URL );
	// cap.open( 0 );
	if(!cap.isOpened()){
		return -1;
	}
	Mat img_cap;
	Mat img_out;
	Mat img_background;
	int img_cap_num=0;
//MOG2,KNN
	//Ptr<BackgroundSubtractor> pBackSub=msopencv_api_background_init("MOG2");
	//Ptr<BackgroundSubtractor> pBackSub=msopencv_api_background_init("KNN");
	Ptr<BackgroundSubtractor> pBackSub=NULL;
	char tmp_alarm_dzwl[64];
	char tmp_alarm_dzhx[64];
	char tmp_time[64];

	int framenum_updatebackground=25;
	int framenum_percheck=15;
#if (CONFIG_ENABLE_DZHX &&CONFIG_ENABLE_DZHX_CHECKLINE)
	framenum_percheck=5;
#endif

#if (CONFIG_ENABLE_DZHX &&CONFIG_ENABLE_DZHX_CHECKFANGXIANG)
	framenum_percheck=5;
#endif
	while(1){
		cap>>img_cap;
		if(!img_cap.empty()){
#if 1	
			//double XXX=atan2(-1,-1);
			//cout  <<",XXX:"<<XXX<<"\n";
			int ret=0;
			if(0==(img_cap_num%framenum_updatebackground)){
				img_background = img_cap.clone();
			}
			if(0==(img_cap_num%framenum_percheck)){
				img_out= img_cap.clone();
				ret=msopencv_intrusionapi_detect(pBackSub,img_background, img_out);
			}
			if(img_cap_num++>3000){
				img_cap_num=0;
			}
		#if CONFIG_DEBUG_IMG	
			msopencv_comapi_imshow("img_cap", img_cap);
			moveWindow("img_cap",0,CONFIG_DEBUG_IMG_Y);
		#endif
		if(CONFIG_ENABLE_SHOW_ALLFRAME||0!=ret){
			time_t timep;
			time(&timep);
				if(1==ret){
					strftime(tmp_alarm_dzwl,sizeof(tmp_alarm_dzwl),"dzwl,%Y-%m-%d %H:%M:%S",localtime(&timep));
				}else if(2==ret){
					strftime(tmp_alarm_dzhx,sizeof(tmp_alarm_dzhx),"dzhx,%Y-%m-%d %H:%M:%S",localtime(&timep));
				}	
		//draw	
				if(CONFIG_ENABLE_DZWL){
					msopencv_comapi_rectangle(img_out, DZWL_X1,DZWL_Y1,DZWL_X2,DZWL_Y2,mscolor_blue) ;
				}
				if(CONFIG_ENABLE_DZHX){
					circle(img_out, Point(DZHX_X1,DZHX_Y1), 5, mscolor_red, -1, 8);
					circle(img_out, Point(DZHX_X2,DZHX_Y2), 5, mscolor_red, -1, 8);
					msopencv_comapi_line(img_out, DZHX_X1,DZHX_Y2,DZHX_X2,DZHX_Y1,mscolor_blue) ;
					//msopencv_comapi_rectangle(img_cap, DZHX_X1,DZHX_Y1,DZHX_X2,DZHX_Y2,mscolor_blue) ;
				}
			#if CONFIG_ENABLE_SHOW_NFO
		//frame_num		
	        			stringstream ss;
	        			ss << cap.get(CAP_PROP_POS_FRAMES);
	        			string frameNumberString = ss.str();
				msopencv_comapi_text(img_out,"frame_num:",30,50,mscolor_green);
				msopencv_comapi_text(img_out,frameNumberString.c_str(),220,50,mscolor_green);
		//min_size		
				msopencv_comapi_text(img_out,"min_size:",30,100,mscolor_green);
				msopencv_comapi_rectangle(img_out, 200,70,200+MIN_WIDTH,70+MIN_HEIGHT,mscolor_green) ;
		//detect	
				msopencv_comapi_text(img_out,"detect:",30,140,mscolor_green);
				msopencv_comapi_text(img_out,tmp_alarm_dzwl,150,140,mscolor_red);
				msopencv_comapi_text(img_out,"detect:",30,200,mscolor_green);
				msopencv_comapi_text(img_out,tmp_alarm_dzhx,150,200,mscolor_red);
		//time	
				strftime(tmp_time,sizeof(tmp_time),"%Y-%m-%d %H:%M:%S",localtime(&timep));
				msopencv_comapi_text(img_out,"time:",30,260,mscolor_green);
				msopencv_comapi_text(img_out,tmp_time,150,260,mscolor_green);
			#endif
				msopencv_comapi_imshow("img_out", img_out);
			#if CONFIG_DEBUG_IMG	
				msopencv_comapi_imshow("img_cap", img_cap);
				moveWindow("img_cap",0,CONFIG_DEBUG_IMG_Y);
				moveWindow("img_out",0,CONFIG_DEBUG_IMG_Y*2);
			#endif
			}
#endif	
			if(waitKey(10)==27){
				break;
			}
		}
	}
	cap.release();  
}

