#pragma once
#include "main.hpp"
#include "Aitech.hpp"

#define BOXSIZE(i) (max(i.w,i.h))
#define UL_CONER(i) (Point(i.x,i.y))
#define DL_CONER(i) (Point(i.x,i.y+i.h))
#define UR_CONER(i) (Point(i.x+i.w,i.y))
#define DR_CONER(i) (Point(i.x+i.w,i.y+i.h))
#define GET_DISTANCE(i,q) (sqrt(powf((i.x-q.x),2)+powf((i.y-q.y),2)))
#define GET_CENTER(i) (Point(i.x+i.w/2,i.y+i.h/2))

using namespace asio;

// Object-class ids produced by the detector. Values start at 9 to
// match the trained network's class indices.
enum objs
{
	EXACTBOXID   = 9,   // box seen straight-on
	LEFTBOXID    = 10,  // box seen from the left
	RIGHTBOXID   = 11,  // box seen from the right
	BLUE_BALLID  = 12,  // blue ball
	RED_BALLID   = 13,  // red ball
	L_LANDMARKID = 14,  // left landmark
	R_LANDMARKID = 15,  // right landmark
	E_LANDMARKID = 16,  // end/exact landmark
	CORNERID     = 17   // box corner
};

// A 2-D line segment with cached angle/length, used to merge the
// near-duplicate segments that HoughLinesP produces.
class Line_t
{
public:
	// Two segments whose matching endpoints are each closer than this
	// (pixels) are considered the same physical line.
	constexpr static float thresh = 50;
	Point one;       // first endpoint
	Point another;   // second endpoint
	float theta;     // atan2(one.y - another.y, one.x - another.x), radians
	float length;    // Euclidean length of the segment
	// Set by isRelatedTo() on the COMPARED line: true when its endpoint
	// order matches this line's, false when the endpoints are swapped.
	bool relativity = true;

	// Euclidean distance between two points.
	static float getDis(const Point pt1, const Point pt2)
	{
		// dx*dx is cheaper and no less accurate than powf(dx, 2).
		const float dx = pt1.x - pt2.x;
		const float dy = pt1.y - pt2.y;
		return sqrt(dx * dx + dy * dy);
	}

	// Draw this segment onto m as a 2px red line (BGR 0,0,255).
	void drawOn(Mat& m)
	{
		line(m, one, another, Scalar(0, 0, 255), 2);
	}

	// Returns true when l2 is close enough to this line to be treated as
	// the same line. Side effect: records in l2.relativity whether l2's
	// endpoints are ordered the same way as this line's.
	bool isRelatedTo(Line_t& l2)
	{
		if (getDis(one, l2.one) < thresh && getDis(another, l2.another) < thresh) {
			l2.relativity = true;   // same endpoint order
			return true;
		}
		if (getDis(one, l2.another) < thresh && getDis(another, l2.one) < thresh) {
			l2.relativity = false;  // opposite endpoint order
			return true;
		}
		return false;
	}

	// Build from an OpenCV HoughLinesP result: (x1, y1, x2, y2).
	Line_t(Vec4i li)
	{
		one = Point(li[0], li[1]);
		another = Point(li[2], li[3]);
		theta = atan2((one.y - another.y), (one.x - another.x));
		length = getDis(one, another);
	}

	// Build from raw coordinates. NOTE: despite the names, (x1, x2) is
	// the first point and (x3, x4) the second — kept for compatibility.
	Line_t(float x1, float x2, float x3, float x4)
	{
		one = Point((int)x1, (int)x2);
		another = Point((int)x3, (int)x4);
		theta = atan2((one.y - another.y), (one.x - another.x));
		// Length from the un-truncated inputs, matching the original.
		const float dx = x1 - x3;
		const float dy = x2 - x4;
		length = sqrt(dx * dx + dy * dy);
	}

	// Degenerate zero-length segment at the origin.
	Line_t()
	{
		theta = 0;
		length = 0;
	}
};

// Camera pipeline: grabs frames, undistorts them, runs the darknet
// detector, and exposes classified targets (boxes, balls, landmarks).
class Vision
{
public:
	bool newPic = false;  // set when a new processed frame is ready (async handshake)
	bool newRaw = false;  // set when a new raw frame has been fed in
	bbox_t targetFrame = {0}, targetBall = {0}, targetLandmark = {0};
	vector<bbox_t> targetLandmarks;
	int targetFrameOrint = 0;      // orientation of detected frame: >0 means right, <0 means left
	int targetLandmarkTheta = 90;  // landmark heading, degrees
	std::vector<bbox_t> targetCorners;  // biggest corners sorted by size
	cv::Mat undistort;             // rectified picture
	int HEIGHT;
	int WIDTH;                     // size of picture
private:
	cv::Mat raw;             // raw image from anywhere
	cv::Mat mapX;            // references for anti-distort remapping
	cv::Mat mapY;            // references for anti-distort remapping
	cv::Mat undistortASYNC;  // for async usage
	cv::VideoCapture* cam;   // camera handle (not owned by Vision)

	Detector* detector;      // handle of darknet (not owned by Vision)

	cv::KalmanFilter kf;     // for future tracker use
	Mat extractColor(Mat t, int thresh);  // extract the target color from t
	bool withinRect(bbox_t ref, Vec4i tar);
	bool withinRect(bbox_t ref, Point tar);
public:
	int cvGetLandmark();
	void undistortGenerate();
	// Extract the box shape from detected box corners.
	int cvGetRect(bool filtered);
	// Determine the box's orientation via color detection.
	int cvGetRect(
		vector<Scalar> posTargetSets,  // target colors
		vector<Scalar> negTargetSets,  // background colors
		// Color-extraction thresholds: the maximum allowed color
		// difference; smaller values make extraction stricter.
		int pos_thresh, int neg_thresh);
	// Detect, classify, and judge shape; optionally draw the results.
	void detectClassifier(bool draw);
	Vision(cv::VideoCapture* cap, std::string xmlfilename, Detector* detr);
	Vision(cv::VideoCapture* cap, Detector* detr);
	void feedInPic();
	void feedInPic(cv::Mat& captured);
	// True while the underlying camera is open.
	bool usable()
	{
		return this->cam->isOpened();
	}
	// Draw every detection in tars onto src: a per-class colored box plus
	// the numeric class id at the box's upper-left corner.
	// (const& instead of by-value: call sites are unaffected.)
	static void drawOn(Mat& src, const vector<bbox_t>& tars)
	{
		char text[20] = {0};
		for (const auto& i : tars)  // const& avoids copying each bbox_t
		{
			rectangle(src, Rect(i.x, i.y, i.w, i.h),
			          Scalar(120 + i.obj_id * 5, 255 - i.obj_id * 10, i.obj_id * 10));
			// snprintf bounds the write; obj_id is unsigned in darknet's
			// bbox_t, so format as %u.
			snprintf(text, sizeof(text), "%u", (unsigned)i.obj_id);
			putText(src, text, Point(i.x, i.y), FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255));
		}
	}
	// Vision does not own cam/detector, so there is nothing to release.
	~Vision() {}
};

vector<Line_t> filterLines(vector<Vec4i>sets);