#include "dispatcher.h"
#include "dataloader.h"
#include "fastMath.hpp"
#include "zupply.hpp"

#include <algorithm>
#include <stdexcept>

std::vector<Track_Info> Dispatcher::convert_tracker_to_render()const
{
	std::vector<Track_Info> track_infos;
	for (const auto& track : m_tracker->tracks)
	{
		auto&& vec = convert_track_to_pointsvec(track.get());
		track_infos.emplace_back(std::move(vec), track->GetLastRect(), track->info_get(),track->m_trackID);
	}
	return track_infos;
}

/// Write (possibly render-modified) info back into the corresponding tracks.
/// Iterates over the smaller of the two containers: the tracker may add or
/// drop tracks between convert_tracker_to_render() and this call, and the
/// original code indexed tracks[] with infos.size() unchecked (potential OOB).
void Dispatcher::update_infos(const std::vector<Track_Info>& infos)
{
	const size_t count = std::min(infos.size(), m_tracker->tracks.size());
	for (size_t i = 0; i < count; i++)
	{
		m_tracker->tracks[i]->info_update(infos[i].info);
	}
}

/// Flatten a track's trace into (predicted point, has-raw-detection) pairs.
std::vector<std::pair<cv::Point, bool>> Dispatcher::convert_track_to_pointsvec(const CTrack* track)const
{
	const size_t trace_len = track->m_trace.size();

	std::vector<std::pair<cv::Point, bool>> points;
	points.reserve(trace_len);

	for (size_t idx = 0; idx < trace_len; ++idx)
	{
		const TrajectoryPoint& tp = track->m_trace.at(idx);
		points.emplace_back(cv::Point(tp.m_prediction), tp.m_hasRaw);
	}
	return points;
}

/// Parse the configuration file and cache the result in mParams.
/// Must be called before init_modules(), which reads mParams throughout.
void Dispatcher::load_params(const string& config_file)
{
	mParams = Configuration::ParserParams(config_file);
}

/// Detect a face inside the top third of detect_region (head area) of frame.
/// Returns ((crop, face rect in crop coordinates), landmarks); an all-zero
/// rect means "no confident face". Landmarks are filled only when face-ID is
/// enabled. NOTE: Bbox appears to store transposed coordinates (x1/x2 = rows,
/// y1/y2 = cols), mirrored from the original mapping code — confirm against
/// the mtcnn implementation.
Dispatcher::face_detect Dispatcher::get_face(cv::Mat frame, const cv::Rect& detect_region)const
{
	cv::Rect face_rect(0, 0, 0, 0);
	std::vector<cv::Point2f> landmarks;

	// Search only the head area, clipped to the frame so an out-of-bounds
	// track rect cannot make the ROI constructor throw.
	cv::Rect head_region(detect_region.x, detect_region.y,
		detect_region.width, detect_region.height / 3);
	head_region &= cv::Rect(0, 0, frame.cols, frame.rows);

	cv::Mat cross_frame = frame(head_region);
	if (cross_frame.empty()) return std::make_pair(std::make_pair(cross_frame, face_rect), landmarks);

	cv::Mat cross_frame_r;
	cv::resize(cross_frame, cross_frame_r, cv::Size(mParams.mtcnn_params.width, mParams.mtcnn_params.height));

	vector<Bbox> facebox;
	m_face_detector->detectFace(cross_frame_r, facebox);
	landmarks.reserve(5);

	// Ratios to map detections back to the un-resized crop.
	float row_ratio = (float)cross_frame.rows / cross_frame_r.rows;
	float col_ratio = (float)cross_frame.cols / cross_frame_r.cols;

	if (!facebox.empty()) {
		int face_index = 0;

		if (facebox.size() > 1)
		{
			// Pick the face whose center is closest to the crop center,
			// by integer squared distance (no need for pow()/doubles).
			cv::Size center = cross_frame_r.size() / 2;
			int min_dis = INT_MAX;
			for (size_t i = 0; i < facebox.size(); i++)
			{
				const int cx = (facebox[i].y1 + facebox[i].y2) / 2;
				const int cy = (facebox[i].x1 + facebox[i].x2) / 2;
				const int dx = cx - center.width;
				const int dy = cy - center.height;
				const int dist = dx * dx + dy * dy;

				if (dist < min_dis)
				{
					min_dis = dist;
					face_index = static_cast<int>(i);
				}
			}
		}

		auto it = facebox.begin() + face_index;
		// Reject low-confidence detections.
		if (it->score < 0.95)
			return std::make_pair(std::make_pair(cross_frame, face_rect), landmarks);

		// Map the box back to crop coordinates (note the x/y swap).
		face_rect = cv::Rect(cv::Point(it->y1*col_ratio, it->x1*row_ratio), cv::Point(it->y2*col_ratio, it->x2*row_ratio));

		if (mParams.face_ID_params.enable)
		{
			landmarks.clear();
			// ppoint holds 5 x-coords followed by 5 y-coords.
			for (int i = 0; i < 5; ++i)
			{
				landmarks.push_back(cv::Point2f(it->ppoint[i] * col_ratio, it->ppoint[i + 5] * row_ratio));
			}
		}
	}

	return std::make_pair(std::make_pair(cross_frame, face_rect), landmarks);
}

/// Run the person detector on one frame and feed its output to the tracker.
void Dispatcher::update_det_tracker(cv::Mat bgr_frame)
{
	std::vector<cv::Rect> det_rects;
	std::vector<cv::Point2f> det_centers;

	m_detector->detect(bgr_frame, det_centers, det_rects);

	// Convert OpenCV types into the tracker's own region/point types.
	std::vector<CRegion> regions(det_rects.begin(), det_rects.end());
	std::vector<Point_t> centers(det_centers.begin(), det_centers.end());

	//zz::time::Timer timer2;
	m_tracker->Update(centers, regions, bgr_frame);
	//logger->info("tracker elapsed time: ") << timer2.to_string();
}

/// Per-frame face analysis: for tracks near the door line, detect a face,
/// sample gender/age (up to 3 times) and face ID (up to 3 matches), and when
/// a track crosses the line (flag == +/-1) record a timestamped crop with the
/// statistics module.
void Dispatcher::update_face_info(cv::Mat frame)
{
	// Clean copy used for cropping person snapshots below.
	cv::Mat frame_ = frame.clone();

	second_type time_now;

	if (mParams.video_params.video_mode == "online")
		time_now = std::chrono::time_point_cast<std::chrono::seconds>(std::chrono::system_clock::now());
	else
	{
		// Offline mode: synthesize wall-clock time from the frame counter,
		// assuming a fixed 25 fps input.
		int time_duration = m_videostream->get_frame_cnt() / 25;

		chrono::seconds time_d(time_duration);
		// BUGFIX: the original wrote `auto time_now = ...` here, declaring a
		// shadowing local and leaving the outer time_now default-initialized
		// (epoch) in offline mode. Assign to the outer variable instead.
		time_now = mParams.video_params.start_time;
		time_now += time_d;
	}

	m_statistics->update_info(&time_now);

	for (const auto& track : m_tracker->tracks)
	{
		InfoRect&& info = track->TrackIdstateUpdate(mParams.door_params);
		// Only probe for a face while the person is valid (rectstatus == 1),
		// close to the door line (< 150 px), and we still need more gender
		// samples (< 3) or face-ID samples (< 3).
		if ((info.face_detect_times < 3 || (mParams.face_ID_params.enable && info.person_info.size() < 3)) && info.rectstatus == 1 && track->DistWithLine(mParams.door_params) < 150)
		{
			auto&& detected_face = get_face(frame, info.rect);

			// An all-zero rect means get_face found no confident face.
			if (detected_face.first.second == cv::Rect(0, 0, 0, 0))
				continue;

			if (info.face_detect_times < 3)
			{
				auto&& face_info = m_gender->predictface_with_age(detected_face.first.first, detected_face.first.second);

				info.gender[info.face_detect_times++] = face_info.first;

				// Accumulate per-age-bin scores across samples.
				for (size_t i = 0; i < face_info.second.size(); i++)
				{
					info.ages[i] += (face_info.second[i]);
				}

				auto it = std::max_element(info.ages.begin(), info.ages.end());
				auto index = std::distance(info.ages.begin(), it);
				std::cout << "age: " << index << "score: " << *it << std::endl;
			}

			if (mParams.face_ID_params.enable && info.person_info.size()<3)
			{
				person_info_type person_info = m_face_id->identify(detected_face.first.first, detected_face.second);
				if (person_info.first != "Unknown")
				{
					info.person_info.push_back(person_info);

					std::cout << "max_score: " << person_info.second << " max_id: " << person_info.first << std::endl;
				}
			}
		}
		// flag == +/-1: the track just crossed the door line (in/out).
		if (info.flag == 1 || info.flag == -1)
		{
			std::time_t tt = chrono::system_clock::to_time_t(time_now);
			char mbstr[100] = { 0 };
			// NOTE(review): std::localtime is not thread-safe — OK only if
			// this runs on a single thread; confirm.
			std::strftime(mbstr, sizeof(mbstr), "%Y-%m-%d %H:%M:%S", std::localtime(&tt));
			string t_part(mbstr);
			t_part.erase(0, 5);                 // drop "YYYY-"
			t_part.erase(t_part.size() - 3, 3); // drop ":SS"
			info.time_s = t_part;

			cv::Rect temp(info.rect.x, info.rect.y, info.rect.width, info.rect.height);
			info.I_crop = frame_(cv::Range(temp.y, temp.y + temp.height), cv::Range(temp.x, temp.x + temp.width)).clone();
			m_statistics->update_info(info);
		}

		track->info_update(info);
	}
}

/// Construct every processing module from mParams: video stream, person
/// detector, tracker, face detector, gender/age net, optional face ID, and
/// the statistics/render helpers. Requires load_params() to have run first.
/// Throws std::runtime_error on face-ID model/mean load failure or on an
/// unknown detector_mode.
void Dispatcher::init_modules()
{
	m_videostream = std::make_unique<VideoStream>(mParams.video_params);
	std::cout << "video init completed" << std::endl;
	// Read one frame up front to learn the stream's resolution.
	cv::Mat frame = m_videostream->read_one_frame();

	if (frame.empty())
	{
		std::cerr << "Can't read frame " << std::endl;
		return;
	}

	if (caffe::GPUAvailable()) {
		printf("GPU is available\n");
		caffe::SetMode(caffe::GPU, 0);
	}
	else {
		printf("Only CPU is available\n");
	}

	// Scale the detector input height to keep the stream's aspect ratio, and
	// derive the association distance threshold from the frame height.
	mParams.human_detector_params.height = mParams.human_detector_params.width / (float)frame.cols * frame.rows;
	mParams.track_params.distance_threshold = (float)frame.rows / 10;

	// Person detector: 0 = pose net, 1 = SSD.
	if (mParams.detector_mode == 0)
	{
		m_detector = std::make_unique<Posenet>(mParams.human_detector_params.deploy_path.c_str(), mParams.human_detector_params.model_path.c_str(),
			cv::Size(mParams.human_detector_params.width, mParams.human_detector_params.height));
	}
	else if (mParams.detector_mode == 1)
	{
		m_detector = std::make_unique<SSDDetector>(mParams.person_detector_params.model_path, mParams.person_detector_params.epoch, mParams.person_detector_params.width,
			mParams.person_detector_params.height, mParams.person_detector_params.mean_r, mParams.person_detector_params.mean_g, mParams.person_detector_params.mean_b,
			mParams.person_detector_params.device_type, mParams.person_detector_params.device_id);
	}
	else
	{
		// Fail fast instead of leaving m_detector null and crashing later
		// in update_det_tracker().
		throw std::runtime_error("unknown detector_mode in config");
	}

	std::cout << "person detector init completed" << std::endl;

	//tracker
	m_tracker = std::make_shared<CTracker>(false, (CTracker::DistType)mParams.track_params.dist_type, (CTracker::KalmanType)mParams.track_params.kalman_type, (CTracker::FilterGoal)mParams.track_params.filter_goal,
		(CTracker::LostTrackType)mParams.track_params.lost_track_type, (CTracker::MatchType)mParams.track_params.match_type, mParams.track_params.delta_time, mParams.track_params.noise_magnitude,
		mParams.track_params.distance_threshold, mParams.track_params.max_skipped_frames, mParams.track_params.max_track_length);

	std::cout << "tracker init completed" << std::endl;

	//face detector (mtcnn: P-, R- and O-net stages)
	std::vector<std::string> nets{ mParams.mtcnn_params.p_net,mParams.mtcnn_params.r_net,mParams.mtcnn_params.o_net };
	m_face_detector = std::make_unique<mtcnn>(mParams.mtcnn_params.height, mParams.mtcnn_params.width, nets);

	std::cout << "face detector init completed" << std::endl;

	//face gender/age classifier
	m_gender = std::make_unique<FaceInfo::GenderAge>(mParams.face_info_params.models, mParams.face_info_params.epoch, mParams.face_info_params.width,
		mParams.face_info_params.height, mParams.face_info_params.mean_r, mParams.face_info_params.mean_g, mParams.face_info_params.mean_b,
		mParams.face_info_params.device_type, mParams.face_info_params.device_id);

	std::cout << "face gender init completed" << std::endl;

	//face id (optional)
	if (mParams.face_ID_params.enable)
	{
		m_face_id = std::make_unique<fr::FaceRecognizer>(mParams.face_ID_params.thresh);
		// std::runtime_error instead of std::exception(const char*): the
		// latter is an MSVC-only extension and does not compile elsewhere.
		if (!m_face_id->load_model(mParams.face_ID_params.deploy_path, mParams.face_ID_params.model_path))
		{
			throw std::runtime_error("face recognizer model load failed");
		}

		if (!m_face_id->load_mean(mParams.face_ID_params.feature_mean))
		{
			throw std::runtime_error("face recognizer mean feature load failed");
		}

		std::cout << "face id init completed" << std::endl;

		//load reference face database
		DataLoader::load_face_db(mParams.database_params.database_folder, mParams.database_params.databse_record, m_face_id.get(), nets);
		std::cout << "database load completed" << std::endl;
	}

	m_statistics = std::make_unique<Statistics>(mParams.door_params,mParams.statistic_params, mParams.render_params.render_mode, mParams.face_ID_params.thresh);
	std::cout << "statitics init completed" << std::endl;

	m_render = std::make_unique<Render>(mParams.render_params.background_image, mParams.render_params.person_image, mParams.door_params, mParams.render_params.render_mode);

	std::cout << "init completed" << std::endl;
}

/// Main loop: init all modules, then per frame run detection/tracking, face
/// analysis, rendering, and display until the stream ends or ESC is pressed.
void Dispatcher::run()
{
	init_modules();
	//cv::VideoWriter wr("./outyy.avi", cv::VideoWriter::fourcc('D', 'I', 'V', 'X'), 25, cv::Size(1080,1920), true);
	cv::namedWindow("Video", CV_WINDOW_NORMAL);
	cv::setWindowProperty("Video", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);

	cv::Mat frame; // (removed unused `init_flag` local)
	while (true)
	{
		frame = m_videostream->read_one_frame();

		if (frame.empty())
			break; // end of stream
		//cv::resize(frame, frame, cv::Size(720, 1280));
		//wr << frame;

		update_det_tracker(frame);
		update_face_info(frame);

		// Render the current tracks/statistics, then push any info edits the
		// renderer made back into the tracker.
		auto track_infos = convert_tracker_to_render();
		cv::Mat data_image = m_render->render(frame, track_infos, m_statistics->get_totoalinfo(),mParams.render_params.draw_rect);
		update_infos(track_infos);
		cv::imshow("Video", data_image);

		// ESC quits.
		char key = cv::waitKey(1);
		if (key == 27)
			break;
	}
	cv::destroyWindow("Video");

}