#include "PoseDetcetion.h"
#include <openpose/headers.hpp>
#include <chrono> 
#include <thread> 
#include <PoseConfig.h>
#include <fstream>
#include <Windows.h>
#include <string>
#include <iostream>

// ---------------------------------------------------------------------------
// File-scope OpenPose pipeline objects shared by init(), detcetPose() and the
// two *_POSE_SERVER consumer loops below.
//
// NOTE(review): these globals are constructed during static initialization,
// i.e. BEFORE main() can run gflags parsing. Every FLAGS_* value read here is
// therefore the compiled-in default, not a command-line value — confirm this
// is intended, or construct these lazily after flag parsing. The relative
// construction order of these objects within this translation unit is
// top-to-bottom, but order relative to globals in other TUs is unspecified
// (static initialization order fiasco).
// ---------------------------------------------------------------------------
const auto outputSize = op::flagsToPoint(FLAGS_output_resolution, "-1x-1");   // -1 keeps input resolution
const auto netInputSize = op::flagsToPoint(FLAGS_net_resolution, "-1x368");   // network input, -1 = auto width
const auto poseModel = op::flagsToPoseModel(FLAGS_model_pose);
const bool enableGoogleLogging = true;
// Prefer the new flag; fall back to the deprecated one when the new is empty.
const auto writeJson = (!FLAGS_write_json.empty() ? FLAGS_write_json : FLAGS_write_keypoint_json);
op::ScaleAndSizeExtractor scaleAndSizeExtractor(netInputSize, outputSize, FLAGS_scale_number, FLAGS_scale_gap);
op::CvMatToOpInput cvMatToOpInput;
op::CvMatToOpOutput cvMatToOpOutput;
op::PoseExtractorCaffe poseExtractorCaffe{ poseModel, FLAGS_model_folder,FLAGS_num_gpu_start,{}, op::ScaleMode::ZeroToOne, enableGoogleLogging };
op::PoseCpuRenderer poseRenderer{ poseModel, (float)FLAGS_render_threshold, !FLAGS_disable_blending,(float)FLAGS_alpha_pose };
op::OpOutputToCvMat opOutputToCvMat;
op::FrameDisplayer frameDisplayer{ "Start Caffe Listen Thread", outputSize };



// Definitions of PoseDetcetion's static members.
// NOTE(review): both queues are read/written by the *_POSE_SERVER loops and,
// presumably, by producer threads elsewhere — but no mutex/atomic guards them
// here; verify the producers and add synchronization if they run concurrently.
bool PoseDetcetion::lock = false;      // when true, the consumer loops pause draining
bool PoseDetcetion::is_start = false;
std::queue<cv::Mat> PoseDetcetion::image_msg_queue;     // inbound frames to process
std::queue<cv::Mat> PoseDetcetion::image_msg_queue_qt;  // rendered frames for the Qt UI (capped at 10)
// One-time initialisation of the shared OpenPose pipeline: validates the
// relevant FLAGS_* values, then warms up the Caffe extractor and the renderer
// on the calling thread. Must run before detcetPose() or either of the
// *_POSE_SERVER loops uses the pipeline objects.
void PoseDetcetion::init()
{
	op::log("Start Caffe Listen Thread", op::Priority::High);
	// Logging level must be a valid byte value.
	op::check(0 <= FLAGS_logging_level && FLAGS_logging_level <= 255, "Wrong logging_level value.",
		__LINE__, __FUNCTION__, __FILE__);
	// Blending alpha is an opacity and must lie in [0, 1].
	const bool alphaOutOfRange = FLAGS_alpha_pose < 0. || FLAGS_alpha_pose > 1.;
	if (alphaOutOfRange)
		op::error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
	// Multi-scale processing requires a strictly positive gap between scales.
	const bool scaleConfigInvalid = FLAGS_scale_gap <= 0. && FLAGS_scale_number > 1;
	if (scaleConfigInvalid)
		op::error("Incompatible flag configuration: scale_gap must be greater than 0 or scale_number = 1.",
			__LINE__, __FUNCTION__, __FILE__);
	// Warn (but continue) when either legacy keypoint-output flag is in use.
	const bool usesDeprecatedFlags = !FLAGS_write_keypoint.empty() || !FLAGS_write_keypoint_json.empty();
	if (usesDeprecatedFlags)
		op::log("Flags `write_keypoint` and `write_keypoint_json` are deprecated and will eventually be removed."
			" Please, use `write_json` instead.", op::Priority::Max);
	// Bind the Caffe network and the renderer to this thread.
	poseExtractorCaffe.initializationOnThread();
	poseRenderer.initializationOnThread();
	op::log("Start OpenPose Thread", op::Priority::High);
}
// Runs one OpenPose forward pass on `image`, renders the detected keypoints,
// shows the rendered frame in the "figure2" window and returns it.
//
// @param image  BGR frame to process; returned unchanged when empty
//               (op::error is invoked first and may abort/throw).
// @return       The pose-rendered frame.
cv::Mat PoseDetcetion::detcetPose(cv::Mat image) {
	cv::Mat inputImage = image;
	if (inputImage.empty()) {
		op::error("Image Error");
		return image;
	}
	// Derive per-scale net-input sizes and the output resolution for this frame.
	const op::Point<int> imageSize{ inputImage.cols, inputImage.rows };
	std::vector<double> scaleInputToNetInputs;
	std::vector<op::Point<int>> netInputSizes;
	double scaleInputToOutput;
	op::Point<int> outputResolution;
	std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution)
		= scaleAndSizeExtractor.extract(imageSize);
	// Format the frame for the network, run the forward pass, then render the
	// extracted keypoints onto the output array.
	const auto netInputArray = cvMatToOpInput.createArray(inputImage, scaleInputToNetInputs, netInputSizes);
	auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
	poseExtractorCaffe.forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
	const auto poseKeypoints = poseExtractorCaffe.getPoseKeypoints();
	poseRenderer.renderPose(outputArray, poseKeypoints, scaleInputToOutput);
	auto outputImage = opOutputToCvMat.formatToCvMat(outputArray);
	cv::imshow("figure2", outputImage);
	// BUG FIX: return the rendered frame instead of the untouched input, so
	// callers receive the pose overlay that was just computed rather than a
	// copy of what they passed in.
	return outputImage;
}



// Consumer loop for the HCC SDK feed: waits until `image_msg_queue` has
// buffered at least 10 frames, then drains the whole queue, running OpenPose
// on each frame, logging every detected person's (x, y, score) triplets,
// appending them to a timestamp-named CSV under ./save/, and pushing the
// rendered frame onto `image_msg_queue_qt` for the Qt UI (bounded at 10).
// Never returns.
//
// NOTE(review): the static queues are accessed here without any mutex while
// producer threads presumably push into them — that is a data race; confirm
// the producers and add synchronization around both queues.
void PoseDetcetion::HCCSDK_POSE_SERVER() {
	op::log("Image Thread", op::Priority::High);
	while (1) {
		// FIX: the original spun at 100% CPU while waiting for frames; sleep
		// briefly whenever we are paused (`lock`) or the burst is not ready.
		if (lock || image_msg_queue.size() < 10) {
			std::this_thread::sleep_for(std::chrono::milliseconds(1));
			continue;
		}
		// Drain the buffered burst completely.
		while (image_msg_queue.size()) {
			cv::Mat inputImage = image_msg_queue.front();
			// Per-frame scale/size bookkeeping for the OpenPose pipeline.
			const op::Point<int> imageSize{ inputImage.cols, inputImage.rows };
			std::vector<double> scaleInputToNetInputs;
			std::vector<op::Point<int>> netInputSizes;
			double scaleInputToOutput;
			op::Point<int> outputResolution;
			std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution)
				= scaleAndSizeExtractor.extract(imageSize);
			const auto netInputArray = cvMatToOpInput.createArray(inputImage, scaleInputToNetInputs, netInputSizes);
			auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
			poseExtractorCaffe.forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
			const auto poseKeypoints = poseExtractorCaffe.getPoseKeypoints();
			poseRenderer.renderPose(outputArray, poseKeypoints, scaleInputToOutput);

			// Log and persist every person's keypoints. Each person gets its
			// own CSV named after the local wall-clock time at write moment
			// (preserved from the original behavior).
			for (auto person = 0; person < poseKeypoints.getSize(0); person++)
			{
				op::log("Person " + std::to_string(person) + " (x, y, score):");
				std::string personToPrint;  // FIX: was misspelled "PersionToPrint"
				for (auto bodyPart = 0; bodyPart < poseKeypoints.getSize(1); bodyPart++)
				{
					std::string valueToPrint;
					for (auto xyscore = 0; xyscore < poseKeypoints.getSize(2); xyscore++)
					{
						// Build the "x y score " triplet once, use it for both
						// the console line and the CSV row.
						const auto value = std::to_string(poseKeypoints[{person, bodyPart, xyscore}]) + " ";
						valueToPrint += value;
						personToPrint += value;
					}
					personToPrint += "\n";
					op::log(valueToPrint);
				}
				// Timestamp string: YMDHMS + milliseconds + day-of-week,
				// concatenated without separators (preserved from original).
				SYSTEMTIME sys;
				GetLocalTime(&sys);
				std::string time = std::to_string(sys.wYear) +
					std::to_string(sys.wMonth) +
					std::to_string(sys.wDay) +
					std::to_string(sys.wHour) +
					std::to_string(sys.wMinute) +
					std::to_string(sys.wSecond) +
					std::to_string(sys.wMilliseconds) +
					std::to_string(sys.wDayOfWeek);
				op::log(time);
				std::string path = "./save/" + time + ".csv";
				std::ofstream ofresult(path, std::ios::app);
				// FIX: report a failed open instead of silently dropping data
				// (e.g. when ./save/ does not exist).
				if (ofresult)
					ofresult << personToPrint;
				else
					op::log("Failed to open " + path, op::Priority::High);
			}

			// Hand the rendered frame to the (bounded) Qt display queue.
			auto outputImage = opOutputToCvMat.formatToCvMat(outputArray);
			if (image_msg_queue_qt.size() < 10) {
				image_msg_queue_qt.push(outputImage);
			}
			image_msg_queue.pop();
		}
	}
}
// Consumer loop for the local feed: same pipeline as HCCSDK_POSE_SERVER but
// without the console/CSV keypoint dump — waits for a burst of at least 10
// frames in `image_msg_queue`, drains it, renders the pose overlay on each
// frame and forwards the result to `image_msg_queue_qt` (bounded at 10).
// Never returns.
//
// NOTE(review): the static queues are accessed here without any mutex while
// producer threads presumably push into them — that is a data race; confirm
// the producers and add synchronization around both queues.
void PoseDetcetion::LOCAL_POSE_SERVER() {
	op::log("Image Thread", op::Priority::High);
	while (1) {
		// FIX: the original spun at 100% CPU while waiting for frames; sleep
		// briefly whenever we are paused (`lock`) or the burst is not ready.
		if (lock || image_msg_queue.size() < 10) {
			std::this_thread::sleep_for(std::chrono::milliseconds(1));
			continue;
		}
		// Drain the buffered burst completely.
		while (image_msg_queue.size()) {
			cv::Mat inputImage = image_msg_queue.front();
			// Per-frame scale/size bookkeeping for the OpenPose pipeline.
			const op::Point<int> imageSize{ inputImage.cols, inputImage.rows };
			std::vector<double> scaleInputToNetInputs;
			std::vector<op::Point<int>> netInputSizes;
			double scaleInputToOutput;
			op::Point<int> outputResolution;
			std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution)
				= scaleAndSizeExtractor.extract(imageSize);
			const auto netInputArray = cvMatToOpInput.createArray(inputImage, scaleInputToNetInputs, netInputSizes);
			auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
			poseExtractorCaffe.forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
			const auto poseKeypoints = poseExtractorCaffe.getPoseKeypoints();
			poseRenderer.renderPose(outputArray, poseKeypoints, scaleInputToOutput);
			// Hand the rendered frame to the (bounded) Qt display queue.
			auto outputImage = opOutputToCvMat.formatToCvMat(outputArray);
			if (image_msg_queue_qt.size() < 10) {
				image_msg_queue_qt.push(outputImage);
			}
			image_msg_queue.pop();
		}
	}
}
