#include <algorithm>
#include <array>
#include <chrono>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <onnxruntime_cxx_api.h>



using namespace std;
using namespace cv;



// Depth-to-metric scale factor.
// NOTE(review): DEPTH_SCALE, rgb_R/RGB_*, R and T are not referenced anywhere
// in this file — presumably reserved for a depth -> 3D back-projection step;
// confirm against other translation units before removing.
const float DEPTH_SCALE = 0.1;

// RGB camera intrinsic matrix (3x3, row-major): [fx 0 cx; 0 fy cy; 0 0 1]
const vector<float> rgb_R{ 1013.051f,  0.0f,       484.3115f,
						   0.0f,       1125.893f,  317.5117f,
						   0.0f,       0.0f,       1.0f };

// Convenience accessors into the intrinsic matrix above.
const float RGB_fx = rgb_R[0];
const float RGB_fy = rgb_R[4];
const float RGB_cx = rgb_R[2];
const float RGB_cy = rgb_R[5];

// Extrinsic rotation matrix R (3x3, row-major)
const vector<float> R{ 0.9946733f,    -0.0002597581f,   -0.1030771f,
						0.001034095f,  0.9999716f,       0.00745885f,
						0.1030722f,    -0.00752571f,     0.9946454f };

// Extrinsic translation vector T (3x1)
const vector<float> T{ -22.19651f, 0.4646935f, -4.033343f };



// 深度学习推理
std::vector<float> run_alogimg(const string& modelPath, Mat& img)
{	
	float confThreshold = 0.3;
	std::vector<float> mRes;
	int input_size = 640;
	int batch_size = 1;
	size_t mOutput_h = 8;
	size_t mOutput_w = 8400;
	//*********************** 1. 创建onnxruntime环境
	
	std::wstring					onnxPath = std::wstring(modelPath.begin(), modelPath.end());

	Ort::SessionOptions				session_options;
	Ort::Env						mEnv = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "project");

	session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);		//ORT_ENABLE_BASIC 设置图优化级别为基本优化
	std::unique_ptr<Ort::Session>	mSession = std::make_unique<Ort::Session>(mEnv, onnxPath.c_str(), session_options);

	size_t							mTpixels = input_size * input_size * 3 * batch_size;
	std::array<int64_t, 4>			mInput_shape_info = { batch_size, 3, input_size, input_size };


	//*********************** 2. process
	Mat img_processed;
	int m_img_w = img.cols;
	int m_img_h = img.rows;
	int _max = std::max(m_img_w, m_img_h);
	cv::Mat _img = cv::Mat::ones(cv::Size(_max, _max), CV_8UC3);
	_img = cv::Scalar(114, 114, 114);
	cv::Rect roi(0, 0, m_img_w, m_img_h);
	img.copyTo(_img(roi));

	//cv::imwrite("D:\\codes\\DEPLOY\\guakePose\\datas\\4\\test1.png", _img);

	float m_x_ratio = _img.cols / static_cast<float>(input_size); // 缩放尺寸
	float m_y_ratio = _img.rows / static_cast<float>(input_size);
	img_processed = cv::dnn::blobFromImage(_img, 1.0 / 255.0,
		cv::Size(input_size, input_size), cv::Scalar(0, 0, 0),
		true, false, CV_32F);

	//*********************** 3. run model
	auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
	Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info,					// 内存
		img_processed.ptr<float>(),		// 数据指针 
		mTpixels,						// 数据长度
		mInput_shape_info.data(),		// shape
		mInput_shape_info.size());		// shape len
	const std::array<const char*, 1> inputNames = { "images" };
	const std::array<const char*, 1> outNames = { "output0" };

	std::vector<Ort::Value> ort_outputs;

	// 推理
	if (!mSession) {
		// 确保session_已经被初始化
		throw std::runtime_error("LOG Session is not initialized. Call Initialize first.");
	}
	try {
		ort_outputs = mSession->Run(Ort::RunOptions{ nullptr },
			inputNames.data(),		// 输入节点名称数组首地址
			&input_tensor_,			// 输入张量的指针
			1,						// 输入张量的数量
			outNames.data(),		// 输出节点名称数组首地址
			outNames.size());		// 输出张量的数量
	}
	catch (std::exception e) {
		std::cout << e.what() << std::endl;
	}

	float* mPData = ort_outputs[0].GetTensorMutableData<float>();

	//*********************** 4. post process
	cv::Mat dout(mOutput_h, mOutput_w, CV_32F, (float*)mPData); // 8 * 8400
	cv::Mat det_output = dout.t(); // 8400x8

	// 筛选概率最大的
	int index = 0;
	float max_conf = -1;
	for (int i = 0; i < mOutput_w; i++) {
		auto curren_conf = det_output.at<float>(i, 4);
		if (max_conf < curren_conf) {
			max_conf = curren_conf;
			index = i;
		}
	}

	if (max_conf < confThreshold) {
		mRes = { 0, 0 };
		return mRes;
	}


	vector<float> res_;
	float* ptr = det_output.ptr<float>(index); // 指向第 index 行首元素
	res_ = std::vector<float>(ptr, ptr + det_output.cols);


	
	//vector<float> res_ = res[index];  // cx, cy, w, h, class_id, p1, p2, score
	vector<float> p_xyc = {};
	float p1 = res_[5] * m_x_ratio;
	float p2 = res_[6] * m_y_ratio;
	float p_score = res_[7];

	float cx = res_[0] * m_x_ratio;
	float cy = res_[1] * m_y_ratio;
	float w = res_[2] * m_x_ratio;
	float h = res_[3] * m_y_ratio;
	mRes = {p1, p2};

	// draw
	string text = "conf: " + std::to_string(max_conf);
	cv::putText(img, text, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, 
						cv::Scalar(0, 0, 255), 1, cv::LINE_AA);

	cv::circle(img, cv::Point(static_cast<int>(p1), static_cast<int>(p2)), 3, cv::Scalar(0, 0, 255), -1);
	return mRes;


} // infer







int main() {

	string			imgPath				= "D:\\codes\\DEPLOY\\guakePose\\datas\\4\\3.png";
	string			modelPath			= "D:\\codes\\DEPLOY\\guakePose\\weights\\pose_detect.onnx";

	Mat				img					= cv::imread(imgPath);

	
	
	// run 
	auto	start				= std::chrono::high_resolution_clock::now();

	std::vector<float> resPoint	= run_alogimg(modelPath, img);
	cv::imwrite("D:\\codes\\DEPLOY\\guakePose\\datas\\4\\res.png", img);

	auto end					= std::chrono::high_resolution_clock::now();
	auto duration				= std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	double ms					= duration.count() / 1000.0;  // 转为毫秒
	std::cout << "ONNX Inference Time: " << ms << " ms" << std::endl;
}












