﻿


#include <assert.h>

// #include "../public/dlm_crypt.hpp"
// #include "../public/gpu_utility.hpp"
// #include "../public/cuda_utility.hpp"
#include "./infer_seg_v2_o.h"

namespace txr_algo_dlm_seg
{ 



    /**
	 * @brief Read the entire contents of a file into memory.
	 *
	 * @param file_path Path of the file to read (opened in binary mode).
	 * @return std::vector<char> holding the raw file bytes.
	 * @throws std::runtime_error if the file cannot be opened.
	 */
	std::vector<char> get_file_data(const std::string& file_path) {
		std::ifstream file(file_path, std::ios_base::binary);
		if (!file.good()) {
			// assert() is compiled out in release builds; in that case a missing
			// file made tellg() return -1 and vector(-1) invoke UB / a huge
			// allocation. Fail loudly instead.
			throw std::runtime_error("Failed to open file: " + file_path);
		}
		file.seekg(0, std::ios_base::end);
		const std::streamsize file_len = file.tellg();
		file.seekg(0, std::ios_base::beg);
		std::vector<char> data(static_cast<size_t>(file_len), 0);
		file.read(data.data(), file_len);
		return data; // file closed by ifstream destructor (RAII)
	}

    /**
     * @brief Apply a simple XOR cipher to a buffer in place.
     *
     * The transform is its own inverse: applying it twice restores the input.
     *
     * @param data Bytes to transform; every byte is XOR-ed with the fixed key 0x1.
     */
    void decrypt_data(std::vector<char>& data) {
        const char key = 0x1;
        for (char& byte : data) {
            byte ^= key;
        }
    }
	
	    /**
     * @brief Decrypt a packed model file and split it into XML and BIN buffers.
     *
     * Expected file layout: [st_encrypt_header][info struct][XML part][BIN part],
     * where the XML and BIN parts are XOR-encrypted (see decrypt_data).
     *
     * @param merged_path Path of the encrypted package; must end with DATS_EXTENSION.
     * @param info        Receives the model/decryption metadata copied from the file.
     * @param xml_data    Receives the decrypted model description (XML) bytes.
     * @param bin_data    Receives the decrypted weight (BIN) bytes.
     * @return true on success; this function never returns false — all failures
     *         are reported via std::runtime_error.
     * @throws std::runtime_error on a bad extension or a truncated file.
     */
    bool decryptFileTo_XB_buffer(
        const std::string& merged_path,
        st_encrypt_info_ovino& info, 
        std::vector<char>& xml_data,
        std::vector<char>& bin_data)
    {
        // Extension check. NOTE(review): assumes DATS_EXTENSION is exactly
        // 6 characters long — confirm against its definition.
        if (merged_path.size() < 6 || merged_path.substr(merged_path.size() - 6) != DATS_EXTENSION) {
            throw std::runtime_error("The provided file is not a valid file.");
        }

        // Read the whole encrypted file into memory.
        std::vector<char> merged_data = get_file_data(merged_path);

        // The file must at least contain the fixed-size header.
        if (merged_data.size() < sizeof(st_encrypt_header)) {
            throw std::runtime_error("Merged data is too small to contain the header.");
        }

        // Copy out the header; it carries the sizes of the remaining parts.
        st_encrypt_header header;
        std::memcpy(&header, merged_data.data(), sizeof(st_encrypt_header));

        // Validate that all advertised parts actually fit in the file.
        if (merged_data.size() < sizeof(st_encrypt_header) + header.struct_size + header.xml_size + header.bin_size) {
            throw std::runtime_error("Merged data is too small to contain all parts.");
        }

        // Copy the info struct. NOTE(review): header.struct_size is trusted to
        // be <= sizeof(st_encrypt_info_ovino) — a larger value would overflow
        // `info`; confirm the producer always writes a matching struct size.
        std::memcpy(&info, merged_data.data() + sizeof(st_encrypt_header), header.struct_size);

        // Extract and decrypt the XML part (immediately after the info struct).
        xml_data.assign(
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size,
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size
        );
        decrypt_data(xml_data);

        // Extract and decrypt the BIN part (immediately after the XML part).
        bin_data.assign(
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size,
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size + header.bin_size
        );
        decrypt_data(bin_data);

        return true;
    }

	    /**
     * @brief Copy raw weight bytes from a std::vector<char> into an OpenVINO tensor.
     *
     * @param weightsBuffer Character vector holding the model weight bytes.
     * @return A u8 ov::Tensor whose element count equals the buffer length.
     */
    ov::Tensor vector_to_tensor(const std::vector<char>& weightsBuffer) {
        const size_t byte_count = weightsBuffer.size();
        ov::Tensor weights(ov::element::u8, {byte_count});
        std::memcpy(weights.data(), weightsBuffer.data(), byte_count);
        return weights;
    }

	

	//===================================================================================
	// Construction/destruction: members are default-initialized here; all real
	// setup (decryption, model compilation, request creation) happens in Init().
	InferSegV2_o::InferSegV2_o()
	{}
	InferSegV2_o::~InferSegV2_o()
	{}

	/**
	 * @brief Initialize the OpenVINO runtime, decrypt the packed model and
	 *        create one inference request per batch slot.
	 *
	 * @param gpu_id          Device index. NOTE(review): currently unused — the
	 *                        model is compiled on the "AUTO" device; confirm intent.
	 * @param input_dats_path Path to the encrypted .dats model package.
	 * @return true on success; decryption/compilation failures propagate as exceptions.
	 */
	bool InferSegV2_o::Init(int gpu_id, const char* input_dats_path)
	{
		std::cout << "InferSegV2_o::Init" << std::endl;

		// Initialize the OpenVINO core.
		core = ov::Core();

		// Decrypt the packed file into the model description (XML) and weights (BIN).
		const std::string file_path = input_dats_path;
		std::vector<char> xml_buffer;
		std::vector<char> bin_buffer;
		decryptFileTo_XB_buffer(
				file_path,
				m_info,
				xml_buffer,
				bin_buffer);

		// Log the decrypted model metadata.
		std::cout << "Decryption Info:" << std::endl;
		std::cout << "GPU Model: " << m_info.gpu_model << std::endl;
		std::cout << "Batch Size: " << m_info.batch_size << std::endl;
		std::cout << "Input Channels: " << m_info.input_channel << std::endl;
		std::cout << "Max Image Length: " << m_info.img_max_len << std::endl;
		std::cout << "Mean: " << m_info.img_mean[0] << "," << m_info.img_mean[1] << "," << m_info.img_mean[2] << std::endl;
		std::cout << "Std: " << m_info.img_std[0] << "," << m_info.img_std[1] << "," << m_info.img_std[2] << std::endl;
		std::cout << "Dynamic: " << (m_info.dynamic ? "Yes" : "No") << std::endl;
		std::cout << "Dilation: " << (m_info.dilation ? "Yes" : "No") << std::endl;
		std::cout << "Box Threshold: " << m_info.box_threshold << std::endl;
		std::cout << "Unclip Ratio: " << m_info.unclip_ratio << std::endl;
		std::cout << "Mode: " << m_info.mode << std::endl;
		std::cout << "Engine Size: " << m_info.engine_size << std::endl;

		std::string model_xml(xml_buffer.begin(), xml_buffer.end());
		ov::Tensor bin_tensor = vector_to_tensor(bin_buffer);

		// Read and compile the model. Exceptions propagate to the caller — the
		// previous `catch (...) { throw; }` was a no-op and has been removed.
		std::shared_ptr<ov::Model> model = core.read_model(model_xml, bin_tensor);
		compiled_model = core.compile_model(model, "AUTO");

		// Create one inference request per batch slot.
		size_t num_requests = m_info.batch_size;
		infer_requests.resize(num_requests);
		for (size_t i = 0; i < num_requests; ++i) {
			infer_requests[i] = compiled_model.create_infer_request();
		}
		return true;
	}

	/**
	 * @brief Placeholder engine loader — currently a stub that does nothing.
	 *
	 * @param v_engine_data Serialized engine bytes (ignored in this implementation).
	 * @return Always true.
	 */
	bool InferSegV2_o::LoadEngine(std::vector<char> v_engine_data)
	{

		return true;
	}

	/**
	 * @brief Convert raw detect-unit images into normalized CHW float buffers.
	 *
	 * For each unit: rebuilds an 8-bit BGR cv::Mat from the planar float
	 * channels in unit.img.pp_rgbf (scaled by 255 here, so the planes are
	 * presumably in [0,1] — TODO confirm), resizes so the longest side does not
	 * exceed m_info.img_max_len with both sides rounded to multiples of 32,
	 * then normalizes per channel with m_info.img_mean / m_info.img_std and
	 * flattens to CHW order.
	 *
	 * @param p_units Array of input units (length >= num).
	 * @param num     Number of units to process.
	 * @param v_imgs  Out: reconstructed 8-bit BGR images, one per unit.
	 * @return Tuple of (per-image CHW float buffers, resize_h, resize_w).
	 *         NOTE(review): only the LAST image's resize dims are returned —
	 *         callers appear to assume all images in a batch share them; confirm.
	 */
	std::tuple<std::vector<std::vector<float>>, int, int> 
	InferSegV2_o::prepareImage(st_detect_unit* p_units, int num, std::vector<cv::Mat>& v_imgs) 
	{
    	std::vector<std::vector<float>> batch_results(num);
		v_imgs.resize(num);
		float ratio = 1.0f;
		int final_resize_h = 0;
		int final_resize_w = 0;

		for (int n = 0; n < num; ++n)
		{
			st_img_rgb& img = p_units[n].img;
			cv::Mat& src_img = v_imgs[n];
			{
				// Rebuild a BGR Mat from the planar channels: pp_rgbf[0] is
				// stored into the R slot, [1] into G, [2] into B.
				src_img.create(img.h, img.w, CV_8UC3);
				int index = 0;
				for (int i = 0; i < src_img.rows; ++i)
				{
					for (int j = 0; j < src_img.cols; ++j)
					{
						cv::Vec3b rgb;
						rgb[2] = img.pp_rgbf[0][index]*255.f;
						rgb[1] = img.pp_rgbf[1][index]*255.f;
						rgb[0] = img.pp_rgbf[2][index]*255.f;
						src_img.at<cv::Vec3b>(i, j) = rgb;
						index++;
					}
				}
			}
			cv::Mat rsz_img;

			// Downscale ratio so the longest side fits img_max_len.
			// NOTE(review): __max is an MSVC macro — not portable to gcc/clang.
			if (__max(float(src_img.rows), float(src_img.cols)) > m_info.img_max_len) {
				if (float(src_img.rows) > float(src_img.cols))
					ratio = float(m_info.img_max_len) / float(src_img.rows);
				else
					ratio = float(m_info.img_max_len) / float(src_img.cols);
			}
			else {
				ratio = 1.0f;
			}

			int resize_h = int(float(src_img.rows) * ratio);
			int resize_w = int(float(src_img.cols) * ratio);

			// Round each side to the nearest multiple of 32 (minimum 32), as
			// required by the network's stride.
			resize_h = __max(int(std::round(float(resize_h) / 32) * 32), 32);
			resize_w = __max(int(std::round(float(resize_w) / 32) * 32), 32);


			cv::resize(src_img, rsz_img, cv::Size(resize_w, resize_h));

			// Scale 8-bit values back into [0,1] floats.
			rsz_img.convertTo(rsz_img, CV_32FC3, 1.0 / 255.f, 0);


			std::vector<cv::Mat> bgr_channels(3);
			cv::split(rsz_img, bgr_channels);

			// Pre-allocate memory for this image's CHW buffer.
			batch_results[n].reserve(3 * resize_h * resize_w);

			// Normalize each channel and append in CHW order.
			for (int i = 0; i < 3; ++i) {
				bgr_channels[i] = (bgr_channels[i] - m_info.img_mean[i]) / m_info.img_std[i];
				
				const float* ptr = bgr_channels[i].ptr<float>(0);
				size_t channel_size = bgr_channels[i].total();
				
				// Append the current channel's data to the result.
				batch_results[n].insert(batch_results[n].end(), ptr, ptr + channel_size);
			}

			final_resize_h = resize_h;
			final_resize_w = resize_w;

		}
   		return std::make_tuple(batch_results, final_resize_h, final_resize_w);
	}


	//============================================================================


	/**
	 * @brief Run segmentation inference on up to m_info.batch_size detect units.
	 *
	 * Preprocesses the inputs, runs one OpenVINO infer request per image,
	 * resizes the single-channel probability map back to the preprocessed
	 * dimensions if needed, and hands it to postProcess().
	 *
	 * @param p_unit Array of detect units (inputs and result storage).
	 * @param num    Number of units supplied; clamped to m_info.batch_size.
	 */
	void InferSegV2_o::Detect(st_detect_unit* p_unit, int num)
	{
		int batch_size = m_info.batch_size;
		if (num > batch_size)
		{
			std::cout << "input unit size > batch size!" << std::endl;
			num = batch_size;
		}

		// Preprocess all inputs into CHW float buffers.
		std::vector<cv::Mat> v_mats;
		std::vector<std::vector<float>> processed_data;
		int resize_h, resize_w;
		std::tie(processed_data, resize_h, resize_w) = prepareImage(p_unit, num, v_mats);
		std::cout << "processed_data size: " << processed_data.size() << std::endl;

		// Inference. BUG FIX: this loop previously ran batch_size times, but
		// processed_data only holds `num` entries — when num < batch_size the
		// old code indexed past the end (undefined behavior). Iterate over the
		// actual number of prepared images instead (num <= batch_size, so
		// infer_requests[i] is always valid).
		for (int i = 0; i < num; ++i) {
			ov::Tensor input_tensor(ov::element::f32,
									{1, 3, static_cast<size_t>(resize_h), static_cast<size_t>(resize_w)},
									processed_data[i].data());

			infer_requests[i].set_input_tensor(input_tensor);
			infer_requests[i].infer();

			ov::Tensor output_tensor = infer_requests[i].get_output_tensor();
			float* output_data = output_tensor.data<float>();

			// Record the output shape (DBNet-style output: [batch, 1, h, w]).
			auto shape = output_tensor.get_shape();
			m_v_predict_shape.clear();
			for (auto dim : shape) {
				m_v_predict_shape.push_back(dim);
			}

			// Staging buffer for the probability map at (resize_h, resize_w).
			std::vector<float> processed_output(static_cast<size_t>(resize_h) * resize_w);

			size_t output_h = shape[2];
			size_t output_w = shape[3];

			if (output_h != static_cast<size_t>(resize_h) || output_w != static_cast<size_t>(resize_w)) {
				// Resize the single-channel probability map to the
				// preprocessed size before post-processing.
				cv::Mat output_mat(output_h, output_w, CV_32F, output_data);
				cv::Mat resized_mat;
				cv::resize(output_mat, resized_mat, cv::Size(resize_w, resize_h));
				std::memcpy(processed_output.data(), resized_mat.data, resize_h * resize_w * sizeof(float));
			}
			else {
				// Sizes already match — copy the probabilities directly
				// (the network output is already sigmoid-activated).
				std::memcpy(processed_output.data(), output_data, resize_h * resize_w * sizeof(float));
			}

			// Post-processing (box extraction), with timing.
			// NOTE(review): postProcess only reads p_unit[0] — confirm whether
			// per-image calls should pass &p_unit[i] instead.
			auto p_start = std::chrono::high_resolution_clock::now();
			postProcess(p_unit, num, processed_output.data(), resize_h, resize_w);
			auto p_end = std::chrono::high_resolution_clock::now();
			float total_inf = std::chrono::duration<float, std::milli>(p_end - p_start).count();
			std::cout << "postProcess take: " << total_inf << " ms." << std::endl;
		}
	}

	/**
	 * @brief Convert a probability map into detection boxes for the first unit.
	 *
	 * Thresholds the map at 0.3, optionally dilates the binary mask, and calls
	 * BoxesFromBitmap to extract boxes into p_unit[0].
	 *
	 * @param p_unit         Array of detect units; only p_unit[0] receives results.
	 * @param size           Number of units. NOTE(review): currently unused.
	 * @param output         Probability map, final_resize_h * final_resize_w floats.
	 * @param final_resize_h Map height.
	 * @param final_resize_w Map width.
	 */
	void InferSegV2_o::postProcess(st_detect_unit* p_unit, int size, float* output, const int& final_resize_h, const int& final_resize_w)
	{
		st_detect_unit& unit = p_unit[0];

		// (A full BGR reconstruction of unit.img used to be built here, but the
		// resulting Mat was never read afterwards — that dead O(h*w) work has
		// been removed.)

		const int map_len = final_resize_h * final_resize_w;

		// Float copy of the probability map plus an 8-bit version for thresholding.
		std::vector<float> pred(map_len, 0.0);
		std::vector<unsigned char> cbuf(map_len, ' ');

		for (int i = 0; i < map_len; i++)
		{
			pred[i] = float(output[i]);
			cbuf[i] = (unsigned char)((output[i]) * 255);
		}

		cv::Mat cbuf_map(final_resize_h, final_resize_w, CV_8UC1, (unsigned char*)cbuf.data());
		cv::Mat pred_map(final_resize_h, final_resize_w, CV_32F, (float*)pred.data());

		// Binarize at probability 0.3 (scaled to the 8-bit range).
		const double threshold = 0.3 * 255;
		const double maxvalue = 255;
		cv::Mat bit_map;
		cv::threshold(cbuf_map, bit_map, threshold, maxvalue, cv::THRESH_BINARY);

		// Optional 2x2 dilation to thicken thin regions before box extraction.
		if (m_info.dilation)
		{
			cv::Mat dila_ele =
				cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2, 2));
			cv::dilate(bit_map, bit_map, dila_ele);
		}
		auto p_start = std::chrono::high_resolution_clock::now();

		//cv::imshow("bit_map", bit_map);
		//cv::waitKey(0);
		BoxesFromBitmap(pred_map, bit_map, final_resize_h, final_resize_w, m_info, unit);

		auto p_end = std::chrono::high_resolution_clock::now();
		float total_inf = std::chrono::duration<float, std::milli>(p_end - p_start).count();
		std::cout << "BoxesFromBitmap take: " << total_inf << " ms." << std::endl;
	}

}//txr_algo_dlm_seg