#include "recognize_util.h"
#include "common\common.hpp"
#include "common\gpu_utility.h"

#include <algorithm>


namespace txr_algo_dlm_rec
{


    /**
	 * @brief Read the entire contents of a file into memory.
	 *
	 * @param file_path Path of the file to read.
	 * @return std::vector<char> holding the raw file bytes.
	 * @throws std::runtime_error if the file cannot be opened.
	 */
	std::vector<char> get_file_data(const std::string& file_path) {
		std::ifstream file(file_path, std::ios_base::binary);
		if (!file.good()) {
			// assert() disappears in release builds; fail loudly instead of
			// silently reading garbage from a stream that never opened.
			throw std::runtime_error("Failed to open file: " + file_path);
		}
		file.seekg(0, std::ios_base::end);
		// std::streamsize instead of int: int truncates files larger than 2 GB.
		const std::streamsize file_len = file.tellg();
		file.seekg(0, std::ios_base::beg);
		std::vector<char> data(static_cast<size_t>(file_len), 0);
		file.read(data.data(), file_len);
		// No explicit close(): std::ifstream closes itself on destruction (RAII).
		return data;
	}

    /**
     * @brief In-place XOR "decryption" of a byte buffer.
     * @param data Buffer that is decrypted in place; may be empty.
     */
    void decrypt_data(std::vector<char>& data) {
        const char xor_key = 0x1;
        for (char& byte : data) {
            byte ^= xor_key;
        }
    }
	
	    /**
     * @brief 解密文件并提取XML和BIN数据
     * @param merged_path 加密文件路径
     * @param info 存储解密信息的结构体
     * @param xml_data 存储XML数据的向量
     * @param bin_data 存储BIN数据的向量
     * @return 解密成功返回true,失败返回false
     */
    bool decryptFileTo_XB_buffer(
        const std::string& merged_path,
        st_encrypt_info_ovino& info, 
        std::vector<char>& xml_data,
        std::vector<char>& bin_data)
    {
        // 检查文件扩展名是否正确
        if (merged_path.size() < 6 || merged_path.substr(merged_path.size() - 6) != DATS_EXTENSION) {
            throw std::runtime_error("The provided file is not a valid file.");
        }

        // 读取加密的文件数据
        std::vector<char> merged_data = get_file_data(merged_path);

        // 检查数据大小是否足够包含头部信息
        if (merged_data.size() < sizeof(st_encrypt_header)) {
            throw std::runtime_error("Merged data is too small to contain the header.");
        }

        // 读取头部信息
        st_encrypt_header header;
        std::memcpy(&header, merged_data.data(), sizeof(st_encrypt_header));

        // 检查数据大小是否足够包含所有部分
        if (merged_data.size() < sizeof(st_encrypt_header) + header.struct_size + header.xml_size + header.bin_size) {
            throw std::runtime_error("Merged data is too small to contain all parts.");
        }

        // 复制加密信息到info结构体
        std::memcpy(&info, merged_data.data() + sizeof(st_encrypt_header), header.struct_size);

        // 提取并解密XML数据
        xml_data.assign(
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size,
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size
        );
        decrypt_data(xml_data);

        // 提取并解密BIN数据
        bin_data.assign(
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size,
            merged_data.begin() + sizeof(st_encrypt_header) + header.struct_size + header.xml_size + header.bin_size
        );
        decrypt_data(bin_data);

        return true;
    }

	    /**
     * @brief Copy a raw weight buffer into an OpenVINO ov::Tensor.
     *
     * @param weightsBuffer Byte buffer holding the model weights.
     * @return ov::Tensor of element type u8 whose size matches the buffer.
     */
    ov::Tensor vector_to_tensor(const std::vector<char>& weightsBuffer) {
        const size_t byte_count = weightsBuffer.size();
        ov::Tensor u8_tensor(ov::element::u8, {byte_count});
        std::memcpy(u8_tensor.data(), weightsBuffer.data(), byte_count);
        return u8_tensor;
    }

	
	//===================================================================================


	// Default constructor: acquires no resources. The commented-out CUDA
	// stream creation appears to be left over from a previous GPU/TensorRT
	// implementation — confirm before removing the member it refers to.
	Rec_Container::Rec_Container()
	{	
		// cudaStreamCreate(&m_stream);
	}

	// Destructor: releases container state via Reset() (currently a no-op).
	Rec_Container::~Rec_Container()
	{
		// cudaStreamDestroy(m_stream);
		Reset();
	}

	void Rec_Container::Reset(){};

	/**
	 * @brief Initialize the recognizer: decrypt the model file, compile it
	 *        with OpenVINO and create one inference request per batch slot.
	 * 
	 * @param gpu_id GPU device ID (currently unused; device selection is
	 *               delegated to the OpenVINO "AUTO" plugin)
	 * @param input_dats_path Path to the encrypted model file (.hdats)
	 * @return true on success, false on failure
	 */
	bool Rec_Container::Init(
		int gpu_id, 
		const char* input_dats_path)
	{
		std::cout << "Rec_Container::Init" << std::endl;
		if (input_dats_path == nullptr) {
			return false;
		}

		try {
			// Initialize OpenVINO.
			core = ov::Core();

			// Decrypt the model into XML (topology) and BIN (weights) buffers.
			const std::string file_path = input_dats_path;
			std::vector<char> xml_buffer;
			std::vector<char> bin_buffer;

			std::cout << "file_path: " << file_path << std::endl;
			decryptFileTo_XB_buffer(
					file_path,
					m_info,
					xml_buffer,
					bin_buffer);

			// Print the decrypted model metadata.
			std::cout << "Decryption Info:" << std::endl;
			std::cout << "GPU Model: " << m_info.gpu_model << std::endl;
			std::cout << "Batch Size: " << m_info.batch_size << std::endl;
			std::cout << "Input Channels: " << m_info.input_channel << std::endl;
			std::cout << "Image Width: " << m_info.image_width << std::endl;
			std::cout << "Image Height: " << m_info.image_height << std::endl;
			std::cout << "Engine Size: " << m_info.engine_size << std::endl;

			std::string model_xml(xml_buffer.begin(), xml_buffer.end());
			ov::Tensor bin_tensor = vector_to_tensor(bin_buffer);

			std::shared_ptr<ov::Model> model = core.read_model(model_xml, bin_tensor);
			compiled_model = core.compile_model(model, "AUTO");

			// One inference request per batch slot; Detect() runs them one
			// image at a time.
			size_t num_requests = m_info.batch_size;
			infer_requests.resize(num_requests);
			for (size_t i = 0; i < num_requests; ++i) {
				infer_requests[i] = compiled_model.create_infer_request();
			}
		}
		catch (const std::exception& e) {
			// Honor the documented contract (return false on failure) instead
			// of rethrowing from a bool function.
			std::cout << "Rec_Container::Init failed: " << e.what() << std::endl;
			return false;
		}

		// The original fell off the end of a non-void function (undefined
		// behavior); report success explicitly.
		return true;
	}

	/**
	 * @brief Batch size read from the decrypted model info.
	 * @return The model batch size.
	 */
	int  Rec_Container::BatchSize()
	{
		const int batch = m_info.batch_size;
		return batch;
	}

	// NOTE(review): appears to be a placeholder — v_engine_data is ignored,
	// state is reset and success is reported unconditionally. Confirm whether
	// engine loading was intentionally moved into Init().
	bool Rec_Container::LoadEngine(std::vector<char> v_engine_data)
	{
		Reset();
		return true;
	}

	/**
	 * @brief Convert input images to normalized NCHW float buffers for inference.
	 *
	 * Each image is packed from planar float RGB into an interleaved 8-bit
	 * BGR Mat, resized to the network height (keeping aspect ratio, capped at
	 * the network width), normalized to [-1, 1], right-padded with zeros to
	 * the network width and laid out channel-major (NCHW).
	 *
	 * @param p_imgs Input image array.
	 * @param num Number of images in p_imgs.
	 * @param processed_data Output: one float buffer per input image.
	 */
	void Rec_Container::prepareImage(
		st_dlm_data* p_imgs, 
		int num, 
		std::vector<std::vector<float>>& processed_data)
	{
		processed_data.clear();
		processed_data.reserve(num);

		// Process each input image independently.
		for (int img_idx = 0; img_idx < num; ++img_idx)
		{
			image_t & img = p_imgs[img_idx].img;

			// Pack planar float RGB (assumed to be in [0, 1] — TODO confirm
			// with callers) into an interleaved 8-bit BGR Mat.
			cv::Mat mat;
			mat.create(img.h, img.w, CV_8UC3);
			int index = 0;

			// Loop variables renamed: the original inner `i` shadowed the
			// image index of the outer loop.
			for (int row = 0; row < mat.rows; ++row)
			{
				for (int col = 0; col < mat.cols; ++col)
				{
					cv::Vec3b rgb;
					rgb[2] = img.rgbf[0][index] * 255.f; // R channel
					rgb[1] = img.rgbf[1][index] * 255.f; // G channel
					rgb[0] = img.rgbf[2][index] * 255.f; // B channel
					mat.at<cv::Vec3b>(row, col) = rgb;
					index++;
				}
			}

			// Target width keeps the aspect ratio but never exceeds the
			// network input width.
			float ratio = float(mat.cols) / float(mat.rows);
			int resize_w;
			if (ceilf(m_info.image_height * ratio) > m_info.image_width)
				resize_w = m_info.image_width;
			else
				resize_w = int(ceilf(m_info.image_height * ratio));

			cv::Mat rsz_img;

			// Resize and scale pixel values to [0, 1].
			cv::resize(mat, rsz_img, cv::Size(resize_w, m_info.image_height), 0.f, 0.f,
				cv::INTER_LINEAR);
			rsz_img.convertTo(rsz_img, CV_32FC3, 1.0 / 255.f, 0);

			// NOTE(review): cv::split writes rsz_img.channels() planes; this
			// assumes m_info.input_channel == 3 — confirm for other models.
			std::vector<cv::Mat> split_img(m_info.input_channel);
			cv::split(rsz_img, split_img);

			std::vector<float> nchw_data(
				m_info.image_width * 
				m_info.image_height * 
				m_info.input_channel);
			float* data = nchw_data.data();

			// Per channel: normalize to [-1, 1], right-pad with zeros to the
			// full network width, then copy the plane into the NCHW buffer.
			for (int c = 0; c < m_info.input_channel; ++c) 
			{
				split_img[c] = (split_img[c] - 0.5) / 0.5;
				
				cv::copyMakeBorder(
					split_img[c], 
					split_img[c], 
					0, 0, 0,
					int(m_info.image_width - split_img[c].cols), 
					cv::BORDER_CONSTANT, { 0, 0, 0 }
					);
				memcpy(data, split_img[c].data, split_img[c].rows * split_img[c].cols * sizeof(float));
				data += split_img[c].rows * split_img[c].cols;
			}

			// Move instead of copy: the per-image buffer can be large.
			processed_data.push_back(std::move(nchw_data));
		}
	}

	/**
	 * @brief Run recognition on a batch of images.
	 *
	 * @param p_imgs Input image array.
	 * @param num Number of images; must satisfy 0 < num <= BatchSize().
	 * @param v_results Output: one result per input image, in input order.
	 */
	void Rec_Container::Detect(
		st_dlm_data* p_imgs, 
		int num, 
		std::vector<st_result_info> &v_results)
	{
		std::cout << "Rec_Container::Detect Detect size: " << num << std::endl;
		int batch_size = m_info.batch_size;
		assert(num >0 && num <= batch_size);

		int net_w = m_info.image_width,
			net_h = m_info.image_height;

		std::vector<std::vector<float>> processed_data;
		prepareImage(p_imgs, num, processed_data);

		v_results.clear();

		// Iterate over the actual number of images, not batch_size:
		// processed_data holds exactly `num` buffers, so the original
		// `i < batch_size` loop read out of bounds whenever num < batch_size.
		for (int i = 0; i < num; ++i) {
			// NOTE(review): channel count is hard-coded to 3 here — confirm
			// it always matches m_info.input_channel.
			ov::Tensor input_tensor(ov::element::f32, 
									{1, 3, static_cast<size_t>(net_h), static_cast<size_t>(net_w)}, 
									processed_data[i].data());

			infer_requests[i].set_input_tensor(input_tensor);
			infer_requests[i].infer();

			ov::Tensor output_tensor = infer_requests[i].get_output_tensor();
			float* output_data = output_tensor.data<float>();

			// Refresh the output-shape info consumed by postProcess().
			auto shape = output_tensor.get_shape();
			m_v_predict_shape.clear();
			for (auto dim : shape) {
				m_v_predict_shape.push_back(dim);
			}

			// Append this image's result instead of overwriting v_results on
			// every iteration (the original kept only the last image).
			std::vector<st_result_info> one_result =
				postProcess(p_imgs + i, 1, output_data, (int)output_tensor.get_size());
			v_results.insert(v_results.end(), one_result.begin(), one_result.end());
		}
	}

	/**
	 * @brief Greedy decode of the raw network output (CTC-style: index 0 is
	 *        treated as blank and consecutive repeats are collapsed).
	 *
	 * @param p_imgs Input images (unused here; kept for interface compatibility).
	 * @param num Number of images; caps how many output rows are decoded.
	 * @param output Raw network output laid out as m_v_predict_shape
	 *               [batch, sequence, classes].
	 * @param outSize Total number of floats in output (unused here).
	 * @return One st_result_info per decoded output row.
	 */
	std::vector<st_result_info> Rec_Container::postProcess(st_dlm_data* p_imgs, int num, float* output, const int outSize)
	{
		using namespace txr_algo_dlm_rec;
		std::vector<st_result_info> vec_result;

		// std::min replaces the MSVC-only __min macro for portability.
		const int batch = std::min(static_cast<int>(m_v_predict_shape[0]), num);
		for (int m = 0; m < batch; m++)
		{
			st_result_info result;
			
			int last_index = 0;
			for (int n = 0; n < m_v_predict_shape[1]; n++)
			{
				// One sequence step spans m_v_predict_shape[2] class scores;
				// hoist the range bounds instead of recomputing them twice.
				float* step_begin = &output[(m * m_v_predict_shape[1] + n) * m_v_predict_shape[2]];
				float* step_end = &output[(m * m_v_predict_shape[1] + n + 1) * m_v_predict_shape[2]];
				int argmax_idx = int(argmax(step_begin, step_end));
				float max_value = float(*std::max_element(step_begin, step_end));

				// Keep non-blank classes, collapsing consecutive repeats.
				if (argmax_idx > 0 && (!(n > 0 && argmax_idx == last_index)))
				{
					if (result.vaild_num < CODE_MAX_NUM)
					{
						st_rec_code& code = result.code[result.vaild_num];
						code.prob = max_value;
						code.code_label = argmax_idx - 1;  // shift past the blank class
						result.vaild_num++;
					}
				}
				last_index = argmax_idx;
			}
			vec_result.push_back(result);
		}
		return vec_result;
	}

}//namespace txr_algo_dlm_rec