#include "YOLOv6.h"
#include "common\common.hpp"
#include "common\gpu_utility.h"


namespace txr_algo_dlm_rec
{
	// Read the entire contents of a binary file into memory.
	// Returns an empty vector when the file cannot be opened or is empty
	// (the old version only asserted, which is compiled out in release
	// builds and then read through a bad stream).
	std::vector<char> get_file_data(const std::string& file_path)
	{
		std::ifstream file(file_path, std::ios_base::binary);
		if (!file.good())
		{
			std::cout << "cannot open file: " << file_path << std::endl;
			return std::vector<char>();
		}
		file.seekg(0, std::ios_base::end);
		const std::streamoff file_len = file.tellg();
		if (file_len <= 0)
		{
			return std::vector<char>();
		}
		file.seekg(0, std::ios_base::beg);

		std::vector<char> data(static_cast<size_t>(file_len), 0);
		file.read(data.data(), file_len);
		return data;
	}

	void decrypt_data(std::vector<char> data)
	{
		char trans = 55;
		for (int i = 0; i < data.size(); ++i)
		{
			data[i] -= trans;
			trans += 11;
		}
	}
	// Load and decrypt a model file. After decryption the layout is:
	// [st_encrypt_info header][engine blob of info.engine_size bytes].
	// info:        receives the decrypted header.
	// engine_data: receives the serialized engine bytes.
	// Returns false on truncated or malformed input; only warns (does not
	// fail) when the engine was built for a different GPU model.
	bool decryptFile(std::string file_path, st_encrypt_info& info, std::vector<char>& engine_data)
	{
		std::vector<char> file_data = get_file_data(file_path);
		// Size check first: it does not depend on the (byte-wise) decryption.
		if (file_data.size() <= sizeof(st_encrypt_info))
		{
			std::cout << "file data too small!\n";
			return false;
		}
		decrypt_data(file_data);

		// Copy the header out instead of dereferencing a cast pointer —
		// avoids alignment / strict-aliasing problems.
		memcpy(&info, file_data.data(), sizeof(st_encrypt_info));
		if (info.engine_size + sizeof(st_encrypt_info) != file_data.size())
		{
			std::cout << "file data is bad format!\n";
			return false;
		}

		std::string gpu_model = GetGPUModel();
		std::string build_gpu_model = info.gpu_model;
		if (gpu_model != build_gpu_model)
		{
			std::cout << "gpu model warning: build is: " << build_gpu_model << " target is: " << gpu_model << std::endl;
		}

		// Everything after the header is the engine blob.
		engine_data.assign(file_data.begin() + sizeof(st_encrypt_info), file_data.end());
		return true;
	}

	bool set_gpu_id(const int id = 0)
	{
		cudaError_t status = cudaSetDevice(id);
		if (status != cudaSuccess)
		{
			std::cout << "gpu id :" + std::to_string(id) + " not exist !" << std::endl;
			return false;
		}
		return true;
	}
	//===================================================================================


Detector::Detector()
{
	// Create the CUDA stream used by Detect() for async enqueue/copies.
	// The result was previously ignored; log a failure so a broken CUDA
	// context is visible instead of crashing later in Detect().
	cudaError_t status = cudaStreamCreate(&m_stream);
	if (status != cudaSuccess)
	{
		std::cout << "cudaStreamCreate failed, error code: " << status << std::endl;
	}
}

Detector::~Detector()
{
	// Drain any in-flight async work, release the TensorRT objects and
	// device buffers, and only then destroy the stream. (The old order
	// destroyed the stream first, before Reset() freed the buffers that
	// async copies on that stream may still have been using.)
	cudaStreamSynchronize(m_stream);
	Reset();
	cudaStreamDestroy(m_stream);
}

void Detector::Reset()
{
	if (m_context)
	{
		m_context->destroy();
	}
	if (m_engine)
	{
		m_engine->destroy();
	}
	for (int i = 0; i < m_v_dev_buffer.size(); ++i)
	{
		if (m_v_dev_buffer[i])
		{
			cudaFree(m_v_dev_buffer[i]);
			m_v_dev_buffer[i] = 0;
		}
	}
	m_v_dev_buffer.clear();
}

// Bind to the requested GPU, decrypt the model file at hdats_path and
// deserialize the TensorRT engine. Returns false on any failure.
bool Detector::Init(int gpu_id, const char* hdats_path)
{
	if (!set_gpu_id(gpu_id))
	{
		return false;
	}
	std::vector<char> v_engine_data;
	if (!decryptFile(std::string(hdats_path), m_info, v_engine_data))
	{
		return false;
	}
	return LoadEngine(v_engine_data);
}

// Batch size the loaded engine was built with (taken from the decrypted
// model header filled in by Init()/decryptFile()).
int  Detector::BatchSize()
{
	return m_info.batch_size;
}

// Deserialize the engine blob, create an execution context, fix the input
// binding dimensions to the build-time shape from the model header, and
// allocate the input/output device buffers.
// Returns false when deserialization fails.
bool Detector::LoadEngine(std::vector<char> v_engine_data)
{
	Reset();

	// The runtime is only needed for deserialization; release it afterwards
	// on BOTH paths (it used to be leaked).
	nvinfer1::IRuntime* trtRuntime = nvinfer1::createInferRuntime(sample::gLogger.getTRTLogger());
	m_engine = trtRuntime->deserializeCudaEngine(v_engine_data.data(), v_engine_data.size(), nullptr);
	trtRuntime->destroy();
	if (m_engine == nullptr)
	{
		std::cout << "deserialize fail" << std::endl;
		return false;
	}
	m_context = m_engine->createExecutionContext();
	assert(m_context != nullptr);

	// This model has exactly one input (binding 0) and one output (binding 1).
	assert(m_engine->getNbBindings() == 2);
	int nbBindings = m_engine->getNbBindings();
	m_v_dev_buffer.resize(nbBindings);
	m_v_dev_buf_size.resize(nbBindings);

	// Input buffer: batch x C x H x W floats.
	m_v_dev_buf_size[0] =
		m_info.batch_size *
		m_info.input_channel * m_info.image_height * m_info.image_width;
	cudaMalloc(&m_v_dev_buffer[0], m_v_dev_buf_size[0] * sizeof(float));

	// Pin the (dynamic) input binding to the shape the engine was built for.
	const int inputIndex = 0;
	m_context->setOptimizationProfile(0);
	auto in_dims = m_context->getBindingDimensions(inputIndex);
	in_dims.d[0] = m_info.batch_size;
	in_dims.d[1] = m_info.input_channel;
	in_dims.d[2] = m_info.image_height;
	in_dims.d[3] = m_info.image_width;
	m_context->setBindingDimensions(inputIndex, in_dims);

	// Output size/shape are queried AFTER the input dims are set, so the
	// context reports the resolved output dimensions.
	const int outputIndex = 1;
	auto out_dims = m_context->getBindingDimensions(outputIndex);
	m_out_size = 1;
	m_v_predict_shape.clear();
	for (int j = 0; j < out_dims.nbDims; j++)
	{
		m_out_size *= out_dims.d[j];
		m_v_predict_shape.push_back(out_dims.d[j]);
	}
	m_v_dev_buf_size[1] = m_out_size;
	cudaMalloc(&m_v_dev_buffer[1], m_v_dev_buf_size[1] * sizeof(float));

	return true;
}

// CPU fallback preprocessing (used when USE_RESIZE_GPU is not defined):
// rebuilds an interleaved 8-bit BGR image from the planar float input,
// resizes it keeping aspect ratio at the fixed network height (width capped
// at the network width), normalizes to [-1, 1] via (x - 0.5) / 0.5, pads on
// the right with zeros to the network width, and writes NCHW floats.
// Fixes from review: the loop index `i` was shadowed at three nesting
// levels; `resize_h`, `img_size` and `out_img` were declared but never used.
std::vector<float> Detector::prepareImage(st_dlm_data* p_imgs, int num)
{
	std::vector<float> result(m_info.batch_size * m_info.image_width * m_info.image_height * m_info.input_channel);
	float* data = result.data();
	for (int n = 0; n < num; ++n)
	{
		image_t & img = p_imgs[n].img;

		// Planar float RGB -> interleaved 8-bit BGR Mat.
		cv::Mat mat;
		mat.create(img.h, img.w, CV_8UC3);
		int index = 0;
		for (int row = 0; row < mat.rows; ++row)
		{
			for (int col = 0; col < mat.cols; ++col)
			{
				cv::Vec3b rgb;
				rgb[2] = img.rgbf[0][index] * 255.f;
				rgb[1] = img.rgbf[1][index] * 255.f;
				rgb[0] = img.rgbf[2][index] * 255.f;
				mat.at<cv::Vec3b>(row, col) = rgb;
				index++;
			}
		}

		// Aspect-ratio-preserving width at the fixed network height.
		float ratio = float(mat.cols) / float(mat.rows);
		int resize_w;
		if (ceilf(m_info.image_height * ratio) > m_info.image_width)
			resize_w = m_info.image_width;
		else
			resize_w = int(ceilf(m_info.image_height * ratio));

		cv::Mat rsz_img;
		cv::resize(mat, rsz_img, cv::Size(resize_w, m_info.image_height), 0.f, 0.f,
			cv::INTER_LINEAR);
		rsz_img.convertTo(rsz_img, CV_32FC3, 1.0 / 255.f, 0);

		// HWC -> CHW with per-channel normalization and right zero-padding.
		std::vector<cv::Mat> split_img(m_info.input_channel);
		cv::split(rsz_img, split_img);

		int channelLength = m_info.image_width * m_info.image_height;
		for (int c = 0; c < m_info.input_channel; ++c)
		{
			split_img[c] = (split_img[c] - 0.5) / 0.5;
			cv::copyMakeBorder(split_img[c], split_img[c], 0, 0, 0,
				int(m_info.image_width - split_img[c].cols), cv::BORDER_CONSTANT, { 0, 0, 0 });
			memcpy(data, split_img[c].data, channelLength * sizeof(float));
			data += channelLength;
		}
	}
	return result;
}
#define USE_RESIZE_GPU

// Run one batched inference pass: preprocess into the device input buffer
// (GPU resize path when USE_RESIZE_GPU is defined, CPU prepareImage()
// otherwise), enqueue the TensorRT context on m_stream, copy the output
// back, and decode it with postProcess().
// p_imgs: num input images (1 <= num <= engine batch size); unused batch
// slots are filled by re-submitting p_imgs[0].
// v_results: receives one recognition result per input image.
//
// NOTE(review): the std::swap(...) calls below reorder the colour planes of
// the CALLER'S image buffers in place and never restore them. When
// num < batch_size, p_imgs[0] is processed on every padding iteration, so
// its plane order alternates between passes — confirm whether callers rely
// on the input buffers being unmodified and whether the padded slots are
// fed consistent channel order.
void Detector::Detect(st_dlm_data* p_imgs, int num, std::vector<st_result_info> &v_results)
{
	int batch_size = m_info.batch_size;
	assert(num >0 && num <= batch_size);

	// Device-side input tensor (allocated in LoadEngine).
	float* dlm_input_img = (float*)m_v_dev_buffer[0];

	int net_w = m_info.image_width,
		net_h = m_info.image_height,
		net_c = m_info.input_channel;

#ifdef USE_RESIZE_GPU
	// Grow the per-slot GPU resize workspaces when batch_size increases.
	// NOTE(review): all existing workspaces are freed before the resize;
	// this assumes CuImgResizeFree/CuImgResizeMalloc cope with the freed
	// entries that survive the resize — verify in gpu_utility.
	if (_v_resize_space.size() < batch_size)
	{
		for (auto i : _v_resize_space)
		{
			CuImgResizeFree(i);
		}
		_v_resize_space.resize(batch_size);
	}
	//auto r_start = std::chrono::high_resolution_clock::now();
	{
		// Normalization (x - mean) / dev applied on the GPU during resize.
		float mean[3] = { 0.5,0.5,0.5 };
		float dev[3] = { 0.5,0.5,0.5 };
		for (int i = 0; i < batch_size; ++i)
		{
			// Pad unused batch slots with the first image.
			st_dlm_data& data = i<num ? p_imgs[i]: p_imgs[0];
			image_t & img = data.img;
			// Aspect-ratio-preserving width at the fixed network height,
			// capped at the network width (same rule as prepareImage).
			float ratio = float(img.w) / float(img.h);
			int resize_w, resize_h;
			if (ceilf(m_info.image_height * ratio) > m_info.image_width)
				resize_w = m_info.image_width;
			else
				resize_w = int(ceilf(m_info.image_height * ratio));
			resize_h = m_info.image_height;

			// Workspace sized to the largest square covering both the
			// source image and the network input.
			st_cuda_resize_dev_space & space = _v_resize_space[i];
			int max_line_size = __max(__max(img.w, net_w), __max(img.h, net_h));
			CuImgResizeMalloc(space, max_line_size * max_line_size);

			// Resize output goes straight into this slot of the input tensor.
			space.img_rgbf_net = dlm_input_img + i * net_w * net_h * net_c;

			// Swap planes 0 and 2 (in place, in the caller's buffer — see
			// the NOTE above), then dispatch by source pixel format.
			if (img.rgb8)
			{
				std::swap(img.rgb8[0], img.rgb8[2]);
				CuImgResizeRgb8(img.rgb8, img.w, img.h, resize_w, resize_h, space, net_w, net_h, mean, dev);
			}
			else if (img.rgb16)
			{
				std::swap(img.rgb16[0], img.rgb16[2]);
				CuImgResizeRgb16(img.rgb16, img.w, img.h, resize_w, resize_h, space, net_w, net_h, mean, dev);
			}
			else if (img.rgbf)
			{
				std::swap(img.rgbf[0], img.rgbf[2]);
				CuImgResizeRgbF(img.rgbf, img.w, img.h, resize_w, resize_h, space, net_w, net_h, mean, dev);
			}
		}
	}
#else
	// CPU preprocessing fallback.
	std::vector<float> buffer = prepareImage(p_imgs, num);
	cudaMemcpy(m_v_dev_buffer[0], buffer.data(), buffer.size() * sizeof(float), cudaMemcpyHostToDevice);
#endif 
	// Disabled debug dump: copies the first preprocessed slot back from the
	// device and writes it out as resize_<n>.png for visual inspection.
	if (false)
	{
		static int test_count = 0;
		{
			using namespace cv;
			std::vector<float> v_img;
			v_img.resize(net_w * net_h * 3);
			cudaMemcpy(v_img.data(), m_v_dev_buffer[0], net_w * net_h * 3 * sizeof(float), cudaMemcpyDeviceToHost);
			Mat mat;
			mat.create(net_h, net_w, CV_8UC3);
			int img_size = mat.rows * mat.cols;
			int index = 0;
			for (int i = 0; i < mat.rows; ++i)
			{
				for (int j = 0; j < mat.cols; ++j)
				{
					Vec3b rgb;
					rgb[2] = (v_img[index]) * 255.f;
					rgb[1] = (v_img[index + img_size]) * 255.f;
					rgb[0] = (v_img[index + img_size + img_size]) * 255.f;
					mat.at<Vec3b>(i, j) = rgb;
					index++;
				}
			}
			test_count++;
			imwrite("resize_" + std::to_string(test_count) + ".png", mat);
		}
	}

	// Host-side output buffer, sized on first use / batch change.
	if (m_v_out_buf.size() != m_out_size * batch_size)
	{
		m_v_out_buf.resize(m_out_size * batch_size);
	}
	//auto r_end = std::chrono::high_resolution_clock::now();
	//{
		//float total_inf = std::chrono::duration<float, std::milli>(r_end - r_start).count();
		//std::cout << "Resize take: " << total_inf << " ms." << std::endl;
	//}
	
	//auto t_start = std::chrono::high_resolution_clock::now();
	// Async inference + D2H copy on m_stream, then block until both finish.
	void** bindings = &m_v_dev_buffer[0];
	m_context->enqueueV2(bindings, m_stream, NULL);
	cudaMemcpyAsync(m_v_out_buf.data(), m_v_dev_buffer[1], m_v_dev_buf_size[1]*sizeof(float), cudaMemcpyDeviceToHost, m_stream);
	cudaStreamSynchronize(m_stream);

	//auto t_end = std::chrono::high_resolution_clock::now();
	//float total_inf = std::chrono::duration<float, std::milli>(t_end - t_start).count();
	//std::cout << "Inference take: " << total_inf << " ms." << std::endl;

	//auto p_start = std::chrono::high_resolution_clock::now();
	v_results = postProcess(p_imgs, num, m_v_out_buf.data(), m_out_size);
	//auto p_end = std::chrono::high_resolution_clock::now();
	//float total_p = std::chrono::duration<float, std::milli>(p_end - p_start).count();
	//std::cout << "PostProcess take: " << total_p << " ms." << std::endl;
}

// Greedy CTC-style decoding of the network output.
// output is laid out as [batch][time step][class score]; for every time
// step we take the best class, then drop the blank class (index 0) and
// collapse consecutive repeats. Surviving labels are stored (shifted by -1
// to skip the blank) together with their scores, up to CODE_MAX_NUM.
std::vector<st_result_info> Detector::postProcess(st_dlm_data* p_imgs, int num, float* output, const int& outSize)
{
	using namespace txr_algo_dlm_rec;
	std::vector<st_result_info> vec_result;

	const int seq_len = m_v_predict_shape[1];
	const int class_num = m_v_predict_shape[2];
	const int sample_count = __min(m_v_predict_shape[0], num);

	for (int b = 0; b < sample_count; ++b)
	{
		st_result_info result;

		int prev_idx = 0;
		for (int t = 0; t < seq_len; ++t)
		{
			float* step_begin = &output[(b * seq_len + t) * class_num];
			float* step_end = &output[(b * seq_len + t + 1) * class_num];
			const int best_idx = int(argmax(step_begin, step_end));
			const float best_prob = float(*std::max_element(step_begin, step_end));

			// Keep it unless it is the blank (0) or repeats the previous step.
			const bool is_repeat = (t > 0 && best_idx == prev_idx);
			if (best_idx > 0 && !is_repeat)
			{
				if (result.vaild_num < CODE_MAX_NUM)
				{
					st_rec_code& code = result.code[result.vaild_num];
					code.prob = best_prob;
					code.code_label = best_idx - 1;
					result.vaild_num++;
				}
			}
			prev_idx = best_idx;
		}
		vec_result.push_back(result);
	}
	return vec_result;
}


}//namespace txr_algo_dlm_cls