#include "yolov5.h"
//#include "cuda_utils.h"


// Construct an inference wrapper with no resources held; everything real
// (host buffers, TensorRT objects, CUDA stream/device buffers) is created
// later by init() + loadModel().
YOLOv5Inference::YOLOv5Inference() {
	// Host-side staging buffers are allocated in loadModel().
	data = nullptr;
	prob = nullptr;
	// TensorRT objects are deserialized in loadModel().
	runtime = nullptr;
	engine = nullptr;
	context = nullptr;
	// No CUDA stream / device buffers exist until loadModel() succeeds.
	hasCreate = false;
}

// Release everything loadModel() acquired, in reverse order of creation:
// host buffers, then the CUDA stream/device buffers, then the TensorRT
// context -> engine -> runtime chain.
YOLOv5Inference::~YOLOv5Inference() {
	if (data != nullptr) {
		delete[] data;
		data = nullptr;
	}
	if (prob != nullptr) {
		delete[] prob;
		prob = nullptr;
	}
	// Stream and device buffers only exist if loadModel() ran to completion.
	if (hasCreate) {
		// FIX: was a bare cudaStreamDestroy(stream) — now checked like the
		// neighboring cudaFree calls so a failure is not silently dropped.
		CUDA_CHECK(cudaStreamDestroy(stream));
		CUDA_CHECK(cudaFree(buffers[inputIndex]));
		CUDA_CHECK(cudaFree(buffers[outputIndex]));
	}

	// Destroy TensorRT objects; the context must go before the engine,
	// and the engine before the runtime that deserialized it.
	if (context != nullptr) {
		context->destroy();
		context = nullptr;
	}
	if (engine != nullptr) {
		engine->destroy();
		engine = nullptr;
	}
	if (runtime != nullptr) {
		runtime->destroy();
		runtime = nullptr;
	}
}

// Set default inference parameters. Call before loadModel(); the set_*()
// methods can override any of these afterwards.
void YOLOv5Inference::init() {
	// NOTE(review): this #define appears after every #include in this
	// translation unit and no #ifdef USE_FP16 is visible below it, so it
	// likely has no effect here — it would matter at engine *build* time,
	// not at deserialization time. Confirm intent before removing.
	#define USE_FP16
	gpu_device_ = 0;       // CUDA device index used by loadModel()
	nms_thresh_ = 0.4;     // IoU threshold passed to nms() in detect()
	conf_thresh_ = 0.2;    // confidence threshold passed to nms()
	batch_size_ = 1;       // detect() fills exactly one batch slot
	input_img_w_ = 512;    // network input width (letterboxed)
	input_img_h_ = 512;    // network input height (letterboxed)
	class_num_ = 1;

	// Binding names looked up in the engine by loadModel().
	input_blob_name_ = std::string("data");
	output_blob_name_ = std::string("prob");

	// Floats per image in the raw output: the packed Detection array plus
	// one leading count element.
	output_size_ =Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;
}

// Deserialize a serialized TensorRT engine file and prepare all inference
// resources: host staging buffers, device bindings, and a CUDA stream.
// Requires init() to have been called first (sizes, binding names).
// Returns false if the file cannot be opened, true on success.
// BUG FIX: the original was declared bool but fell off the end without a
// return statement on the success path — undefined behavior.
bool YOLOv5Inference::loadModel(std::string _engine_name_) {
	engine_name = _engine_name_;
	cudaSetDevice(gpu_device_);

	// Read the whole serialized engine into a temporary host buffer.
	std::ifstream file(engine_name, std::ios::binary);
	if (!file.good()) {
		std::cerr << "read " << engine_name << " error!" << std::endl;
		return false;
	}
	file.seekg(0, file.end);
	size_t size = file.tellg();
	file.seekg(0, file.beg);
	char* trtModelStream = new char[size];
	assert(trtModelStream);
	file.read(trtModelStream, size);
	file.close();

	// Host-side staging buffers for input pixels and raw output.
	// delete[] first so a repeated loadModel() call does not leak.
	delete[] data;
	data = new float[batch_size_ * 3 * input_img_h_ * input_img_w_];
	delete[] prob;
	prob = new float[batch_size_ * output_size_];

	// Deserialize: runtime -> engine -> execution context.
	runtime = nvinfer1::createInferRuntime(gLogger);
	assert(runtime != nullptr);
	engine = runtime->deserializeCudaEngine(trtModelStream, size);
	assert(engine != nullptr);
	context = engine->createExecutionContext();
	assert(context != nullptr);
	std::cout << "tensorRT allocate: " << engine->getWorkspaceSize() << std::endl;
	delete[] trtModelStream;

	// Resolve the two expected bindings by name; indices are guaranteed to
	// be < getNbBindings().
	assert(engine->getNbBindings() == 2);
	inputIndex = engine->getBindingIndex(input_blob_name_.c_str());
	outputIndex = engine->getBindingIndex(output_blob_name_.c_str());
	assert(inputIndex == 0);
	assert(outputIndex == 1);

	// Device buffers matching the host staging buffers, plus the stream
	// used for the async copy/enqueue/copy pipeline in doInference().
	CUDA_CHECK(cudaMalloc(&buffers[inputIndex], batch_size_ * 3 * input_img_h_ * input_img_w_ * sizeof(float)));
	CUDA_CHECK(cudaMalloc(&buffers[outputIndex], batch_size_ * output_size_ * sizeof(float)));
	CUDA_CHECK(cudaStreamCreate(&stream));
	hasCreate = true;
	return true;
}
 
// Run one image through the network and return the post-NMS detections,
// with boxes mapped back to the original image's coordinates.
// Requires loadModel() to have succeeded (data/prob/buffers/stream valid).
// FIX: removed dead timing code (start/end/costTime were computed and never
// used) and the always-zero batch offset; use 255.0f to keep the
// normalization in single precision.
std::vector<YOLOResult> YOLOv5Inference::detect(cv::Mat img) {
	std::vector<YOLOResult> results;
	// The network expects 3-channel input; promote grayscale frames.
	if (img.channels() != 3) {
		cv::cvtColor(img, img, cv::COLOR_GRAY2BGR);
	}
	// Letterbox-resize to the network input dimensions.
	cv::Mat pr_img = preprocess_img(img, input_img_w_, input_img_h_);

	// Repack interleaved HWC/BGR uchar pixels into planar CHW/RGB floats
	// in [0,1]. Row base uses pr_img.step to honor any row padding.
	const int plane = input_img_h_ * input_img_w_;
	int i = 0;
	for (int row = 0; row < input_img_h_; ++row) {
		uchar* uc_pixel = pr_img.data + row * pr_img.step;
		for (int col = 0; col < input_img_w_; ++col) {
			data[i] = (float)uc_pixel[2] / 255.0f;              // R plane
			data[i + plane] = (float)uc_pixel[1] / 255.0f;      // G plane
			data[i + 2 * plane] = (float)uc_pixel[0] / 255.0f;  // B plane
			uc_pixel += 3;
			++i;
		}
	}

	// H2D copy, enqueue, D2H copy; blocks until the raw output is in prob.
	doInference(*context, stream, buffers, data, prob, batch_size_);

	// Decode raw output: confidence filtering + non-maximum suppression.
	std::vector<Yolo::Detection> res;
	nms(res, prob, conf_thresh_, nms_thresh_);

	results.reserve(res.size());
	for (size_t j = 0; j < res.size(); j++) {
		// Undo the letterbox transform to get a rect in original-image space.
		cv::Rect r = get_rect(img, res[j].bbox, input_img_w_, input_img_h_);
		YOLOResult result;
		result.class_id = res[j].class_id;
		result.conf = res[j].conf;
		result.objectRect = r;
		results.push_back(result);
	}
	return results;
}

// Scale a channel count by the width multiple gw and round the result up
// to the nearest multiple of divisor (YOLOv5 model-scaling helper).
int YOLOv5Inference::get_width(int x, float gw, int divisor) {
	float scaled = x * gw;
	int groups = int(ceil(scaled / divisor));
	return groups * divisor;
}

// Scale a layer-repeat count by the depth multiple gd, rounding to the
// nearest integer with a round-half-to-even adjustment, never below 1.
// (Float expressions kept exactly as before — the halfway comparison is
// sensitive to evaluation precision.)
int YOLOv5Inference::get_depth(int x, float gd) {
	if (x == 1) {
		return 1;
	}
	int scaled = round(x * gd);
	// Exactly-halfway values sitting on an even floor get rounded down.
	if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) {
		--scaled;
	}
	return std::max<int>(scaled, 1);
}

// Copy `input` host->device, run one batched inference on `stream`, and
// copy the raw network output back into `output`; blocks until done.
// Assumes buffers[0]/buffers[1] are the device input/output bindings sized
// as allocated in loadModel(), and input/output are host buffers of
// matching size.
void YOLOv5Inference::doInference(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
	// DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
	CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * input_img_h_ * input_img_w_ * sizeof(float), cudaMemcpyHostToDevice, stream));
	context.enqueue(batchSize, buffers, stream, nullptr);
	CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * output_size_ * sizeof(float), cudaMemcpyDeviceToHost, stream));
	// FIX: the sync result was previously ignored; async copy/execution
	// errors surface here, so check it like every other CUDA call.
	CUDA_CHECK(cudaStreamSynchronize(stream));
}

void YOLOv5Inference::set_gpu_device_(int _gpu_device_) {
	gpu_device_ = _gpu_device_;
}

void YOLOv5Inference::set_nms_thresh(float _nms_thresh_) {
	nms_thresh_ = _nms_thresh_;
}

void YOLOv5Inference::set_conf_thresh(float _conf_thresh_) {
	conf_thresh_ = _conf_thresh_;
}

void YOLOv5Inference::set_batch_size(int _batch_size_) {
	batch_size_ = _batch_size_;
}

void YOLOv5Inference::set_input_img_w(int _input_img_w_) {
	input_img_w_ = _input_img_w_;
}

void YOLOv5Inference::set_input_img_h_(int _input_img_h_) {
	input_img_h_ = _input_img_h_;
}

void YOLOv5Inference::set_class_num(int _class_num_) {
	class_num_ = _class_num_;
}



/****************empty****************************/
// Placeholder — single-image classification is not implemented; the input
// is ignored and a constant class id of 1 is reported.
int YOLOv5Inference::classify(cv::Mat img) {
	return 1;
}

// Placeholder — batch classification is not implemented; the inputs are
// ignored and an empty result list is returned.
std::vector<int> YOLOv5Inference::classify(std::vector<cv::Mat> imgs) {
	std::vector<int> labels;
	return labels;
}