#include "modelDetectProc.h"


// Construct with the default TensorRT blob names ("data"/"prob"), a batch
// size of 1, and a 512x512 network input. Callers can override these via the
// onSet* slots before onModelDetect() is invoked.
// Idiom fix: members are set in the initializer list instead of being
// default-constructed and then assigned in the body.
ModelDetectProc::ModelDetectProc()
	: input_blob_name_("data"),
	  output_blob_name_("prob"),
	  batchSize(1),
	  input_imgH(512),
	  input_imgW(512) {
}
// No resources are owned directly by this class (GPU/engine objects are
// created and released inside onModelDetect), so the implicit cleanup is
// sufficient.
ModelDetectProc::~ModelDetectProc() = default;

void ModelDetectProc::onSetInput_H(int _inputH) {
	input_imgH = _inputH;
}

void ModelDetectProc::onSetInput_W(int _inputW) {
	input_imgW = _inputW;
}

void ModelDetectProc::onSetBatchSize(int _batchSize) {
	batchSize = _batchSize;
}

// Deserialize the TensorRT engine file |engine_name|, run batched detection
// over every image in |imgsPath|, draw the resulting boxes, and write the
// annotated images (same file names) into |saveImgsPath|.
// Progress is reported through the Qt signals modelDetectFileCount (total),
// modelDetectCurrValue (per-batch index) and modelDetectComplete (done).
//
// Fixes vs. previous revision:
//  - trtModelStream / data / prob are now std::vector (RAII), so nothing
//    leaks on the early-return paths (the raw new[] engine buffer used to
//    leak when read_files_in_dir failed).
//  - the drawing loop now skips images that fail to load, matching the
//    skip already done in the preprocessing loop.
void ModelDetectProc::onModelDetect(std::string engine_name, std::string imgsPath, std::string saveImgsPath){

	// --- Load the serialized engine into host memory -----------------------
	std::ifstream file(engine_name, std::ios::binary);
	if (!file.good()) {
		std::cerr << "read " << engine_name << " error!" << std::endl;
		return;
	}
	file.seekg(0, file.end);
	size_t size = file.tellg();
	file.seekg(0, file.beg);
	std::vector<char> trtModelStream(size);
	file.read(trtModelStream.data(), size);
	file.close();

	// --- Enumerate input images -------------------------------------------
	std::vector<std::string> file_names;
	if (read_files_in_dir(imgsPath.c_str(), file_names) < 0) {
		std::cerr << "read_files_in_dir failed." << std::endl;
		return;
	}

	emit modelDetectFileCount((int)file_names.size());

	// Host staging buffers: planar CHW float input for a full batch, and the
	// raw network output consumed by nms().
	std::vector<float> data((size_t)batchSize * 3 * input_imgH * input_imgW);
	std::vector<float> prob((size_t)batchSize * OUTPUT_SIZE);

	// --- Build runtime / engine / execution context -----------------------
	IRuntime* runtime = createInferRuntime(gLogger);
	assert(runtime != nullptr);
	ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream.data(), size);
	assert(engine != nullptr);
	IExecutionContext* context = engine->createExecutionContext();
	assert(context != nullptr);
	assert(engine->getNbBindings() == 2);

	// In order to bind the buffers, we need to know the names of the input and
	// output tensors. Indices are guaranteed to be < IEngine::getNbBindings().
	const int inputIndex = engine->getBindingIndex(input_blob_name_.c_str());
	const int outputIndex = engine->getBindingIndex(output_blob_name_.c_str());
	assert(inputIndex == 0);
	assert(outputIndex == 1);

	// Device buffers for the two bindings, plus the stream all work runs on.
	void* buffers[2];
	CUDA_CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * input_imgH * input_imgW * sizeof(float)));
	CUDA_CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
	cudaStream_t stream;
	CUDA_CHECK(cudaStreamCreate(&stream));

	// --- Batched inference loop -------------------------------------------
	// fcount accumulates files until a full batch (or the final partial batch)
	// is staged, then the whole batch is inferred and post-processed at once.
	int fcount = 0;
	for (int f = 0; f < (int)file_names.size(); f++) {
		fcount++;
		if (fcount < batchSize && f + 1 != (int)file_names.size()) continue;
		for (int b = 0; b < fcount; b++) {
			cv::Mat img = cv::imread(imgsPath + "/" + file_names[f - fcount + 1 + b], 1);
			if (img.empty()) continue;
			cv::Mat pr_img = preprocess_img(img, input_imgW, input_imgH); // letterbox BGR to RGB
			// HWC uchar BGR -> CHW float RGB, normalized to [0,1].
			int i = 0;
			for (int row = 0; row < input_imgH; ++row) {
				uchar* uc_pixel = pr_img.data + row * pr_img.step;
				for (int col = 0; col < input_imgW; ++col) {
					data[b * 3 * input_imgH * input_imgW + i] = (float)uc_pixel[2] / 255.0;
					data[b * 3 * input_imgH * input_imgW + i + input_imgH * input_imgW] = (float)uc_pixel[1] / 255.0;
					data[b * 3 * input_imgH * input_imgW + i + 2 * input_imgH * input_imgW] = (float)uc_pixel[0] / 255.0;
					uc_pixel += 3;
					++i;
				}
			}
		}

		// Run inference (timed for the console).
		auto start = std::chrono::system_clock::now();
		doInference(*context, stream, buffers, data.data(), prob.data(), batchSize);
		auto end = std::chrono::system_clock::now();
		std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;

		// Per-image NMS over the raw output.
		std::vector<std::vector<Yolo::Detection>> batch_res(fcount);
		for (int b = 0; b < fcount; b++) {
			auto& res = batch_res[b];
			YOLOV5Net::nms(res, &prob[b * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH);
		}

		// Draw detections on the original-resolution image and save it.
		for (int b = 0; b < fcount; b++) {
			auto& res = batch_res[b];
			cv::Mat img = cv::imread(imgsPath + "/" + file_names[f - fcount + 1 + b]);
			if (img.empty()) continue; // unreadable file was never staged; skip output too
			for (size_t j = 0; j < res.size(); j++) {
				cv::Rect r = YOLOV5Net::get_rect(img, res[j].bbox);
				cv::rectangle(img, r, cv::Scalar(0x27, 0xC1, 0x36), 2);
				std::cout << "img_dir: " << file_names[f - fcount + 1 + b] << std::endl;
				std::cout << "label: " << res[j].class_id << std::endl;
				std::cout << "conf: " << res[j].conf << std::endl;
			}
			cv::imwrite(saveImgsPath + "/" + file_names[f - fcount + 1 + b], img);
		}
		fcount = 0;

		emit modelDetectCurrValue(f);
	}

	// --- Release stream, device buffers, and TensorRT objects -------------
	cudaStreamDestroy(stream);
	CUDA_CHECK(cudaFree(buffers[inputIndex]));
	CUDA_CHECK(cudaFree(buffers[outputIndex]));
	if (context != nullptr) {
		context->destroy();
		context = nullptr;
	}
	if (engine != nullptr) {
		engine->destroy();
		engine = nullptr;
	}
	if (runtime != nullptr) {
		runtime->destroy();
		runtime = nullptr;
	}
	emit modelDetectComplete(true);
}

// DMA one batch of input to the device, enqueue inference asynchronously on
// |stream|, DMA the output back to the host, and block until the stream has
// drained so |output| is safe to read on return.
//
// |input|  must hold batchSize * 3 * input_imgH * input_imgW floats (CHW).
// |output| receives batchSize * OUTPUT_SIZE floats.
// |buffers| are the device bindings: [0] = input, [1] = output.
//
// Fix vs. previous revision: the t1..t4 tick counters were removed — they
// bracketed *asynchronous* launches (so they only measured call latency, not
// actual work) and every print of them was commented out, leaving four
// unused locals.
void ModelDetectProc::doInference(IExecutionContext & context, cudaStream_t & stream, void ** buffers, float * input, float * output, int batchSize) {
	// Host -> device copy of the staged batch.
	CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * input_imgH * input_imgW * sizeof(float), cudaMemcpyHostToDevice, stream));
	// Launch inference on the same stream (implicit-batch API).
	context.enqueue(batchSize, buffers, stream, nullptr);
	// Device -> host copy of the raw detections.
	CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
	// Wait for copy + inference + copy to complete before the caller reads output.
	cudaStreamSynchronize(stream);
}
