#include "modelTransProc.h"
// Construct with the defaults used by the stock yolov5s transcode path.
ModelTransProc::ModelTransProc() {
	// Default network geometry: 512x512 input, a single detection class,
	// and batch size 1.
	input_img_w_ = 512;
	input_img_h_ = 512;
	class_num_ = 1;
	batch_size_ = 1;

	// Default model scale: standard (non-P6) topology with yolov5s
	// depth/width multipliers.
	is_p6 = false;
	gd = 0.33;
	gw = 0.50;

	// Tensor names used for the engine's input/output bindings.
	input_blob_name_ = "data";
	output_blob_name_ = "prob";
}

// Nothing to release: TensorRT/weight resources are freed inside the
// engine-building functions themselves.
ModelTransProc::~ModelTransProc() = default;

// Build a TensorRT engine for the yolov5 P6 topology (4 detection heads).
//
// @param maxBatchSize  maximum batch size baked into the engine
// @param builder       TensorRT builder (owned by the caller)
// @param config        builder configuration (owned by the caller)
// @param dt            data type of the input tensor (normally kFLOAT)
// @param gd            depth multiplier (scales the number of C3 repeats)
// @param gw            width multiplier (scales channel counts)
// @param wts_name      path of the .wts weight file to load
// @return the built ICudaEngine (caller owns / must destroy), or nullptr on failure
ICudaEngine * ModelTransProc::build_engine_p6(unsigned int maxBatchSize, IBuilder * builder, IBuilderConfig * config, DataType dt, float & gd, float & gw, std::string & wts_name){
	INetworkDefinition* network = builder->createNetworkV2(0U);

	// Create input tensor of shape {3, input_img_h_, input_img_w_} with name input_blob_name_.c_str()
	ITensor* data = network->addInput(input_blob_name_.c_str(), dt, Dims3{ 3, input_img_h_, input_img_w_ });
	assert(data);

	// BUGFIX: previously loadWeights("") was called with an empty path, so the
	// requested weight file was never read on the P6 path. Use wts_name.
	std::map<std::string, Weights> weightMap = YOLOV5Net::loadWeights(wts_name);

	/* ------ yolov5 backbone------ */
	auto focus0 = YOLOV5Net::focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
	auto conv1 = YOLOV5Net::convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
	auto c3_2 = YOLOV5Net::C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
	auto conv3 = YOLOV5Net::convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
	auto c3_4 = YOLOV5Net::C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
	auto conv5 = YOLOV5Net::convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
	auto c3_6 = YOLOV5Net::C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
	auto conv7 = YOLOV5Net::convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
	auto c3_8 = YOLOV5Net::C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
	auto conv9 = YOLOV5Net::convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
	auto spp10 = YOLOV5Net::SPP(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), 3, 5, 7, "model.10");
	auto c3_11 = YOLOV5Net::C3(network, weightMap, *spp10->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.11");

	/* ------ yolov5 head (FPN top-down path with three upsamples) ------ */
	auto conv12 = YOLOV5Net::convBlock(network, weightMap, *c3_11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
	auto upsample13 = network->addResize(*conv12->getOutput(0));
	assert(upsample13);
	upsample13->setResizeMode(ResizeMode::kNEAREST);
	upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
	ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
	auto cat14 = network->addConcatenation(inputTensors14, 2);
	auto c3_15 = YOLOV5Net::C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");

	auto conv16 = YOLOV5Net::convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
	auto upsample17 = network->addResize(*conv16->getOutput(0));
	assert(upsample17);
	upsample17->setResizeMode(ResizeMode::kNEAREST);
	upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
	ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
	auto cat18 = network->addConcatenation(inputTensors18, 2);
	auto c3_19 = YOLOV5Net::C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");

	auto conv20 = YOLOV5Net::convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
	auto upsample21 = network->addResize(*conv20->getOutput(0));
	assert(upsample21);
	upsample21->setResizeMode(ResizeMode::kNEAREST);
	upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
	// Renamed from inputTensors21 for consistency with the cat22 layer it feeds.
	ITensor* inputTensors22[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
	auto cat22 = network->addConcatenation(inputTensors22, 2);
	auto c3_23 = YOLOV5Net::C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");

	/* ------ bottom-up (PAN) path ------ */
	auto conv24 = YOLOV5Net::convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
	ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
	auto cat25 = network->addConcatenation(inputTensors25, 2);
	auto c3_26 = YOLOV5Net::C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");

	auto conv27 = YOLOV5Net::convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
	ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
	auto cat28 = network->addConcatenation(inputTensors28, 2);
	auto c3_29 = YOLOV5Net::C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");

	auto conv30 = YOLOV5Net::convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
	ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
	auto cat31 = network->addConcatenation(inputTensors31, 2);
	auto c3_32 = YOLOV5Net::C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");

	/* ------ detect: 1x1 convs producing 3 anchors x (class_num_ + 5) channels per scale ------ */
	IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
	IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
	IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
	IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);

	auto yolo = YOLOV5Net::addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3});
	yolo->getOutput(0)->setName(output_blob_name_.c_str());
	network->markOutput(*yolo->getOutput(0));

	// Build engine
	builder->setMaxBatchSize(maxBatchSize);
	config->setMaxWorkspaceSize(16 * (1 << 20));  // 16MB
#if defined(USE_FP16)
	config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
	std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
	assert(builder->platformHasFastInt8());
	config->setFlag(BuilderFlag::kINT8);
	Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, input_img_w_, input_img_h_, "./coco_calib/", "int8calib.table", input_blob_name_.c_str());
	config->setInt8Calibrator(calibrator);
#endif

	std::cout << "Building engine, please wait for a while..." << std::endl;
	ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
	// Verify the build actually produced an engine before reporting success.
	assert(engine != nullptr);
	std::cout << "Build engine successfully!" << std::endl;

	// Don't need the network any more
	network->destroy();

	// Release host memory allocated by loadWeights
	for (auto& mem : weightMap)
	{
		free((void*)(mem.second.values));
	}

	return engine;
}

// Build a TensorRT engine for the standard yolov5 topology (3 detection heads).
//
// @param maxBatchSize  maximum batch size baked into the engine
// @param builder       TensorRT builder (owned by the caller)
// @param config        builder configuration (owned by the caller)
// @param dt            data type of the input tensor (normally kFLOAT)
// @param gd            depth multiplier (scales the number of C3 repeats)
// @param gw            width multiplier (scales channel counts)
// @param wts_name      path of the .wts weight file to load
// @return the built ICudaEngine (caller owns / must destroy)
ICudaEngine * ModelTransProc::build_engine(unsigned int maxBatchSize, IBuilder * builder, IBuilderConfig * config, DataType dt, float & gd, float & gw, std::string & wts_name){
	INetworkDefinition* network = builder->createNetworkV2(0U);

	// Create input tensor of shape {3, input_img_h_, input_img_w_} with name input_blob_name_.c_str()
	ITensor* data = network->addInput(input_blob_name_.c_str(), dt, Dims3{ 3, input_img_h_, input_img_w_ });
	assert(data);

	std::map<std::string, Weights> weightMap = YOLOV5Net::loadWeights(wts_name);

	/* ------ yolov5 backbone------ */
	auto focus0 = YOLOV5Net::focus(network, weightMap, *data, 3, get_width(64, gw), 3, "model.0");
	auto conv1 = YOLOV5Net::convBlock(network, weightMap, *focus0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
	auto bottleneck_CSP2 = YOLOV5Net::C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
	auto conv3 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
	auto bottleneck_csp4 = YOLOV5Net::C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(9, gd), true, 1, 0.5, "model.4");
	auto conv5 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
	auto bottleneck_csp6 = YOLOV5Net::C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
	auto conv7 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
	auto spp8 = YOLOV5Net::SPP(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, 9, 13, "model.8");

	/* ------ yolov5 head (FPN top-down path) ------ */
	auto bottleneck_csp9 = YOLOV5Net::C3(network, weightMap, *spp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.9");
	auto conv10 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");

	// Nearest-neighbour upsample to the spatial size of the csp6 feature map,
	// then concatenate with it along the channel axis.
	auto upsample11 = network->addResize(*conv10->getOutput(0));
	assert(upsample11);
	upsample11->setResizeMode(ResizeMode::kNEAREST);
	upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());

	ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
	auto cat12 = network->addConcatenation(inputTensors12, 2);
	auto bottleneck_csp13 = YOLOV5Net::C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
	auto conv14 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");

	// Second upsample, matched to the csp4 feature map.
	auto upsample15 = network->addResize(*conv14->getOutput(0));
	assert(upsample15);
	upsample15->setResizeMode(ResizeMode::kNEAREST);
	upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());

	ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
	auto cat16 = network->addConcatenation(inputTensors16, 2);

	auto bottleneck_csp17 = YOLOV5Net::C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");

	/* ------ detect: 1x1 convs producing 3 anchors x (class_num_ + 5) channels per scale,
	   interleaved with the bottom-up (PAN) path ------ */
	IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
	auto conv18 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
	ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
	auto cat19 = network->addConcatenation(inputTensors19, 2);
	auto bottleneck_csp20 = YOLOV5Net::C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
	IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
	auto conv21 = YOLOV5Net::convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
	ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
	auto cat22 = network->addConcatenation(inputTensors22, 2);
	auto bottleneck_csp23 = YOLOV5Net::C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
	IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (class_num_ + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);

	// Custom YOLO decode layer consuming the three detection heads.
	auto yolo = YOLOV5Net::addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
	yolo->getOutput(0)->setName(output_blob_name_.c_str());
	network->markOutput(*yolo->getOutput(0));

	// Build engine
	builder->setMaxBatchSize(maxBatchSize);
	config->setMaxWorkspaceSize(16 * (1 << 20));  // 16MB
#if defined(USE_FP16)
	config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
	std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
	assert(builder->platformHasFastInt8());
	config->setFlag(BuilderFlag::kINT8);
	// NOTE(review): calibrator is never freed after the build — confirm whether
	// TensorRT takes ownership here; upstream tensorrtx has the same pattern.
	Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, input_img_w_, input_img_h_, "./coco_calib/", "int8calib.table", input_blob_name_.c_str());
	config->setInt8Calibrator(calibrator);
#endif

	std::cout << "Building engine, please wait for a while..." << std::endl;
	ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
	std::cout << "Build engine successfully!" << std::endl;

	// Don't need the network any more
	network->destroy();

	// Release host memory allocated by loadWeights
	for (auto& mem : weightMap)
	{
		free((void*)(mem.second.values));
	}

	return engine;
}

// Scale a channel count x by the width multiplier gw, rounded UP to the
// nearest multiple of divisor (mirrors yolov5's make_divisible behaviour).
int ModelTransProc::get_width(int x, float gw, int divisor){
	const float scaled = x * gw;
	const int groups = int(ceil(scaled / divisor));
	return groups * divisor;
}

// Scale a repeat count x by the depth multiplier gd, clamped to at least 1.
// Mimics Python's round-half-to-even: when x*gd lands exactly on .5 and the
// integer part is even, the half-away-from-zero result of round() is pulled
// back down by one.
int ModelTransProc::get_depth(int x, float gd){
	if (x == 1) {
		return 1;  // a single block is never scaled
	}
	int scaled = round(x * gd);
	const bool exactly_half = (x * gd - int(x * gd) == 0.5);
	if (exactly_half && (int(x * gd) % 2) == 0) {
		--scaled;
	}
	return std::max<int>(scaled, 1);
}

// Build the selected yolov5 engine (P6 or standard) and serialize it into
// *modelStream for the caller to write to disk.
//
// @param maxBatchSize  maximum batch size baked into the engine
// @param modelStream   out: serialized engine (caller must destroy())
// @param is_p6         choose the P6 (4-head) topology when true
// @param gd, gw        depth/width multipliers forwarded to the builder
// @param wts_name      path of the .wts weight file
void ModelTransProc::APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) {
	// Create builder
	IBuilder* builder = createInferBuilder(gLogger);
	IBuilderConfig* config = builder->createBuilderConfig();

	// Create model to populate the network, then set the outputs and create an engine
	ICudaEngine *engine = nullptr;
	if (is_p6) {
		engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
	}
	else {
		engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
	}
	assert(engine != nullptr);

	// Serialize the engine
	(*modelStream) = engine->serialize();

	// Close everything down.
	// BUGFIX: the config is created from the builder, so it must be destroyed
	// BEFORE the builder (TensorRT requires children to outlive their parent's
	// destruction); previously builder->destroy() ran first.
	engine->destroy();
	config->destroy();
	builder->destroy();
}

// Slot: set the network input height (pixels) used when building the engine.
void ModelTransProc::onSetInput_H(int _inputH){
	input_img_h_ = _inputH;
}

// Slot: set the network input width (pixels) used when building the engine.
void ModelTransProc::onSetInput_W(int _inputW){
	input_img_w_ = _inputW;
}

// Slot: set the number of detection classes baked into the detect heads.
void ModelTransProc::onSetClassNum(int _classNum) {
	class_num_ = _classNum;
}

// Slot: select the yolov5 model scale by index and set the corresponding
// depth (gd) and width (gw) multipliers:
//   0 = s, 1 = m, 2 = l, 3 = x.
// -1 (and any unknown index) leaves the current multipliers untouched.
void ModelTransProc::onSetYoloType(int _type){
	if (_type == -1)
		return;

	static const float kDepthMul[] = { 0.33f, 0.67f, 1.0f, 1.33f };
	static const float kWidthMul[] = { 0.50f, 0.75f, 1.0f, 1.25f };
	if (_type >= 0 && _type <= 3) {
		gd = kDepthMul[_type];
		gw = kWidthMul[_type];
	}
}

// Slot: set the maximum batch size passed to the TensorRT builder.
void ModelTransProc::onSetBatchSize(int _batchSize){
	batch_size_ = _batchSize;
}

// Slot: convert a .wts weight file into a serialized TensorRT engine file,
// then emit modelTransComplete(success).
//
// @param wtsName  path of the input .wts file; an empty name skips conversion
//                 (and still reports success — presumably intentional; confirm)
// @param engine   path of the engine/plan file to write
void ModelTransProc::onTransModel(std::string wtsName, std::string engine) {
	if (!wtsName.empty()) {
		IHostMemory* modelStream{ nullptr };
		APIToModel(batch_size_, &modelStream, is_p6, gd, gw, wtsName);
		assert(modelStream != nullptr);
		std::ofstream p(engine, std::ios::binary);
		if (!p) {
			std::cerr << "could not open plan output file" << std::endl;
			// BUGFIX: release the serialized stream on the failure path too;
			// previously it leaked here.
			modelStream->destroy();
			emit modelTransComplete(false);
			return;
		}
		p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
		modelStream->destroy();
	}
	emit modelTransComplete(true);
}
