#include <algorithm>
#include <cassert>
#include <chrono>
#include <fstream>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>
#include <stdio.h>
#include <dirent.h>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"

// Abort on any failing CUDA runtime call. Reports the failing location and
// the human-readable error string instead of just the raw status code.
#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            std::cerr << "Cuda failure at " << __FILE__ << ":" << __LINE__ \
                      << ": " << cudaGetErrorString(ret) \
                      << " (" << ret << ")" << std::endl;\
            abort();\
        }\
    } while (0)

#define DEVICE 0  // GPU id

using namespace nvinfer1;

// stuff we know about the network and the input/output blobs
static const int INPUT_W = 800;   // network input width in pixels
static const int INPUT_H = 800;   // network input height in pixels
static const int NUM_CLASSES = 1; // single foreground class
const char* INPUT_BLOB_NAME = "data";    // engine input binding name
const char* OUTPUT_BLOB_NAME = "output"; // engine output binding name
static Logger gLogger;  // TensorRT logger (from logging.h)

// Resize an arbitrary BGR image to the fixed network input size.
// Note: this is a plain stretch, not an aspect-preserving letterbox
// (the letterbox variant is left commented out by the original author).
cv::Mat static_resize(cv::Mat& img) {
    cv::Mat resized(INPUT_H, INPUT_W, CV_8UC3);
    cv::resize(img, resized, resized.size());
    return resized;
}


// Comparator for centroid points stored as {x, y}: orders by x, ascending.
static bool sort_point(const std::vector<int>& a, const std::vector<int>& b) {
	return a.front() < b.front();
}


// Binarize a single-channel float map into an 8-bit mask image.
// blob holds img.rows * img.cols floats in row-major order; each pixel of
// img (expected CV_8UC1, pre-allocated by the caller) is set to 255 when
// the corresponding value exceeds 127.5, otherwise 0.
// Fixes: removed unused locals (channels, out_size) and the
// signed/unsigned loop-comparison warnings of the original.
void ImageFromBlob(float* blob, cv::Mat& img) {
	const int img_h = img.rows;
	const int img_w = img.cols;
	const float threshold = 255.0f * 0.5f;  // same cut-off as 255.0 * 0.5

	for (int h = 0; h < img_h; h++)
	{
		for (int w = 0; w < img_w; w++)
		{
			img.at<uchar>(h, w) = (blob[h * img_w + w] > threshold) ? 255 : 0;
		}
	}
}

// Convert an 8-bit 3-channel image into a planar (CHW) float blob with
// values scaled to [0, 1]. Channel order follows the cv::Mat storage
// order (BGR for images loaded via cv::imread).
// Ownership: returns a heap array of img.total() * 3 floats that the
// caller must release with delete[].
// Fix: divide by the float literal 255.0f instead of the double 255.0
// to avoid a needless float->double->float round-trip per pixel.
float* blobFromImage(cv::Mat& img){
    const int channels = 3;
    const int img_h = img.rows;
    const int img_w = img.cols;
    float* blob = new float[(size_t)channels * img_h * img_w];
    for (int c = 0; c < channels; c++)
    {
        for (int h = 0; h < img_h; h++)
        {
            for (int w = 0; w < img_w; w++)
            {
                blob[(size_t)c * img_w * img_h + (size_t)h * img_w + w] =
                    (float)img.at<cv::Vec3b>(h, w)[c] / 255.0f;
            }
        }
    }
    return blob;
}

static void decode_outputs(float* prob, cv::Mat& out_mask) {

		cv::Mat img(INPUT_H, INPUT_W, CV_8UC1);
		ImageFromBlob(prob, img);
		img.copyTo(out_mask(cv::Rect(0, 0, img.cols, img.rows)));
}

// Overlay the binary mask onto img_show: wherever mask is 255, blend
// channel 0 of the source image 50/50 with full intensity (255).
// img, mask and img_show are assumed to share the same spatial size.
void draw_mask(cv::Mat& img, cv::Mat& mask, cv::Mat& img_show) {
	const int color_channel = 0;
	for (int row = 0; row < mask.rows; row++)
	{
		for (int col = 0; col < mask.cols; col++)
		{
			if (mask.at<uchar>(row, col) != 255)
				continue;
			double blended = (int)img.at<cv::Vec3b>(row, col)[color_channel] * 0.5 + 255 * 0.5;
			img_show.at<cv::Vec3b>(row, col)[color_channel] = blended;
		}
	}
}

// Clean up the binary mask (erode), count connected instances via external
// contours, blend the mask into img_show, and label each instance with its
// left-to-right index plus an overall count.
//
// Fixes versus the original:
//  - num_instances is now an out-parameter (it was taken by value, so the
//    caller's counter was never updated).
//  - snprintf replaces unbounded sprintf.
//  - guards against division by zero on a degenerate (empty) contour.
//  - removed the dead point_c.clear() after the value had been copied out.
static void post_process(cv::Mat& img, cv::Mat& mask, cv::Mat& img_show, int& num_instances)
{
	// erode + findContours
	auto start_post = std::chrono::system_clock::now();
	cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5,5));
	cv::erode(mask, mask, kernel, cv::Point(-1,-1), 2);
	std::vector<std::vector<cv::Point>> contours;
	cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
	auto end_post = std::chrono::system_clock::now();
	std::cout << "post-process time-consuming: " <<
		std::chrono::duration_cast<std::chrono::milliseconds>(end_post - start_post).count() << "ms" << std::endl;

	num_instances = (int)contours.size();
	std::cout << "find " << num_instances << " instances" << std::endl;

	// vis
	auto start_vis = std::chrono::system_clock::now();
	draw_mask(img, mask, img_show);

	// Centroid (mean of contour points) for every instance, stored as {x, y}.
	std::vector<std::vector<int>> points_centers(num_instances);
	for (int i = 0; i < num_instances; i++) {
		const auto& contour = contours[i];
		const int num_points = (int)contour.size();
		if (num_points == 0) {
			// Degenerate contour: avoid dividing by zero.
			points_centers[i] = {0, 0};
			continue;
		}
		long x_total = 0;
		long y_total = 0;
		for (const cv::Point& point : contour) {
			x_total += point.x;
			y_total += point.y;
		}
		points_centers[i] = {(int)(x_total / num_points), (int)(y_total / num_points)};
	}

	// Label instances left-to-right by centroid x coordinate.
	std::sort(points_centers.begin(), points_centers.end(), sort_point);
	for (int i = 0; i < num_instances; i++) {
		const auto& center = points_centers[i];
		char text[32];
		snprintf(text, sizeof(text), "%d", i);
		cv::putText(img_show, text, cv::Point(center[0], center[1]), cv::FONT_HERSHEY_SIMPLEX, 0.6, cv::Scalar(0, 0, 255), 2);
	}
	char pred_text[64];
	snprintf(pred_text, sizeof(pred_text), "%s%d", "nums_pred: ", num_instances);
	cv::putText(img_show, pred_text, cv::Point(10, 50), cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0, 0, 255), 2);
	auto end_vis = std::chrono::system_clock::now();
	std::cout << "visualization time-consuming: " <<
		std::chrono::duration_cast<std::chrono::milliseconds>(end_vis - start_vis).count() << "ms" << std::endl;
}


// Run one synchronous inference pass: copy the CHW float blob to the device,
// enqueue the engine, and copy output_size floats back into `output` (a
// host buffer the caller pre-allocated).
// Fixes versus the original: the ignored enqueue() return value is now
// reported, cudaStreamSynchronize/cudaStreamDestroy are CHECKed, and the
// unused mBatchSize local was removed.
// NOTE: buffers are allocated and freed on every call; hoist them out if
// this ever runs in a loop.
void doInference(IExecutionContext& context, float* input, float* output, const int output_size, cv::Size input_shape) {
    const ICudaEngine& engine = context.getEngine();

    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);

    const size_t input_bytes = (size_t)3 * input_shape.height * input_shape.width * sizeof(float);

    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], input_bytes));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size * sizeof(float)));

    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, input_bytes, cudaMemcpyHostToDevice, stream));
    // enqueue() reports failure through its bool return, not cudaError_t.
    if (!context.enqueue(1, buffers, stream, nullptr)) {
        std::cerr << "TensorRT enqueue failed" << std::endl;
    }
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
    CHECK(cudaStreamSynchronize(stream));

    // Release stream and buffers
    CHECK(cudaStreamDestroy(stream));
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}

int main(int argc, char** argv) {
	std::cout << "begining ......" << std::endl;
    cudaSetDevice(DEVICE);
    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{nullptr};
    size_t size{0};

    if (argc == 4 && std::string(argv[2]) == "-i") {
        const std::string engine_file_path {argv[1]};
        std::ifstream file(engine_file_path, std::ios::binary);
        if (file.good()) {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    } else {
        std::cerr << "arguments not right!" << std::endl;
        std::cerr << "run 'python3 yolox/deploy/trt.py -n yolox-{tiny, s, m, l, x}' to serialize model first!" << std::endl;
        std::cerr << "Then use the following command:" << std::endl;
        std::cerr << "./stdc.exe ../model_trt.engine -i ../../../assets/dog.jpg  // deserialize file and run inference" << std::endl;
        return -1;
    }
    const std::string input_image_path {argv[3]};

    //std::vector<std::string> file_names;
    //if (read_files_in_dir(argv[2], file_names) < 0) {
        //std::cout << "read_files_in_dir failed." << std::endl;
        //return -1;
    //}
	std::cout << "load engine success ....." << std::endl;

    IRuntime* runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
	assert(engine != nullptr);
    IExecutionContext* context = engine->createExecutionContext();
	assert(context != nullptr);
    delete[] trtModelStream;
    auto out_dims = engine->getBindingDimensions(1);
    auto output_size = 1;
    for(int j=0;j<out_dims.nbDims;j++) {
        output_size *= out_dims.d[j];
    }
    static float* prob = new float[output_size];

	std::cout << "reading image ....." << std::endl;
    cv::Mat img = cv::imread(input_image_path);
    int img_w = img.cols;
    int img_h = img.rows;
    cv::Mat pr_img = static_resize(img);
    std::cout << "blob image" << std::endl;

    float* blob;
    blob = blobFromImage(pr_img);
    float scale = min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));

    // run inference
    auto start = std::chrono::system_clock::now();
    doInference(*context, blob, prob, output_size, pr_img.size());
    auto end = std::chrono::system_clock::now();
    std::cout << "model inference time-consuming: " << 
		std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;

	cv::Mat out_mask(INPUT_H, INPUT_W, CV_8UC1);
	auto start_decode = std::chrono::system_clock::now();
    decode_outputs(prob, out_mask);
	auto end_decode = std::chrono::system_clock::now();
	std::cout << "decode_outputs time-consuming: " << 
		std::chrono::duration_cast<std::chrono::milliseconds>(end_decode - start_decode).count() << "ms" << std::endl;

	cv::Mat img_show = pr_img.clone();
	int num_instances = 0;
	cv::imshow("out_mask_pre", out_mask);
    post_process(pr_img, out_mask, img_show, num_instances);

	//cv::imwrite("G:\workspaces\stdc_shushu\deploy\cpp\debug.png", out_mask);
	cv::imshow("out_mask_post", out_mask);
	cv::imshow("img_show", img_show);

    // delete the pointer to the float
    delete blob;
    // destroy the engine
    context->destroy();
    engine->destroy();
    runtime->destroy();


	cv::waitKey(0);
	cv::destroyAllWindows();
    return 0;
}
