#include "app_yolo/yolo.h"
#include "trt_define.h"
#include "trt_engine/infer_controller.h"
#include "trt_engine/preprocess_kernel.cuh"
#include "app_yolo/object_detector.hpp"
#include "utils/logger.h"

using namespace trt_infer;
using namespace std;
_YOLO_NAMESPACE_BEGINE

// Human-readable name of a yolo model Type, used for logging/display.
// Returns a static string; "Unknown" for any unrecognized enum value.
const char *type_name(Type type)
{
    switch (type)
    {
    case Type::V5:
        return "YoloV5";
    case Type::X:
        return "YoloX";
    default:
        return "Unknown"; // fixed typo: was "Unknow"
    }
}

// Launches the GPU decode kernel (defined in the companion .cu file).
// `predict` is the raw network output on the device; boxes above
// `confidence_threshold` are mapped back to image space with the
// device-resident `invert_affine_matrix` and appended to `parray`
// ([count, box0, box1, ...]) on `stream`, capped at `max_objects`.
void decode_kernel_invoker(
    float *predict, int num_bboxes, int num_classes, float confidence_threshold,
    float *invert_affine_matrix, float *parray,
    int max_objects, cudaStream_t stream);

// Launches the GPU NMS kernel over the decoded `parray` in place,
// marking suppressed boxes via their keep-flag element, on `stream`.
void nms_kernel_invoker(
    float *parray, float nms_threshold, int max_objects, cudaStream_t stream);

// Specialization of the generic producer/consumer infer controller:
// commit() takes a cv::Mat, futures resolve to a BoxArray, startup carries
// (engine_file, gpuid), and each job stores its AffineMatrix as extra data.
using ControllerImpl = InferController<
    cv::Mat,            // Input
    BoxArray,           // Output
    tuple<string, int>, // start param
    AffineMatrix        // JobAdditional
    >;
// Concrete yolo inference engine: preprocesses images on the CPU/GPU,
// batches jobs through a TensorRT engine on a worker thread, and decodes
// detections (with CPU or GPU NMS) into BoxArray results.
class InferImpl : public Infer, public ControllerImpl
{
public:
    // Configure thresholds/normalization and launch the worker thread.
    // Returns false when the yolo type is unsupported or the controller
    // (engine load) fails to start.
    virtual bool startup(
        const string &engine_file,
        Type type,
        int gpuid,
        float confidence_threshold,
        float nms_threshold,
        NMSMethod nms_method,
        int max_objects,
        bool use_multi_preprocess_stream)
    {
        LOGER_INST.init();
        if (type == Type::V5)
        {
            LOGI << "YoloV5 infer startup";
            LOGI << "\tEngine file: " << engine_file;
            LOGI << "\tConfidence threshold: " << confidence_threshold;
            LOGI << "\tNMS method: " << (nms_method == NMSMethod::CPU ? "CPU" : "FastGPU");
            LOGI << "\tMax objects: " << max_objects;
            LOGI << "\tUse multi preprocess stream: " << (use_multi_preprocess_stream ? "True" : "False");
            // V5 expects BGR->RGB (Invert) and pixel scaling to [0, 1].
            normalize_ = CUDAKernel::Norm::alpha_beta(1 / 255.0f, 0.0f, CUDAKernel::ChannelType::Invert);
        }
        else if (type == Type::X)
        {
            // YoloX consumes raw pixel values; no normalization.
            normalize_ = CUDAKernel::Norm::None();
        }
        else
        {
            LOGE << "Unsupport Yolo type";
            // Bug fix: previously fell through and started up with an
            // uninitialized normalize_; refuse to start instead.
            return false;
        }
        use_multi_preprocess_stream_ = use_multi_preprocess_stream;
        confidence_threshold_ = confidence_threshold;
        nms_threshold_ = nms_threshold;
        nms_method_ = nms_method;
        max_objects_ = max_objects;
        return ControllerImpl::startup(make_tuple(engine_file, gpuid));
    }

protected:
    // Worker-thread loop: loads the engine, signals startup success via
    // `result`, then repeatedly gathers preprocessed jobs, runs a batched
    // forward pass, decodes boxes on the GPU and fulfills each job's promise.
    virtual void work(promise<bool> &result) override
    {
        string engine_file = get<0>(start_param_);
        int gpuid = get<1>(start_param_);

        CUDATools::set_device_id(gpuid);
        auto engine = load_infer(engine_file);
        if (!engine)
        {
            LOGE << "Load engine failed";
            result.set_value(false);
            return;
        }

        engine->print();

        const int MAX_IMAGE_BBOX = max_objects_;
        const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag
        Tensor affine_matrix_device(DataType::Float);
        Tensor output_array_device(DataType::Float);
        int max_batch_size = engine->get_max_batch_size();
        auto input = engine->tensor("images");
        auto output = engine->tensor("output");
        int num_class = output->size(2) - 5; // yolo layout: 4 box coords + objectness + classes

        input_width_ = input->size(3);
        input_height_ = input->size(2);
        // 2x batch so preprocessing of the next batch can overlap inference.
        tensor_allocator_ = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2);
        stream_ = engine->get_stream();
        gpu_ = gpuid;
        result.set_value(true); // unblock startup() before entering the loop
        input->resize_single_dim(0, max_batch_size).to_gpu();
        affine_matrix_device.set_stream(stream_);
        affine_matrix_device.resize(max_batch_size, 8).to_gpu();

        // counter + bboxes
        output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu();
        vector<Job> fetch_jobs;
        while (get_jobs_and_wait(fetch_jobs, max_batch_size))
        {
            int infer_batch_size = fetch_jobs.size();
            input->resize_single_dim(0, infer_batch_size);

            // Stage each job's preprocessed tensor and affine matrix into
            // the batched engine input, then release the job's tensor slot.
            for (int ibatch = 0; ibatch < infer_batch_size; ++ibatch)
            {
                auto &job = fetch_jobs[ibatch];
                auto &mono = job.mono_tensor->data();

                if (mono->get_stream() != stream_)
                {
                    // synchronize preprocess stream finish.
                    checkRuntime(cudaStreamSynchronize(mono->get_stream()));
                }

                affine_matrix_device.copy_from_gpu(affine_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6);
                input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count());
                job.mono_tensor->release();
            }
            // input->save_to_file("D:\\learnSpace\\trt_infer\\test_data\\input_tensor.tensor");

            engine->forward(false);
            output_array_device.to_gpu(false);

            // Queue decode (+ optional GPU NMS) for every image in the batch.
            for (int ibatch = 0; ibatch < infer_batch_size; ++ibatch)
            {
                float *output_array_ptr = output_array_device.gpu<float>(ibatch);
                // Bug fix: the decode kernel runs on the GPU and must receive
                // the device-resident affine matrix (was .cpu<float>(), a
                // host pointer, which is invalid inside the kernel).
                auto affine_matrix = affine_matrix_device.gpu<float>(ibatch);
                // Reset the box counter (first int of the slot) for this image.
                checkRuntime(cudaMemsetAsync(output_array_ptr, 0, sizeof(int), stream_));
                decode_kernel_invoker(
                    output->gpu<float>(ibatch),
                    output->size(1),
                    num_class,
                    confidence_threshold_,
                    affine_matrix,
                    output_array_ptr,
                    MAX_IMAGE_BBOX,
                    stream_);

                if (nms_method_ == NMSMethod::FastGPU)
                {
                    nms_kernel_invoker(
                        output_array_ptr,
                        nms_threshold_,
                        MAX_IMAGE_BBOX,
                        stream_);
                }
            }

            // Bug fix: the copy-back and result loop were previously nested
            // inside the decode loop, so to_cpu() ran before later batches'
            // kernels were queued, promises were set repeatedly, and
            // fetch_jobs.clear() inside the loop made fetch_jobs[ibatch]
            // index an empty vector. One D2H sync for the whole batch, then
            // one pass to emit results.
            output_array_device.to_cpu();
            for (int ibatch = 0; ibatch < infer_batch_size; ++ibatch)
            {
                float *parray = output_array_device.cpu<float>(ibatch);
                int count = std::min((int)parray[0], MAX_IMAGE_BBOX);
                auto &job = fetch_jobs[ibatch];
                auto &image_based_boxes = job.output;
                for (int i = 0; i < count; ++i)
                {
                    float *pbox = parray + 1 + i * NUM_BOX_ELEMENT;
                    int label = pbox[5];
                    int keepflag = pbox[6];
                    if (keepflag == 1)
                    {
                        image_based_boxes.emplace_back(
                            pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label);
                    }
                }
                if (nms_method_ == NMSMethod::CPU)
                {
                    image_based_boxes = nms_cpu(image_based_boxes, nms_threshold_);
                }
                job.pro->set_value(image_based_boxes);
            }
            fetch_jobs.clear();
        }
        stream_ = nullptr;
        tensor_allocator_.reset();
        LOGI << "Infer thread exit";
    }

    // Per-image preprocessing: computes the letterbox affine matrix, stages
    // the image + matrix through pinned host memory into the job's GPU
    // tensor, and launches the warp-affine/normalize kernel on the job's
    // preprocess stream. Returns false on invalid input or allocator failure.
    virtual bool preprocess(Job &job, const cv::Mat &image) override
    {
        if (tensor_allocator_ == nullptr)
        {
            LOGE << "Tensor allocator is null";
            return false;
        }

        if (image.empty())
        {
            LOGE << "Empty image";
            return false;
        }

        job.mono_tensor = tensor_allocator_->query();
        if (job.mono_tensor == nullptr)
        {
            LOGE << "Query tensor failed";
            return false;
        }

        CUDATools::AutoDevice auto_device(gpu_);
        auto &tensor = job.mono_tensor->data();
        CUStream preprocess_stream = nullptr;

        if (tensor == nullptr)
        {
            // not init
            tensor = make_shared<Tensor>();
            tensor->set_workspace(make_shared<MixMemory>());

            if (use_multi_preprocess_stream_)
            {
                checkRuntime(cudaStreamCreate(&preprocess_stream));

                // owner = true, stream needs to be free during deconstruction
                tensor->set_stream(preprocess_stream, true);
            }
            else
            {
                preprocess_stream = stream_;

                // owner = false, tensor ignored the stream
                tensor->set_stream(preprocess_stream, false);
            }
        }

        cv::Size input_size(input_width_, input_height_);
        job.additional.compute(image.size(), input_size);

        preprocess_stream = tensor->get_stream();
        tensor->resize(1, 3, input_height_, input_width_);

        // Workspace layout: [aligned affine matrix | raw image bytes].
        // Assumes a 3-channel, 8-bit image — TODO confirm callers never pass
        // other formats.
        size_t size_image = image.cols * image.rows * 3;
        size_t size_matrix = CUDATools::upbound(sizeof(job.additional.d2i), 32);
        auto workspace = tensor->get_workspace();
        uint8_t *gpu_workspace = (uint8_t *)workspace->gpu(size_matrix + size_image);
        float *affine_matrix_device = (float *)gpu_workspace;
        uint8_t *image_device = size_matrix + gpu_workspace;

        uint8_t *cpu_workspace = (uint8_t *)workspace->cpu(size_matrix + size_image);
        float *affine_matrix_host = (float *)cpu_workspace;
        uint8_t *image_host = size_matrix + cpu_workspace;

        // checkCudaRuntime(cudaMemcpyAsync(image_host,   image.data, size_image, cudaMemcpyHostToHost,   stream_));
        //  speed up
        memcpy(image_host, image.data, size_image);
        memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i));
        checkRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, preprocess_stream));
        checkRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), cudaMemcpyHostToDevice, preprocess_stream));

        // 114 = letterbox padding value (yolo convention).
        CUDAKernel::warp_affine_bilinear_and_normalize_plane(
            image_device, image.cols * 3, image.cols, image.rows,
            tensor->gpu<float>(), input_width_, input_height_,
            affine_matrix_device, 114,
            normalize_, preprocess_stream);
        return true;
    }

    // Batched submission: forwards to the controller's queueing logic.
    virtual vector<shared_future<BoxArray>> commits(const vector<cv::Mat> &images) override
    {
        return ControllerImpl::commits(images);
    }

    // Single-image submission: forwards to the controller's queueing logic.
    virtual std::shared_future<BoxArray> commit(const cv::Mat &image) override
    {
        return ControllerImpl::commit(image);
    }

private:
    int input_width_ = 0;                           // engine input width (from binding)
    int input_height_ = 0;                          // engine input height (from binding)
    int gpu_ = 0;                                   // CUDA device id
    float confidence_threshold_ = 0;                // decode score cutoff
    float nms_threshold_ = 0;                       // IoU cutoff for NMS
    int max_objects_ = 1024;                        // per-image box capacity
    NMSMethod nms_method_ = NMSMethod::FastGPU;     // CPU or GPU NMS
    CUStream stream_ = nullptr;                     // engine stream (owned by engine)
    bool use_multi_preprocess_stream_ = false;      // per-tensor preprocess streams
    CUDAKernel::Norm normalize_;                    // pixel normalization config
};

// Factory: construct an InferImpl and start it with the given settings.
// On any startup failure the instance is discarded and nullptr is returned,
// so callers can simply check the returned pointer.
shared_ptr<Infer> create_infer(
    const string &engine_file, Type type, int gpuid,
    float confidence_threshold, float nms_threshold,
    NMSMethod nms_method, int max_objects,
    bool use_multi_preprocess_stream)
{
    auto instance = make_shared<InferImpl>();
    bool started = instance->startup(
        engine_file, type, gpuid, confidence_threshold,
        nms_threshold, nms_method, max_objects, use_multi_preprocess_stream);
    if (!started)
        instance.reset();
    return instance;
}

_YOLO_NAMESPACE_END