#include "alpha_pose.h"
#include "trt_engine/infer_controller.h"
#include "utils/affine_matrix.h"
#include "utils/logger.h"
#include "trt_engine/preprocess_kernel.cuh"

_ALPHA_POSE_NAMESPACE_BEGINE

// Controller specialization for AlphaPose:
//   Input       = MatWithBox            (image + detection box)
//   Output      = PoseKeypointsArray    (keypoints for one person)
//   StartParam  = (engine file, gpu id) forwarded to work()
//   Additional  = per-job affine matrix wrapper used by preprocess()
// Fix: the original had a stray trailing '>' (the template list was already
// closed on the AffineMatrixWapper line), which is a syntax error.
using ControllerImpl = InferController<
    MatWithBox,                             // Input
    PoseKeypointsArray,                     // Output
    std::tuple<std::string, int>,           // Start Param (engine file, gpuid)
    AffineMatrixWapper                      // Additional
>;

// Concrete AlphaPose inferencer. Owns GPU preprocessing (crop + affine warp +
// normalize into the input tensor) and delegates queueing/lifetime to
// ControllerImpl.
class InferImpl : public Infer, public ControllerImpl
{
public:
    // Stores the runtime options and starts the controller's worker thread.
    // `engine_file` and `gpuid` travel to work() through the start-param tuple.
    // Fix: the original only logged `confidence_threshold` and
    // `use_multi_preprocess_stream` without storing them, so the multi-stream
    // preprocessing path could never be enabled.
    InferImpl(const std::string &engine_file, int gpuid, float confidence_threshold, bool use_multi_preprocess_stream)
    {
        LOGI << __FUNCTION__;
        LOGI << "engine_file: " << engine_file;
        LOGI << "gpuid: " << gpuid;
        LOGI << "confidence_threshold: " << confidence_threshold;
        LOGI << "use_multi_preprocess_stream: " << use_multi_preprocess_stream;

        confidence_threshold_ = confidence_threshold;
        use_multi_preprocess_stream_ = use_multi_preprocess_stream;

        ControllerImpl::startup(std::make_tuple(engine_file, gpuid));
    }

protected:
    // Prepares one job: copies the source image and the box-derived affine
    // matrix to the GPU, then warps + normalizes the crop into the job's
    // input tensor. Returns false when the job cannot be prepared
    // (missing allocator, empty image, degenerate box, no free tensor).
    virtual bool preprocess(Job &job, const cv::Mat &image) override
    {
        LOGI << __FUNCTION__;
        if (tensor_allocator_ == nullptr)
        {
            LOGE << "tensor_allocator_ is nullptr";
            return false;
        }
        if (image.empty())
        {
            LOGE << "image is empty";
            return false;
        }

        job.mono_tensor = tensor_allocator_->query();
        if (job.mono_tensor == nullptr)
        {
            LOGE << "job.mono_tensor is nullptr";
            return false;
        }

        CUDATools::AutoDevice auto_device(gpu_id_);
        auto &tensor = job.mono_tensor->data();
        CUStream preprocess_stream = nullptr;

        if (tensor == nullptr)
        {
            // First use of this mono-tensor: create it with its own workspace.
            tensor = std::make_shared<Tensor>();
            tensor->set_workspace(std::make_shared<MixMemory>());

            if (use_multi_preprocess_stream_)
            {
                // Owning stream (true): released together with the tensor.
                checkRuntime(cudaStreamCreate(&preprocess_stream));
                tensor->set_stream(preprocess_stream, true);
            }
            else
            {
                tensor->set_stream(nullptr, false);
            }
        }

        // Fix: always take the stream back from the tensor. The original left
        // preprocess_stream at nullptr (the default stream) on every call after
        // the first, so the async copies below could run on a different stream
        // than the tensor's own, racing downstream work.
        preprocess_stream = tensor->get_stream();

        // Reject degenerate boxes before deriving the affine transform.
        if (job.input.box.width == 0 || job.input.box.height == 0)
        {
            LOGE << "box width or height is 0";
            return false;
        }

        // Source image bytes (8-bit, 3 channels). Fix: the original sized this
        // from the network input (input_width_ * input_height_ * 3), which is
        // wrong whenever the incoming image has different dimensions — the
        // memcpy/cudaMemcpyAsync below copy exactly these many bytes of
        // image.data, and the warp kernel reads image.cols/image.rows.
        size_t size_image = (size_t)image.cols * image.rows * 3;
        // Memory aligned to 32 bytes. Fix: the original wrapped the size in
        // sizeof(...), which yields the size of the returned integer type
        // (e.g. 8), not the matrix byte count.
        size_t size_matrix = CUDATools::upbound(job.additional.get_matrix_size(), 32);

        // Pad the crop around the detection box, then scale it to fit the
        // network input while preserving aspect ratio.
        // NOTE(review): the 100-pixel switch between 0.1 and 1.5 padding rates
        // is inherited as-is — confirm against the reference implementation.
        float rate = job.input.box.width > 100 ? 0.1f : 1.5f;
        float pad_width = job.input.box.width * (1 + 2 * rate);
        float pad_height = job.input.box.height * (1 + 1 * rate);
        float scale = std::min(input_width_ / pad_width, input_height_ / pad_height);
        job.additional.move(-job.input.box.x, -job.input.box.y);
        job.additional.scale(scale, scale);

        // Workspace layout: |affine_matrix_device|image_device-------|
        auto workspace = tensor->get_workspace();
        uint8_t *gpu_workspace = (uint8_t *)workspace->gpu(size_image + size_matrix);
        float *affine_matrix_device = (float *)gpu_workspace;
        uint8_t *image_device = gpu_workspace + size_matrix;

        uint8_t *cpu_workspace = (uint8_t *)workspace->cpu(size_image + size_matrix);
        float *affine_matrix_host = (float *)cpu_workspace;
        // Fix: this region holds raw 8-bit pixels; the original cast it to float*.
        uint8_t *image_host = cpu_workspace + size_matrix;

        memcpy(affine_matrix_host, job.additional.d2i().data(), job.additional.get_matrix_size());
        memcpy(image_host, image.data, size_image);

        checkRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, size_matrix, cudaMemcpyHostToDevice, preprocess_stream));
        checkRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, preprocess_stream));

        // Normalization: scale pixels by 1/255, subtract the per-channel mean
        // (unit std), and swap channel order (ChannelType::Invert).
        float mean[] = {0.480f, 0.457f, 0.406f}; // BGR
        float std[] = {1.0f, 1.0f, 1.0f};
        auto normalize = CUDAKernel::Norm::mean_std(mean, std, 1 / 255.0f, CUDAKernel::ChannelType::Invert);

        // Warp the padded crop into the network input plane; out-of-source
        // pixels are filled with 114 (gray).
        // NOTE(review): the tensor is never resize()d before gpu<float>() here
        // — confirm the controller sizes the input binding elsewhere.
        CUDAKernel::warp_affine_bilinear_and_normalize_plane(
            image_device, image.cols * 3, image.cols, image.rows,
            tensor->gpu<float>(), input_width_, input_height_,
            affine_matrix_device, 114,
            normalize, preprocess_stream);

        return true;
    }

    // Worker-thread entry: binds the GPU, loads the TensorRT engine and
    // reports startup success/failure through `result`.
    virtual void work(std::promise<bool> &result) override
    {
        LOGI << __FUNCTION__ << " start";

        // Fix: read the gpu id from the start params *before* using it. The
        // original called set_device_id(gpu_id_) on the uninitialized member
        // and only afterwards had a dangling `const int` fragment that
        // declared a shadowing local instead of assigning the member.
        gpu_id_ = std::get<1>(start_param_);
        std::string engine_file = std::get<0>(start_param_);

        CUDATools::set_device_id(gpu_id_);
        auto engine = load_infer(engine_file);
        if (engine == nullptr)
        {
            LOGE << "load_infer failed";
            result.set_value(false);
            return;
        }

        engine->print();

        // NOTE(review): input_width_/input_height_ are consumed by
        // preprocess() but never assigned anywhere visible — presumably they
        // should be read from the engine's input binding here; confirm.
        // TODO confirm: a job-consuming inference loop also appears to be
        // missing from this worker.

        // Fix: fulfill the promise on the success path. The original fell off
        // the end without setting a value, leaving startup() blocked forever
        // (or raising broken_promise on thread exit).
        result.set_value(true);
    }

    virtual ~InferImpl() {}

    // Enqueue a single image+box; forwards to the controller's queue.
    virtual std::shared_future<PoseKeypointsArray> commit(const MatWithBox &image) override
    {
        return ControllerImpl::commit(image);
    }

    // Enqueue a batch of images+boxes; forwards to the controller's queue.
    virtual std::vector<std::shared_future<PoseKeypointsArray>> commits(const std::vector<MatWithBox> &images) override
    {
        return ControllerImpl::commits(images);
    }

private:
    // Fix: members were previously uninitialized; give them safe defaults.
    int gpu_id_ = 0;
    int input_width_ = 0;                       // network input width  — see NOTE in work()
    int input_height_ = 0;                      // network input height — see NOTE in work()
    float confidence_threshold_ = 0.0f;         // stored for postprocess use
    bool use_multi_preprocess_stream_ = false;  // dedicated preprocess stream per tensor
};


_ALPHA_POSE_NAMESPACE_END