#include <assert.h>
#include <cstring>
#include <iostream>
#include <vector>

#include <cuda_fp16.h>

#include "custom_resize_plugins.h"
#include "cuda_utils.h"
// #include "device_functions.h"

namespace Tn {
    // Append a trivially-copyable value to a byte buffer and advance the cursor.
    // memcpy avoids the strict-aliasing / alignment UB of
    // `*reinterpret_cast<T *>(buffer) = val` on an arbitrary char cursor.
    template<typename T>
    void write(char *&buffer, const T &val) {
        std::memcpy(buffer, &val, sizeof(T));
        buffer += sizeof(T);
    }

    // Read a trivially-copyable value from a byte buffer and advance the cursor.
    template<typename T>
    void read(const char *&buffer, T &val) {
        std::memcpy(&val, buffer, sizeof(T));
        buffer += sizeof(T);
    }
}

namespace nvinfer1 {
    // Default constructor: the plugin carries no configuration state, so
    // there is nothing to initialize.
    CustomResizePlugin::CustomResizePlugin() {

    }

    // Deserialization constructor. The plugin is stateless (see
    // getSerializationSize() == 0), so the serialized payload is ignored.
    CustomResizePlugin::CustomResizePlugin(const void *data, size_t length) {
        // using namespace Tn;
        // const char *d = reinterpret_cast<const char *>(data), *a = d;

        // assert(d == a + length);
    }

    // Destructor: no owned resources to release.
    CustomResizePlugin::~CustomResizePlugin() {
    }

    // The plugin produces exactly one output tensor (the resized data).
    int CustomResizePlugin::getNbOutputs() const {
        return 1;
    }

    // Serialize plugin state into `buffer`. No-op: the plugin has no state
    // (getSerializationSize() returns 0).
    void CustomResizePlugin::serialize(void *buffer) const {
        // using namespace Tn;
        // char* d = static_cast<char*>(buffer), *a = d;

        // assert(d == a + getSerializationSize());
    }

    // Number of bytes serialize() will write; zero because there is no state.
    size_t CustomResizePlugin::getSerializationSize() const {
        return 0;
    }

    // One-time setup before enqueue() may be called; nothing to do. 0 = success.
    int CustomResizePlugin::initialize() {
        return 0;
    }

    // Input 0 is the tensor being resized; input 1 supplies the target shape,
    // which the single output adopts wholesale (NCHW, 4 dims).
    DimsExprs CustomResizePlugin::getOutputDimensions(
            int outputIndex, const DimsExprs *inputs, int nbInputs, IExprBuilder &exprBuilder) {
        DimsExprs shape;
        shape.nbDims = 4;
        for (int axis = 0; axis < 4; ++axis) {
            shape.d[axis] = inputs[1].d[axis];
        }
        return shape;
    }

    // Set plugin namespace
    void CustomResizePlugin::setPluginNamespace(const char *pluginNamespace) {
        // NOTE(review): only the pointer is stored — the caller must keep the
        // string alive for the plugin's lifetime.
        mPluginNamespace = pluginNamespace;
    }

    // Return the namespace set via setPluginNamespace().
    const char *CustomResizePlugin::getPluginNamespace() const {
        return mPluginNamespace;
    }

    // Return the DataType of the plugin output at the requested index
    DataType
    CustomResizePlugin::getOutputDataType(int index, const nvinfer1::DataType *inputTypes, int nbInputs) const {
        // The output inherits the type of the data input (input 0).
        return inputTypes[0];
        //return DataType::kFLOAT;
    }

    // Validate the input/output configuration selected by TensorRT:
    // two inputs (data + shape reference), one output.
    void CustomResizePlugin::configurePlugin(const DynamicPluginTensorDesc *in, int nbInputs,
                                             const DynamicPluginTensorDesc *out, int nbOutputs) {
        assert(nbInputs == 2);
        assert(nbOutputs == 1);
        // Batch and channel dims of the data input must match the shape input.
        assert(in[0].desc.dims.d[0] == in[1].desc.dims.d[0] && in[0].desc.dims.d[1] == in[1].desc.dims.d[1]);
    }

    // Accept linear-format FLOAT or HALF tensors, all sharing input 0's type.
    // The previous version required input 0 to be FLOAT while the output had
    // to be HALF, contradicting getOutputDataType() (output type ==
    // inputTypes[0]) and enqueue() (which dispatches on input 0's type for
    // both FLOAT and HALF) — leaving no internally consistent configuration.
    bool CustomResizePlugin::supportsFormatCombination(
            int pos, const PluginTensorDesc *inOut, int nbInputs, int nbOutputs) {
        assert(0 <= pos && pos < nbInputs + nbOutputs);
        const PluginTensorDesc &desc = inOut[pos];
        if (desc.format != PluginFormat::kLINEAR) {
            return false;
        }
        if (pos == 0) {
            // Data input: the two types the kernels implement.
            return desc.type == DataType::kFLOAT || desc.type == DataType::kHALF;
        }
        // Shape-reference input and the output follow the data input's type.
        return desc.type == inOut[0].type;
    }

    // Unique plugin type string; must match the creator's getPluginName().
    const char *CustomResizePlugin::getPluginType() const {
        return "CustomResizeLayer_TRT";
    }

    // Plugin version string; must match the creator's getPluginVersion().
    const char *CustomResizePlugin::getPluginVersion() const {
        return "1";
    }

    // TensorRT calls this when the plugin object is no longer needed.
    void CustomResizePlugin::destroy() {
        delete this;
    }

    // Clone the plugin. Only the namespace needs copying — the plugin holds
    // no other state.
    IPluginV2DynamicExt *CustomResizePlugin::clone() const {
        auto *copy = new CustomResizePlugin();
        copy->setPluginNamespace(mPluginNamespace);
        return copy;
    }

    //
    // Bilinear resize kernel (FP32, NCHW).
    // Launch layout: 2-D grid/block where x covers output width and y covers
    // output height; each thread computes one (h, w) position for every
    // (batch, channel) pair. Shape arrays are device pointers of 4 ints.
    __global__ void kernel_resize_cuda_float(
            const int *input_size,   // [b, c, h, w] of the input (device)
            const int *output_size,  // [b, c, h, w] of the output (device)
            const float *input_data,
            float *output_data) {
        int isize_b = output_size[0], isize_c = output_size[1], isize_h = output_size[2], isize_w = output_size[3];
        int input_size_c = input_size[1], input_size_h = input_size[2], input_size_w = input_size[3];

        // Scale factors mapping an output coordinate back into input space.
        float rate_h = 1.0f * input_size_h / isize_h;
        float rate_w = 1.0f * input_size_w / isize_w;

        int ih = blockIdx.y * blockDim.y + threadIdx.y;
        int iw = blockIdx.x * blockDim.x + threadIdx.x;
        if (ih >= isize_h || iw >= isize_w) {
            return;  // grid tail guard
        }

        int output_hw = isize_h * isize_w;
        int output_chw = isize_c * output_hw;
        int input_hw = input_size_h * input_size_w;
        int input_chw = input_size_c * input_hw;

        float ref_ih = ih * rate_h;
        float ref_iw = iw * rate_w;
        int l_ih = __float2int_rd(ref_ih);
        int r_ih = __float2int_ru(ref_ih - 1.0e-6f);
        int l_iw = __float2int_rd(ref_iw);
        int r_iw = __float2int_ru(ref_iw - 1.0e-6f);
        // Clamp: round-up can land one past the last valid row/column when
        // upsampling (e.g. input_h=4, output_h=8, ih=7 -> ref=3.5 -> ru=4),
        // which read out of bounds in the original.
        r_ih = min(r_ih, input_size_h - 1);
        r_iw = min(r_iw, input_size_w - 1);

        float factor_ih = ref_ih - l_ih;
        float factor_iw = ref_iw - l_iw;

        // Bilinear weights for (l_ih,l_iw), (l_ih,r_iw), (r_ih,r_iw), (r_ih,l_iw).
        float x0 = (1.0f - factor_ih) * (1.0f - factor_iw);
        float x1 = (1.0f - factor_ih) * factor_iw;  // fixed: original omitted * factor_iw
        float x2 = factor_ih * factor_iw;
        float x3 = factor_ih * (1.0f - factor_iw);

        for (int ib = 0; ib < isize_b; ++ib) {
            for (int ic = 0; ic < isize_c; ++ic) {
                int base = ib * input_chw + ic * input_hw;
                float v = x0 * input_data[base + l_ih * input_size_w + l_iw]
                        + x1 * input_data[base + l_ih * input_size_w + r_iw]
                        + x2 * input_data[base + r_ih * input_size_w + r_iw]
                        + x3 * input_data[base + r_ih * input_size_w + l_iw];
                output_data[ib * output_chw + ic * output_hw + ih * isize_w + iw] = v;
            }
        }
    }

    // Bilinear resize kernel (FP16, NCHW) — same mapping as the float kernel.
    // Weights are computed in float and converted once; accumulation uses the
    // half intrinsics __hmul/__hadd (requires SM53+).
    // Launch layout: 2-D grid/block, x over output width, y over output height.
    __global__ void kernel_resize_cuda_half(
            const int *input_size,   // [b, c, h, w] of the input (device)
            const int *output_size,  // [b, c, h, w] of the output (device)
            const half *input_data,
            half *output_data) {
        int isize_b = output_size[0], isize_c = output_size[1], isize_h = output_size[2], isize_w = output_size[3];
        int input_size_c = input_size[1], input_size_h = input_size[2], input_size_w = input_size[3];

        // Scale factors mapping an output coordinate back into input space.
        float rate_h = 1.0f * input_size_h / isize_h;
        float rate_w = 1.0f * input_size_w / isize_w;

        int ih = blockIdx.y * blockDim.y + threadIdx.y;
        int iw = blockIdx.x * blockDim.x + threadIdx.x;
        if (ih >= isize_h || iw >= isize_w) {
            return;  // grid tail guard
        }

        int output_hw = isize_h * isize_w;
        int output_chw = isize_c * output_hw;
        int input_hw = input_size_h * input_size_w;
        int input_chw = input_size_c * input_hw;

        float ref_ih = ih * rate_h;
        float ref_iw = iw * rate_w;
        int l_ih = __float2int_rd(ref_ih);
        int r_ih = __float2int_ru(ref_ih - 1.0e-6f);
        int l_iw = __float2int_rd(ref_iw);
        int r_iw = __float2int_ru(ref_iw - 1.0e-6f);
        // Clamp: round-up can step one past the last valid row/column when
        // upsampling, which read out of bounds in the original.
        r_ih = min(r_ih, input_size_h - 1);
        r_iw = min(r_iw, input_size_w - 1);

        float factor_ih = ref_ih - l_ih;
        float factor_iw = ref_iw - l_iw;

        // Bilinear weights for (l_ih,l_iw), (l_ih,r_iw), (r_ih,r_iw), (r_ih,l_iw).
        half x0 = __float2half((1.0f - factor_ih) * (1.0f - factor_iw));
        half x1 = __float2half((1.0f - factor_ih) * factor_iw);  // fixed: original omitted * factor_iw
        half x2 = __float2half(factor_ih * factor_iw);
        half x3 = __float2half(factor_ih * (1.0f - factor_iw));

        for (int ib = 0; ib < isize_b; ++ib) {
            for (int ic = 0; ic < isize_c; ++ic) {
                int base = ib * input_chw + ic * input_hw;
                half add1 = __hadd(__hmul(x0, input_data[base + l_ih * input_size_w + l_iw]),
                                   __hmul(x1, input_data[base + l_ih * input_size_w + r_iw]));
                half add2 = __hadd(__hmul(x2, input_data[base + r_ih * input_size_w + r_iw]),
                                   __hmul(x3, input_data[base + r_ih * input_size_w + l_iw]));
                output_data[ib * output_chw + ic * output_hw + ih * isize_w + iw] = __hadd(add1, add2);
            }
        }
    }

    //
//    template<typename scalar_t>
//    __global__ void kernel_resize_2d_texture(
//            cudaTextureObject_t &texObj,
//            float rate_h,
//            float rate_w,
//            int output_h,
//            int output_w,
//            scalar_t *output_data) {
//        int y_idx = blockIdx.y * blockDim.y + threadIdx.y;
//        int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
//        if (y_idx < output_h && x_idx < output_w) {
//            scalar_t ref_y = y_idx * rate_h + 0.5f;
//            scalar_t ref_x = x_idx * rate_w + 0.5f;
//            int output_idx = y_idx * output_w + x_idx;
//
//            // TODO: texture-memory interpolation fails with an illegal device-memory access!!! 2021-06-01
//            float tmp = tex2D<scalar_t>(texObj, ref_x, ref_y);
//
//            // output_data[output_idx] = tex2D<float>(texObj, ref_x, ref_y);
//            output_data[output_idx] = tmp;
//            // output_data[output_idx] = tex2D<float>(texObj, 1, 1);
//        }
//        return;
//    }

    // Placeholder for a texture-memory based resize kernel (see the
    // commented-out kernel_resize_2d_texture above); currently unused.
    __global__ void kernel_resize_layer_texture() {}

    // Host-side launcher for the resize kernels.
    // Copies both 4-D shapes to the device, launches a 2-D grid covering the
    // output (w, h) plane on `stream`, then releases the temporaries.
    // Returns 0 on success, -1 for an unsupported data type.
    template<typename scalar_t>
    int resize_by_cuda(
            nvinfer1::DataType data_type,
            const Dims &input_size,
            const Dims &output_size,
            cudaStream_t &stream,
            const scalar_t *input_data,
            scalar_t *output_data) {
        // Validate the type up front: the original hit `default: return -1`
        // AFTER allocating, leaking dev_InputSize/dev_OutputSize.
        if (data_type != nvinfer1::DataType::kFLOAT && data_type != nvinfer1::DataType::kHALF) {
            return -1;
        }

        int output_h = output_size.d[2], output_w = output_size.d[3];
        const int ThreadXNumPerBlock = 32, ThreadYNumPerBlock = 32;
        // Ceil-div covers the grid tail without always adding a full extra block.
        int BlockXNum = (output_w + ThreadXNumPerBlock - 1) / ThreadXNumPerBlock;
        int BlockYNum = (output_h + ThreadYNumPerBlock - 1) / ThreadYNumPerBlock;
        // Fixed: the original passed ThreadXNumPerBlock for BOTH dims — only
        // harmless because X happened to equal Y.
        dim3 blockSize(ThreadXNumPerBlock, ThreadYNumPerBlock);
        dim3 gridSize(BlockXNum, BlockYNum);

        // NOTE(review): per-call cudaMalloc/cudaMemcpy/cudaFree inside the
        // enqueue path is expensive and synchronizing; consider caching these
        // buffers or passing the shapes by value as kernel arguments.
        // Assumes Dims::d elements are 32-bit ints — TODO confirm for the
        // TensorRT version in use.
        size_t len = sizeof(int) * 4;
        int *dev_OutputSize = nullptr;
        int *dev_InputSize = nullptr;
        CUDA_CHECK(cudaMalloc((void **) &dev_OutputSize, len));
        CUDA_CHECK(cudaMalloc((void **) &dev_InputSize, len));
        CUDA_CHECK(cudaMemcpy(dev_OutputSize, output_size.d, len, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(dev_InputSize, input_size.d, len, cudaMemcpyHostToDevice));

        if (data_type == nvinfer1::DataType::kFLOAT) {
            kernel_resize_cuda_float<<<gridSize, blockSize, 0, stream>>>(
                    dev_InputSize, dev_OutputSize,
                    (const float *) input_data, (float *) output_data);
        } else {  // kHALF (validated above)
            kernel_resize_cuda_half<<<gridSize, blockSize, 0, stream>>>(
                    dev_InputSize, dev_OutputSize,
                    (const half *) input_data, (half *) output_data);
        }

        // Catch launch-configuration errors without clearing the error state.
        CUDA_CHECK(cudaPeekAtLastError());

        // cudaFree typically synchronizes the device, so the kernel is done
        // with the shape buffers before they are released — TODO confirm if
        // moving to stream-ordered allocation.
        CUDA_CHECK(cudaFree(dev_OutputSize));
        CUDA_CHECK(cudaFree(dev_InputSize));

        return 0;
    }

    // Explicit instantiations for the two element types enqueue() dispatches on.
    template int resize_by_cuda<float>(
            nvinfer1::DataType data_type,
            const Dims &input_size,
            const Dims &output_size,
            cudaStream_t &stream,
            const float *input_data,
            float *output_data);

    template int resize_by_cuda<half>(
            nvinfer1::DataType data_type,
            const Dims &input_size,
            const Dims &output_size,
            cudaStream_t &stream,
            const half *input_data,
            half *output_data);

//    template<typename scalar_t>
//    void resize_by_2d_texture(
//            nvinfer1::DataType data_type,
//            const Dims &input_size,
//            const Dims &output_size,
//            cudaStream_t &stream,
//            const scalar_t *input_data,
//            scalar_t *output_data
//    ) {
//        int input_b = input_size.d[0];
//        int input_c = input_size.d[1];
//        int input_h = input_size.d[2];
//        int input_w = input_size.d[3];
//        int input_hw = input_h * input_w;
//        int input_chw = input_c * input_hw;
//
//        // int output_b = output_size.d[0];
//        int output_c = output_size.d[1];
//        int output_h = output_size.d[2];
//        int output_w = output_size.d[3];
//        int output_hw = output_h * output_w;
//        int output_chw = output_c * output_hw;
//
//        float rate_h = 1.0f * input_h / output_h;
//        float rate_w = 1.0f * input_w / output_w;
//
//        printf("Line 339!\n");
//
//        // Allocate CUDA array in device memory
//        cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
//        cudaArray_t cuArray = 0;
//        CUDA_CHECK(cudaMallocArray(&cuArray, &channelDesc, input_w, input_h));
//        size_t spitch = sizeof(scalar_t) * input_w;
//        for (int ib = 0; ib < input_b; ++ib) {
//            printf("Line 348!\n");
//            for (int ic = 0; ic < input_c; ++ic) {
//                scalar_t *map_output_data = output_data + ib * output_chw + ic * output_hw;
//                const scalar_t *map_input_data = input_data + ib * input_chw + ic * input_hw;
//                CUDA_CHECK(
//                        cudaMemcpy2DToArray(cuArray, 0, 0, map_input_data, spitch, sizeof(scalar_t) * input_w, input_h,
//                                            cudaMemcpyDeviceToDevice));
//
//                // Specify texture resource description
//                cudaResourceDesc resDesc;
//                memset(&resDesc, 0, sizeof(resDesc));
//                resDesc.resType = cudaResourceTypeArray;
//                resDesc.res.array.array = cuArray;
//
//                // Specify texture object parameters description
//                cudaTextureDesc texDesc;
//                memset(&texDesc, 0, sizeof(texDesc));
//                texDesc.addressMode[0] = cudaAddressModeClamp;
//                texDesc.addressMode[1] = cudaAddressModeClamp;
//                texDesc.filterMode = cudaFilterModeLinear;
//                texDesc.readMode = cudaReadModeElementType;
//                texDesc.normalizedCoords = 0;
//
//                // Create texture object
//                cudaTextureObject_t texObj = 0;
//                cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
//
//                int ThreadXNumPerBlock = 32, ThreadYNumPerBlock = 32;
//                int BlockXNum = output_w / ThreadXNumPerBlock + 1;
//                int BlockYNum = output_h / ThreadYNumPerBlock + 1;
//                dim3 blockSize(ThreadXNumPerBlock, ThreadXNumPerBlock);
//                dim3 gridSize(BlockXNum, BlockYNum);
//                kernel_resize_2d_texture<scalar_t><<<gridSize, blockSize, 0, stream>>>(texObj, rate_h, rate_w, output_h,
//                                                                                       output_w, map_output_data);
//
//                // Destroy texture object
//                cudaDestroyTextureObject(texObj);
//            }
//        }
//        // Destory CUDA array
//        cudaFreeArray(cuArray);
//        printf("Line 387!\n");
//    }
//
//    template void resize_by_2d_texture<float>(
//            nvinfer1::DataType data_type,
//            const Dims &input_size,
//            const Dims &output_size,
//            cudaStream_t &stream,
//            const float *input_data,
//            float *output_data
//    );
//
//    template void resize_by_2d_texture<half>(
//            nvinfer1::DataType data_type,
//            const Dims &input_size,
//            const Dims &output_size,
//            cudaStream_t &stream,
//            const half *input_data,
//            half *output_data
//    );

    // TODO: FP16 half-precision computation is not yet supported!!!
    // Execute the resize on `stream`. Dispatches on the runtime type of
    // input 0 (FLOAT or HALF); input 1 only contributes its shape via
    // outputDesc. Returns 0 on success, non-zero on failure.
    // Changes vs. original: removed per-invocation debug printf spam, removed
    // unused float-cast locals, and replaced exit(-1) — which killed the whole
    // process from inside the inference path — with an error return code.
    int CustomResizePlugin::enqueue(
            const PluginTensorDesc *inputDesc,
            const PluginTensorDesc *outputDesc,
            const void *const *inputs,
            void *const *outputs,
            void *workspace,
            cudaStream_t stream) {
        Dims input_size = inputDesc[0].dims;
        Dims output_size = outputDesc[0].dims;

        switch (inputDesc[0].type) {
            case nvinfer1::DataType::kFLOAT:
                return resize_by_cuda<float>(
                        nvinfer1::DataType::kFLOAT,
                        input_size,
                        output_size,
                        stream,
                        (const float *) inputs[0],
                        (float *) outputs[0]);
            case nvinfer1::DataType::kHALF:
                return resize_by_cuda<half>(
                        nvinfer1::DataType::kHALF,
                        input_size,
                        output_size,
                        stream,
                        (const half *) inputs[0],
                        (half *) outputs[0]);
            default:
                std::cout << " not support data type! " << std::endl;
                return -1;  // report failure to TensorRT instead of exiting
        }
    }

    // No scratch workspace is needed: the kernels write directly to the
    // output, and the shape buffers are allocated ad hoc in resize_by_cuda.
    size_t CustomResizePlugin::getWorkspaceSize(
            const PluginTensorDesc *inputs, int nbInputs, const PluginTensorDesc *outputs, int nbOutputs) const {
        return 0;
    }


    //////////////////////////////////////////////////////////////////////////////////
    // Static creator state: an (empty) field collection — this plugin takes
    // no creation-time attributes.
    PluginFieldCollection CustomResizePluginCreator::mFC{};
    std::vector <PluginField> CustomResizePluginCreator::mPluginAttributes;

    // Build the (empty) field collection advertised via getFieldNames().
    CustomResizePluginCreator::CustomResizePluginCreator() {
        mPluginAttributes.clear();

        mFC.nbFields = mPluginAttributes.size();
        mFC.fields = mPluginAttributes.data();
    }

    // Destructor: nothing to release.
    CustomResizePluginCreator::~CustomResizePluginCreator() {

    }

    // Must match CustomResizePlugin::getPluginType().
    const char *CustomResizePluginCreator::getPluginName() const {
        return "CustomResizeLayer_TRT";
    }

    // Must match CustomResizePlugin::getPluginVersion().
    const char *CustomResizePluginCreator::getPluginVersion() const {
        return "1";
    }

    // Expose the (empty) set of creation-time plugin fields.
    const PluginFieldCollection *CustomResizePluginCreator::getFieldNames() {
        return &mFC;
    }

    // Create a fresh plugin instance; `fc` is ignored because this plugin has
    // no creation-time attributes. TensorRT owns the returned object and will
    // dispose of it via destroy().
    IPluginV2DynamicExt *CustomResizePluginCreator::createPlugin(const char *name, const PluginFieldCollection *fc) {
        // assert(fc->nbFields == 2);
        // assert(strcmp(fc->fields[0].name, "start") == 0);
        // assert(strcmp(fc->fields[1].name, "stride") == 0);
        // int *start = (int*)(fc->fields[0].data);
        // int *stride = (int*)(fc->fields[1].data);

        CustomResizePlugin *obj = new CustomResizePlugin();
        obj->setPluginNamespace(mNamespace.c_str());
        return obj;
    }

    // Recreate a plugin from its serialized form (an empty payload here).
    IPluginV2DynamicExt *
    CustomResizePluginCreator::deserializePlugin(const char *name, const void *serialData, size_t serialLength) {
        // This object will be deleted when the network is destroyed, which will
        // call CustomResizePlugin::destroy()
        CustomResizePlugin *obj = new CustomResizePlugin(serialData, serialLength);
        obj->setPluginNamespace(mNamespace.c_str());
        return obj;
    }


}

