//
// Created by xuxu on 2020/8/13.
//

#ifndef ANDROID_RUN_TFLITE_CLKERNEL_H
#define ANDROID_RUN_TFLITE_CLKERNEL_H

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

#include "opencl_wrapper.h"
#include "opencl_runtime.h"

// Channel tiling factors: input/output channels are processed in groups of 4,
// matching the FLT4 vector width used by the OpenCL kernels.
#define CI_TILE 4
#define CO_TILE 4

// Local work-group size tables for the winograd 4x4->36 (k0) and 36->4x4 (k2)
// kernels; defined elsewhere. Entry [0] is used as the local size in Run().
extern std::vector<size_t> k0_local[];
extern std::vector<size_t> k2_local[];

// Matrix multiply of a_mat (m x k) by b_mat (k x n), returning the m x n
// product. Presumably a CPU reference used to check GPU output — defined elsewhere.
std::vector<float> Multiply(const std::vector<float> &a_mat, const std::vector<float> &b_mat, int m, int n, int k);

/// Wraps the OpenCL 2D convolution: parses the shape/attribute string, builds
/// the kernels (direct or winograd 4x4->6x6), binds arguments and runs them.
/// cl_mem buffers are created in InitMemory() (defined elsewhere) and are not
/// released by the destructor — NOTE(review): confirm their ownership.
class ConvolutionKernel
{
public:
    OpenCLRuntime *runtime;     // process-wide singleton, not owned
    StorageType storage_type;
    Layout layout;
    DataType data_type;
    std::string attr_str;       // encoded shapes/attributes, parsed in the ctor
    bool enable_winograd;
    bool single_thread_2xFLT4 = false; // one work-item writes 2 FLT4 along W

    // Shapes parsed from attr_str (NHWC order).
    int N{};
    int CI{}, IH{}, IW{};
    int CO{}, OH{}, OW{};
    int KH{}, KW{};
    int strideH{}, strideW{};
    int padLeft{}, padRight{}, padTop{}, padBottom{};
    int dilationH{}, dilationW{};
    bool is_relu = false;
    bool is_relu6 = false;
    int CI_ALIGN{}, CO_ALIGN{}, CI_SLICES{}, CO_SLICES{};
    bool use_winograd = false;
    int TILE_X{}, TILE_Y{}, TILE_XY{}; // only for winograd (4x4 output tiles)

    // Zero-initialized so the destructor can release exactly the kernels that
    // Compile() created (previously these held indeterminate values, making
    // clReleaseKernel on an un-Compiled object undefined behavior).
    cl_kernel kernel_4x4to36{};
    cl_kernel kernel_conv{};
    cl_kernel kernel_conv1{};
    cl_kernel kernel_36to4x4{};

    cl_mem input_mem{}, output_mem{};
    cl_mem weight_buffer{}, bias_buffer{};
    cl_mem winograd_mem0{}, winograd_mem1{};

    /// Parses attr_str (format
    /// "inputNHWC_NxHxWxC_outputNHWC_..._kernelHW_..._strideHW_..._padTopBottomLeftRight_..._dilationHW_HxW")
    /// and derives channel-aligned sizes, slice counts and winograd tiling.
    ConvolutionKernel(StorageType storage_type,
                      Layout layout,
                      DataType data_type,
                      const std::string &attr_str,
                      bool enable_winograd,
                      bool single_thread_2xFLT4)
            : storage_type(storage_type), layout(layout), data_type(data_type), attr_str(attr_str),
              enable_winograd(enable_winograd), single_thread_2xFLT4(single_thread_2xFLT4)
    {
        runtime = OpenCLRuntime::GetSingleton();
        assert(layout == Layout::NHWC4 || layout == Layout::NC4HW4 || layout == Layout::NHC4W4);
        const int parsed = sscanf(attr_str.c_str(),
               "inputNHWC_%dx%dx%dx%d_outputNHWC_%dx%dx%dx%d_kernelHW_%dx%d_strideHW_%dx%d_padTopBottomLeftRight_%dx%dx%dx%d_dilationHW_%dx%d",
               &N, &IH, &IW, &CI,
               &N, &OH, &OW, &CO,
               &KH, &KW,
               &strideH, &strideW,
               &padTop, &padBottom, &padLeft, &padRight,
               &dilationH, &dilationW);
        // All 18 fields must match, otherwise the shapes above are garbage.
        assert(parsed == 18 && "attr_str does not match the expected format");
        (void) parsed;
        CI_ALIGN = UP_DIV(CI, CI_TILE) * CI_TILE;
        CO_ALIGN = UP_DIV(CO, CO_TILE) * CO_TILE;
        CI_SLICES = UP_DIV(CI, CI_TILE);
        CO_SLICES = UP_DIV(CO, CO_TILE);

        // Winograd F(4x4,3x3): one tile covers a 4x4 output patch.
        TILE_X = UP_DIV(IW, 4);
        TILE_Y = UP_DIV(IH, 4);
        TILE_XY = TILE_X * TILE_Y;
        use_winograd = enable_winograd && IsSuitableForWinograd4x4To6x6();
    }

    /// Releases every kernel that was created. kernel_conv1 was previously
    /// leaked (built in the non-winograd path of Compile() but never released).
    virtual ~ConvolutionKernel()
    {
        if (kernel_conv)
            mslite::clReleaseKernel(kernel_conv);
        if (kernel_conv1)
            mslite::clReleaseKernel(kernel_conv1);
        if (kernel_4x4to36)
            mslite::clReleaseKernel(kernel_4x4to36);
        if (kernel_36to4x4)
            mslite::clReleaseKernel(kernel_36to4x4);
    }

    /// Builds the OpenCL kernels. Winograd path builds the three-stage
    /// pipeline; the direct path builds both a generated kernel (kernel_conv)
    /// and the generic file kernel (kernel_conv1) so Run() can compare them.
    void Compile()
    {
        // FLT/FLT4 map to half or float depending on the requested precision.
        std::string build_option;
        if (data_type == DataType::FLOAT16)
            build_option = "-DFLT=half -DFLT4=half4 -DFLT16=half16 "
                           "-DWRITE_IMAGE=write_imageh -DREAD_IMAGE=read_imageh -DTO_FLT4=convert_half4";
        else
            build_option = "-DFLT=float -DFLT4=float4 -DFLT16=float16 "
                           "-DWRITE_IMAGE=write_imagef -DREAD_IMAGE=read_imagef -DTO_FLT4=convert_float4";
        if (single_thread_2xFLT4)
            build_option += " -Dsingle_thread_2xFLT4";

        if (use_winograd)
        {
            kernel_4x4to36 = OpenCLRuntime::BuildKernel(ReadKernelSource("cl/winograd_4x4to36.cl"),
                                                        "Winograd4x4To36", build_option);
            kernel_conv = OpenCLRuntime::BuildKernel(ReadKernelSource("cl/winograd_conv.cl"),
                                                     "WinogradConvolution", build_option);
            kernel_36to4x4 = OpenCLRuntime::BuildKernel(ReadKernelSource("cl/winograd_36to4x4.cl"),
                                                        "Winograd36To4x4", build_option);
        }
        else
        {
            kernel_conv = OpenCLRuntime::BuildKernel(CodeGenConvolution(), "Convolution", build_option);
            kernel_conv1 = OpenCLRuntime::BuildKernel(ReadKernelSource("cl/convolution.cl"), "Convolution",
                                                      build_option);
        }
    }

    int InitMemory();

    void SetInputData(const std::string &data_path);

    /// Binds buffers and shape arguments to the compiled kernels. Must be
    /// called after Compile() and InitMemory().
    int BindArgs()
    {
        int arg_cn = 0;
        if (use_winograd)
        {
            // Stage 1: input image -> 36 transformed tiles per 4x4 patch.
            arg_cn = 0;
            cl_int4 _4x4to36_in_shape = {N, IH, IW, CI_SLICES};
            cl_int4 _4x4to36_out_shape = {N, 36, TILE_XY, CI_SLICES};
            mslite::clSetKernelArg(kernel_4x4to36, arg_cn++, sizeof(cl_mem), (void *) &input_mem);
            mslite::clSetKernelArg(kernel_4x4to36, arg_cn++, sizeof(cl_mem), (void *) &winograd_mem0);
            mslite::clSetKernelArg(kernel_4x4to36, arg_cn++, sizeof(cl_int4), (void *) &_4x4to36_in_shape);
            mslite::clSetKernelArg(kernel_4x4to36, arg_cn++, sizeof(cl_int4), (void *) &_4x4to36_out_shape);

            // Stage 2: element-wise GEMM in the transformed domain.
            arg_cn = 0;
            cl_int4 conv_in_shape = {N, 36, TILE_XY, CI_SLICES};
            cl_int4 conv_out_shape = {N, 36, TILE_XY, CO_SLICES};
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &winograd_mem0);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &winograd_mem1);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &weight_buffer);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_int4), (void *) &conv_in_shape);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_int4), (void *) &conv_out_shape);

            // Stage 3: inverse transform back to the 4x4 output tiles.
            // NOTE(review): shape.y is 16 here while stage 2 outputs 36 —
            // confirm this matches the Winograd36To4x4 kernel's expectation.
            arg_cn = 0;
            cl_int4 _36to4x4_in_shape = {N, 16, TILE_XY, CO_SLICES};
            cl_int4 _36to4x4_out_shape = {N, OH, OW, CO_SLICES};
            mslite::clSetKernelArg(kernel_36to4x4, arg_cn++, sizeof(cl_mem), (void *) &winograd_mem1);
            mslite::clSetKernelArg(kernel_36to4x4, arg_cn++, sizeof(cl_mem), (void *) &output_mem);
            mslite::clSetKernelArg(kernel_36to4x4, arg_cn++, sizeof(cl_mem), (void *) &bias_buffer);
            mslite::clSetKernelArg(kernel_36to4x4, arg_cn++, sizeof(cl_int4), (void *) &_36to4x4_in_shape);
            mslite::clSetKernelArg(kernel_36to4x4, arg_cn++, sizeof(cl_int4), (void *) &_36to4x4_out_shape);
        }
        else
        {
            cl_int4 input_shape = {N, IH, IW, CI_SLICES};
            cl_int4 output_shape = {N, OH, OW, CO_SLICES};
            cl_int4 kernel_stride = {KH, KW, strideH, strideW};
            cl_int4 pad = {padTop, padBottom, padLeft, padRight};

            // Generated kernel: shapes are baked into the source, so only
            // buffers and the two shape vectors are passed.
            arg_cn = 0;
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &input_mem);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &output_mem);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &weight_buffer);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_mem), (void *) &bias_buffer);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_int4), (void *) &input_shape);
            mslite::clSetKernelArg(kernel_conv, arg_cn++, sizeof(cl_int4), (void *) &output_shape);

            // Generic file kernel: additionally needs kernel/stride and pads.
            arg_cn = 0;
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_mem), (void *) &input_mem);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_mem), (void *) &output_mem);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_mem), (void *) &weight_buffer);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_mem), (void *) &bias_buffer);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_int4), (void *) &input_shape);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_int4), (void *) &output_shape);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_int4), (void *) &kernel_stride);
            mslite::clSetKernelArg(kernel_conv1, arg_cn++, sizeof(cl_int4), (void *) &pad);
        }
        return CL_SUCCESS;
    }

    /// Enqueues the kernels num_runs times and prints timing. The direct path
    /// runs both kernel variants and prints the relative speed difference.
    int Run(int num_runs)
    {
        if (use_winograd)
        {
            auto time0 = runtime->RunKernel(kernel_4x4to36, {size_t(TILE_XY), 6, size_t(CI_SLICES)}, k0_local[0],
                                            num_runs);
            auto time1 = runtime->RunKernel(kernel_conv, {size_t(UP_DIV(TILE_XY, 2)), 36, size_t(UP_DIV(CO_SLICES, 2))},
                                            {8, 6, 2}, num_runs);
            auto time2 = runtime->RunKernel(kernel_36to4x4, {size_t(TILE_XY), 4, size_t(CO_SLICES)}, k2_local[0],
                                            num_runs);
            printf("winograd used %.3fms\n", time0 + time1 + time2);
        }
        else
        {
            std::vector<size_t> global, local;
            SetConvGlobalLocal(&global, &local);
            auto time0 = runtime->RunKernel(kernel_conv, global, local, num_runs);
            auto time1 = runtime->RunKernel(kernel_conv1, global, local, num_runs);
            // How much faster the generated kernel is than the generic one.
            printf("fast %d%%\n", int(time1 / time0 * 100 - 100));
        }
        return CL_SUCCESS;
    }

    void CompareOutput(const std::string &data_path) const;

private:
    /// Winograd F(4x4,3x3) is only valid for 3x3 / stride 1 / pad 1 /
    /// dilation 1 convolutions with equal input/output HxW, and only pays off
    /// when there are enough channels and tiles to fill the device.
    bool IsSuitableForWinograd4x4To6x6() const
    {
        const bool suitable_attributes =
                KH == 3 && KW == 3 &&
                strideH == 1 && strideW == 1 &&
                padTop == 1 && padBottom == 1 && padLeft == 1 && padRight == 1 &&
                dilationH == 1 && dilationW == 1 &&
                IH == OH && IW == OW;

        // Measured: winograd 7ms vs 9ms without, for inputNHWC_1x32x512x50_outputNHWC_1x32x512x48_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1
        // But small CI/CO lose: winograd 5.1ms vs 2.62ms without, for inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1
        // Mali among other devices has smaller SIMD line size
        const int min_depth = 12;// device.IsMali() ? 16 : 32;
        const int min_hw = 16;//device.IsMali() ? 32 : 128;
        const bool recommended_channels = CI_SLICES >= min_depth && CO_SLICES >= min_depth;
        const bool recommended_hw = TILE_XY >= min_hw;
        return suitable_attributes && recommended_channels && recommended_hw;
    }

    std::string CodeGenConvolution() const;

    std::vector<float> RearrangeWeightsToWinograd4x4To6x6Weights(const std::string &weight_file) const;

    template<class T>
    std::vector<T> RearrangeWeightsToOHWIOGroupI4O4(std::vector<T> weight_OHWI, int OGroup);

    /// Heuristically chooses NDRange global/local sizes {W, H, CO_SLICES} for
    /// the direct convolution kernels. global is rounded up to a multiple of
    /// local in every dimension.
    int SetConvGlobalLocal(std::vector<size_t> *global, std::vector<size_t> *local)
    {
        const cl_int3 work_group_size = {4, 4, 1};
        const cl_int3 max_work_item_sizes = {512, 512, 512};
        const int max_work_group_size = 512;
        const int max_z_size = std::min(16, max_work_item_sizes.z);

        // Start from OH/OW/CO_SLICES, each aligned up to the 4x4x1 work group.
        int global_h = UP_DIV(OH, work_group_size.x) * work_group_size.x;
        int global_w = UP_DIV(OW, work_group_size.y) * work_group_size.y;
        int global_c = UP_DIV(CO_SLICES, work_group_size.z) * work_group_size.z;

        // Derive the local size: largest feasible z, then split the remaining
        // work-group budget between w and h.
        int local_c = GetBiggestDivider(global_c, max_z_size);
        int local_hw_size = std::min(256, max_work_group_size) / local_c;
        int local_w = std::min(global_w, local_hw_size);
        int local_h = std::min(local_hw_size / local_w, global_h);
        if (local_h == global_h && global_h % 2 == 0)
            local_h = global_h / 2;

        global->resize(3);
        local->resize(3);
        global->at(0) = UP_DIV(OW, local_w) * local_w;
        global->at(1) = UP_DIV(OH, local_h) * local_h;
        global->at(2) = UP_DIV(CO_SLICES, local_c) * local_c;
        local->at(0) = local_w;
        local->at(1) = local_h;
        local->at(2) = local_c;

        // 2 float4 per thread: halve the global W dimension.
        if (single_thread_2xFLT4)
        {
            global->at(0) = UP_DIV(global->at(0), 2);
            if (local->at(0) > global->at(0))
                local->at(0) = global->at(0);
//            local->at(0) = UP_DIV(local->at(0), 2);
//            local->at(0)=4;
        }

        // %zu is the portable size_t specifier (%lu is wrong on LLP64).
        printf("global=%zu %zu %zu local=%zu %zu %zu\n", global->at(0), global->at(1), global->at(2),
               local->at(0), local->at(1), local->at(2));
        return CL_SUCCESS;
    }

    void Write2File(cl_mem mem, const std::string &file_name) const;
};

#endif //ANDROID_RUN_TFLITE_CLKERNEL_H