//
// Created by xuxu on 2020/8/18.
//

#include <cstdio>
#include <cstring>

#include <cmath>
#include <fstream>
#include <string>
#include <vector>

#include "CLKernel.h"

// Candidate 3D local work-group sizes, ordered from largest to smallest
// (each successive row shrinks one dimension).
// Presumably a tuning table for a specific kernel ("k0"); not referenced in
// this translation unit — TODO confirm which kernel consumes it.
std::vector<size_t> k0_local[] = {{8, 6, 4},
                                  {8, 6, 2},
                                  {4, 6, 2},
                                  {4, 6, 2},
                                  {2, 6, 2},
                                  {2, 6, 1},
                                  {1, 6, 1},
                                  {1, 3, 1},
                                  {1, 1, 1}};

// Candidate 3D local work-group sizes for another kernel ("k2"), again ordered
// from largest to smallest. Not referenced in this translation unit —
// TODO confirm which kernel consumes it.
std::vector<size_t> k2_local[] = {{32, 4, 2},
                                  {16, 4, 2},
                                  {16, 4, 1},
                                  {8,  4, 1},
                                  {4,  4, 1},
                                  {2,  4, 1},
                                  {1,  4, 1},
                                  {1,  2, 1},
                                  {1,  1, 1}};

// Dense row-major matrix product: (m x n) * (n x k) -> (m x k).
// a_mat holds m*n floats, b_mat holds n*k floats; the result holds m*k floats.
std::vector<float> Multiply(const std::vector<float> &a_mat, const std::vector<float> &b_mat, int m, int n, int k)
{
    std::vector<float> result(m * k);
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < k; ++col)
        {
            // Dot product of row `row` of a_mat with column `col` of b_mat.
            float acc = 0.0f;
            for (int inner = 0; inner < n; ++inner)
                acc += a_mat[row * n + inner] * b_mat[inner * k + col];
            result[row * k + col] = acc;
        }
    }
    return result;
}

// Generates OpenCL-C source for a direct convolution kernel specialized to this
// layer: shapes, strides and pads are baked in via #defines, and activation /
// boundary code is emitted only when the layer actually needs it.
//
// Work-item mapping: gid0 -> ow (times 2 when single_thread_2xFLT4), gid1 -> oh,
// gid2 -> co_slice. Images are addressed as (w, slice * H + h).
std::string ConvolutionKernel::CodeGenConvolution() const
{
    std::string code;
    code += "#pragma OPENCL EXTENSION cl_khr_fp16 : enable\n\n";

    code += "__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;\n"
            "\n"
            "__kernel void Convolution(__read_only image2d_t input,\n"
            "                          __write_only image2d_t output,\n"
            "                          __global FLT4 *weight,\n"
            "                          __global FLT4 *bias,\n"
            "                          const int4 input_shape,\n"
            "                          const int4 output_shape)\n"
            "{\n"
            "    int oh = get_global_id(1);\n";

    if (single_thread_2xFLT4)
    {
        // Each work-item computes two horizontally adjacent outputs: ow and ow + 1.
        code += "    int ow = get_global_id(0) * 2;\n";
    }
    else
    {
        code += "    int ow = get_global_id(0);\n";
    }
    code += "    int co_slice = get_global_id(2);\n"
            "\n"
            "    int CI_SLICES = input_shape.w;\n"
            "    int CO_SLICES = output_shape.w;\n\n";

    // Bake the static layer attributes into the generated source.
    code += "    #define IH " + std::to_string(IH) + "\n";
    code += "    #define IW " + std::to_string(IW) + "\n";
    code += "    #define OH " + std::to_string(OH) + "\n";
    code += "    #define OW " + std::to_string(OW) + "\n";
    code += "    #define KH " + std::to_string(KH) + "\n";
    code += "    #define KW " + std::to_string(KW) + "\n";
    code += "    #define strideH " + std::to_string(strideH) + "\n";
    code += "    #define strideW " + std::to_string(strideW) + "\n";
    code += "    #define padTop " + std::to_string(padTop) + "\n";
    code += "    #define padLeft " + std::to_string(padLeft) + "\n\n";

    code += "    if (oh >= OH || ow >= OW || co_slice >= CO_SLICES)\n"
            "        return;\n\n";

    // With an odd OW the last 2-wide work-item owns only one valid output column,
    // so emit a runtime flag for it. The flag is only meaningful in the 2xFLT4
    // path; previously it was also emitted (unused) in the scalar path.
    bool check_ow = single_thread_2xFLT4 && (OW % 2) == 1;
    if (check_ow)
    {
        code += "    int last_is_double = 1;\n"
                "    if (ow + 1 >= OW)\n"
                "        last_is_double = 0;\n\n";
    }

    code += "    FLT4 out0 = (FLT4)(0.0f, 0.0f, 0.0f, 0.0f);\n";
    if (single_thread_2xFLT4)
    {
        code += "    FLT4 out1 = (FLT4)(0.0f, 0.0f, 0.0f, 0.0f);\n";
    }

    code += "    __global FLT4 *w = weight + co_slice * KH * KW * CI_SLICES * 4;\n"
            "\n"
            "    for (int kh = 0; kh < KH; ++kh)\n"
            "    {\n"
            "        int ih = kh + oh * strideH - padTop;\n"
            "        for (int kw = 0; kw < KW; ++kw)\n"
            "        {\n";

    if (single_thread_2xFLT4)
    {
        // Vertical bounds need an explicit check only when the layer pads rows;
        // horizontal out-of-range reads rely on CLK_ADDRESS_CLAMP returning zeros.
        if (padTop || padBottom)
        {
            code += "if (ih >= 0 && ih < IH)\n"
                    "{\n";
        }

        code += "            int iw0 = kw + (ow + 0) * strideW - padLeft;\n";
        if (check_ow)
        {
            code += "            if (last_is_double)\n"
                    "            {\n";
        }

        // Main 2-wide body: accumulate both output columns from the same weights.
        code += "                int iw1 = kw + (ow + 1) * strideW - padLeft;\n"
                "                for (int ci_slice = 0; ci_slice < CI_SLICES; ci_slice++)\n"
                "                {\n"
                "                    FLT4 in0 = READ_IMAGE(input, smp_zero, (int2)(iw0, ci_slice * IH + ih));\n"
                "                    out0 += w[0] * in0.x;\n"
                "                    out0 += w[1] * in0.y;\n"
                "                    out0 += w[2] * in0.z;\n"
                "                    out0 += w[3] * in0.w;\n"
                "                    FLT4 in1 = READ_IMAGE(input, smp_zero, (int2)(iw1, ci_slice * IH + ih));\n"
                "                    out1 += w[0] * in1.x;\n"
                "                    out1 += w[1] * in1.y;\n"
                "                    out1 += w[2] * in1.z;\n"
                "                    out1 += w[3] * in1.w;\n"
                "                    w += 4;\n"
                "                }\n";
        if (check_ow)
        {
            // Single-column tail: only out0 is accumulated.
            code +=
                    "            }\n"
                    "            else\n"
                    "            {\n"
                    "                for (int ci_slice = 0; ci_slice < CI_SLICES; ci_slice++)\n"
                    "                {\n"
                    "                    FLT4 in0 = READ_IMAGE(input, smp_zero, (int2)(iw0, ci_slice * IH + ih));\n"
                    "                    out0 += w[0] * in0.x;\n"
                    "                    out0 += w[1] * in0.y;\n"
                    "                    out0 += w[2] * in0.z;\n"
                    "                    out0 += w[3] * in0.w;\n"
                    "                    w += 4;\n"
                    "                }\n"
                    "            }\n";
        }
        if (padTop || padBottom)
        {
            // Keep the weight pointer in sync when a padded row is skipped.
            code += "}\n"
                    "else\n"
                    "{\n"
                    "    w += CI_SLICES * 4;\n"
                    "}\n";
        }
    }
    else
    {
        code += "            int iw = kw + ow * strideW - padLeft;\n";
        if (padTop || padBottom || padLeft || padRight)
        {
            code += "if (ih >= 0 && ih < IH && iw >= 0 && iw < IW)\n"
                    "{\n";
        }

        code += "                for (int ci_slice = 0; ci_slice < CI_SLICES; ci_slice++)\n"
                "                {\n"
                "                    FLT4 in0 = READ_IMAGE(input, smp_zero, (int2)(iw, ci_slice * IH + ih));\n"
                "                    out0 += w[0] * in0.x;\n"
                "                    out0 += w[1] * in0.y;\n"
                "                    out0 += w[2] * in0.z;\n"
                "                    out0 += w[3] * in0.w;\n"
                "                    w += 4;\n"
                "                }\n";

        if (padTop || padBottom || padLeft || padRight)
        {
            // Keep the weight pointer in sync when a padded position is skipped.
            code += "}\n"
                    "else\n"
                    "{\n"
                    "    w += CI_SLICES * 4;\n"
                    "}\n";
        }
    }

    // KH/KW loops over
    code += "        }\n"
            "    }\n\n";

    // Bias + optional activation + store for the first output column.
    code += "    out0 = out0 + bias[co_slice];\n";
    if (is_relu)
    {
        code += "    out0 = max(out0, (FLT4)(0.0f));\n";
    }
    else if (is_relu6)
    {
        code += "    out0 = clamp(out0, (FLT4)(0.0f), (FLT4)(6.0f));\n";
    }
    code += "    WRITE_IMAGE(output, (int2)(ow + 0, co_slice * OH + oh), out0);\n";

    if (single_thread_2xFLT4)
    {
        if (check_ow)
        {
            // Fix: the '\n' after the condition was missing, fusing
            // "if (last_is_double)" and "{" onto one generated line.
            code += "    if (last_is_double)\n"
                    "    {\n";
        }
        code += "    out1 = out1 + bias[co_slice];\n";
        if (is_relu)
        {
            code += "    out1 = max(out1, (FLT4)(0.0f));\n";
        }
        else if (is_relu6)
        {
            code += "    out1 = clamp(out1, (FLT4)(0.0f), (FLT4)(6.0f));\n";
        }
        code += "    WRITE_IMAGE(output, (int2)(ow + 1, co_slice * OH + oh), out1);\n";
        if (check_ow)
        {
            code += "}\n";
        }
    }

    code += "}\n"; // kernel function over

//    std::cout << code << std::endl;
    return code;
}

int ConvolutionKernel::InitMemory()
{
    if (storage_type == StorageType::TEXTURE_2D)
    {
        size_t in_width, in_height;
        size_t out_width, out_height;
        if (layout == Layout::NHWC4)
        {
            in_width = IW * CI_SLICES, in_height = IH;
            out_width = OW * CO_SLICES, out_height = OH;
        }
        else // if (layout == Layout::NC4HW4 || layout == Layout::NHC4W4)
        {
            in_width = IW, in_height = IH * CI_SLICES;
            out_width = OW, out_height = OH * CO_SLICES;
        }
        input_mem = runtime->CreateImage2D(in_width, in_height, CL_MEM_READ_WRITE, data_type);
        output_mem = runtime->CreateImage2D(out_width, out_height, CL_MEM_READ_WRITE, data_type);
    }
    else
    {
        int input_size = N * IH * IW * CI_ALIGN * SizeOf(data_type);
        int output_size = N * OH * OW * CO_ALIGN * SizeOf(data_type);
        input_mem = runtime->CreateBuffer(input_size, CL_MEM_READ_ONLY, data_type);
        output_mem = runtime->CreateBuffer(output_size, CL_MEM_WRITE_ONLY, data_type);
    }

    if (use_winograd)
    {
        size_t H = 36;
        size_t W = TILE_XY;
        winograd_mem0 = runtime->CreateImage2D(W, CI_SLICES * H, CL_MEM_READ_WRITE, data_type);
        winograd_mem1 = runtime->CreateImage2D(W, CO_SLICES * H, CL_MEM_READ_WRITE, data_type);
    }

    int weight_size;
    if (use_winograd)
        weight_size = UP_DIV(CO_SLICES, 2) * 6 * 6 * CI_SLICES * 2 * CI_TILE * CO_TILE * SizeOf(data_type);
    else
        weight_size = CO_SLICES * KH * KW * CI_SLICES * CI_TILE * CO_TILE * SizeOf(data_type);
    weight_buffer = runtime->CreateBuffer(weight_size, CL_MEM_READ_ONLY, data_type);
    bias_buffer = runtime->CreateBuffer(CO_ALIGN * SizeOf(data_type), CL_MEM_READ_ONLY, data_type);
    return CL_SUCCESS;
}

void ConvolutionKernel::SetInputData(const std::string &data_path)
{
    std::string input_file = data_path + "/" + attr_str + "/input_" + LayoutNames[layout] + ".bin";
    runtime->SetData(input_mem, input_file);

    if (use_winograd)
    {
//            std::string weight_file = data_path + "/" + attr_str + "/weight_winograd7D.bin";
//            runtime->SetData(weight_buffer, weight_file);

        std::string weight_file = data_path + "/" + attr_str + "/weight_OHWI.bin";
        auto weight_O66I_fp32 = RearrangeWeightsToWinograd4x4To6x6Weights(weight_file);
        auto weight_7D_fp32 = RearrangeWeightsToOHWIOGroupI4O4(weight_O66I_fp32, 2);
        if (data_type == DataType::FLOAT32)
            runtime->SetBufferData(weight_buffer, reinterpret_cast<const char *>(weight_7D_fp32.data()));
        else
        {
            auto weight_7D_fp16 = FP32ToFP16(weight_7D_fp32);
            runtime->SetBufferData(weight_buffer, reinterpret_cast<const char *>(weight_7D_fp16.data()));
        }
    }
    else
    {
//            std::string weight_file = data_path + "/" + attr_str + "/weight_OHWIIO.bin";
//            runtime->SetData(weight_buffer, weight_file);
        std::string weight_file = data_path + "/" + attr_str + "/weight_OHWI.bin";
        if (data_type == DataType::FLOAT32)
        {
            auto weight_OHWI = ReadDataFromFile<float>(weight_file, CO * KH * KW * CI);
            auto weight_7D = RearrangeWeightsToOHWIOGroupI4O4<float>(weight_OHWI, 1);
            runtime->SetBufferData(weight_buffer, reinterpret_cast<const char *>(weight_7D.data()));
        }
        else
        {
            auto weight_OHWI = ReadDataFromFile<short>(weight_file, CO * KH * KW * CI);
            auto weight_7D = RearrangeWeightsToOHWIOGroupI4O4<short>(weight_OHWI, 1);
            runtime->SetBufferData(weight_buffer, reinterpret_cast<const char *>(weight_7D.data()));
        }
    }

    std::string bias_file = data_path + "/" + attr_str + "/bias_C4.bin";
    runtime->SetData(bias_buffer, bias_file);
}

// Loads plain OHWI weights and transforms every 3x3 filter plane into its 6x6
// Winograd F(4x4, 3x3) counterpart: out = G * g * Gt. Returns the transformed
// weights in O-6-6-I order as fp32 (fp16 files are widened first).
// NOTE(review): the element count uses KH * KH — correct only because this path
// requires a square 3x3 kernel (KH == KW == 3); confirm before reusing.
std::vector<float> ConvolutionKernel::RearrangeWeightsToWinograd4x4To6x6Weights(const std::string &weight_file) const
{
    std::vector<float> weight_O33I;
    if (data_type == DataType::FLOAT32)
        weight_O33I = ReadDataFromFile<float>(weight_file, CO * KH * KH * CI);
    else
    {
        auto weight_O33I_fp16 = ReadDataFromFile<short>(weight_file, CO * KH * KH * CI);
        weight_O33I = FP16ToFP32(weight_O33I_fp16);
    }

    std::vector<float> weight_O66I(CO * 6 * 6 * CI);

    // Gt (3x6): transposed filter-transform matrix for F(4x4, 3x3).
    std::vector<float> Gt = {1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 0.0000000000,
                             0.0000000000, 0.7071067691, -0.7071067691, 1.4142135382, -1.4142135382, 0.0000000000,
                             0.0000000000, 0.4999999702, 0.4999999702, 1.9999998808, 1.9999998808, 1.0000000000};

    // G (6x3) = transpose(Gt).
    std::vector<float> G(Gt.size());
    for (int y = 0; y < 3; ++y)
    {
        for (int x = 0; x < 6; ++x)
        {
            G[x * 3 + y] = Gt[y * 6 + x];
        }
    }

    for (int co = 0; co < CO; ++co)
    {
        for (int ci = 0; ci < CI; ++ci)
        {
            // Gather one 3x3 filter plane. (Fix: index the vector directly —
            // the old reinterpret_cast<float *> of a float * was redundant.)
            std::vector<float> in_vals(9);
            for (int kh = 0; kh < 3; ++kh)
            {
                for (int kw = 0; kw < 3; ++kw)
                {
                    const int f_index = ((co * 3 + kh) * 3 + kw) * CI + ci;
                    in_vals[kh * 3 + kw] = weight_O33I[f_index];
                }
            }

            // (6x3)*(3x3) -> (6x3), then (6x3)*(3x6) -> (6x6).
            auto temp_vals = Multiply(G, in_vals, 6, 3, 3);
            auto out_vals = Multiply(temp_vals, Gt, 6, 3, 6);
            for (int kh = 0; kh < 6; ++kh)
            {
                for (int kw = 0; kw < 6; ++kw)
                {
                    const int f_index = ((co * 6 + kh) * 6 + kw) * CI + ci;
                    weight_O66I[f_index] = out_vals[kh * 6 + kw];
                }
            }
        }
    }
    return weight_O66I;
}

// Repacks OHWI-ordered weights into the 7D layout consumed by the kernels:
// [co_outer][kh][kw][ci_outer][group][ci_inner(4)][co_inner(4)],
// zero-padding the CO/CI tails. Winograd weights use a 6x6 kernel window.
// NOTE(review): pass-by-value copies the whole weight tensor; a const
// reference would avoid that but requires updating the declaration too.
template<class T>
std::vector<T> ConvolutionKernel::RearrangeWeightsToOHWIOGroupI4O4(std::vector<T> weight_OHWI, int OGroup)
{
    int KH_ = use_winograd ? 6 : KH;
    int KW_ = use_winograd ? 6 : KW;
    int n = UP_DIV(CO, CO_TILE * OGroup) * KH_ * KW_ * CI_SLICES * OGroup * CI_TILE * CO_TILE;
    std::vector<T> weight_7D(n, 0);
    for (int co = 0, idx = 0; co < CO; ++co)
    {
        // Fix: the co-derived coordinates are invariant across kh/kw/ci —
        // hoisted out of the innermost loop instead of being recomputed per element.
        const int co_outer = co / (CO_TILE * OGroup);
        const int group_idx = co % (CO_TILE * OGroup) / CO_TILE;
        const int co_inner = co % CO_TILE;
        for (int kh = 0; kh < KH_; ++kh)
        {
            for (int kw = 0; kw < KW_; ++kw)
            {
                for (int ci = 0; ci < CI; ++ci)
                {
                    const int ci_outer = ci / CI_TILE;
                    const int ci_inner = ci % CI_TILE;
                    const int idx_7D =
                            (((((co_outer * KH_ + kh) * KW_ + kw) * CI_SLICES + ci_outer) * OGroup + group_idx) *
                             CI_TILE + ci_inner) * CO_TILE + co_inner;
                    weight_7D[idx_7D] = weight_OHWI[idx++];
                }
            }
        }
    }
    return weight_7D;
}

// Dumps the raw bytes of a device memory object to a binary file.
// Fixes: reports when the file cannot be opened (previously silently wrote to a
// failed stream), drops the redundant reinterpret_cast (GetData already yields
// std::vector<char>), and uses std::ofstream with RAII close.
void ConvolutionKernel::Write2File(cl_mem mem, const std::string &file_name) const
{
    auto mem_data = runtime->GetData(mem);

    std::ofstream out_stream(file_name, std::ios::out | std::ios::binary);
    if (!out_stream)
    {
        printf("Write2File: can not open %s\n", file_name.c_str());
        return;
    }
    out_stream.write(mem_data.data(), static_cast<std::streamsize>(mem_data.size()));
}

// Dumps device outputs to disk, then compares the kernel output against the
// precomputed expectation file element by element, printing the first mismatch
// (with its logical coordinates) or a success banner.
void ConvolutionKernel::CompareOutput(const std::string &data_path) const
{
    Write2File(output_mem, "output_mem.bin");
    if (use_winograd)
    {
        // Also dump the intermediate transformed-domain images for debugging.
        Write2File(winograd_mem0, "winograd_mem0.bin");
        Write2File(winograd_mem1, "winograd_mem1.bin");
    }

    std::vector<char> output_origin = runtime->GetData(output_mem);
    std::string expect_file = data_path + "/" + attr_str + "/expect_" + LayoutNames[layout] + ".bin";
    auto expect_origin = ReadDataFromFile<char>(expect_file, output_origin.size());

    // fp16 -> fp32: widen both sides to float so one comparison loop serves both types.
    std::vector<float> output(output_origin.size() / SizeOf(data_type));
    std::vector<float> expect(output_origin.size() / SizeOf(data_type));
    if (data_type == DataType::FLOAT16)
    {
        for (int i = 0; i < output.size(); ++i)
        {
            // __gnu_h2f_ieee: compiler builtin converting an IEEE half (bit
            // pattern in a short) to float.
            output[i] = __gnu_h2f_ieee(reinterpret_cast<const short *>(output_origin.data())[i]);
            expect[i] = __gnu_h2f_ieee(reinterpret_cast<const short *>(expect_origin.data())[i]);
        }
    }
    else
    {
        memcpy(output.data(), output_origin.data(), output.size() * sizeof(float));
        memcpy(expect.data(), expect_origin.data(), expect.size() * sizeof(float));
    }

    // Print the first 10 output values (extra gap after every pair for readability).
    printf("output[0:10]:   ");
    for (int i = 0; i < 10; i++)
    {
        printf("%.3f ", output[i]);
        if (i % 2)
            printf("   ");
    }
    printf("\n");

    // Element-wise comparison; the loop nesting mirrors the memory layout so `cn`
    // walks both arrays linearly. fp16 needs a much looser tolerance.
    float eps = data_type == DataType::FLOAT16 ? 0.1 : 1e-3;
    if (layout == NHWC4)
    {
        for (int n = 0, cn = 0; n < N; ++n)
            for (int h = 0; h < OH; ++h)
                for (int w = 0; w < OW; ++w)
                    for (int c = 0; c < CO_SLICES; ++c)
                        for (int i = 0; i < 4; ++i, ++cn)
                            if (std::fabs(output[cn] - expect[cn]) > eps)
                            {
                                printf("error at [%d %d %d %d %d] except=%.3f output=%.3f error=%f\n",
                                       n, h, w, c, i, expect[cn], output[cn], expect[cn] - output[cn]);
                                return;
                            }
    }
    else if (layout == NC4HW4)
    {
        for (int n = 0, cn = 0; n < N; ++n)
            for (int c = 0; c < CO_SLICES; ++c)
                for (int h = 0; h < OH; ++h)
                    for (int w = 0; w < OW; ++w)
                        for (int i = 0; i < 4; ++i, ++cn)
                            if (std::fabs(output[cn] - expect[cn]) > eps)
                            {
                                printf("error at [%d %d %d %d %d] except=%.3f output=%.3f error=%f\n",
                                       n, c, h, w, i, expect[cn], output[cn], expect[cn] - output[cn]);
                                return;
                            }
    }

    printf("COMPARE ALL SUCCESS!!!\n\n");
}