/**
 * __device__ __forceinline__ int GetThreadNum(void);
 * @brief Get Thread(sip) number
 * @param
 * @return Thread(sip) number
 */

/**
 * __device__ __forceinline__ int GetThreadIdx(void)
 * @brief Get global thread(sip) idx
 * @param
 * @return global thread(sip) idx
 */

/**
 * __device__ void reduce_dim0_fp32(int in_addr_, int dim1, int dim0, int
 * out_addr_)
 *
 * @details The data type is fp32, and the input data will be reshaped
 *           to (dim1, dim0).
 *
 * @brief Applies the function:
 *        The input data (dim1, dim0) would be reduced_sum into (dim1, 1).
 *
 * @param in_addr_ The starting address of the input data, must be 128Byte
 *                 aligned.
 * @param dim1 The value of dim1 in reshaped dimension (dim1, dim0).
 * @param dim0 The value of dim0 in reshaped dimension (dim1, dim0). dim0 must be a multiple of 32.
 * @param out_addr_ The starting address of the output data, must be 128Byte
 *                  aligned.
 * @attention The space from the end of the input to the 128Byte alignment must
 *            be readable, and the space from the end of the output to the
 *            128Byte alignment must be writable.
 *            All functions in the current file follow this rule.
 */

/**
 * @brief Device kernel: row-wise softmax of a (rows, cols) fp32 matrix.
 *
 * Each thread (SIP) owns a strided subset of row tiles. Per tile it:
 *   1. DMAs `row_use_l1` rows from L3 into L1,
 *   2. pads the columns up to a multiple of 32 with a large negative value,
 *   3. computes exp() elementwise (32-lane vectors),
 *   4. row-reduces the exponentials via reduce_dim0_fp32,
 *   5. divides each exp by its row sum and DMAs the tile back to L3.
 *
 * @param dev_input  Input matrix in global (L3) memory, shape (rows, cols).
 * @param dev_out    Output matrix in global (L3) memory, shape (rows, cols).
 * @param rows       Number of rows.
 * @param cols       Number of columns.
 *
 * @attention nr_working_threads below is hard-coded to 24 and must match the
 *            total thread count of the launch in GCU_SOFTMAX (2 blocks x 12).
 * @attention NOTE(review): no running-max subtraction is performed before
 *            exp(), so inputs with large magnitude can overflow — confirm the
 *            expected input range.
 */
__attribute__((global, cooperative, reserve_r29_for_scavenge)) void kernel_softmax(
    float *__restrict dev_input, float *__restrict dev_out, const int rows,
    const int cols)
{
    // Global index of the current thread (SIP).
    int thread_idx = GetThreadIdx();

    // DTE (DMA engine) contexts: input load, pad, and result write-back.
    // (The originally-declared second input/pad contexts were never used
    // and have been removed.)
    tops_dte_ctx_t dte_inp;
    tops_dte_ctx_t dte_pad;
    tops_dte_ctx_t dte_out;

    dte_inp.init();
    dte_pad.init();
    dte_out.init();

    // Completion events for the async DMA operations.
    tops::event event_inp;
    tops::event event_pad;
    tops::event event_out;

    // Shape of the full tensor in L3 (global) memory.
    int32_t global_shape[2] = {rows, cols};

    // Rows per L1 tile (capped at 16).
    // NOTE(review): the mixed divisors (rows/16 in the test, rows/24 in the
    // result) look inconsistent; preserved as-is — confirm the intended
    // tiling policy.
    int row_use_l1 = (rows / 16) < 16 ? (rows / 24) : 16;

    if (row_use_l1 == 0)
    {
        row_use_l1 = 1;
    }

    int32_t private_shape[2] = {row_use_l1, cols};

    // Columns rounded up to the 32-lane vector width.
    int pad_cols = ((cols + 31) / 32) * 32;

    int32_t private_shape_pad[2] = {row_use_l1, pad_cols};

    int L1_BUFFER_SIZE = row_use_l1 * cols;    // elements in an unpadded L1 tile
    int L1_BUFFER_PAD = row_use_l1 * pad_cols; // elements in a padded L1 tile

    // Pad amounts: only the high end of the column dimension is padded.
    u_int32_t pad_low[] = {0, 0};
    u_int32_t pad_high[] = {0, static_cast<u_int32_t>(pad_cols - cols)};
    u_int32_t pad_mid[] = {0, 0};

    // L1 staging buffers (vector-aligned).
    __valigned__ float l1_buffer_inp[L1_BUFFER_SIZE]; // raw input tile
    __valigned__ float l1_buffer_out[L1_BUFFER_PAD];  // padded tile
    __valigned__ float l1_buffer_exp[L1_BUFFER_PAD];  // exp() of padded tile
    // Per-row sums; row_use_l1 <= 16 < 32, and 32 floats = 128 bytes satisfies
    // the alignment/writability contract of reduce_dim0_fp32.
    __valigned__ float l1_buffer_reduce[32];
    __valigned__ float l1_buffer_res[L1_BUFFER_PAD];  // final softmax tile

    // mdspan views used by the DMA engines. (The original also bound the
    // 32-float reduce buffer with the full padded tile shape — a shape/buffer
    // mismatch — and an exp-buffer view; both were unused and are removed.)
    tops::mdspan l3_input(tops::Global, dev_input, global_shape);
    tops::mdspan l3_output(tops::Global, dev_out, global_shape);

    tops::mdspan l1_input(tops::Private, l1_buffer_inp, private_shape);

    tops::mdspan l1_output(tops::Private, l1_buffer_out, private_shape_pad);

    tops::mdspan l1_res(tops::Private, l1_buffer_res, private_shape_pad);

    // Total number of row tiles across the whole tensor.
    auto r_loops = (rows + row_use_l1 - 1) / row_use_l1;

    // Total worker threads; must match blocks*threads at launch
    // (GCU_SOFTMAX launches 2 blocks x 12 threads = 24).
    auto nr_working_threads = 24;

    // Round-robin tile distribution: the first (r_loops % 24) threads take
    // one extra tile.
    auto base_loops = r_loops / nr_working_threads;
    auto thread_r_loops = base_loops + ((base_loops * nr_working_threads + thread_idx) < r_loops ? 1 : 0); // row-tile iterations for this thread
    auto thread_col_loops = (cols + 31) / 32;                                                              // 32-wide vector chunks per row

    // Chain the pad DMA behind the load DMA so padding starts as soon as the
    // slice completes; waiting on the pad event then covers both transfers.
    dte_inp.connect(dte_pad);

    for (int i = 0; i < thread_r_loops; i++)
    {
        // First row of the tile owned by this thread on this iteration.
        // NOTE(review): when rows is not a multiple of row_use_l1, the last
        // tile extends past `rows`; this relies on the trailing space being
        // readable/writable (see the alignment @attention above) — confirm.
        auto global_r_offset = i * nr_working_threads + thread_idx;
        auto global_r_idx = global_r_offset * row_use_l1;

        // L3 -> L1: load the raw tile.
        event_inp = tops::slice_async(dte_inp, l1_input, l3_input, {global_r_idx, 0});

        // Pad columns up to pad_cols with a large negative value so exp() of
        // the padding contributes ~0 to the row sums.
        event_pad = tops::pad_async(dte_pad, l1_output, l1_input, pad_low, pad_high, pad_mid, -1000000.0f);

        tops::wait(event_pad);

        // Elementwise exp over the padded tile, one 32-lane vector at a time.
        for (int j = 0; j < row_use_l1; j++)
        {
            for (int k = 0; k < thread_col_loops; k++)
            {
                // Flat offset of this vector chunk within the padded tile.
                auto c_idx = j * pad_cols + k * 32;

                const auto &v_inp = tops::vload<vfloat>(l1_buffer_out + c_idx);

                const auto &v_exp = tops::vexp<vfloat>(v_inp);

                tops::vstore(v_exp, l1_buffer_exp + c_idx);
            }
        }

        // Buffer addresses for the reduction helper.
        long exp_addr = (long)(l1_buffer_exp);
        long reduce_addr = (long)(l1_buffer_reduce);

        // Row-wise sum: (row_use_l1, pad_cols) -> (row_use_l1, 1).
        reduce_dim0_fp32(exp_addr, row_use_l1, pad_cols, reduce_addr);

        // Ensure the previous iteration's write-back has drained before
        // overwriting l1_buffer_res.
        if (i != 0)
        {
            tops::wait(event_out);
        }

        // Normalize: divide each exp by its row sum.
        for (int j = 0; j < row_use_l1; j++)
        {
            const auto &v_reduce = tops::vbroadcast<vfloat>(l1_buffer_reduce[j]);

            for (int k = 0; k < thread_col_loops; k++)
            {
                auto c_idx = j * pad_cols + k * 32;

                const auto &v_exp = tops::vload<vfloat>(l1_buffer_exp + c_idx);

                const auto &v_res = tops::vdiv<vfloat>(v_exp, v_reduce);

                tops::vstore(v_res, l1_buffer_res + c_idx);
            }
        }

        // L1 -> L3: write the tile back; l3_output has shape (rows, cols), so
        // only the unpadded columns land in global memory.
        event_out = tops::deslice_async(dte_out, l3_output, l1_res, {global_r_idx, 0});

        // Last iteration: drain the final write-back before tearing down.
        if (i + 1 >= thread_r_loops)
        {
            tops::wait(event_out);
        }
    }

    // Release the DMA contexts.
    dte_inp.destroy();
    dte_pad.destroy();
    dte_out.destroy();
}

/**
 * @brief Host-side launcher for kernel_softmax: row-wise softmax of a
 *        (rows, cols) fp32 matrix resident in device global memory.
 *
 * @param dev_input Device pointer to the input matrix, shape (rows, cols).
 * @param dev_out   Device pointer to the output matrix, shape (rows, cols).
 * @param rows      Number of rows.
 * @param cols      Number of columns.
 *
 * @attention 2 blocks x 12 threads = 24 total workers; this product must stay
 *            in sync with the hard-coded nr_working_threads = 24 inside
 *            kernel_softmax, or part of the data goes unprocessed.
 * @attention The launch is asynchronous; no synchronization is performed here
 *            — the caller is responsible for waiting before reading dev_out.
 */
void GCU_SOFTMAX(float *__restrict dev_input, float *__restrict dev_out,
                 const int rows, const int cols)
{
    // Compile-time launch configuration (static storage served no purpose).
    constexpr size_t blocks = 2;
    constexpr size_t threads = 12;

    // Launch the softmax kernel.
    kernel_softmax<<<blocks, threads>>>(dev_input, dev_out, rows, cols);
}