// Softmax kernel over a (rows x cols) float matrix: for each row r,
// out[r][c] = exp(in[r][c]) / sum_c exp(in[r][c]).
//
// NOTE(review): exp() is applied directly without first subtracting the
// per-row max, so this can overflow for large input values — confirm the
// expected input range.
//
// dev_input : device pointer to the input matrix (row-major, rows x cols)
// dev_out   : device pointer to the output matrix (same layout)
__attribute__((global, cooperative, reserve_r29_for_scavenge)) void kernel_softmax(
    float *__restrict dev_input, float *__restrict dev_out, const int rows,
    const int cols)
{

    // Number of participating threads (platform helper; presumably the
    // launch-wide thread count — TODO confirm).
    int thread_num = GetThreadNum();

    // Flat index of this thread.
    // NOTE(review): computed from threadIdx only, i.e. an index *within a
    // block*; if more than one block is launched, blocks compute identical
    // indices and duplicate work — confirm the intended launch shape.
    int thread_idx = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;

    // Declare and initialize the DTE (data-transfer-engine) contexts used
    // for L3<->L1 DMA: input load, pad, and output store.
    tops_dte_ctx_t dte_l1_inp;
    tops_dte_ctx_t dte_l1_pad;
    tops_dte_ctx_t dte_l1_out;

    dte_l1_inp.init();
    dte_l1_pad.init();
    dte_l1_out.init();

    // Completion events for the three async transfers above.
    tops::event event_inp;
    tops::event event_pad;
    tops::event event_out;

    // Shape of the full matrix in L3 (global) memory.
    int32_t global_shape[2] = {rows, cols};

    // Rows staged into L1 per iteration: 1 row for small inputs, 4 otherwise.
    int row_use_l1 = (rows <= 32) ? 1 : 4;

    int pad_cols = ((cols + 31) / 32) * 32; // columns rounded up to a multiple of 32 (vector width)

    int32_t private_shape[2] = {row_use_l1, cols};
    int32_t private_shape_pad[2] = {row_use_l1, pad_cols};

    // Padding amounts: pad only the high end of the column dimension.
    u_int32_t pad_low[] = {0, 0};
    u_int32_t pad_high[] = {0, static_cast<u_int32_t>(pad_cols - cols)};
    u_int32_t pad_mid[] = {0, 0};

    // L1 buffer sizes in elements.
    // NOTE(review): these are runtime values, so the arrays below are
    // VLAs — confirm the toolchain supports VLAs in kernel code.
    int L1_BUFFER_SIZE = row_use_l1 * cols;    // unpadded tile size
    int L1_BUFFER_PAD = row_use_l1 * pad_cols; // padded tile size

    // L1 tile holding the raw input rows.
    __valigned__ float l1_buffer_inp[L1_BUFFER_SIZE];

    // L1 tile after padding out to pad_cols.
    __valigned__ float l1_buffer_out[L1_BUFFER_PAD];

    // L1 tile holding exp() of the padded input.
    __valigned__ float l1_buffer_exp[L1_BUFFER_PAD];

    // Per-row reduction results (one float per row; capacity 32 rows).
    __valigned__ float l1_buffer_reduce[32];

    // L1 tile holding the final softmax values.
    __valigned__ float l1_buffer_res[L1_BUFFER_PAD];

    // Bind mdspan views.
    // L3 (global) views over the whole matrix.
    tops::mdspan l3_input(tops::Global, dev_input, global_shape);
    tops::mdspan l3_output(tops::Global, dev_out, global_shape);

    // L1 (private) views over the per-thread tiles.
    tops::mdspan l1_input(tops::Private, l1_buffer_inp, private_shape);

    tops::mdspan l1_output(tops::Private, l1_buffer_out, private_shape_pad);

    tops::mdspan l1_exp(tops::Private, l1_buffer_exp, private_shape_pad);

    // NOTE(review): l1_buffer_reduce holds only 32 floats but is bound with
    // the padded tile shape; the view is unused below, but the mismatch
    // looks accidental — verify.
    tops::mdspan l1_reduce(tops::Private, l1_buffer_reduce, private_shape_pad);

    tops::mdspan l1_res(tops::Private, l1_buffer_res, private_shape_pad);

    // Total number of row-tiles to process.
    // NOTE(review): integer division drops the tail — when rows is not a
    // multiple of row_use_l1, the last rows % row_use_l1 rows are never
    // processed. Confirm callers guarantee divisibility.
    int r_loops = rows / row_use_l1;

    // Number of threads that actually have work.
    int nr_working_threads = (r_loops <= thread_num) ? r_loops : thread_num;

    // Retire threads with no tiles assigned.
    if (thread_idx >= nr_working_threads)
    {
        return;
    }

    // Tiles per thread: a base share, plus one extra for the first
    // (r_loops % nr_working_threads) threads.
    int base_loops = r_loops / nr_working_threads;
    int thread_r_loops = base_loops + ((base_loops * nr_working_threads + thread_idx < r_loops) ? 1 : 0); // row-tile iterations for this thread
    int thread_col_loops = (cols + 31) / 32;                                                              // 32-wide vector iterations per row

    // Chain the DTEs so the pad transfer starts only after the input load
    // completes (which is why event_inp is never waited on directly).
    dte_l1_inp.connect(dte_l1_pad);

    for (int i = 0; i < thread_r_loops; i++)
    {
        // First global row of this thread's i-th tile (tiles are strided
        // across the working threads).
        int global_r_idx = (i * nr_working_threads + thread_idx) * row_use_l1;

        // Async load of the tile from L3 into L1.
        event_inp = tops::slice_async(dte_l1_inp, l1_input, l3_input, {global_r_idx, 0});

        // Async pad of the tile out to pad_cols; the fill value is a large
        // negative number so exp(pad) ~ 0 and does not perturb row sums.
        event_pad = tops::pad_async(dte_l1_pad, l1_output, l1_input, pad_low, pad_high, pad_mid, -1000000.0f);

        tops::wait(event_pad);

        for (int j = 0; j < row_use_l1; j++) // element-wise exp over the padded tile
        {
            for (int k = 0; k < thread_col_loops; k++)
            {
                // Offset of this 32-wide vector within the tile.
                auto col_idx = j * pad_cols + k * 32;

                const auto &v_inp = tops::vload<vfloat>(l1_buffer_out + col_idx);

                // exp() of one vector of inputs.
                const auto &v_exp = tops::vexp<vfloat>(v_inp);

                tops::vstore(v_exp, l1_buffer_exp + col_idx);
            }
        }

        // Row-wise reduction of the exp tile (presumably sums each row into
        // l1_buffer_reduce[row] — confirm reduce_dim0_fp32's contract).
        long exp_addr = (long)(l1_buffer_exp);
        long reduce_addr = (long)(l1_buffer_reduce);

        reduce_dim0_fp32(exp_addr, row_use_l1, pad_cols, reduce_addr);

        // Before overwriting l1_buffer_res, make sure the previous
        // iteration's store back to L3 has finished.
        if (i != 0)
        {
            tops::wait(event_out);
        }

        // Normalize: divide each exp value by its row's reduction result.
        for (int j = 0; j < row_use_l1; j++)
        {
            const auto &v_reduce = tops::vbroadcast<vfloat>(l1_buffer_reduce[j]);

            for (int k = 0; k < thread_col_loops; k++)
            {
                auto col_idx = j * pad_cols + k * 32;

                const auto &v_exp = tops::vload<vfloat>(l1_buffer_exp + col_idx);

                // One vector of softmax outputs.
                const auto &v_res = tops::vdiv<vfloat>(v_exp, v_reduce);

                tops::vstore(v_res, l1_buffer_res + col_idx);
            }
        }

        // Async store of the result tile back to L3.
        // NOTE(review): l1_res carries the padded shape while l3_output is
        // unpadded — confirm deslice_async clips the copy to the destination
        // extents so pad columns don't spill into the next row.
        event_out = tops::deslice_async(dte_l1_out, l3_output, l1_res, {global_r_idx, 0});

        // Last iteration: block until the final store completes before
        // tearing down the DTEs.
        if (i + 1 >= thread_r_loops)
        {
            tops::wait(event_out);
        }
    }

    // Release the DTE contexts.
    dte_l1_inp.destroy();
    dte_l1_pad.destroy();
    dte_l1_out.destroy();
}

void GCU_SOFTMAX(float *__restrict dev_input, float *__restrict dev_out,
                 const int rows, const int cols)
{
    // 设置kernel参数
    static const size_t blocks = 2;
    static const size_t threads = 12;

    // 调用kernel
    kernel_softmax<<<blocks, threads>>>(dev_input, dev_out, rows, cols);
}
