// Row-wise softmax kernel for the Enflame GCU "tops" programming model.
// NOTE(review): this is not standard CUDA — it uses tops DTE transfer-engine
// contexts, tops::mdspan views, and a 3-level memory hierarchy:
// L3 (global) -> L2 (shared) -> L1 (private/per-thread).
// Computes out[r][c] = exp(in[r][c]) / sum_c exp(in[r][c]) for each row.
// NOTE(review): no max-subtraction is done before exp(), so inputs with large
// magnitude can overflow exp() to +inf — confirm expected input range.
__attribute__((global, cooperative, reserve_r29_for_scavenge)) void kernel_softmax(
    float *__restrict dev_input, float *__restrict dev_out, const int rows,
    const int cols)
{

    // L2 (shared) staging buffers for input and output tiles.
    // NOTE(review): there is no bounds check — the per-thread offset
    // local_idx * row_use_l2 * cols (see below) can exceed these
    // 1,600,000-element arrays for large rows/cols; verify against callers.
    __shared__ float shared_mem_inp[1600000];
    __shared__ float shared_mem_out[1600000];

    // Total number of threads in the launch.
    int thread_num = GetThreadNum();

    // Global (grid-wide) linear thread index.
    int thread_idx = GetThreadIdx();

    // Linear thread index within the block; used to carve a private region
    // out of the shared L2 buffers for this thread.
    int local_idx = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;

    // DTE (data-transfer-engine) contexts for asynchronous copies.
    tops_dte_ctx_t dte_inp[2]; // L2 -> L1 input slices
    tops_dte_ctx_t dte_pad[2]; // L1 -> L1 padding
    tops_dte_ctx_t dte_out; // L1 -> L2 result write-back

    __private_dte__ tops_dte_ctx_t dte_l2_inp[2]; // L3 -> L2 input tiles
    __private_dte__ tops_dte_ctx_t dte_l2_out[2]; // L2 -> L3 output tiles

    // NOTE(review): only index [0] of each context pair is ever used below;
    // the [1] contexts are initialized and destroyed but never issue a copy.
    dte_inp[0].init();
    dte_inp[1].init();
    dte_pad[0].init();
    dte_pad[1].init();
    dte_out.init();

    dte_l2_inp[0].init();
    dte_l2_inp[1].init();
    dte_l2_out[0].init();
    dte_l2_out[1].init();


    // Completion events for the async transfers above.
    tops::event event_inp[2];
    tops::event event_pad[2];
    tops::event event_out;

    tops::event event_l2_inp[2];
    tops::event event_l2_out[2];

    // Full-tensor shape at L3 (global memory).
    int32_t global_shape[2] = {rows, cols};

    // Rows staged per L2 tile: rows/16 but at least 4.
    int32_t row_use_l2 = ((rows / 16) > 4) ? (rows / 16) : 4;

    int32_t shared_shape[2] = {row_use_l2, cols};

    // Rows processed per L1 tile: at most 4 (capped by the L2 tile size).
    int32_t row_use_l1 = (row_use_l2 > 4) ? 4 : row_use_l2;

    int32_t private_shape[2] = {row_use_l1, cols};

    // Columns rounded up to a multiple of 32 (the vector width used below).
    auto pad_cols = ((cols + 31) / 32) * 32;

    int32_t private_shape_pad[2] = {row_use_l1, pad_cols};

    // Padding amounts for the L1 pad operation: pad only the high end of the
    // column dimension, from cols up to pad_cols.
    u_int32_t pad_low[] = {0, 0};
    u_int32_t pad_high[] = {0, static_cast<u_int32_t>(pad_cols - cols)};
    u_int32_t pad_mid[] = {0, 0};


    // mdspan views over each memory level.
    // L3 (global) input/output.
    tops::mdspan l3_input(tops::Global, dev_input, global_shape);
    tops::mdspan l3_output(tops::Global, dev_out, global_shape);

    // L2 (shared): each thread views its own disjoint slice of the shared
    // buffers, offset by its in-block index.
    tops::mdspan l2_input(tops::Shared, shared_mem_inp + (local_idx * row_use_l2 * cols), shared_shape);
    tops::mdspan l2_output(tops::Shared, shared_mem_out + (local_idx * row_use_l2 * cols), shared_shape);


    // L1 (private) buffer sizes. NOTE(review): these are runtime values, so
    // the arrays below are variable-length — relies on compiler VLA support.
    auto L1_BUFFER_SIZE = row_use_l1 * cols;    // unpadded L1 tile size
    auto L1_BUFFER_PAD = row_use_l1 * pad_cols; // padded L1 tile size

    __valigned__ float l1_buffer_inp[L1_BUFFER_SIZE];

    // L1 tile after padding columns out to pad_cols.
    __valigned__ float l1_buffer_out[L1_BUFFER_PAD];

    // exp() of the padded tile.
    __valigned__ float l1_buffer_exp[L1_BUFFER_PAD];

    // Per-row sums produced by reduce_dim0_fp32 (row_use_l1 <= 4, so 32 is
    // ample; only indices [0, row_use_l1) are read below).
    __valigned__ float l1_buffer_reduce[32];

    // Final per-tile softmax result.
    __valigned__ float l1_buffer_res[L1_BUFFER_PAD];

    // mdspan views over the L1 buffers.
    tops::mdspan l1_input(tops::Private, l1_buffer_inp, private_shape);

    tops::mdspan l1_output(tops::Private, l1_buffer_out, private_shape_pad);

    // NOTE(review): l1_exp and l1_reduce are never used below (the raw
    // pointers are used instead), and l1_reduce describes a 32-element buffer
    // with a row_use_l1 x pad_cols shape — a latent mismatch if ever used.
    tops::mdspan l1_exp(tops::Private, l1_buffer_exp, private_shape_pad);

    tops::mdspan l1_reduce(tops::Private, l1_buffer_reduce, private_shape_pad);

    tops::mdspan l1_res(tops::Private, l1_buffer_res, private_shape_pad);


    // Number of L2-tile iterations over the row dimension.
    // NOTE(review): integer division truncates — if rows is not a multiple of
    // row_use_l2, the tail rows are never processed; confirm with callers.
    auto r_loops = rows / row_use_l2;

    // Number of threads that actually have work.
    auto nr_working_threads = (r_loops <= thread_num) ? r_loops : thread_num;


    // Retire surplus threads.
    // NOTE(review): these threads return without calling destroy() on the DTE
    // contexts they initialized above — potential resource leak; consider
    // moving this check before the init() calls.
    if (thread_idx >= nr_working_threads)
    {
        return;
    }


    // Distribute r_loops over the working threads: the first
    // (r_loops % nr_working_threads) threads take one extra iteration.
    auto base_loops = r_loops / nr_working_threads;
    auto thread_r_loops = base_loops + ((base_loops * nr_working_threads + thread_idx) < r_loops ? 1 : 0);
    auto thread_r_l1_loops = row_use_l2 / row_use_l1;
    auto thread_col_loops = (cols + 31) / 32;


    // Chain the L2->L1 slice engine to the pad engine.
    // NOTE(review): presumably this makes the pad below wait for the slice to
    // finish, since event_inp[0] is never waited on explicitly — confirm the
    // connect() contract.
    dte_inp[0].connect(dte_pad[0]);

    for(int i = 0; i < thread_r_loops; i++)
    {
        // First global row of this thread's i-th L2 tile.
        auto global_r_idx = (i * nr_working_threads + thread_idx) * row_use_l2;

        // Stage the input tile from L3 into this thread's L2 slice.
        event_l2_inp[0] = tops::slice_async(dte_l2_inp[0], l2_input, l3_input, {global_r_idx, 0});
        tops::wait(event_l2_inp[0]);

        for(int j = 0; j < thread_r_l1_loops; j++)
        {   
            // First row of this L1 sub-tile within the L2 tile.
            auto l1_r_idx = j * row_use_l1;

            // Copy an L1 sub-tile out of the L2 tile (comment in the original
            // said "from L3", but the source here is l2_input).
            event_inp[0] = tops::slice_async(dte_inp[0], l1_input, l2_input, {l1_r_idx, 0});

            // Pad the columns out to pad_cols with a large negative value so
            // that exp(pad) ~= 0 and padding does not affect the row sums.
            event_pad[0] = tops::pad_async(dte_pad[0], l1_output, l1_input, pad_low, pad_high, pad_mid, -1000000.0f);
            tops::wait(event_pad[0]);

            // Element-wise exp over the padded tile, 32 lanes at a time
            // (vfloat appears to be a 32-wide float vector given the stride).
            for (int k = 0; k < row_use_l1; k++)
            {
                for(int g = 0; g < thread_col_loops; g++)
                {
                    // Offset of this 32-wide column chunk in row k.
                    auto c_idx = k * pad_cols + g * 32;

                    const auto &v_inp = tops::vload<vfloat>(l1_buffer_out + c_idx);

                    // exp() of the chunk.
                    const auto &v_exp = tops::vexp<vfloat>(v_inp);

                    tops::vstore(v_exp, l1_buffer_exp + c_idx);
                }
            }

            // Raw addresses for the external reduce routine.
            long exp_addr = (long)(l1_buffer_exp);
            long reduce_addr = (long)(l1_buffer_reduce);

            // External reduce over the exp buffer. NOTE(review): the usage
            // below (one sum per row, indexed by k) implies this writes a
            // per-row sum into l1_buffer_reduce — confirm its contract, as
            // the name suggests a dim-0 reduction.
            reduce_dim0_fp32(exp_addr, row_use_l1, pad_cols, reduce_addr);

            // Divide each exp row by its row sum to finish the softmax.
            for (int k = 0; k < row_use_l1; k++)
            {
                const auto &v_reduce = tops::vbroadcast<vfloat>(l1_buffer_reduce[k]);

                for (int g = 0; g < thread_col_loops; g++)
                {
                    auto c_idx = k * pad_cols + g * 32;

                    const auto &v_exp = tops::vload<vfloat>(l1_buffer_exp + c_idx);

                    const auto &v_res = tops::vdiv<vfloat>(v_exp, v_reduce);

                    tops::vstore(v_res, l1_buffer_res + c_idx);
                }
            }

            // Write the L1 result sub-tile back into the L2 output tile.
            event_out = tops::deslice_async(dte_out, l2_output, l1_res, {l1_r_idx, 0});

            tops::wait(event_out);
        }

        // Write the finished L2 tile back to L3 (global) output.
        event_l2_out[0] = tops::deslice_async(dte_l2_out[0], l3_output, l2_output, {global_r_idx, 0});
        tops::wait(event_l2_out[0]);
    }

    // Release all DTE contexts (working threads only — see NOTE above about
    // the early-returning threads).
    dte_inp[0].destroy();
    dte_inp[1].destroy();
    dte_pad[0].destroy();
    dte_pad[1].destroy();
    dte_out.destroy();

    dte_l2_inp[0].destroy();
    dte_l2_inp[1].destroy();
    dte_l2_out[0].destroy();
    dte_l2_out[1].destroy();
}

// Host-side entry point: launches kernel_softmax on the device to compute a
// row-wise softmax of the rows x cols matrix at dev_input into dev_out.
// Both pointers must be device-resident allocations.
void GCU_SOFTMAX(float *__restrict dev_input, float *__restrict dev_out,
                 const int rows, const int cols)
{
    // Fixed launch geometry for the target hardware.
    constexpr size_t kNumBlocks = 2;
    constexpr size_t kThreadsPerBlock = 12;

    kernel_softmax<<<kNumBlocks, kThreadsPerBlock>>>(dev_input, dev_out, rows,
                                                     cols);
}