// Scaled dot-product attention (SDPA) kernel for the Enflame GCU (TOPS
// programming model):
//
//     out = softmax(Q * K^T / sqrt(head_size)) * V
//
// All four tensors are laid out as [bs, head_n, seq_len, head_size] in
// global (L3) memory. Tiles are staged L3 -> L2 (shared) -> L1 (private)
// with asynchronous DTE transfers, and both matrix products are computed
// by the dot_general_fp32 sub-matrix GEMM primitive on 32-wide tiles.
//
// NOTE(review): the head index `j * 16 + thread_idx` assumes exactly 16
// threads in total (the host wrapper launches 2 blocks x 8 threads), and
// `thread_head_n_loops = 2` then implies head_n == 32 — confirm against
// callers before reusing with other shapes.
// NOTE(review): the softmax below computes exp(x / sqrt(d)) without first
// subtracting the row maximum; large logits can overflow to inf — verify
// the expected input range.
__attribute__((global, cooperative, reserve_r29_for_scavenge)) void kernel_sdpa(
    float *__restrict dev_query, float *__restrict dev_key,
    float *__restrict dev_value, float *__restrict dev_out, const int bs,
    const int head_n, const int seq_len, const int head_size)
{
    // Shared (L2) staging buffer; each thread gets a private slice of it
    // (see the `shared_qk` binding below) to hold its Q*K^T panel.
    __shared__ float s_qk_buffer[5000000];

    // Total number of threads in the launch.
    int thread_num = GetThreadNum();

    // Global (launch-wide) index of this thread.
    int thread_idx = GetThreadIdx();

    // Thread index local to the block (flattened z/y/x).
    int local_idx = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;

    // DTE (data transfer engine) contexts, one per transfer role.
    tops_dte_ctx_t dte_q_input;   // query input
    tops_dte_ctx_t dte_k_input;   // key input
    tops_dte_ctx_t dte_q_trans;   // query transpose
    tops_dte_ctx_t dte_k_trans;   // key transpose
    tops_dte_ctx_t dte_qk_output; // query * key output

    tops_dte_ctx_t dte_qk_output_shared; // query * key read-back from L2
    tops_dte_ctx_t dte_qk_pad;           // query * key padding
    tops_dte_ctx_t dte_qk_res;           // softmax output

    tops_dte_ctx_t dte_v_input;    // value input
    tops_dte_ctx_t dte_sfqk_input; // softmax(query * key) input
    tops_dte_ctx_t dte_v_trans;    // value transpose
    tops_dte_ctx_t dte_sfqk_trans; // softmax(query * key) transpose
    tops_dte_ctx_t dte_qkv_output; // query * key * value output

    // Initialize all DTE contexts.
    dte_q_input.init();
    dte_k_input.init();
    dte_q_trans.init();
    dte_k_trans.init();
    dte_qk_output.init();

    dte_qk_output_shared.init();
    dte_qk_pad.init();
    dte_qk_res.init();

    dte_v_input.init();
    dte_sfqk_input.init();
    dte_v_trans.init();
    dte_sfqk_trans.init();
    dte_qkv_output.init();

    // Completion events paired 1:1 with the DTE contexts above.
    tops::event event_q_input;
    tops::event event_k_input;
    tops::event event_q_trans;
    tops::event event_k_trans;
    tops::event event_qk_output;

    tops::event event_qk_output_shared;
    tops::event event_qk_pad;
    tops::event event_qk_res;

    tops::event event_v_input;
    tops::event event_sfqk_input;
    tops::event event_v_trans;
    tops::event event_sfqk_trans;
    tops::event event_qkv_output;

    // GEMM tiling parameters.
    int M_TILE_SIZE = 32; // tile size along M (rows of seq_len)
    int N_TILE_SIZE = head_size;
    int KERNEL_TILE_SIZE = 32;

    // L3 (global) tensor shapes: [bs, head_n, seq_len, head_size].
    int32_t global_q_shape[4] = {bs, head_n, seq_len, head_size};
    int32_t global_k_shape[4] = {bs, head_n, seq_len, head_size};
    int32_t global_v_shape[4] = {bs, head_n, seq_len, head_size};
    int32_t global_out_shape[4] = {bs, head_n, seq_len, head_size};

    // L1 (private) tile shapes for the first GEMM (Q * K^T).
    int32_t private_q_shape[4] = {1, 1, M_TILE_SIZE, N_TILE_SIZE};
    int32_t private_k_shape[4] = {1, 1, M_TILE_SIZE, N_TILE_SIZE};

    // 3-D reinterpretations of the same buffers used as transpose sources.
    int32_t private_q_shape_reformat[3] = {M_TILE_SIZE, (N_TILE_SIZE / KERNEL_TILE_SIZE), KERNEL_TILE_SIZE};
    int32_t private_k_shape_reformat[3] = {(M_TILE_SIZE / KERNEL_TILE_SIZE), KERNEL_TILE_SIZE, N_TILE_SIZE};

    // Transposed layouts consumed by dot_general_fp32.
    int32_t private_q_trans_shape[3] = {(N_TILE_SIZE / KERNEL_TILE_SIZE), M_TILE_SIZE, KERNEL_TILE_SIZE};
    int32_t private_k_trans_shape[3] = {(M_TILE_SIZE / KERNEL_TILE_SIZE), N_TILE_SIZE, KERNEL_TILE_SIZE};

    // Output tile of the first GEMM: M_TILE_SIZE x M_TILE_SIZE.
    int32_t private_qk_shape[4] = {1, 1, M_TILE_SIZE, M_TILE_SIZE};

    // Tile shapes for the second GEMM (softmax(QK) * V).
    int32_t private_sfqk_shape[4] = {1, 1, M_TILE_SIZE, M_TILE_SIZE};
    int32_t private_v_shape[4] = {1, 1, M_TILE_SIZE, N_TILE_SIZE};

    int32_t private_qkv_shape[4] = {1, 1, M_TILE_SIZE, N_TILE_SIZE};

    int32_t private_sfqk_shape_reformat[3] = {M_TILE_SIZE, (M_TILE_SIZE / KERNEL_TILE_SIZE), KERNEL_TILE_SIZE};
    int32_t private_v_shape_reformat[3] = {M_TILE_SIZE, (N_TILE_SIZE / KERNEL_TILE_SIZE), KERNEL_TILE_SIZE};

    int32_t private_sfqk_trans_shape[3] = {(M_TILE_SIZE / KERNEL_TILE_SIZE), M_TILE_SIZE, KERNEL_TILE_SIZE};
    int32_t private_v_trans_shape[3] = {(N_TILE_SIZE / KERNEL_TILE_SIZE), M_TILE_SIZE, KERNEL_TILE_SIZE};

    // Softmax staging shapes: rows are processed `rows_use` at a time,
    // columns padded up to a multiple of 32 (the vector lane width).
    int rows_use = 4;
    int cols_use = seq_len;
    int private_qk_inp_shape[4] = {1, 1, rows_use, cols_use};
    auto pad_cols = ((cols_use + 31) / 32) * 32;
    int private_shape_pad[4] = {1, 1, rows_use, pad_cols};

    // Padding amounts: only the last dimension is padded, on the high side.
    u_int32_t pad_low[] = {0, 0, 0, 0};
    u_int32_t pad_high[] = {0, 0, 0, static_cast<u_int32_t>(pad_cols - cols_use)};
    u_int32_t pad_mid[] = {0, 0, 0, 0};

    // Attention scaling factor: logits are divided by sqrt(head_size).
    float scale = tops::sqrt(float(head_size));
    const auto &v_scale_sqrt = tops::vbroadcast<vfloat>(scale);

    // Bind global (L3) buffers to mdspans.
    tops::mdspan global_q(tops::Global, dev_query, global_q_shape);
    tops::mdspan global_k(tops::Global, dev_key, global_k_shape);
    tops::mdspan global_v(tops::Global, dev_value, global_v_shape);
    tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

    // Private (L1) buffer sizes.
    static const int L1_BUFFER_SIZE = 1 * 1 * M_TILE_SIZE * N_TILE_SIZE;
    static const int L1_BUFFER_OUT_SIZE = 1 * 1 * M_TILE_SIZE * M_TILE_SIZE;

    static const int L1_BUFFER_IN_SIZE = 1 * 1 * rows_use * cols_use;
    static const int L1_BUFFER_PAD_SIZE = 1 * 1 * rows_use * pad_cols;

    __valigned__ float query_l1_buffer[L1_BUFFER_SIZE];
    __valigned__ float key_l1_buffer[L1_BUFFER_SIZE];

    __valigned__ float query_l1_trans_buffer[L1_BUFFER_SIZE];
    __valigned__ float key_l1_trans_buffer[L1_BUFFER_SIZE];

    __valigned__ float qk_l1_buffer[L1_BUFFER_OUT_SIZE];

    __valigned__ float qk_l1_buffer_inp[L1_BUFFER_IN_SIZE];
    __valigned__ float qk_l1_buffer_pad[L1_BUFFER_PAD_SIZE];
    __valigned__ float l1_buffer_exp[L1_BUFFER_PAD_SIZE];
    __valigned__ float l1_buffer_reduce[32];
    __valigned__ float l1_buffer_res[L1_BUFFER_PAD_SIZE];

    __valigned__ float value_l1_buffer[L1_BUFFER_SIZE];
    __valigned__ float sfqk_l1_buffer[L1_BUFFER_OUT_SIZE];

    __valigned__ float value_l1_buffer_trans[L1_BUFFER_SIZE];
    __valigned__ float sfqk_l1_buffer_trans[L1_BUFFER_OUT_SIZE];

    __valigned__ float qkv_l1_buffer[L1_BUFFER_SIZE];

    tops::mdspan private_q(tops::Private, query_l1_buffer, private_q_shape);
    tops::mdspan private_k(tops::Private, key_l1_buffer, private_k_shape);

    tops::mdspan private_q_trans(tops::Private, query_l1_trans_buffer, private_q_trans_shape);
    tops::mdspan private_k_trans(tops::Private, key_l1_trans_buffer, private_k_trans_shape);

    tops::mdspan private_q_reformat(tops::Private, query_l1_buffer, private_q_shape_reformat);
    tops::mdspan private_k_reformat(tops::Private, key_l1_buffer, private_k_shape_reformat);

    tops::mdspan private_qk(tops::Private, qk_l1_buffer, private_qk_shape);

    tops::mdspan private_qk_inp(tops::Private, qk_l1_buffer_inp, private_qk_inp_shape);
    tops::mdspan private_qk_pad(tops::Private, qk_l1_buffer_pad, private_shape_pad);
    tops::mdspan private_exp(tops::Private, l1_buffer_exp, private_shape_pad);
    tops::mdspan private_res(tops::Private, l1_buffer_res, private_shape_pad);

    tops::mdspan private_sfqk(tops::Private, sfqk_l1_buffer, private_sfqk_shape);
    tops::mdspan private_value(tops::Private, value_l1_buffer, private_v_shape);

    tops::mdspan private_sfqk_reformat(tops::Private, sfqk_l1_buffer, private_sfqk_shape_reformat);
    tops::mdspan private_value_reformat(tops::Private, value_l1_buffer, private_v_shape_reformat);

    tops::mdspan private_sfqk_trans(tops::Private, sfqk_l1_buffer_trans, private_sfqk_trans_shape);
    tops::mdspan private_value_trans(tops::Private, value_l1_buffer_trans, private_v_trans_shape);

    tops::mdspan private_qkv(tops::Private, qkv_l1_buffer, private_qkv_shape);

    // Loop count over head_n per thread (assumes head_n == 2 * 16 threads;
    // see NOTE(review) in the header comment).
    int thread_head_n_loops = 2;

    // Total number of M tiles covering seq_len.
    int m_loops = (seq_len + M_TILE_SIZE - 1) / M_TILE_SIZE;

    // Total number of K tiles (square score matrix, so same count).
    int k_loops = m_loops;

    // Width of each thread's L2 staging panel, in floats.
    int l2_w = k_loops * M_TILE_SIZE;

    // Halve the per-pass row count until the panel fits the L2 budget.
    // NOTE(review): if m_loops reaches 1 while the condition still holds,
    // (1 + 1) / 2 == 1 and this loop never terminates — confirm seq_len is
    // bounded so that a single 32-row panel always fits.
    while (l2_w * m_loops * M_TILE_SIZE >= 256 * 1024 * 3)
    {
        m_loops = (m_loops + 1) / 2;
    }

    // Height of each thread's L2 staging panel, in floats.
    int l2_h = m_loops * M_TILE_SIZE;

    // L2 panel shape.
    int32_t shared_qk_shape[4] = {1, 1, l2_h, l2_w};

    // Number of super-tiles: how many passes of m_loops tiles cover k_loops.
    int mt_loops = (k_loops + m_loops - 1) / m_loops;

    // Number of 32-wide vector chunks per padded row.
    auto cols_loops = pad_cols / 32;

    // Bind this thread's slice of the L2 buffer.
    tops::mdspan shared_qk(tops::Shared, s_qk_buffer + (l2_h * l2_w * local_idx), shared_qk_shape);

    // Chain DTEs so each input transfer feeds its follow-up stage.
    // NOTE(review): the input events (e.g. event_q_input) are never waited
    // on directly — presumably connect() orders input before the chained
    // transpose/pad, so waiting on the downstream event suffices; confirm
    // against the DTE API contract.
    dte_q_input.connect(dte_q_trans);
    dte_k_input.connect(dte_k_trans);
    dte_sfqk_input.connect(dte_sfqk_trans);
    dte_v_input.connect(dte_v_trans);
    dte_qk_output_shared.connect(dte_qk_pad);

    for (auto i = 0; i < bs; i++)
    {
        // Global batch index.
        int global_bs_idx = i;

        for (auto j = 0; j < thread_head_n_loops; j++)
        {
            // Each of the 16 threads owns one head per j iteration.
            int global_head_n_idx = j * 16 + thread_idx;

            int sum_loops = k_loops;
            int base_idx = 0;
            // NOTE(review): this shadows the outer m_loops (same value,
            // l2_h / M_TILE_SIZE == post-shrink m_loops) — intentional reset
            // per head, but easy to misread.
            int m_loops = l2_h / M_TILE_SIZE;
            
            for (auto k = 0; k < mt_loops; k++)
            {
                // Tiles processed this pass (clamped on the final pass).
                m_loops = ((sum_loops - m_loops) >= 0) ? m_loops : sum_loops;

                // Tiles still remaining after this pass.
                sum_loops -= m_loops;

                // --- Phase 1: scores panel  S = Q * K^T  into L2 ---
                for (auto g = 0; g < m_loops; g++)
                {
                    int global_g_idx = base_idx + g * M_TILE_SIZE;
                    int local_g_idx = g * M_TILE_SIZE;

                    event_q_input = tops::slice_async(dte_q_input, private_q, global_q, {global_bs_idx, global_head_n_idx, global_g_idx, 0});
                    event_q_trans = tops::transpose_async(dte_q_trans, private_q_trans, private_q_reformat, {1, 0, 2});
                    tops::wait(event_q_trans);

                    for (auto w = 0; w < k_loops; w++)
                    {
                        auto global_w_idx = w * M_TILE_SIZE;

                        long lhs_addr = (long)(query_l1_trans_buffer);
                        long rhs_addr = (long)(key_l1_trans_buffer);
                        long out_addr = (long)(qk_l1_buffer);

                        event_k_input = tops::slice_async(dte_k_input, private_k, global_k, {global_bs_idx, global_head_n_idx, global_w_idx, 0});
                        event_k_trans = tops::transpose_async(dte_k_trans, private_k_trans, private_k_reformat, {0, 2, 1});
                        tops::wait(event_k_trans);

                        // Sub-matrix GEMM via dot_general_fp32.
                        dot_general_fp32(lhs_addr, rhs_addr, M_TILE_SIZE, N_TILE_SIZE, M_TILE_SIZE, 0, 1, out_addr);

                        // Write the 32x32 score tile back to the L2 panel.
                        event_qk_output = tops::deslice_async(dte_qk_output, shared_qk, private_qk, {0, 0, local_g_idx, global_w_idx});

                        tops::wait(event_qk_output);
                    }
                }

                // --- Phase 2: row-wise softmax over the L2 panel ---
                auto r_loops = (m_loops * M_TILE_SIZE + rows_use - 1) / rows_use;

                for (auto g = 0; g < r_loops; g++)
                {

                    auto global_r_idx = g * rows_use;

                    // Fetch rows_use rows and pad the tail columns with a
                    // large negative value so their exp() contribution ~ 0.
                    event_qk_output_shared = tops::slice_async(dte_qk_output_shared, private_qk_inp, shared_qk, {0, 0, global_r_idx, 0});
                    event_qk_pad = tops::pad_async(dte_qk_pad, private_qk_pad, private_qk_inp, pad_low, pad_high, pad_mid, -10000000.0f);
                    tops::wait(event_qk_pad);

                    for (auto w = 0; w < rows_use; w++)
                    {
                        for (auto z = 0; z < cols_loops; z++)
                        {
                            // Offset of this 32-lane chunk within the rows.
                            size_t n_idx = w * pad_cols + z * 32;
                            const auto &v_inp = tops::vload<vfloat>(qk_l1_buffer_pad + n_idx);

                            // Scale by 1 / sqrt(head_size).
                            const auto &v_handle = tops::vdiv<vfloat>(v_inp, v_scale_sqrt);

                            // Elementwise exp.
                            const auto &v_exp = tops::vexp<vfloat>(v_handle);

                            tops::vstore(v_exp, l1_buffer_exp + n_idx);
                        }
                    }

                    // Raw addresses for the row-sum reduction primitive.
                    long exp_addr = (long)(l1_buffer_exp);
                    long reduce_addr = (long)(l1_buffer_reduce);

                    reduce_dim0_fp32(exp_addr, rows_use, pad_cols, reduce_addr);

                    // Normalize: softmax = exp / row-sum.
                    for (auto w = 0; w < rows_use; w++)
                    {
                        const auto &v_reduce = tops::vbroadcast<vfloat>(l1_buffer_reduce[w]);

                        for (auto z = 0; z < cols_loops; z++)
                        {
                            auto c_idx = w * pad_cols + z * 32;

                            const auto &v_exp = tops::vload<vfloat>(l1_buffer_exp + c_idx);

                            const auto &v_res = tops::vdiv<vfloat>(v_exp, v_reduce);

                            tops::vstore(v_res, l1_buffer_res + c_idx);
                        }
                    }

                    // Store normalized rows back into the L2 panel in place.
                    event_qk_res = tops::deslice_async(dte_qk_res, shared_qk, private_res, {0, 0, global_r_idx, 0});

                    tops::wait(event_qk_res);
                }

                // --- Phase 3: out = softmax(S) * V, accumulated over K ---
                for (auto g = 0; g < m_loops; g++)
                {
                    auto global_g_idx = base_idx + g * M_TILE_SIZE;
                    auto local_g_idx = g * M_TILE_SIZE;

                    for (auto w = 0; w < k_loops; w++)
                    {
                        auto global_w_idx = w * M_TILE_SIZE;

                        long lhs_addr = (long)(sfqk_l1_buffer_trans);
                        long rhs_addr = (long)(value_l1_buffer_trans);
                        long out_addr = (long)(qkv_l1_buffer);

                        event_sfqk_input = tops::slice_async(dte_sfqk_input, private_sfqk, shared_qk, {0, 0, local_g_idx, global_w_idx});
                        event_sfqk_trans = tops::transpose_async(dte_sfqk_trans, private_sfqk_trans, private_sfqk_reformat, {1, 0, 2});

                        event_v_input = tops::slice_async(dte_v_input, private_value, global_v, {global_bs_idx, global_head_n_idx, global_w_idx, 0});
                        event_v_trans = tops::transpose_async(dte_v_trans, private_value_trans, private_value_reformat, {1, 0, 2});

                        tops::wait(event_sfqk_trans);
                        tops::wait(event_v_trans);

                        // Sub-matrix GEMM; the (w, k_loops) arguments
                        // presumably select accumulate-over-K behavior
                        // (first vs. subsequent partial product) — confirm
                        // against dot_general_fp32's contract.
                        dot_general_fp32(lhs_addr, rhs_addr, M_TILE_SIZE, M_TILE_SIZE, N_TILE_SIZE, w, k_loops, out_addr);
                    }

                    // Write the finished output tile to global memory.
                    event_qkv_output = tops::deslice_async(dte_qkv_output, global_out, private_qkv, {global_bs_idx, global_head_n_idx, global_g_idx, 0});
                    tops::wait(event_qkv_output);
                }
                base_idx += m_loops * M_TILE_SIZE;
            }
        }
    }

    // Tear down all DTE contexts.
    dte_q_input.destroy();
    dte_k_input.destroy();
    dte_q_trans.destroy();
    dte_k_trans.destroy();
    dte_qk_output.destroy();

    dte_qk_output_shared.destroy();
    dte_qk_pad.destroy();
    dte_qk_res.destroy();

    dte_v_input.destroy();
    dte_sfqk_input.destroy();
    dte_v_trans.destroy();
    dte_sfqk_trans.destroy();
    dte_qkv_output.destroy();
}

void GCU_SDPA(float *__restrict dev_query, float *__restrict dev_key,
              float *__restrict dev_value, float *__restrict dev_out,
              const int bs, const int head_n, const int seq_len,
              const int head_size)
{
    static const size_t blocks = 2;
    static const size_t threads = 8;

    // 调用kernel
    kernel_sdpa<<<blocks, threads>>>(dev_query, dev_key, dev_value, dev_out, bs, head_n, seq_len, head_size);
}