/**
 * __device__ __forceinline__ int GetThreadNum(void);
 * @brief Get Thread(sip) number
 * @param
 * @return Thread(sip) number
 */

/**
 * __device__ __forceinline__ int GetThreadIdx(void)
 * @brief Get global thread(sip) idx
 * @param
 * @return global thread(sip) idx
 */

/**
 * __device__ void dot_general_fp32(int lhs_addr, int rhs_addr, int M, int K, int N, int reduce_index, int reduce_cnt, int out_addr)
 * @brief
 * M is a multiple of 16
 * N is a multiple of 32
 * K is a multiple of 32
 * Semantic: [M,K] * [K,N] == [M,N]
 * DataFormat: [K/32,M,32] * [N/32,K,32] == [M,N]
 * @param lhs_addr lhs addr
 * @param rhs_addr rhs addr
 * @param M
 * @param K
 * @param N
 * @param reduce_index index of the current K-reduction chunk
 * @param reduce_cnt total number of K-reduction chunks
 * @param out_addr output addr
 */
// All test cases guarantee M <= 4096, N <= 4096, K <= 4096

// Tile edge length (in elements) used for blocking M, K and N.
#define TILE_SIZE 128

/**
 * Cooperative GEMM kernel: out[M,N] = lhs[M,K] * rhs[K,N], fp32.
 *
 * Work split: the M dimension is tiled by TILE_SIZE and the M tiles are
 * distributed round-robin across the worker threads (sips); each thread
 * then walks the full N and K ranges for every M tile it owns.
 *
 * Preconditions (per the file header): M is a multiple of 16, N and K are
 * multiples of 32, and M, N, K <= 4096.
 */
__attribute__((global, cooperative)) void kernel_gemm(float *lhs, float *rhs, float *out, int M, int K, int N)
{
    // Total number of threads (sips) and this thread's global index.
    int thread_num = GetThreadNum();
    int thread_idx = GetThreadIdx();

    // DMA engines: one load + one transpose engine per operand, one store
    // engine for the result. (The original declared a second, double-buffer
    // set of engines/events/buffers that was initialized but never used for
    // any transfer; it has been removed, along with two unused __shared__
    // arrays.)
    tops_dte_ctx_t dte_lhs_load; // L3 -> L1 slice of lhs
    tops_dte_ctx_t dte_lhs_tr;   // in-L1 transpose of lhs
    tops_dte_ctx_t dte_rhs_load; // L3 -> L1 slice of rhs
    tops_dte_ctx_t dte_rhs_tr;   // in-L1 transpose of rhs
    tops_dte_ctx_t dte_out;      // L1 -> L3 store of the output tile

    dte_lhs_load.init();
    dte_lhs_tr.init();
    dte_rhs_load.init();
    dte_rhs_tr.init();
    dte_out.init();

    // Completion events: waiting on a transpose also covers its connected
    // load (see connect() below); e_out_done covers the store.
    tops::event e_lhs_ready;
    tops::event e_rhs_ready;
    tops::event e_out_done;

    // Tile sizes along M, K and N.
    static const int M_TILING = TILE_SIZE;
    static const int K_TILING = TILE_SIZE;
    static const int N_TILING = TILE_SIZE;

    // Minimum K and N lengths consumed by one dot_general_fp32 call.
    static const int KERNEL_K_LEN = 32;
    static const int KERNEL_N_LEN = 32;

    //! L3 (global) shapes.
    int32_t global_lhs_shape[2] = {M, K};
    int32_t global_rhs_shape[2] = {K, N};
    int32_t global_out_shape[2] = {M, N};
    // L1 tile shapes.
    int32_t private_lhs_shape[2] = {M_TILING, K_TILING};
    int32_t private_rhs_shape[2] = {K_TILING, N_TILING};
    int32_t private_out_shape[2] = {M_TILING, N_TILING};
    // L1 reshape, 128 * 4 * 32.
    int32_t private_lhs_shape_reformat[3] = {M_TILING, K_TILING / KERNEL_K_LEN, KERNEL_K_LEN};
    int32_t private_rhs_shape_reformat[3] = {K_TILING, N_TILING / KERNEL_N_LEN, KERNEL_N_LEN};
    // L1 transpose target, 4 * 128 * 32 — the layout dot_general_fp32 expects
    // ([K/32,M,32] and [N/32,K,32] per its header comment).
    int32_t private_lhs_trans_shape[3] = {K_TILING / KERNEL_K_LEN, M_TILING, KERNEL_K_LEN};
    int32_t private_rhs_trans_shape[3] = {N_TILING / KERNEL_N_LEN, K_TILING, KERNEL_N_LEN};

    // mdspan views over L3 memory.
    tops::mdspan global_lhs(tops::Global, lhs, global_lhs_shape);
    tops::mdspan global_rhs(tops::Global, rhs, global_rhs_shape);
    tops::mdspan global_out(tops::Global, out, global_out_shape);

    // L1 tile sizes in ELEMENTS. The original multiplied by sizeof(float)
    // and then used the result as a float-array element count, over-
    // allocating every L1 buffer by 4x (256 KB instead of 64 KB each).
    static const int LHS_L1_ELEMS = M_TILING * K_TILING;
    static const int RHS_L1_ELEMS = K_TILING * N_TILING;
    static const int OUT_L1_ELEMS = M_TILING * N_TILING;

    // L1 buffers.
    __valigned__ float lhs_l1_buffer[LHS_L1_ELEMS];
    __valigned__ float lhs_trans_l1_buffer[LHS_L1_ELEMS];
    __valigned__ float rhs_l1_buffer[RHS_L1_ELEMS];
    __valigned__ float rhs_trans_l1_buffer[RHS_L1_ELEMS];
    __valigned__ float out_l1_buffer[OUT_L1_ELEMS];

    // L1 mdspans (reformat views alias the same buffer as the 2-D views).
    tops::mdspan private_lhs(tops::Private, lhs_l1_buffer, private_lhs_shape);
    tops::mdspan private_lhs_reformat(tops::Private, lhs_l1_buffer, private_lhs_shape_reformat);
    tops::mdspan private_lhs_trans(tops::Private, lhs_trans_l1_buffer, private_lhs_trans_shape);

    tops::mdspan private_rhs(tops::Private, rhs_l1_buffer, private_rhs_shape);
    tops::mdspan private_rhs_reformat(tops::Private, rhs_l1_buffer, private_rhs_shape_reformat);
    tops::mdspan private_rhs_trans(tops::Private, rhs_trans_l1_buffer, private_rhs_trans_shape);

    tops::mdspan private_out(tops::Private, out_l1_buffer, private_out_shape);

    // Tile counts (ceil-div) along M, N and K.
    auto m_loops = (M + M_TILING - 1) / M_TILING;
    auto n_loops = (N + N_TILING - 1) / N_TILING;
    auto k_loops = (K + K_TILING - 1) / K_TILING;

    // Retire threads that have no M tile to work on.
    auto nr_working_threads = m_loops <= thread_num ? m_loops : thread_num;
    if (thread_idx >= nr_working_threads)
    {
        return;
    }
    // Number of M tiles owned by this thread (round-robin distribution).
    auto base_loops = m_loops / nr_working_threads;
    auto thread_m_loops = base_loops + (base_loops * nr_working_threads + thread_idx < m_loops);

    // Chain load -> transpose: when the connected load DMA completes, the
    // transpose DMA fires automatically, so only the transpose event needs
    // to be waited on.
    dte_lhs_load.connect(dte_lhs_tr);
    dte_rhs_load.connect(dte_rhs_tr);

    // Outer loop over this thread's M tiles.
    for (auto i = 0; i < thread_m_loops; ++i)
    {
        auto global_m_index = (i * nr_working_threads + thread_idx) * M_TILING;
        // Inner loop over N tiles.
        for (auto j = 0; j < n_loops; ++j)
        {
            auto global_n_index = j * N_TILING;
            long lhs_addr = (long)(lhs_trans_l1_buffer);
            long rhs_addr = (long)(rhs_trans_l1_buffer);
            long out_addr = (long)(out_l1_buffer);

            // Reduce over K. The original computed k_loops but never looped
            // over it: only the first K_TILING columns of K were consumed
            // (reduce_index=0, reduce_cnt=1), producing wrong results for
            // K > K_TILING.
            for (auto k = 0; k < k_loops; ++k)
            {
                auto global_k_index = k * K_TILING;

                // lhs: L3 [M,K] -> L1 [M_TILING,K_TILING], then transpose
                // [128,4,32] -> [4,128,32] in L1.
                tops::slice_async(dte_lhs_load, private_lhs, global_lhs, {global_m_index, global_k_index});
                e_lhs_ready = tops::transpose_async(dte_lhs_tr, private_lhs_trans, private_lhs_reformat, {1, 0, 2});

                // rhs: L3 [K,N] -> L1 [K_TILING,N_TILING], then transpose
                // [128,4,32] -> [4,128,32] in L1.
                tops::slice_async(dte_rhs_load, private_rhs, global_rhs, {global_k_index, global_n_index});
                e_rhs_ready = tops::transpose_async(dte_rhs_tr, private_rhs_trans, private_rhs_reformat, {1, 0, 2});

                // Wait for both operand tiles (load + transpose) to land.
                tops::wait(e_lhs_ready);
                tops::wait(e_rhs_ready);

                // Sub-matrix GEMM on the current K chunk.
                // NOTE(review): assumes dot_general_fp32 initializes the
                // output when reduce_index == 0 and accumulates for
                // 0 < reduce_index < reduce_cnt, matching its documented
                // (reduce_index, reduce_cnt) signature — confirm against the
                // device library.
                dot_general_fp32(lhs_addr, rhs_addr, M_TILING, K_TILING, N_TILING, k, k_loops, out_addr);
            }

            // Store the finished [M_TILING,N_TILING] tile L1 -> L3.
            // (The original comment said "L3->L1"; the direction is L1->L3.)
            e_out_done = tops::deslice_async(dte_out, global_out, private_out, {global_m_index, global_n_index});
            tops::wait(e_out_done);
        }
    }

    // Release the DMA engines.
    dte_lhs_load.destroy();
    dte_lhs_tr.destroy();
    dte_rhs_load.destroy();
    dte_rhs_tr.destroy();
    dte_out.destroy();
}
/**
 * Host-side entry point: launches kernel_gemm so that
 * dev_out[m,n] = dev_lhs[m,k] * dev_rhs[k,n] (fp32, device pointers).
 */
void GCU_GEMM(float *__restrict dev_lhs, float *__restrict dev_rhs, float *__restrict dev_out, const int m, const int k, const int n)
{
    // Fixed launch configuration: one block of 12 threads (sips).
    static const size_t kNumBlocks = 1;
    static const size_t kThreadsPerBlock = 12;
    // Launch the kernel.
    kernel_gemm<<<kNumBlocks, kThreadsPerBlock>>>(dev_lhs, dev_rhs, dev_out, m, k, n);
}