/**
 * __device__ void dot_general_fp32(int lhs_addr, int rhs_addr, int M, int K, int N, int reduce_index, int reduce_cnt, int out_addr)
 * @brief
 * M must be a multiple of 16
 * N must be a multiple of 32
 * K must be a multiple of 32
 * Semantic: [M,K] * [K,N] == [M,N]
 * DataFormat: [K/32,M,32] * [N/32,K,32] == [M,N]
 * @param lhs_addr lhs addr
 * @param rhs_addr rhs addr
 * @param M
 * @param K
 * @param N
 * @param reduce_index reduce index
 * @param reduce_cnt total reduce times
 * @param out_addr output addr
 */

/**
 * @brief Tiled GEMM kernel: out[M,N] = lhs[M,K] * rhs[K,N].
 *
 * Work is parallelized across threads along the M dimension only; each
 * thread owns a set of M tiles and iterates over all N and K tiles.
 * For every (M,N) tile the K dimension is reduced by repeated calls to
 * dot_general_fp32, which accumulates across the K tiles via its
 * reduce_index / reduce_cnt arguments.
 *
 * NOTE(review): the tile loops use ceil-division but the DMA slices are
 * not masked at the tails, so M, K, N are assumed to be multiples of the
 * tile sizes (consistent with the dot_general_fp32 header) — confirm.
 */
__global__
void kernel_gemm(float *lhs, float *rhs, float *out, int M, int K, int N)
{
    // Total number of worker threads; must match the launch configuration
    // used in GCU_GEMM (<<<2, 12>>> -> 24 threads).
    int thread_num = 24; // TODO: pass via NTTP (<blocks * threads_per_block>)

    // Global linear index of the current thread.
    int const thread_idx = blockIdx.x * blockDim.x + threadIdx.x;

    // DTE (DMA) contexts. Each *_trans context is chained below to its
    // load context so that a load is automatically followed by the
    // corresponding in-L1 transpose.
    tops_dte_ctx_t dte_lhs[2];       // lhs L3->L1 loads
    tops_dte_ctx_t dte_rhs[2];       // rhs L3->L1 loads
    tops_dte_ctx_t dte_lhs_trans[2]; // lhs in-L1 transposes
    tops_dte_ctx_t dte_rhs_trans[2]; // rhs in-L1 transposes
    tops_dte_ctx_t dte_out;          // out L1->L3 store

    // Initialize all DTE contexts.
    // FIXME: use dte_scope instead for scoped init/teardown.
    dte_lhs[0].init();
    dte_lhs[1].init();
    dte_rhs[0].init();
    dte_rhs[1].init();
    dte_lhs_trans[0].init();
    dte_lhs_trans[1].init();
    dte_rhs_trans[0].init();
    dte_rhs_trans[1].init();
    dte_out.init();

    // Tile sizes along M, K and N.
    constexpr int M_TILING = 64;
    constexpr int K_TILING = 128;
    constexpr int N_TILING = 128;

    // Minimum K / N extents consumed by one dot_general_fp32 call.
    constexpr int KERNEL_K_LEN = 32;
    constexpr int KERNEL_N_LEN = 32;

    // Full (L3 / global) tensor shapes.
    int32_t global_lhs_shape[2] = {M, K};
    int32_t global_rhs_shape[2] = {K, N};
    int32_t global_out_shape[2] = {M, N};

    // L1 tile shapes.
    int32_t private_lhs_shape[2] = {M_TILING, K_TILING};
    int32_t private_rhs_shape[2] = {K_TILING, N_TILING};
    int32_t private_out_shape[2] = {M_TILING, N_TILING};

    // L1 tiles reshaped so the innermost dimension matches the kernel's
    // 32-element lane: [M, K/32, 32] and [K, N/32, 32].
    int32_t private_lhs_shape_reformat[3] = {M_TILING, K_TILING / KERNEL_K_LEN, KERNEL_K_LEN};
    int32_t private_rhs_shape_reformat[3] = {K_TILING, N_TILING / KERNEL_N_LEN, KERNEL_N_LEN};

    // Transposed L1 layouts expected by dot_general_fp32:
    // [K/32, M, 32] and [N/32, K, 32].
    int32_t private_lhs_trans_shape[3] = {K_TILING / KERNEL_K_LEN, M_TILING, KERNEL_K_LEN};
    int32_t private_rhs_trans_shape[3] = {N_TILING / KERNEL_N_LEN, K_TILING, KERNEL_N_LEN};

    // mdspan views over L3 (global) memory.
    tops::mdspan global_lhs(tops::Global, lhs, global_lhs_shape);
    tops::mdspan global_rhs(tops::Global, rhs, global_rhs_shape);
    tops::mdspan global_out(tops::Global, out, global_out_shape);

    // Element counts of the L1 tiles. The buffers below are float arrays,
    // so the array length must be an element count, NOT a byte count: the
    // previous code multiplied by sizeof(float) here and over-allocated
    // the scarce L1 scratch memory by 4x.
    constexpr int LHS_L1_BUFFER_ELEMS = M_TILING * K_TILING;
    constexpr int RHS_L1_BUFFER_ELEMS = K_TILING * N_TILING;
    constexpr int OUT_L1_BUFFER_ELEMS = M_TILING * N_TILING;

    // L1 (private) scratch buffers.
    __valigned__ float lhs_l1_buffer0[LHS_L1_BUFFER_ELEMS];       // lhs tile
    __valigned__ float lhs_trans_l1_buffer0[LHS_L1_BUFFER_ELEMS]; // transposed lhs tile
    __valigned__ float rhs_l1_buffer0[RHS_L1_BUFFER_ELEMS];       // rhs tile
    __valigned__ float rhs_trans_l1_buffer0[RHS_L1_BUFFER_ELEMS]; // transposed rhs tile
    __valigned__ float out_l1_buffer0[OUT_L1_BUFFER_ELEMS];       // out tile

    // L1 mdspan views.
    // lhs: raw tile, reformatted view of the same buffer, transposed tile.
    tops::mdspan private_lhs0(tops::Private, lhs_l1_buffer0, private_lhs_shape);
    tops::mdspan private_lhs0_reformat(tops::Private, lhs_l1_buffer0, private_lhs_shape_reformat);
    tops::mdspan private_lhs0_trans(tops::Private, lhs_trans_l1_buffer0, private_lhs_trans_shape);

    // rhs: raw tile, reformatted view of the same buffer, transposed tile.
    tops::mdspan private_rhs0(tops::Private, rhs_l1_buffer0, private_rhs_shape);
    tops::mdspan private_rhs0_reformat(tops::Private, rhs_l1_buffer0, private_rhs_shape_reformat);
    tops::mdspan private_rhs0_trans(tops::Private, rhs_trans_l1_buffer0, private_rhs_trans_shape);

    // out tile.
    tops::mdspan private_out0(tops::Private, out_l1_buffer0, private_out_shape);

    // Number of tiles along each dimension (ceil division).
    auto m_loops = (M + M_TILING - 1) / M_TILING;
    auto n_loops = (N + N_TILING - 1) / N_TILING;
    auto k_loops = (K + K_TILING - 1) / K_TILING;

    // Threads that actually do work (parallelism is over M tiles only).
    auto nr_working_threads = m_loops <= thread_num ? m_loops : thread_num;

    // Retire surplus threads.
    if (thread_idx >= nr_working_threads)
    {
        return;
    }

    // M tiles handled by this thread; the remainder tiles go one each to
    // the lowest-indexed threads (the predicate is true iff
    // thread_idx < m_loops % nr_working_threads).
    auto base_loops = m_loops / nr_working_threads;
    auto thread_m_loops = base_loops + (base_loops * nr_working_threads + thread_idx < m_loops);

    auto thread_k_loops = k_loops;
    auto thread_n_loops = n_loops;

    // Chain each transpose DTE to its load DTE: when one DMA finishes, the
    // connected one is triggered automatically without manual hand-off.
    // NOTE(review): the *_trans contexts drive the loads and the plain
    // contexts drive the transposes below — the naming looks swapped;
    // confirm against connect()'s direction semantics.
    dte_lhs_trans[0].connect(dte_lhs[0]);
    dte_lhs_trans[1].connect(dte_lhs[1]);
    dte_rhs_trans[0].connect(dte_rhs[0]);
    dte_rhs_trans[1].connect(dte_rhs[1]);

    // L1 addresses handed to dot_general_fp32; loop-invariant, so compute
    // them once instead of every K iteration.
    auto lhs_addr = long(lhs_trans_l1_buffer0);
    auto rhs_addr = long(rhs_trans_l1_buffer0);
    auto out_addr = long(out_l1_buffer0);

    for (auto i = 0; i < thread_m_loops; ++i) // loop over this thread's M tiles
    {
        // Tile index and element offset of this thread along M.
        auto global_m_offset = i * nr_working_threads + thread_idx;
        auto global_m_index = global_m_offset * M_TILING;

        for (auto j = 0; j < thread_n_loops; ++j) // loop over N tiles
        {
            // Element offset along N.
            auto global_n_index = j * N_TILING;

            for (auto k = 0; k < thread_k_loops; ++k) // reduce over K tiles
            {
                // Element offset along K.
                auto global_k_index = k * K_TILING;

                // Load lhs tile L3->L1 ([M,K] -> [M_TILING, K_TILING]),
                // then transpose it in L1 to [K_TILING/32, M_TILING, 32].
                tops::slice_async(dte_lhs_trans[0], private_lhs0, global_lhs, {global_m_index, global_k_index});
                auto event_lhs0 = tops::transpose_async(dte_lhs[0], private_lhs0_trans, private_lhs0_reformat, {1, 0, 2});

                // Load rhs tile L3->L1 ([K,N] -> [K_TILING, N_TILING]),
                // then transpose it in L1 to [N_TILING/32, K_TILING, 32].
                tops::slice_async(dte_rhs_trans[0], private_rhs0, global_rhs, {global_k_index, global_n_index});
                auto event_rhs0 = tops::transpose_async(dte_rhs[0], private_rhs0_trans, private_rhs0_reformat, {1, 0, 2});

                tops::wait(event_lhs0);
                tops::wait(event_rhs0);

                // Sub-matrix GEMM; k / thread_k_loops tell the kernel where
                // we are in the K reduction so it can accumulate correctly.
                dot_general_fp32(lhs_addr, rhs_addr, M_TILING, K_TILING, N_TILING, k, thread_k_loops, out_addr);

            } // end K reduction

            // Store the finished out tile L1 -> L3.
            tops::deslice(dte_out, global_out, private_out0, {global_m_index, global_n_index});
        }
    }
}

/**
 * @brief Host-side entry point: launches the tiled GEMM kernel.
 *
 * Computes dev_out[m,n] = dev_lhs[m,k] * dev_rhs[k,n] on the device.
 *
 * @param dev_lhs device pointer to the [m,k] left operand
 * @param dev_rhs device pointer to the [k,n] right operand
 * @param dev_out device pointer to the [m,n] output
 * @param m       rows of lhs / out
 * @param k       reduction length
 * @param n       columns of rhs / out
 */
void GCU_GEMM(float *__restrict dev_lhs, float *__restrict dev_rhs, float *__restrict dev_out, const int m, const int k, const int n)
{
    // Launch configuration. kernel_gemm hard-codes its worker count as 24,
    // so NUM_BLOCKS * THREADS_PER_BLOCK must stay equal to 24.
    constexpr int NUM_BLOCKS = 2;
    constexpr int THREADS_PER_BLOCK = 12;
    kernel_gemm<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(dev_lhs, dev_rhs, dev_out, m, k, n);
}