/**
 * __device__ __forceinline__ int GetThreadNum(void)
 * @brief Get Thread(sip) number
 * @param
 * @return Thread(sip) number
 */

/**
 * __device__ __forceinline__ int GetThreadIdx(void)
 * @brief Get global thread(sip) idx
 * @param
 * @return global thread(sip) idx
 */

/**
 * __device__ void dot_general_fp32(int lhs_addr, int rhs_addr, int M, int K,
 * int N, int reduce_index, int reduce_cnt, int out_addr)
 * @brief
 * Constraints: M must be a multiple of 16; N and K must be multiples of 32.
 * Semantic: [M,K] * [K,N] == [M,N]
 * DataFormat: [K/32,M,32] * [N/32,K,32] == [M,N]
 * @param lhs_addr lhs addr
 * @param rhs_addr rhs addr
 * @param M
 * @param K
 * @param N
 * @param reduce_index
 * @param reduce_cnt
 * @param out_addr output addr
 */

/**
 * __device__ void reduce_dim0_fp32(int in_addr_, int dim1, int dim0, int
 * out_addr_)
 *
 * @details The data type is fp32, and the input data would be reshape
 *           to (dim1, dim0).
 *
 * @brief Applies the function:
 *        The input data (dim1, dim0) would be reduced_sum into (dim1, 1).
 *
 * @param in_addr_ The starting address of the input data, must be 128Byte
 *                 aligned.
 * @param dim1 The value of dim1 in reshaped dimension (dim1, dim0).
 * @param dim0 The value of dim0 in reshaped dimension (dim1, dim0). dim0 == 32X
 * @param out_addr_ The starting address of the output data, must be 128Byte
 *                  aligned.
 * @attention The space from the end of the input to the 128Byte alignment must
 *            be readable, and the space from the end of the output to the
 *            128Byte alignment must be writable.
 *            All functions in the current file follow this rule.
 */

// example code as follow
/**
for b in range(bs)
    for h = thread_idx; h < head_n; h += thread_num
        for s_q=0; s_q < seq_len_align; s_q += seq_sub
            load query(slice and transpose)-> private_q

            for s = 0; s < seq_len_align; s += seq_sub
                load key(slice and transpose)-> private_k
                private_q @ private_k -> private_qk (shape=1,1,seq_sub,seq_sub)

                for v_data in private_qk
                    exp(v_data * scale + mask) -> private_qk_exp
                reduce_sum(private_qk_exp) -> private_exp_sum

                store private_qk_exp -> shared_qk_exp

            transpose private_exp_sum with {1, 0} -> private_exp_sum_t
            reduce_sum(private_exp_sum_t) -> private_exp_sum_final

            for s = 0; s < seq_len_align; s += seq_sub
                load shared_qk_exp -> private_qk_exp
                for k in range(seq_sub)
                    sum = private_exp_sum_final[k]
                    sum -> v_sum
                    for v_data in private_qk_exp
                        v_data / v_sum -> private_qk_att

                load value(slice and transpose)-> private_value
                private_qk_att @ private_value -> private_out

            store private_out -> global_out
**/

// Device kernel entry point for scaled dot-product attention (SDPA).
//
// NOTE(review): the body is an unimplemented stub — the kernel currently does
// nothing. The intended algorithm is given in the pseudocode comment earlier
// in this file (per-batch/per-head flash-attention-style loop: Q@K^T, scaled
// exp, row-wise sum, normalize, then @V).
//
// Parameters (device pointers; semantics inferred from names and the
// pseudocode — TODO confirm):
//   dev_query / dev_key / dev_value — input tensors; presumably laid out as
//                                     (bs, head_n, seq_len, head_size).
//   dev_out    — output tensor, same presumed layout as the inputs.
//   bs         — batch size (pseudocode loops `for b in range(bs)`).
//   head_n     — number of attention heads (pseudocode strides heads by
//                thread, `h += thread_num`).
//   seq_len    — sequence length.
//   head_size  — per-head embedding size.
//
// Marked `cooperative`, so a grid-wide launch contract presumably applies —
// verify against the GCU toolchain's cooperative-launch requirements.
__attribute__((global, cooperative)) void kernel_sdpa(
    float *__restrict dev_query, float *__restrict dev_key,
    float *__restrict dev_value, float *__restrict dev_out, const int bs,
    const int head_n, const int seq_len, const int head_size) {
  // TODO: implement using the device helpers documented above
  // (GetThreadNum/GetThreadIdx for work partitioning, dot_general_fp32 for
  // the Q@K^T and P@V matmuls, reduce_dim0_fp32 for the softmax row sums).
}

/**
 * Host-side wrapper: launches the SDPA kernel on the device.
 *
 * @param dev_query device pointer to the query tensor
 * @param dev_key device pointer to the key tensor
 * @param dev_value device pointer to the value tensor
 * @param dev_out device pointer to the output tensor
 * @param bs batch size
 * @param head_n number of attention heads
 * @param seq_len sequence length
 * @param head_size per-head embedding size
 */
void GCU_SDPA(float *__restrict dev_query, float *__restrict dev_key,
              float *__restrict dev_value, float *__restrict dev_out,
              const int bs, const int head_n, const int seq_len,
              const int head_size) {
  // Fixed launch configuration: 2 blocks x 12 threads per block.
  constexpr size_t kBlockCount = 2;
  constexpr size_t kThreadsPerBlock = 12;

  // Launch the kernel.
  kernel_sdpa<<<kBlockCount, kThreadsPerBlock>>>(dev_query, dev_key, dev_value,
                                                 dev_out, bs, head_n, seq_len,
                                                 head_size);
}
