#include "kernel_operator.h"
#include "../../include/batch_matmul.h"
#include "../../include/batch_epilogue.h"


/**
 * @brief: group gemm
 * @param [in] layoutA: A矩阵排布格式
 * @param [in] layoutB: B矩阵排布格式
 * @param [in] zeroPaddingM: A、C矩阵零填充后的M维度
 * @param [in] zeroPaddingN: B、C矩阵零填充后的N维度
 * @param [in] zeroPaddingK: A、B矩阵零填充后的K维度
 * @param [in] batchCount: 批量矩阵乘的batch数
 * @param [in] d_validM: 每批矩阵乘的A、C矩阵M维度有效长度数组
 * @param [in] d_validN: 每批矩阵乘的B、C矩阵N维度有效长度数组
 * @param [in] d_validK: 每批矩阵乘的A、B矩阵K维度有效长度数组
 * @param [in] alpha: alpha*AB+beta*C
 * @param [in] d_A_pointer: 每批矩阵乘的零填充A矩阵首地址数组
 * @param [in] d_B_pointer: 每批矩阵乘的零填充B矩阵首地址数组
 * @param [in] beta:  alpha*AB+beta*C
 * @param [out] d_C_pointer: 每批矩阵乘的零填充C矩阵首地址数组
 * @param [out] d_AicAivWorkspace_Pointer: Aic Aiv 同步的GM空间首地址数组
 * @param [in] fftsAddr: 跨核同步需要的地址
 * @param [in] is_alpha1_beta0: 是否有 alpha==1.0 && beta==0.0
 */

 extern "C" __global__ [aicore] void LLMsGEMM_batch_QKTVP_device (
    layoutType layoutA, 
    layoutType layoutB, 
    int64_t zeroPaddingM, 
    int64_t zeroPaddingN, 
    int64_t zeroPaddingK, 
    int64_t batchCount, 
    __gm__ int64_t*  d_validM, 
    __gm__ int64_t*  d_validN, 
    __gm__ int64_t*  d_validK, 
    half alpha, 
    __gm__ half**  d_A_pointer, 
    __gm__ half**  d_B_pointer, 
    half beta,
    __gm__ half**  d_C_pointer, 
    // padding-related inputs: per-batch flags, substitute pointers and
    // leading dimensions used when A/B need re-padding
    __gm__ uint8_t *d_is_A_padding, 
    __gm__ uint8_t *d_is_B_padding, 
    __gm__ half** d_A_pointer_padding, 
    __gm__ half** d_B_pointer_padding, 
    __gm__ int64_t *d_valid_padding_lda, 
    __gm__ int64_t *d_valid_padding_ldb, 
    //
    __gm__ half** d_AicAivWorkspace_Pointer, 
    uint64_t fftsAddr,  
    uint8_t is_alpha1_beta0 
) {

#if __DAV_C220_CUBE__
    // Cube (AIC) core path: runs the batched matmul proper.
    // The FFTS base address must be configured before any cross-core
    // (AIC <-> AIV) synchronization primitive is issued.
    AscendC::SetSyncBaseAddr(fftsAddr);

    // Reset atomic mode and configure data-move behavior: zero-fill for
    // out-of-bounds loads, and NZ -> ND conversion on the fixpipe output.
    AscendC::SetAtomicNone();
    AscendC::SetLoadDataPaddingValue<uint64_t>((uint64_t)0);
    AscendC::SetFixpipeNz2ndFlag(1, 0, 0);

    BatchMatmul<L1M0, L1N0, L1K0, WORKSPACENUM>(
        layoutA, 
        layoutB, 
        zeroPaddingM, 
        zeroPaddingN, 
        zeroPaddingK, 
        batchCount,
        d_validM, 
        d_validN, 
        d_validK, 
        alpha, 
        d_A_pointer, 
        d_B_pointer, 
        beta, 
        d_C_pointer, 
        //
        d_is_A_padding, 
        d_is_B_padding, 
        d_A_pointer_padding, 
        d_B_pointer_padding, 
        d_valid_padding_lda, 
        d_valid_padding_ldb, 
        //
        d_AicAivWorkspace_Pointer,
        is_alpha1_beta0
    );

#elif __DAV_C220_VEC__
    // Vector (AIV) core path: runs the alpha/beta epilogue on the cube
    // cores' workspace output. Same FFTS setup requirement as above.
    AscendC::SetSyncBaseAddr(fftsAddr);

    AscendC::SetAtomicNone();
    // External mask setup for the tensor high-dimension-slicing compute
    // APIs: normal mask mode with all lanes enabled.
    AscendC::SetMaskNorm();
    // BUGFIX: the previous literal 0xfffffffffffffff had only 15 hex
    // digits (60 bits), silently disabling lanes 60-63 and 124-127 of
    // every vector operation. A full-lane mask needs all 64 bits set in
    // each half of the 128-bit mask.
    AscendC::SetVectorMask<half, AscendC::MaskMode::NORMAL>( 0xffffffffffffffff, 0xffffffffffffffff );


    BatchMatmulEpilogue<L1M0, L1N0, L1K0, WORKSPACENUM>(
        zeroPaddingM, 
        zeroPaddingN, 
        batchCount,
        d_validM, 
        d_validN, 
        alpha, 
        beta, 
        d_C_pointer, 
        d_AicAivWorkspace_Pointer,
        is_alpha1_beta0
    );

#endif

}
    
