#pragma once
#include "./custom_type.h"
#include "./data_transfer.h"
#include "./kernel_const.h"
#include "./kernel_utils.h"
#include "kernel_operator.h"

/**
 * @brief: Compute one result block on a single AI core:
 *         C block (L1_M0 x L1_N0) = A row-block (L1_M0 x K) * B column-block (K x L1_N0).
 *         K is processed in L1_K0-sized segments with GM->L1 prefetch inside the
 *         task block and a "preload" of the next task block's first segment.
 * @tparam L1_M0: M extent of the A block staged into L1
 * @tparam L1_N0: N extent of the B block staged into L1
 * @tparam L1_K0: K extent of the A/B segments staged into L1
 * @param [in] gm_tensorA: GM base address of the A row-block
 * @param [in] gm_tensorB: GM base address of the B column-block
 * @param [in] l1a_pingpongBuf / l1a_pingpongNum: L1 ping-pong buffers for A and their count
 * @param [in] l1b_pingpongBuf / l1b_pingpongNum: L1 ping-pong buffers for B and their count
 * @param [in] l0a_tensor / l0b_tensor: full L0A / L0B space (partitioned into double buffers below)
 * @param [out] l0c_pingpongBuf / l0c_pingpongNum: L0C result buffers and their count
 * @param [in] layoutA / layoutB: A/B layout descriptors (currently unused in this body)
 * @param [in] strideA: GM leading dimension between rows of the A block
 * @param [in] strideB: GM leading dimension between rows of the B block
 * @param [in] strideC: C leading dimension (unused here; the caller writes C out)
 * @param [in] gm_mActual: valid rows of the A block / result block
 * @param [in] gm_nActual: valid cols of the B block / result block
 * @param [in] K: full K extent shared by the A row-block and B column-block
 * @param [in] cur_aicore_block: index of the result block this core is computing
 *             (selects the L0C buffer and its event id)
 * @param [in] cur_aicore_k_split: cumulative number of K segments this core has
 *             already processed across previous task blocks (selects L1 buffers)
 * @param [in] gm_tensorAPreLoad / gm_tensorBPreLoad / strideAPreLoad / strideBPreLoad /
 *             gm_mActualPreLoad / gm_nActualPreLoad / KPreLoad:
 *             same meaning as the corresponding parameters above, but for the
 *             next task block whose first K segment is preloaded here
 * @param [in] isLastBlock: 1 if there is no next task block (skip the preload)
 */
template<
    uint32_t L1_M0,
    uint32_t L1_N0,
    uint32_t L1_K0
>
[aicore] inline __attribute__((always_inline)) void MatmulForResultBlock(
    AscendC::GlobalTensor<half> gm_tensorA, 
    AscendC::GlobalTensor<half> gm_tensorB, 
    AscendC::LocalTensor<half> l1a_pingpongBuf[], 
    uint32_t l1a_pingpongNum, 
    AscendC::LocalTensor<half> l1b_pingpongBuf[], 
    uint32_t l1b_pingpongNum, 
    AscendC::LocalTensor<uint8_t> l0a_tensor, 
    AscendC::LocalTensor<uint8_t> l0b_tensor, 
    AscendC::LocalTensor<float> l0c_pingpongBuf[], 
    uint32_t l0c_pingpongNum, 
    layoutType layoutA, 
    layoutType layoutB, 
    uint32_t strideA, 
    uint32_t strideB, 
    uint32_t strideC, 
    uint32_t gm_mActual, 
    uint32_t gm_nActual, 
    uint32_t K, 
    uint32_t cur_aicore_block, 
    uint32_t cur_aicore_k_split, 
    // Inter-task-block preload parameters.
    AscendC::GlobalTensor<half> gm_tensorAPreLoad, 
    AscendC::GlobalTensor<half> gm_tensorBPreLoad, 
    uint32_t strideAPreLoad, 
    uint32_t strideBPreLoad, 
    uint32_t gm_mActualPreLoad, 
    uint32_t gm_nActualPreLoad, 
    uint32_t KPreLoad, 
    uint8_t isLastBlock
){

    // M/N extents of the staged data, rounded up to the Mmad base-block size.
    uint32_t l1_mActualRoundBaseblock = RoundUp<uint32_t>(gm_mActual, L1L0_BASEBLOCK_M0);
    uint32_t l1_nActualRoundBaseblock = RoundUp<uint32_t>(gm_nActual, L1L0_BASEBLOCK_N0);
    // K is split into L1_K0-sized segments; each iteration rounds its own segment
    // to the base block and moves the data in.
    uint32_t l1_kTotalLoop = CeilDiv<uint32_t>(K, L1_K0);

    // shuffle-K: each core starts the K-segment sweep at a different offset
    // (blockIdx modulo shuffleKData) so cores do not all read the same GM region
    // at the same time. The offset only permutes the visit ORDER; every logical
    // segment is still processed exactly once.
    uint64_t shuffleKData = 4;

    // l1_loop_k_logic counts segments in [0, l1_kTotalLoop); the physical segment
    // index l1_loop_k adds the per-core shuffle offset (mod l1_kTotalLoop).
    uint32_t l1_loop_k_logic = 0;
    // First task block of this core: L1 holds no data yet, so fetch the first
    // segment up front. Later task blocks are fed by the inter-block preload below.
    if(cur_aicore_block == 0)
    {
        uint32_t l1_loop_k = (l1_loop_k_logic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop; 

        // K extent of this segment (the last physical segment may be short).
        uint32_t gm_kActual = (l1_loop_k == l1_kTotalLoop-1 ? K - l1_loop_k * L1_K0 : L1_K0 );
        uint32_t l1_kActualRoundBaseblock = RoundUp<uint32_t>(gm_kActual, L1L0_BASEBLOCK_K0);
        // Global K-segment counter of this core (across task blocks): selects the
        // L1 buffer and the matching event id.
        uint32_t l1_loop_k_pingpong = l1_loop_k_logic + cur_aicore_k_split;
        
        // Wait until the selected L1 A buffer has been consumed (MTE1 done)
        // before overwriting it from GM.
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loop_k_pingpong % l1a_pingpongNum));
        Gm2L1_Nd2Nz(
            l1a_pingpongBuf[l1_loop_k_pingpong % l1a_pingpongNum], 
            gm_tensorA[l1_loop_k * L1_K0], 
            gm_mActual, 
            gm_kActual, 
            strideA, 
            l1_mActualRoundBaseblock, 
            l1_kActualRoundBaseblock
        );
        // A data is now in L1; MTE1 may read it.
        AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loop_k_pingpong % l1a_pingpongNum));


        // Same protocol for B. B event ids are offset by l1a_pingpongNum so A and
        // B buffers never share an event id.
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loop_k_pingpong % l1b_pingpongNum));
        Gm2L1_Nd2Nz(
            l1b_pingpongBuf[l1_loop_k_pingpong % l1b_pingpongNum], 
            gm_tensorB[l1_loop_k * L1_K0 * strideB], 
            gm_kActual, 
            gm_nActual, 
            strideB, 
            l1_kActualRoundBaseblock, 
            l1_nActualRoundBaseblock
        );
        // B data is now in L1; MTE1 may read it.
        AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loop_k_pingpong % l1b_pingpongNum));
    }

    // Per-K-segment compute loop.
    for( ; l1_loop_k_logic < l1_kTotalLoop; l1_loop_k_logic++ ){
        
        uint32_t l1_loop_k = (l1_loop_k_logic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop;

        // K extent of this segment and its base-block-aligned size.
        uint32_t gm_kActual = (l1_loop_k == l1_kTotalLoop-1 ? K - l1_loop_k * L1_K0 : L1_K0 );
        uint32_t l1_kActualRoundBaseblock = RoundUp<uint32_t>(gm_kActual, L1L0_BASEBLOCK_K0);
        // Global K-segment counter of this core: selects L1 buffer / event id.
        uint32_t l1_loop_k_pingpong = l1_loop_k_logic + cur_aicore_k_split;
        
        // Intra-task-block prefetch: except on the last iteration, fetch the next
        // K segment while the current one is being computed.
        if( l1_loop_k_logic + 1 < l1_kTotalLoop ){

            uint32_t l1_loop_k_next_logic = l1_loop_k_logic + 1;
            uint32_t l1_loop_k_next = (l1_loop_k_next_logic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop; 
            uint32_t gm_kActualPrefetch = (l1_loop_k_next == l1_kTotalLoop-1 ? K - l1_loop_k_next * L1_K0 : L1_K0);
            uint32_t l1_kActualRoundBaseblockPrefetch = RoundUp<uint32_t>(gm_kActualPrefetch, L1L0_BASEBLOCK_K0);
            // Buffer / event id for the prefetched segment.
            uint32_t l1_loop_k_next_pingpong = l1_loop_k_next_logic + cur_aicore_k_split;

            // Wait for the target L1 A buffer to be consumed, then refill from GM.
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loop_k_next_pingpong % l1a_pingpongNum));
            Gm2L1_Nd2Nz(
                l1a_pingpongBuf[l1_loop_k_next_pingpong % l1a_pingpongNum], 
                gm_tensorA[l1_loop_k_next * L1_K0], 
                gm_mActual, 
                gm_kActualPrefetch, 
                strideA, 
                l1_mActualRoundBaseblock, 
                l1_kActualRoundBaseblockPrefetch
            );
            // Prefetched A is in L1; MTE1 may read it.
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loop_k_next_pingpong % l1a_pingpongNum));

            // Wait for the target L1 B buffer to be consumed, then refill from GM.
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loop_k_next_pingpong % l1b_pingpongNum));
            Gm2L1_Nd2Nz(
                l1b_pingpongBuf[l1_loop_k_next_pingpong % l1b_pingpongNum], 
                gm_tensorB[l1_loop_k_next * L1_K0 * strideB], 
                gm_kActualPrefetch, 
                gm_nActual, 
                strideB, 
                l1_kActualRoundBaseblockPrefetch, 
                l1_nActualRoundBaseblock
            );
            // Prefetched B is in L1; MTE1 may read it.
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loop_k_next_pingpong % l1b_pingpongNum));

        } // end of intra-task-block prefetch

        // Inter-task-block preload: on the last K segment (and when a next task
        // block exists), fetch segment 0 of the next block's A row / B column.
        if( l1_loop_k_logic == l1_kTotalLoop-1 && isLastBlock == 0){

            uint32_t l1_loop_k_preload_logic = 0;
            uint32_t l1_kTotalLoopPreLoad = CeilDiv<uint32_t>(KPreLoad, L1_K0);
            // Apply the same shuffle-K offset to the next block's segment index.
            uint32_t l1_loop_k_preload = (l1_loop_k_preload_logic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoopPreLoad;

            uint32_t gm_kActualPreLoad = (l1_loop_k_preload == l1_kTotalLoopPreLoad - 1 ? KPreLoad - l1_loop_k_preload * L1_K0 : L1_K0);
            uint32_t l1_mActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_mActualPreLoad, L1L0_BASEBLOCK_M0);
            uint32_t l1_nActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_nActualPreLoad, L1L0_BASEBLOCK_N0);
            uint32_t l1_kActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_kActualPreLoad, L1L0_BASEBLOCK_K0);

            // Logical segment counter continues past this block's l1_kTotalLoop,
            // matching the cur_aicore_k_split the caller will pass next time.
            uint32_t l1_loop_k_next_pingpong = l1_loop_k_preload_logic + l1_kTotalLoop + cur_aicore_k_split;

            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loop_k_next_pingpong % l1a_pingpongNum));
            Gm2L1_Nd2Nz(
                l1a_pingpongBuf[l1_loop_k_next_pingpong % l1a_pingpongNum], 
                gm_tensorAPreLoad[l1_loop_k_preload * L1_K0], 
                gm_mActualPreLoad, 
                gm_kActualPreLoad, 
                strideAPreLoad, 
                l1_mActualPreLoadRoundBaseblock, 
                l1_kActualPreLoadRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loop_k_next_pingpong % l1a_pingpongNum));

            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loop_k_next_pingpong % l1b_pingpongNum));
            Gm2L1_Nd2Nz(
                l1b_pingpongBuf[l1_loop_k_next_pingpong % l1b_pingpongNum], 
                gm_tensorBPreLoad[l1_loop_k_preload * L1_K0 * strideBPreLoad], 
                gm_kActualPreLoad, 
                gm_nActualPreLoad, 
                strideBPreLoad, 
                l1_kActualPreLoadRoundBaseblock, 
                l1_nActualPreLoadRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loop_k_next_pingpong % l1b_pingpongNum));

        } // end of inter-task-block preload

        // L0A/L0B stage: read A/B blocks from L1 into L0 and run Mmad. Unlike L1,
        // the (L1_M0, L1_N0, L1_K0) tile may not fit L0 in one piece, so K is
        // subdivided again (M/N unchanged); the sub-K must satisfy BOTH the L0A
        // capacity (given M) and the L0B capacity (given N).

        // L0A/L0B always use double buffering.
        static constexpr uint32_t l0ab_pingpongNum = 2;
        uint32_t l0ab_pingpongBufSize = L0AB_BYTES / l0ab_pingpongNum / sizeof(half);

        // Carve the raw L0A/L0B space into typed double buffers.
        AscendC::LocalTensor<half> l0a_pingpongBuf[l0ab_pingpongNum];
        AscendC::LocalTensor<half> l0b_pingpongBuf[l0ab_pingpongNum];

        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            l0a_pingpongBuf[i] = l0a_tensor[i * l0ab_pingpongBufSize * sizeof(half)].template ReinterpretCast<half>();
            l0b_pingpongBuf[i] = l0b_tensor[i * l0ab_pingpongBufSize * sizeof(half)].template ReinterpretCast<half>();
        }

        // Prime the double-buffer handshake: one event id per A/B buffer pair.
        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            AscendC::SetFlag<AscendC::HardEvent::M_MTE1>((event_t)i);
        }

        uint32_t l0_M0 = l1_mActualRoundBaseblock;
        uint32_t l0_N0 = l1_nActualRoundBaseblock;
        // Largest base-block-aligned sub-K that fits one L0A (resp. L0B) buffer.
        uint32_t l0_K0_l1a = RoundDown<uint32_t>(l0ab_pingpongBufSize / l0_M0, L1L0_BASEBLOCK_K0);
        uint32_t l0_K0_l1b = RoundDown<uint32_t>(l0ab_pingpongBufSize / l0_N0, L1L0_BASEBLOCK_K0);
        uint32_t l0_K0 = (l0_K0_l1a < l0_K0_l1b ? l0_K0_l1a : l0_K0_l1b);
        // Number of L1 -> L0 passes needed to cover this segment's K.
        uint32_t l0_kTotalLoop = CeilDiv<uint32_t>(l1_kActualRoundBaseblock, l0_K0);

        // L0 compute loop.
        for(uint32_t l0_loop_k = 0; l0_loop_k < l0_kTotalLoop; l0_loop_k++){
            // Sub-K actually moved this pass. No extra rounding is needed: the
            // data was already base-block-aligned when staged into L1.
            uint32_t l0_K0Actual = ( l0_loop_k == l0_kTotalLoop-1 ? l1_kActualRoundBaseblock-l0_loop_k*l0_K0 : l0_K0 );

            // Wait until Mmad is done with this L0A/L0B buffer pair.
            AscendC::WaitFlag<AscendC::HardEvent::M_MTE1>((event_t)(l0_loop_k % l0ab_pingpongNum));

            // On the first pass, acquire read access to the L1 A buffer.
            if(l0_loop_k == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loop_k_pingpong % l1a_pingpongNum));
            }
            L12L0_Nz2Zz(
                l0a_pingpongBuf[l0_loop_k % l0ab_pingpongNum], 
                l1a_pingpongBuf[l1_loop_k_pingpong % l1a_pingpongNum][l0_M0 * l0_K0 * l0_loop_k], 
                l0_M0, 
                l0_K0Actual, 
                l0_M0, 
                L1L0_BASEBLOCK_M0, 
                L1L0_BASEBLOCK_K0
            );
            // On the last pass, release the L1 A buffer for the next GM refill.
            if(l0_loop_k == l0_kTotalLoop - 1){
                AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loop_k_pingpong % l1a_pingpongNum));
            }
            // L0A written; compute may start (pipelined within the loop).
            AscendC::SetFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0_loop_k % l0ab_pingpongNum));

            
            // On the first pass, acquire read access to the L1 B buffer.
            if(l0_loop_k == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loop_k_pingpong % l1b_pingpongNum));
            }
            L12L0_Nz2Zn(
                l0b_pingpongBuf[l0_loop_k % l0ab_pingpongNum], 
                l1b_pingpongBuf[l1_loop_k_pingpong % l1b_pingpongNum][l0_K0 * l0_loop_k * L1L0_BASEBLOCK_N0], 
                l0_K0Actual, 
                l0_N0, 
                l1_kActualRoundBaseblock, 
                L1L0_BASEBLOCK_K0, 
                L1L0_BASEBLOCK_N0
            );
            // On the last pass, release the L1 B buffer for the next GM refill.
            if(l0_loop_k == l0_kTotalLoop - 1){
                AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loop_k_pingpong % l1b_pingpongNum));
            }

            // L0B written; compute may start (pipelined within the loop).
            AscendC::SetFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0ab_pingpongNum + l0_loop_k % l0ab_pingpongNum));

            // Wait for both L0A and L0B writes, then run Mmad.
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0_loop_k % l0ab_pingpongNum));
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0ab_pingpongNum + l0_loop_k % l0ab_pingpongNum));

            // True only for the very first accumulation of this result block.
            bool isFirstMmad = (l0_loop_k == 0 && l1_loop_k_logic == 0);

            // Acquire the L0C buffer only before the first Mmad of the block.
            if(isFirstMmad)
                AscendC::WaitFlag<AscendC::HardEvent::FIX_M>((event_t)(cur_aicore_block % l0c_pingpongNum));
            
            // isFirstMmad is passed as the last MmadParams field — presumably
            // cmatrixInitVal, so the first call initializes C and later calls
            // accumulate; TODO confirm against the AscendC Mmad API.
            AscendC::Mmad(
                l0c_pingpongBuf[cur_aicore_block % l0c_pingpongNum], 
                l0a_pingpongBuf[l0_loop_k % l0ab_pingpongNum],
                l0b_pingpongBuf[l0_loop_k % l0ab_pingpongNum],
                AscendC::MmadParams(
                    l0_M0, 
                    l0_N0, 
                    l0_K0Actual, 
                    0, 
                    false, 
                    isFirstMmad
                )
            );

            // NOTE(review): empirically, when the L0 tile is large enough the
            // previous Mmad always retires before the next issues and this
            // barrier could be skipped; it is kept unconditionally for safety.
            AscendC::PipeBarrier<PIPE_M>();
            

            // Mmad done with this L0A/L0B pair; MTE1 may refill it.
            AscendC::SetFlag<AscendC::HardEvent::M_MTE1>((event_t)(l0_loop_k % l0ab_pingpongNum));
        }

        // Drain the L0 double-buffer handshake (one wait per event id primed above).
        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            AscendC::WaitFlag<AscendC::HardEvent::M_MTE1>((event_t)i);
        }

    }

}

/**
 * @brief: Batched matrix multiply. Splits every result matrix into
 *         (L1_M0 x L1_N0) blocks, distributes the blocks round-robin over the
 *         AI cores, manages L1/L0C buffer allocation and synchronization, and
 *         calls MatmulForResultBlock for each block this core owns.
 * @param [in] L1_M0: M extent of the A block staged into L1
 * @param [in] L1_N0: N extent of the B block staged into L1
 * @param [in] L1_K0: K extent of the A/B blocks staged into L1
 * @param [in] WORKSPACE_NUM: number of GM workspace slots per core for AIC->AIV handoff
 * @param [in] layoutA: layout of the A matrices
 * @param [in] layoutB: layout of the B matrices
 * @param [in] zeroPaddingM: M extent of A/C after zero padding
 * @param [in] zeroPaddingN: N extent of B/C after zero padding
 * @param [in] zeroPaddingK: K extent of A/B after zero padding
 * @param [in] batchCount: number of matmuls in the batch
 * @param [in] d_validM: per-batch valid M extents of A/C
 * @param [in] d_validN: per-batch valid N extents of B/C
 * @param [in] d_validK: per-batch valid K extents of A/B
 * @param [in] alpha: scalar in alpha*A*B + beta*C
 * @param [in] d_A_pointer: per-batch base addresses of the zero-padded A matrices
 * @param [in] d_B_pointer: per-batch base addresses of the zero-padded B matrices
 * @param [in] beta: scalar in alpha*A*B + beta*C
 * @param [out] d_C_pointer: per-batch base addresses of the zero-padded C matrices
 * @param [out] d_AicAivWorkspace_Pointer: per-core GM workspace base addresses for AIC/AIV sync
 * @param [in] is_alpha1_beta0: nonzero iff alpha == 1.0 && beta == 0.0
 *             (half has no exact-equality op on the AI core, so the host decides)
 */
template<
    uint32_t L1_M0,
    uint32_t L1_N0,
    uint32_t L1_K0,
    uint32_t WORKSPACE_NUM>
[aicore] inline __attribute__((always_inline)) void BatchMatmul(
    layoutType layoutA, 
    layoutType layoutB, 
    int64_t zeroPaddingM, 
    int64_t zeroPaddingN,
    int64_t zeroPaddingK, 
    int64_t batchCount,
    __gm__ int64_t*  d_validM, 
    __gm__ int64_t*  d_validN, 
    __gm__ int64_t*  d_validK, 
    half alpha, 
    __gm__ half**  d_A_pointer, 
    __gm__ half**  d_B_pointer, 
    half beta, 
    __gm__ half**  d_C_pointer, 
    // Padding-related parameters (currently unused in this function body).
    __gm__ uint8_t* d_is_A_padding, 
    __gm__ uint8_t* d_is_B_padding, 
    __gm__ half** d_A_pointer_padding, 
    __gm__ half** d_B_pointer_padding, 
    __gm__ int64_t* d_valid_padding_lda, 
    __gm__ int64_t* d_valid_padding_ldb, 
    //
    __gm__ half** d_AicAivWorkspace_Pointer, 
    uint8_t is_alpha1_beta0 
){

    // Reserve the full L1 space. The TPipe is destroyed immediately after
    // InitBuffer: the raw uint8_t tensor is kept and partitioned manually below,
    // so TPipe's own buffer management is bypassed on purpose.
    AscendC::TBuf<AscendC::TPosition::A1> l1_buf;
    AscendC::TPipe l1_pipe;
    l1_pipe.InitBuffer(l1_buf, L1_BYTES);
    // The whole space is typed uint8_t; each sub-buffer is ReinterpretCast to
    // its element type when carved out.
    AscendC::LocalTensor<uint8_t> l1_tensor = l1_buf.Get<uint8_t>();
    l1_pipe.Destroy();

    // Reserve the full L0A space (same manual-management pattern as L1).
    AscendC::TBuf<AscendC::TPosition::A2> l0a_buf;
    AscendC::TPipe l0a_pipe;
    l0a_pipe.InitBuffer(l0a_buf, L0AB_BYTES);
    AscendC::LocalTensor<uint8_t> l0a_tensor = l0a_buf.Get<uint8_t>();
    l0a_pipe.Destroy();

    // Reserve the full L0B space.
    AscendC::TBuf<AscendC::TPosition::B2> l0b_buf;
    AscendC::TPipe l0b_pipe;
    l0b_pipe.InitBuffer(l0b_buf, L0AB_BYTES);
    AscendC::LocalTensor<uint8_t> l0b_tensor = l0b_buf.Get<uint8_t>();
    l0b_pipe.Destroy();

    // Reserve the full L0C space.
    AscendC::TBuf<AscendC::TPosition::CO1> l0c_buf;
    AscendC::TPipe l0c_pipe;
    l0c_pipe.InitBuffer(l0c_buf, L0C_BYTES);
    AscendC::LocalTensor<uint8_t> l0c_tensor = l0c_buf.Get<uint8_t>();
    l0c_pipe.Destroy();
 
    // Largest A/B blocks L1 can hold (unit: elements).
    static constexpr uint32_t l1_blockASize = L1_M0 * L1_K0; // 128*256 = 2^15 = 32K
    static constexpr uint32_t l1_blockBSize = L1_K0 * L1_N0; // 256*256 = 2^16 = 64K
    // L0C result-block size (unit: elements).
    static constexpr uint32_t l0c_blockSize = L1_M0 * L1_N0; // 128*256 = 2^15 = 32K

    // Number of L1 buffers for A and B: half of L1 each, capped at 4
    // (SetFlag/WaitFlag event_t ids range 0-7, shared between A and B).
    static constexpr uint32_t l1a_pingpongNum = (L1_BYTES / 2 / sizeof(half) / l1_blockASize) < 4 ? (L1_BYTES / 2 / sizeof(half) / l1_blockASize): 4 ;
    static constexpr uint32_t l1b_pingpongNum = (L1_BYTES / 2 / sizeof(half) / l1_blockBSize) < 4 ? (L1_BYTES / 2 / sizeof(half) / l1_blockBSize): 4 ;
    // Number of result blocks L0C can hold -> number of L0C buffers.
    // Note L0C stores float accumulators.
    static constexpr uint32_t l0c_pingpongNum = L0C_BYTES / (l0c_blockSize * sizeof(float));
    
    // Carve out the buffers.
    // NOTE: L1 and L0C buffer selection is driven by the core's GLOBAL task
    // counters (cur_aicore_block / cur_aicore_k_split passed into the per-block
    // kernel), not restarted from 0 for each block — that keeps buffer choice
    // and event-flag pairing consistent across task blocks.
    // L1 buffer arrays (array lengths must be constant expressions).
    AscendC::LocalTensor<half> l1a_pingpongBuf[l1a_pingpongNum];
    AscendC::LocalTensor<half> l1b_pingpongBuf[l1b_pingpongNum];
    // L0C buffer array (array length must be a constant expression).
    AscendC::LocalTensor<float> l0c_pingpongBuf[l0c_pingpongNum]; 
    // L1 A buffers at the start of L1.
    #pragma unroll
    for( uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        l1a_pingpongBuf[i] = l1_tensor[ i * l1_blockASize * sizeof(half) ].template ReinterpretCast<half>();
    }
    // L1 B buffers, placed after all A buffers.
    #pragma unroll
    for( uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        l1b_pingpongBuf[i] = l1_tensor[ (l1a_pingpongNum * l1_blockASize * sizeof(half)) + i * l1_blockBSize * sizeof(half) ].template ReinterpretCast<half>();
    }
    // L0C buffers.
    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        l0c_pingpongBuf[i] = l0c_tensor[ i * l0c_blockSize * sizeof(float) ].template ReinterpretCast<float>();
    }

    // Prime the handshakes: every buffer starts "free", so each gets one Set
    // that the first Wait in the per-block kernel will consume.
    // L1 A/B buffers (B event ids offset by l1a_pingpongNum).
    #pragma unroll
    for(uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(i));
    }
    #pragma unroll
    for(uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + i));
    }
    // L0C buffers.
    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        AscendC::SetFlag<AscendC::HardEvent::FIX_M>((event_t)(i));
    }

    // GM tensors for the current matmul's A/B/C (bound inside the loop).
    AscendC::GlobalTensor<half> gm_tensorA;
    AscendC::GlobalTensor<half> gm_tensorB;
    AscendC::GlobalTensor<half> gm_tensorC;
    // GM tensors for the inter-task-block preload.
    AscendC::GlobalTensor<half> gm_tensorAPreLoad;
    AscendC::GlobalTensor<half> gm_tensorBPreLoad;

    // Per-core GM workspace (used for the AIC->AIV handoff when beta/alpha work
    // is needed); the pointer array has MAX_AICORE_NUM entries.
    AscendC::GlobalTensor<half> gm_workspace;
    gm_workspace.SetGlobalBuffer( (__gm__ half*)d_AicAivWorkspace_Pointer[AscendC::GetBlockIdx()] );
    AscendC::GlobalTensor<half> gm_workspacePingpongBuf[WORKSPACE_NUM];
    for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
        gm_workspacePingpongBuf[i] = gm_workspace[ i * l0c_blockSize ];
    }

    // Parameters shared by all matmuls: none.

    // Per-matmul parameters, switched when the loop crosses a batch boundary.
    // Valid extents of the current A/B/C.
    uint32_t M = 0;
    uint32_t N = 0;
    uint32_t K = 0 ;
    // Leading dimensions (A/B honor row/column major via layout; C is row-major
    // only — TODO: column-major C and padding are not handled).
    uint32_t strideA = 0;
    uint32_t strideB = 0;
    uint32_t strideC = 0 ;
    // Result-matrix block counts along M and N.
    uint32_t mLoops = 0;
    uint32_t nLoops = 0;

    // How many result blocks this core has computed (selects the L0C buffer).
    uint32_t cur_aicore_block = 0;
    // Cumulative K segments this core has processed (selects the L1 buffers).
    uint32_t cur_aicore_k_split = 0;
    // Running total of result blocks of all batches before the current one.
    uint32_t cur_result_blocks_sum = 0;
    // Index of the matmul currently being computed. Initialized to -1, which
    // wraps to UINT32_MAX so the first `batchNum++` below yields 0.
    uint32_t batchNum = -1;
    // Total result blocks over all batches seen so far; grown inside the loop,
    // so the first iteration (loopSum == 0) immediately triggers batch switch.
    int64_t loopSum = 0;

    // Block distribution loop over all result blocks of all batches.
    for(uint32_t loopIdx = 0; loopIdx < loopSum+1; loopIdx++){
        
        // Crossed into the next batch: rebind tensors and switch parameters.
        if( loopIdx == loopSum ){
            batchNum++;
            if(batchNum < batchCount){
                cur_result_blocks_sum = loopSum;
                gm_tensorA.SetGlobalBuffer( (__gm__ half*)d_A_pointer[batchNum] );
                gm_tensorB.SetGlobalBuffer( (__gm__ half*)d_B_pointer[batchNum] );
                gm_tensorC.SetGlobalBuffer( (__gm__ half*)d_C_pointer[batchNum] );
                loopSum += CeilDiv<int64_t>(d_validM[batchNum], L1_M0) * CeilDiv<int64_t>(d_validN[batchNum], L1_N0);
                // Parameter switch for the new batch.
                M = d_validM[batchNum];
                N = d_validN[batchNum];
                K = d_validK[batchNum];
                strideA = zeroPaddingK;
                strideB = zeroPaddingN;
                strideC = zeroPaddingN;
                mLoops = CeilDiv<uint32_t>(M, L1_M0);
                nLoops = CeilDiv<uint32_t>(N, L1_N0);
            }else {
                // All batches done; loopIdx == loopSum ends the loop next check.
                continue;
            }
        }

        // Round-robin split of blocks across cores.
        if(loopIdx % AscendC::GetBlockNum() != AscendC::GetBlockIdx()){
            continue;
        }

        // Block index within the current batch's result matrix.
        uint32_t blockIdx = loopIdx - cur_result_blocks_sum;
        // Map the block index to (row, col) block coordinates (TODO: no swizzle yet).
        uint32_t blockMIdx = blockIdx / nLoops;
        uint32_t blockNIdx = blockIdx % nLoops;
        // Actual extents of the A row-block and B column-block (edges may be short).
        uint32_t gm_mActual = (blockMIdx == mLoops-1? M - blockMIdx * L1_M0 : L1_M0 );
        uint32_t gm_nActual = (blockNIdx == nLoops-1? N - blockNIdx * L1_N0 : L1_N0 );
        
        // Inter-task-block preload setup: locate the NEXT block this core will
        // own (current index + number of cores) and, if it exists, the batch it
        // falls into, by walking forward through per-batch block counts.
        uint32_t loopIdxPreLoad = loopIdx + AscendC::GetBlockNum();
        uint32_t curResultBlockSumPreload = cur_result_blocks_sum;
        uint32_t batchNumPreLoad = batchNum;
        uint8_t isLastBlock = 0;
        while(batchNumPreLoad < batchCount){
            uint32_t temp = CeilDiv<int64_t>(d_validM[batchNumPreLoad], L1_M0) * CeilDiv<int64_t>(d_validN[batchNumPreLoad], L1_N0);
            // curResultBlockSumPreload + temp == global index of the next batch's block 0.
            if(curResultBlockSumPreload + temp <= loopIdxPreLoad){    
                batchNumPreLoad++;
                curResultBlockSumPreload += temp;
            }else {
                // loopIdxPreLoad falls inside batch batchNumPreLoad: both
                // curResultBlockSumPreload and batchNumPreLoad are now correct.
                break;
            }
        }
        // Parameters of the preload target (all zero if there is none).
        uint32_t MPreLoad = 0;
        uint32_t NPreLoad = 0;
        uint32_t KPreLoad = 0;
        uint32_t strideAPreLoad = 0;
        uint32_t strideBPreLoad = 0;
        uint32_t mLoopsPreLoad = 0;
        uint32_t nLoopsPreLoad = 0;

        uint32_t blockIdxPreLoad = 0;
        uint32_t blockMIdxPreLoad = 0;
        uint32_t blockNIdxPreLoad = 0;
        uint32_t gm_mActualPreLoad = 0;
        uint32_t gm_nActualPreLoad = 0;
        if(batchNumPreLoad == batchCount){
            // This core has no further block: skip the inter-block preload.
            isLastBlock = 1;
        }else{
            isLastBlock = 0;
            gm_tensorAPreLoad.SetGlobalBuffer( (__gm__ half*)d_A_pointer[batchNumPreLoad] );
            gm_tensorBPreLoad.SetGlobalBuffer( (__gm__ half*)d_B_pointer[batchNumPreLoad] );
            MPreLoad = d_validM[batchNumPreLoad];
            NPreLoad = d_validN[batchNumPreLoad];
            KPreLoad = d_validK[batchNumPreLoad];
            strideAPreLoad = zeroPaddingK;
            strideBPreLoad = zeroPaddingN; 
            mLoopsPreLoad = CeilDiv<uint32_t>(MPreLoad, L1_M0);
            nLoopsPreLoad = CeilDiv<uint32_t>(NPreLoad, L1_N0);

            blockIdxPreLoad = loopIdxPreLoad - curResultBlockSumPreload;
            blockMIdxPreLoad = blockIdxPreLoad / nLoopsPreLoad;
            blockNIdxPreLoad = blockIdxPreLoad % nLoopsPreLoad;
            gm_mActualPreLoad = (blockMIdxPreLoad == mLoopsPreLoad-1? MPreLoad - blockMIdxPreLoad * L1_M0 : L1_M0);
            gm_nActualPreLoad = (blockNIdxPreLoad == nLoopsPreLoad-1? NPreLoad - blockNIdxPreLoad * L1_N0 : L1_N0);
        }
        

        // Compute this result block (accumulated into the selected L0C buffer).
        MatmulForResultBlock<L1_M0, L1_N0, L1_K0>(
            gm_tensorA[ blockMIdx * L1_M0 * strideA ],
            gm_tensorB[ blockNIdx * L1_N0 ], 
            l1a_pingpongBuf, 
            l1a_pingpongNum, 
            l1b_pingpongBuf, 
            l1b_pingpongNum, 
            l0a_tensor, 
            l0b_tensor, 
            l0c_pingpongBuf,
            l0c_pingpongNum,  
            layoutA, 
            layoutB, 
            strideA, 
            strideB, 
            strideC, 
            gm_mActual, 
            gm_nActual, 
            K, 
            cur_aicore_block, 
            cur_aicore_k_split, 
            // Inter-task-block preload.
            gm_tensorAPreLoad[ blockMIdxPreLoad * L1_M0 * strideAPreLoad ], 
            gm_tensorBPreLoad[ blockNIdxPreLoad * L1_N0 ], 
            strideAPreLoad, 
            strideBPreLoad, 
            gm_mActualPreLoad, 
            gm_nActualPreLoad,
            KPreLoad, 
            isLastBlock
        );

        // Compute done; the result block in L0C is readable by the FIX unit.
        AscendC::SetFlag<AscendC::HardEvent::M_FIX>((event_t)(cur_aicore_block%l0c_pingpongNum));

        // Consume the flag before moving the L0C result out to GM.
        AscendC::WaitFlag<AscendC::HardEvent::M_FIX>((event_t)(cur_aicore_block%l0c_pingpongNum));

        // The computed block's M/N are aligned to the Mmad base block.
        uint32_t l1_mActualRoundBaseblock = RoundUp<uint32_t>(gm_mActual, L1L0_BASEBLOCK_M0);
        uint32_t l1_nActualRoundBaseblock = RoundUp<uint32_t>(gm_nActual, L1L0_BASEBLOCK_N0);
        
        // The AI core cannot compare half values exactly, so the host precomputed
        // is_alpha1_beta0 == (alpha == 1.0 && beta == 0.0).
        if(is_alpha1_beta0){
            // alpha*A*B + 0*C == A*B: write straight to C, no AIV pass needed.
            L0C2Gm_Nz2Nd(
                gm_tensorC[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], // start of the target result block
                l0c_pingpongBuf[cur_aicore_block%l0c_pingpongNum], 
                gm_mActual, // move out only the valid data
                gm_nActual, 
                l1_mActualRoundBaseblock, 
                l1_nActualRoundBaseblock, 
                strideC
            );
        }else{
            // Wait until the AIV has drained this workspace slot, then stage the
            // raw product there for the AIV to apply alpha/beta.
            AscendC::CrossCoreWaitFlag(WORKSPACE_NUM + cur_aicore_block % WORKSPACE_NUM);
            L0C2Gm_Nz2Nd(
                gm_workspacePingpongBuf[cur_aicore_block % WORKSPACE_NUM], 
                l0c_pingpongBuf[cur_aicore_block%l0c_pingpongNum], 
                gm_mActual, 
                gm_nActual, 
                l1_mActualRoundBaseblock, 
                l1_nActualRoundBaseblock, 
                // Workspace slots are densely packed with leading dimension L1_N0.
                L1_N0
            );
            // Signal the AIV that this workspace slot is ready to read.
            AscendC::CrossCoreSetFlag<0x2, PIPE_FIX>(cur_aicore_block % WORKSPACE_NUM);
        }
        // L0C read finished; the buffer is writable again.
        AscendC::SetFlag<AscendC::HardEvent::FIX_M>((event_t)(cur_aicore_block % l0c_pingpongNum));
        
        // Advance the per-core counters.
        cur_aicore_block++;
        cur_aicore_k_split += CeilDiv<uint32_t>(K, L1_K0);
    }

    // Drain the primed L1 handshakes (one final Wait per Set).
    #pragma unroll
    for(uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(i));
    }
    #pragma unroll
    for(uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + i));
    }
    // Drain the primed L0C handshakes.
    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        AscendC::WaitFlag<AscendC::HardEvent::FIX_M>((event_t)(i));
    }
    // If the AIV was involved, absorb its final CrossCoreSet for each slot.
    if(!is_alpha1_beta0){
        for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
            AscendC::CrossCoreWaitFlag(WORKSPACE_NUM + i);
        }
    }
}
