/*
Copyright (c) 2025 Huawei Technologies Co., Ltd.
This file is a part of the CANN Open Software.
Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
Please refer to the License for details. You may not use this file except in compliance with the License.
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
See LICENSE in the root of the software repository for the full text of the License. 
*/

#pragma once
#include "./custom_type.h"
#include "./data_transfer.h"
#include "./kernel_const.h"
#include "./kernel_utils.h"
#include "./swizzle.h"
#include "kernel_operator.h"


/**
 * @brief: Compute one single-core result block: C block = A row block * B column block.
 *         GM->L1 copies are multi-buffered (ping-pong) against L1->L0 moves and Cube
 *         MMAD execution; cross-pipe ordering is enforced with Set/WaitFlag hardware
 *         events (A buffers use event ids [0, l1a_pingpongNum), B buffers use
 *         [l1a_pingpongNum, l1a_pingpongNum + l1b_pingpongNum)).
 * @param [in] L1_M0: M-dimension size of the A tile staged into L1 (template)
 * @param [in] L1_N0: N-dimension size of the B tile staged into L1 (template)
 * @param [in] L1_K0: K-dimension size of the A/B tiles staged into L1 (template)
 * @param [in] gm_tensorA: GM base address of this block's A row tile
 * @param [in] gm_tensorB: GM base address of this block's B column tile
 * @param [in] l1a_pingpongBuf: array of L1 ping-pong buffers for A tiles
 * @param [in] l1a_pingpongNum: number of entries in l1a_pingpongBuf
 * @param [in] l1b_pingpongBuf: array of L1 ping-pong buffers for B tiles
 * @param [in] l1b_pingpongNum: number of entries in l1b_pingpongBuf
 * @param [in] l0a_tensor: whole L0A space (split into 2 ping-pong halves below)
 * @param [in] l0b_tensor: whole L0B space (split into 2 ping-pong halves below)
 * @param [out] l0c_pingpongBuf: array of L0C buffers accumulating the A*B result
 * @param [in] l0c_pingpongNum: number of entries in l0c_pingpongBuf
 * @param [in] layoutA: layout of A (currently unused in this body; kept for interface parity)
 * @param [in] layoutB: layout of B (currently unused in this body; kept for interface parity)
 * @param [in] strideABlock: GM stride between row/column starts inside one A L1_M0*L1_K0 tile
 * @param [in] strideAK: GM element offset between consecutive K-slices of A
 * @param [in] strideBBlock: GM stride between row/column starts inside one B L1_K0*L1_N0 tile
 * @param [in] strideBK: GM element offset between consecutive K-slices of B
 * @param [in] gm_mActual: valid row count of the A tile / result block
 * @param [in] gm_nActual: valid column count of the B tile / result block
 * @param [in] K: full K extent of this block's A row / B column
 * @param [in] curAicoreBlock: index of the result block this core is computing
 * @param [in] curAicoreKSplit: K-segment offset of this core (rotates L1 buffer selection)
 * @param [in] gm_tensorAPreLoad: GM base address of the NEXT block's A tile (prefetch)
 * @param [in] gm_tensorBPreLoad: GM base address of the NEXT block's B tile (prefetch)
 * @param [in] strideABlockPreLoad: next block's strideABlock
 * @param [in] strideAKPreLoad: next block's strideAK
 * @param [in] strideBBlockPreLoad: next block's strideBBlock
 * @param [in] strideBKPreLoad: next block's strideBK
 * @param [in] gm_mActualPreLoad: next block's valid row count
 * @param [in] gm_nActualPreLoad: next block's valid column count
 * @param [in] KPreLoad: next block's full K extent
 * @param [in] isLastBlock: nonzero when this is the last block on this core (skip prefetch)
 */
template<
    uint32_t L1_M0,
    uint32_t L1_N0,
    uint32_t L1_K0
>
[aicore] inline __attribute__((always_inline)) void MatmulForResultBlock(
    AscendC::GlobalTensor<half> gm_tensorA, 
    AscendC::GlobalTensor<half> gm_tensorB, 
    AscendC::LocalTensor<half> l1a_pingpongBuf[], 
    uint32_t l1a_pingpongNum, 
    AscendC::LocalTensor<half> l1b_pingpongBuf[], 
    uint32_t l1b_pingpongNum, 
    AscendC::LocalTensor<uint8_t> l0a_tensor, 
    AscendC::LocalTensor<uint8_t> l0b_tensor, 
    AscendC::LocalTensor<float> l0c_pingpongBuf[], 
    uint32_t l0c_pingpongNum, 
    layoutType layoutA, 
    layoutType layoutB, 
    uint32_t strideABlock, 
    uint32_t strideAK, 
    uint32_t strideBBlock,
    uint32_t strideBK,  
    uint32_t gm_mActual, 
    uint32_t gm_nActual, 
    uint32_t K, 
    uint32_t curAicoreBlock, 
    uint32_t curAicoreKSplit, 
    AscendC::GlobalTensor<half> gm_tensorAPreLoad, 
    AscendC::GlobalTensor<half> gm_tensorBPreLoad, 
    uint32_t strideABlockPreLoad, 
    uint32_t strideAKPreLoad, 
    uint32_t strideBBlockPreLoad, 
    uint32_t strideBKPreLoad, 
    uint32_t gm_mActualPreLoad, 
    uint32_t gm_nActualPreLoad, 
    uint32_t KPreLoad, 
    uint8_t isLastBlock
){

    // Round valid extents up to hardware base-block multiples; number of K slices in L1.
    uint32_t l1_mActualRoundBaseblock = RoundUp<uint32_t>(gm_mActual, L1L0_BASEBLOCK_M0);
    uint32_t l1_nActualRoundBaseblock = RoundUp<uint32_t>(gm_nActual, L1L0_BASEBLOCK_N0);
    uint32_t l1_kTotalLoop = CeilDiv<uint32_t>(K, L1_K0);

    // Each core starts its K iteration at a different slice (BlockIdx % shuffleKData),
    // staggering GM reads across cores -- presumably to spread GM access; TODO confirm.
    uint32_t shuffleKData = 8;
    // ABBA mode (alternating B-then-A load order) is compile-time disabled here, so
    // every "% 2" check below takes the A-then-B branch.
    bool ifABBAOpen = false; 
    uint32_t l1_ABBABase = 0; 
    uint32_t l1_loopKLogic = 0;
    if(curAicoreBlock == 0)
    {
        // First block on this core: K-slice 0 was not prefetched by a previous
        // iteration, so load it synchronously before entering the main loop.
        uint32_t l1_loopK = (l1_loopKLogic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop; 
        uint32_t gm_kActual = (l1_loopK == l1_kTotalLoop-1 ? K - l1_loopK * L1_K0 : L1_K0 );
        uint32_t l1_kActualRoundBaseblock = RoundUp<uint32_t>(gm_kActual, L1L0_BASEBLOCK_K0);
        uint32_t l1_loopKPingpong = l1_loopKLogic + curAicoreKSplit;
        uint32_t l1_loopKABBA = (ifABBAOpen ? l1_ABBABase + l1_loopKLogic : 0); 
        
        if(l1_loopKABBA % 2 == 0){
            // Load A tile then B tile into their L1 ping-pong slots.
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKPingpong % l1a_pingpongNum));
            Gm2L1_Nd2Nz(
                l1a_pingpongBuf[l1_loopKPingpong % l1a_pingpongNum], 
                gm_tensorA[l1_loopK * strideAK], 
                gm_mActual, 
                gm_kActual, 
                strideABlock, 
                l1_mActualRoundBaseblock, 
                l1_kActualRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKPingpong % l1a_pingpongNum));

            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
            Gm2L1_Nd2Nz(
                l1b_pingpongBuf[l1_loopKPingpong % l1b_pingpongNum], 
                gm_tensorB[l1_loopK * strideBK], 
                gm_kActual, 
                gm_nActual, 
                strideBBlock, 
                l1_kActualRoundBaseblock, 
                l1_nActualRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
        
        }else{
            // ABBA order: B tile first, then A (dead while ifABBAOpen == false).
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
            Gm2L1_Nd2Nz(
                l1b_pingpongBuf[l1_loopKPingpong % l1b_pingpongNum], 
                gm_tensorB[l1_loopK * strideBK], 
                gm_kActual, 
                gm_nActual, 
                strideBBlock, 
                l1_kActualRoundBaseblock, 
                l1_nActualRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
        
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKPingpong % l1a_pingpongNum));
            Gm2L1_Nd2Nz(
                l1a_pingpongBuf[l1_loopKPingpong % l1a_pingpongNum], 
                gm_tensorA[l1_loopK * strideAK], 
                gm_mActual, 
                gm_kActual, 
                strideABlock, 
                l1_mActualRoundBaseblock, 
                l1_kActualRoundBaseblock
            );
            AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKPingpong % l1a_pingpongNum));
        }

        
    }

    // Main loop over K slices: consume the current (already loaded) slice while
    // prefetching the next one (same block, or the next result block on the last slice).
    for( ; l1_loopKLogic < l1_kTotalLoop; l1_loopKLogic++ ){
        
        uint32_t l1_loopK = (l1_loopKLogic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop;

        uint32_t gm_kActual = (l1_loopK == l1_kTotalLoop-1 ? K - l1_loopK * L1_K0 : L1_K0 );
        uint32_t l1_kActualRoundBaseblock = RoundUp<uint32_t>(gm_kActual, L1L0_BASEBLOCK_K0);
        uint32_t l1_loopKPingpong = l1_loopKLogic + curAicoreKSplit;
        
        if( l1_loopKLogic + 1 < l1_kTotalLoop ){
            // Prefetch the next K slice of this block into the next L1 ping-pong slot.
            uint32_t l1_loopKNextLogic = l1_loopKLogic + 1;
            uint32_t l1_loopKNext = (l1_loopKNextLogic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoop; 
            uint32_t gm_kActualPreLoad = (l1_loopKNext == l1_kTotalLoop-1 ? K - l1_loopKNext * L1_K0 : L1_K0);
            uint32_t l1_kActualRoundBaseblockPreLoad = RoundUp<uint32_t>(gm_kActualPreLoad, L1L0_BASEBLOCK_K0);
            uint32_t l1_loopKNextPingpong = l1_loopKNextLogic + curAicoreKSplit;
            uint32_t l1_loopKNextABBA = (ifABBAOpen ? l1_ABBABase + l1_loopKNextLogic : 0); 

            if(l1_loopKNextABBA % 2 == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1a_pingpongBuf[l1_loopKNextPingpong % l1a_pingpongNum], 
                    gm_tensorA[l1_loopKNext * strideAK], 
                    gm_mActual, 
                    gm_kActualPreLoad, 
                    strideABlock, 
                    l1_mActualRoundBaseblock, 
                    l1_kActualRoundBaseblockPreLoad
                );

                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));

                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1b_pingpongBuf[l1_loopKNextPingpong % l1b_pingpongNum], 
                    gm_tensorB[l1_loopKNext * strideBK], 
                    gm_kActualPreLoad, 
                    gm_nActual, 
                    strideBBlock, 
                    l1_kActualRoundBaseblockPreLoad, 
                    l1_nActualRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));

            }else{
                // ABBA order: B first, then A (dead while ifABBAOpen == false).
                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1b_pingpongBuf[l1_loopKNextPingpong % l1b_pingpongNum], 
                    gm_tensorB[l1_loopKNext * strideBK], 
                    gm_kActualPreLoad, 
                    gm_nActual, 
                    strideBBlock, 
                    l1_kActualRoundBaseblockPreLoad, 
                    l1_nActualRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));

                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1a_pingpongBuf[l1_loopKNextPingpong % l1a_pingpongNum], 
                    gm_tensorA[l1_loopKNext * strideAK], 
                    gm_mActual, 
                    gm_kActualPreLoad, 
                    strideABlock, 
                    l1_mActualRoundBaseblock, 
                    l1_kActualRoundBaseblockPreLoad
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));
            }

            
        }

        if( l1_loopKLogic == l1_kTotalLoop-1 && isLastBlock == 0){
            // Last K slice of this block and more blocks remain: prefetch K-slice 0
            // of the NEXT result block so its first iteration finds data in L1.
            uint32_t l1_loopKPreLoadLogic = 0;

            uint32_t l1_kTotalLoopPreLoad = CeilDiv<uint32_t>(KPreLoad, L1_K0);
            uint32_t l1_loopKPreLoad = (l1_loopKPreLoadLogic + AscendC::GetBlockIdx() % shuffleKData) % l1_kTotalLoopPreLoad;

            uint32_t gm_kActualPreLoad = (l1_loopKPreLoad == l1_kTotalLoopPreLoad - 1 ? KPreLoad - l1_loopKPreLoad * L1_K0 : L1_K0);
            uint32_t l1_mActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_mActualPreLoad, L1L0_BASEBLOCK_M0);
            uint32_t l1_nActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_nActualPreLoad, L1L0_BASEBLOCK_N0);
            uint32_t l1_kActualPreLoadRoundBaseblock = RoundUp<uint32_t>(gm_kActualPreLoad, L1L0_BASEBLOCK_K0);
            // Ping-pong index keeps counting past this block's slices so buffer
            // rotation stays continuous across block boundaries.
            uint32_t l1_loopKNextPingpong = l1_loopKPreLoadLogic + l1_kTotalLoop + curAicoreKSplit;
            uint32_t l1_loopKNextABBA = (ifABBAOpen ? (l1_ABBABase == 0 ? 0 : l1_ABBABase + l1_kTotalLoop + l1_loopKPreLoadLogic) : 0); 

            if(l1_loopKNextABBA % 2 == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1a_pingpongBuf[l1_loopKNextPingpong % l1a_pingpongNum], 
                    gm_tensorAPreLoad[l1_loopKPreLoad * strideAKPreLoad], 
                    gm_mActualPreLoad, 
                    gm_kActualPreLoad, 
                    strideABlockPreLoad, 
                    l1_mActualPreLoadRoundBaseblock, 
                    l1_kActualPreLoadRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));

                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1b_pingpongBuf[l1_loopKNextPingpong % l1b_pingpongNum], 
                    gm_tensorBPreLoad[l1_loopKPreLoad * strideBKPreLoad], 
                    gm_kActualPreLoad, 
                    gm_nActualPreLoad, 
                    strideBBlockPreLoad, 
                    l1_kActualPreLoadRoundBaseblock, 
                    l1_nActualPreLoadRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));

            }else{
                // ABBA order: B first, then A (dead while ifABBAOpen == false).
                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1b_pingpongBuf[l1_loopKNextPingpong % l1b_pingpongNum], 
                    gm_tensorBPreLoad[l1_loopKPreLoad * strideBKPreLoad], 
                    gm_kActualPreLoad, 
                    gm_nActualPreLoad, 
                    strideBBlockPreLoad, 
                    l1_kActualPreLoadRoundBaseblock, 
                    l1_nActualPreLoadRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKNextPingpong % l1b_pingpongNum));

                AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));
                Gm2L1_Nd2Nz(
                    l1a_pingpongBuf[l1_loopKNextPingpong % l1a_pingpongNum], 
                    gm_tensorAPreLoad[l1_loopKPreLoad * strideAKPreLoad], 
                    gm_mActualPreLoad, 
                    gm_kActualPreLoad, 
                    strideABlockPreLoad, 
                    l1_mActualPreLoadRoundBaseblock, 
                    l1_kActualPreLoadRoundBaseblock
                );
                AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKNextPingpong % l1a_pingpongNum));

            }
            
        } 

        // Split L0A/L0B into two ping-pong halves for the inner K loop.
        static constexpr uint32_t l0ab_pingpongNum = 2;
        uint32_t l0ab_pingpongBufSize = L0AB_BYTES / l0ab_pingpongNum / sizeof(half);

        AscendC::LocalTensor<half> l0a_pingpongBuf[l0ab_pingpongNum];
        AscendC::LocalTensor<half> l0b_pingpongBuf[l0ab_pingpongNum];

        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            l0a_pingpongBuf[i] = l0a_tensor[i * l0ab_pingpongBufSize * sizeof(half)].template ReinterpretCast<half>();
            l0b_pingpongBuf[i] = l0b_tensor[i * l0ab_pingpongBufSize * sizeof(half)].template ReinterpretCast<half>();
        }

        // Prime the M->MTE1 events so the first WaitFlag in the loop doesn't deadlock.
        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            AscendC::SetFlag<AscendC::HardEvent::M_MTE1>((event_t)i);
        }

        // Choose the largest L0 K step (multiple of the base block) that fits both
        // L0A (l0_M0 rows) and L0B (l0_N0 columns) in one ping-pong half.
        uint32_t l0_M0 = l1_mActualRoundBaseblock;
        uint32_t l0_N0 = l1_nActualRoundBaseblock;
        uint32_t l0_K0_l1a = RoundDown<uint32_t>(l0ab_pingpongBufSize / l0_M0, L1L0_BASEBLOCK_K0);
        uint32_t l0_K0_l1b = RoundDown<uint32_t>(l0ab_pingpongBufSize / l0_N0, L1L0_BASEBLOCK_K0);
        uint32_t l0_K0 = (l0_K0_l1a < l0_K0_l1b ? l0_K0_l1a : l0_K0_l1b);
        uint32_t l0_kTotalLoop = CeilDiv<uint32_t>(l1_kActualRoundBaseblock, l0_K0);

        for(uint32_t l0_loopK = 0; l0_loopK < l0_kTotalLoop; l0_loopK++){
            uint32_t l0_K0Actual = ( l0_loopK == l0_kTotalLoop-1 ? l1_kActualRoundBaseblock-l0_loopK*l0_K0 : l0_K0 );
            AscendC::WaitFlag<AscendC::HardEvent::M_MTE1>((event_t)(l0_loopK % l0ab_pingpongNum));

            // First L0 step of a slice: wait until the GM->L1 copy of A finished.
            if(l0_loopK == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1_loopKPingpong % l1a_pingpongNum));
            }
            L12L0_Nz2Zz(
                l0a_pingpongBuf[l0_loopK % l0ab_pingpongNum], 
                l1a_pingpongBuf[l1_loopKPingpong % l1a_pingpongNum][l0_M0 * l0_K0 * l0_loopK], 
                l0_M0, 
                l0_K0Actual, 
                l0_M0, 
                L1L0_BASEBLOCK_M0, 
                L1L0_BASEBLOCK_K0
            );
            // Last L0 step: the L1 A buffer may be refilled by the next GM->L1 copy.
            if(l0_loopK == l0_kTotalLoop - 1){
                AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1_loopKPingpong % l1a_pingpongNum));
            }
            AscendC::SetFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0_loopK % l0ab_pingpongNum));

            
            if(l0_loopK == 0){
                AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE1>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
            }
            L12L0_Nz2Zn(
                l0b_pingpongBuf[l0_loopK % l0ab_pingpongNum], 
                l1b_pingpongBuf[l1_loopKPingpong % l1b_pingpongNum][l0_K0 * l0_loopK * L1L0_BASEBLOCK_N0], 
                l0_K0Actual, 
                l0_N0, 
                l1_kActualRoundBaseblock, 
                L1L0_BASEBLOCK_K0, 
                L1L0_BASEBLOCK_N0
            );
            if(l0_loopK == l0_kTotalLoop - 1){
                AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + l1_loopKPingpong % l1b_pingpongNum));
            }
            AscendC::SetFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0ab_pingpongNum + l0_loopK % l0ab_pingpongNum));


            AscendC::WaitFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0_loopK % l0ab_pingpongNum));
            AscendC::WaitFlag<AscendC::HardEvent::MTE1_M>((event_t)(l0ab_pingpongNum + l0_loopK % l0ab_pingpongNum));

            // Very first MMAD of the whole block: L0C must have been drained by FIX.
            bool isFirstMmad = (l0_loopK == 0 && l1_loopKLogic == 0);

            if(isFirstMmad)
                AscendC::WaitFlag<AscendC::HardEvent::FIX_M>((event_t)(curAicoreBlock % l0c_pingpongNum));
            
            // Accumulate into L0C; isFirstMmad==true initializes instead of accumulating.
            AscendC::Mmad(
                l0c_pingpongBuf[curAicoreBlock % l0c_pingpongNum], 
                l0a_pingpongBuf[l0_loopK % l0ab_pingpongNum],
                l0b_pingpongBuf[l0_loopK % l0ab_pingpongNum],
                AscendC::MmadParams(
                    l0_M0, 
                    l0_N0, 
                    l0_K0Actual, 
                    0, 
                    false, 
                    isFirstMmad
                )
            );

            // NOTE(review): barrier only for small tiles (< 10 base blocks) --
            // presumably serializes back-to-back MMADs that are too short to
            // overlap safely; confirm against hardware erratum/tuning notes.
            if(l0_M0 / L1L0_BASEBLOCK_M0 * l0_N0 / L1L0_BASEBLOCK_N0 < 10){
                AscendC::PipeBarrier<PIPE_M>();
            }
            

            AscendC::SetFlag<AscendC::HardEvent::M_MTE1>((event_t)(l0_loopK % l0ab_pingpongNum));
        }

        // Drain the M->MTE1 events primed above so flag state is balanced.
        #pragma unroll
        for(uint32_t i = 0; i < l0ab_pingpongNum; i++){
            AscendC::WaitFlag<AscendC::HardEvent::M_MTE1>((event_t)i);
        }

    }

}

/**
 * @brief: 批量矩阵乘，完成分核和L0C缓存分配，然后调用单核计算
 * @param [in] L1_M0: 搬入L1计算的A矩阵M维度大小
 * @param [in] L1_N0: 搬入L1计算的B矩阵N维度大小
 * @param [in] L1_K0: 搬入L1计算的AB矩阵K维度大小
 * @param [in] WORKSPACE_NUM: 显存开辟的workspace数
 * @param [in] layoutA: A矩阵排布格式
 * @param [in] layoutB: B矩阵排布格式
 * @param [in] zeroPaddingM: A、C矩阵零填充后的M维度
 * @param [in] zeroPaddingN: B、C矩阵零填充后的N维度
 * @param [in] zeroPaddingK: A、B矩阵零填充后的K维度
 * @param [in] batchCount: 批量矩阵乘的batch数
 * @param [in] d_validM: 每批矩阵乘的A、C矩阵M维度有效长度数组
 * @param [in] d_validN: 每批矩阵乘的B、C矩阵N维度有效长度数组
 * @param [in] d_validK: 每批矩阵乘的A、B矩阵K维度有效长度数组
 * @param [in] alpha: alpha*AB+beta*C
 * @param [in] d_APointer: 每批矩阵乘的零填充A矩阵首地址数组
 * @param [in] d_BPointer: 每批矩阵乘的零填充B矩阵首地址数组
 * @param [in] beta:  alpha*AB+beta*C
 * @param [out] d_CPointer: 每批矩阵乘的零填充C矩阵首地址数组
 * @param [in] d_isAPadding: A矩阵是否进行padding
 * @param [in] d_isBPadding: B矩阵是否进行padding
 * @param [in] d_APointerPadding: padding后A矩阵的首地址
 * @param [in] d_BPointerPadding: padding后B矩阵的首地址
 * @param [in] paddingDirA: A矩阵padding方式（0/1/2，决定A矩阵块stride的计算方式）
 * @param [in] paddingDirB: B矩阵padding方式（0/1/2，决定B矩阵块stride的计算方式）
 * @param [out] d_AicAivWorkspacePointer: Aic Aiv 同步的GM空间首地址数组
 * @param [in] isAlpha1Beta0: 是否有 alpha==1.0 && beta==0.0
 */
template<
    uint32_t L1_M0,
    uint32_t L1_N0,
    uint32_t L1_K0,
    uint32_t WORKSPACE_NUM>
[aicore] inline __attribute__((always_inline)) void BatchMatmul(
    layoutType layoutA, 
    layoutType layoutB, 
    uint32_t zeroPaddingM, 
    uint32_t zeroPaddingN,
    uint32_t zeroPaddingK, 
    uint32_t batchCount,
    __gm__ uint32_t*  d_validM, 
    __gm__ uint32_t*  d_validN, 
    __gm__ uint32_t*  d_validK, 
    half alpha, 
    __gm__ half** d_APointer, 
    __gm__ half** d_BPointer, 
    half beta, 
    __gm__ half**  d_CPointer, 
    __gm__ uint8_t* d_isAPadding, 
    __gm__ uint8_t* d_isBPadding, 
    __gm__ half** d_APointerPadding, 
    __gm__ half** d_BPointerPadding, 
    uint8_t paddingDirA, 
    uint8_t paddingDirB, 
    __gm__ half** d_AicAivWorkspacePointer, 
    uint8_t isAlpha1Beta0 
){

    AscendC::TBuf<AscendC::TPosition::A1> l1_buf;
    AscendC::TPipe l1_pipe;
    l1_pipe.InitBuffer(l1_buf, L1_BYTES);
    AscendC::LocalTensor<uint8_t> l1_tensor = l1_buf.Get<uint8_t>();
    l1_pipe.Destroy();

    AscendC::TBuf<AscendC::TPosition::A2> l0a_buf;
    AscendC::TPipe l0a_pipe;
    l0a_pipe.InitBuffer(l0a_buf, L0AB_BYTES);
    AscendC::LocalTensor<uint8_t> l0a_tensor = l0a_buf.Get<uint8_t>();
    l0a_pipe.Destroy();

    AscendC::TBuf<AscendC::TPosition::B2> l0b_buf;
    AscendC::TPipe l0b_pipe;
    l0b_pipe.InitBuffer(l0b_buf, L0AB_BYTES);
    AscendC::LocalTensor<uint8_t> l0b_tensor = l0b_buf.Get<uint8_t>();
    l0b_pipe.Destroy();

    AscendC::TBuf<AscendC::TPosition::CO1> l0c_buf;
    AscendC::TPipe l0c_pipe;
    l0c_pipe.InitBuffer(l0c_buf, L0C_BYTES);
    AscendC::LocalTensor<uint8_t> l0c_tensor = l0c_buf.Get<uint8_t>();
    l0c_pipe.Destroy();
 
    static constexpr uint32_t l1_blockASize = L1_M0 * L1_K0; 
    static constexpr uint32_t l1_blockBSize = L1_K0 * L1_N0; 
    static constexpr uint32_t l0c_blockSize = L1_M0 * L1_N0; 
    static constexpr uint32_t l1a_pingpongNum = (L1_BYTES / 2 / sizeof(half) / l1_blockASize) < 4 ? (L1_BYTES / 2 / sizeof(half) / l1_blockASize): 4 ;
    static constexpr uint32_t l1b_pingpongNum = (L1_BYTES / 2 / sizeof(half) / l1_blockBSize) < 4 ? (L1_BYTES / 2 / sizeof(half) / l1_blockBSize): 4 ;
    static constexpr uint32_t l0c_pingpongNum = L0C_BYTES / (l0c_blockSize * sizeof(float));
    AscendC::LocalTensor<half> l1a_pingpongBuf[l1a_pingpongNum];
    AscendC::LocalTensor<half> l1b_pingpongBuf[l1b_pingpongNum];
    AscendC::LocalTensor<float> l0c_pingpongBuf[l0c_pingpongNum]; 
    #pragma unroll
    for( uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        l1a_pingpongBuf[i] = l1_tensor[ i * l1_blockASize * sizeof(half) ].template ReinterpretCast<half>();
    }

    #pragma unroll
    for( uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        l1b_pingpongBuf[i] = l1_tensor[ (l1a_pingpongNum * l1_blockASize * sizeof(half)) + i * l1_blockBSize * sizeof(half) ].template ReinterpretCast<half>();
    }

    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        l0c_pingpongBuf[i] = l0c_tensor[ i * l0c_blockSize * sizeof(float) ].template ReinterpretCast<float>();
    }

    #pragma unroll
    for(uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(i));
    }
    #pragma unroll
    for(uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        AscendC::SetFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + i));
    }

    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        AscendC::SetFlag<AscendC::HardEvent::FIX_M>((event_t)(i));
    }

    AscendC::GlobalTensor<half> gm_tensorA;
    AscendC::GlobalTensor<half> gm_tensorB;
    AscendC::GlobalTensor<half> gm_tensorC;
    AscendC::GlobalTensor<half> gm_tensorAPreLoad;
    AscendC::GlobalTensor<half> gm_tensorBPreLoad;
    AscendC::GlobalTensor<half> gm_workspace;

    gm_workspace.SetGlobalBuffer( (__gm__ half*)d_AicAivWorkspacePointer[AscendC::GetBlockIdx()] );
    AscendC::GlobalTensor<half> gm_workspacePingpongBuf[WORKSPACE_NUM];
    for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
        gm_workspacePingpongBuf[i] = gm_workspace[ i * l0c_blockSize ];
    }

    uint32_t M = 0;
    uint32_t N = 0;
    uint32_t K = 0 ;

    uint32_t fullM = 0; 
    uint32_t fullN = 0; 
    uint32_t fullK = 0 ;

    uint32_t strideABlock = 0; 
    uint32_t strideAK = 0; 
    uint32_t strideBBlock = 0; 
    uint32_t strideBK = 0; 
    uint32_t strideC = 0; 

    uint32_t mLoops = 0;
    uint32_t nLoops = 0;
    uint32_t kLoops = 0; 

    uint32_t curAicoreBlock = 0;

    uint32_t curAicoreKSplit = 0;

    uint32_t curResultBlocksSum = 0;

    uint32_t batchNum = -1;
    uint32_t loopSum = 0;


    for(uint32_t loopIdx = 0; loopIdx < loopSum+1; loopIdx++){
        
        if( loopIdx == loopSum ){
            batchNum++;
            if(batchNum < batchCount){
                curResultBlocksSum = loopSum;
                if(d_isAPadding[batchNum] == 0){
                    gm_tensorA.SetGlobalBuffer( (__gm__ half*)d_APointer[batchNum] );
                }else{
                    gm_tensorA.SetGlobalBuffer( (__gm__ half*)d_APointerPadding[batchNum] );
                }
                if(d_isBPadding[batchNum] == 0){
                    gm_tensorB.SetGlobalBuffer( (__gm__ half*)d_BPointer[batchNum] );
                }else{
                    gm_tensorB.SetGlobalBuffer( (__gm__ half*)d_BPointerPadding[batchNum] );
                }
                gm_tensorC.SetGlobalBuffer( (__gm__ half*)d_CPointer[batchNum] );

                loopSum += CeilDiv<uint32_t>(d_validM[batchNum], L1_M0) * CeilDiv<uint32_t>(d_validN[batchNum], L1_N0);

                M = d_validM[batchNum];
                N = d_validN[batchNum];
                K = d_validK[batchNum];

                fullM = zeroPaddingM; 
                fullN = zeroPaddingN; 
                fullK = zeroPaddingK; 

                mLoops = CeilDiv<uint32_t>(M, L1_M0);
                nLoops = CeilDiv<uint32_t>(N, L1_N0);
                kLoops = CeilDiv<uint32_t>(K, L1_K0);

                if(d_isAPadding[batchNum] == 0){
                    strideABlock = fullK; 
                    strideAK = L1_K0; 
                }else{
                    if(paddingDirA == 0){
                        strideABlock = RoundUp<uint32_t>(fullK, L1_K0); 
                        strideAK = L1_K0; 
                    }else if(paddingDirA == 1){
                        strideABlock = L1_K0; 
                        strideAK = l1_blockASize; 
                    }else if(paddingDirA == 2){
                        strideABlock = L1_K0; 
                        strideAK = mLoops * l1_blockASize; 
                    }
                }
                if(d_isBPadding[batchNum] == 0){
                    strideBBlock = fullN; 
                    strideBK = L1_K0 * fullN; 
                }else{
                    if(paddingDirB == 0){
                        strideBBlock = RoundUp<uint32_t>(fullN, L1_N0); 
                        strideBK = L1_K0 * RoundUp<uint32_t>(fullN, L1_N0);
                    }else if(paddingDirB == 1){
                        strideBBlock = L1_N0; 
                        strideBK = nLoops * l1_blockBSize; 
                    }else if(paddingDirB == 2){
                        strideBBlock = L1_N0; 
                        strideBK = l1_blockBSize; 
                    }
                }
                strideC = fullN;
            }else {
                continue;
            }
        }

        if(loopIdx % AscendC::GetBlockNum() != AscendC::GetBlockIdx()){
            continue;
        }

        uint32_t blockIdx = loopIdx - curResultBlocksSum;
        uint32_t blockMIdx = blockIdx / nLoops;
        uint32_t blockNIdx = blockIdx % nLoops;
        BlockSwizzle(
            blockMIdx, 
            blockNIdx, 
            blockIdx, 
            mLoops, 
            nLoops
        ); 
        uint32_t gm_mActual = (blockMIdx == mLoops-1? M - blockMIdx * L1_M0 : L1_M0 );
        uint32_t gm_nActual = (blockNIdx == nLoops-1? N - blockNIdx * L1_N0 : L1_N0 );
        
        uint32_t loopIdxPreLoad = loopIdx + AscendC::GetBlockNum();
        uint32_t curResultBlockSumPreload = curResultBlocksSum; 
        uint32_t batchNumPreLoad = batchNum; 
        uint8_t isLastBlock = 0; 
        while(batchNumPreLoad < batchCount){
            uint32_t temp = CeilDiv<uint32_t>(d_validM[batchNumPreLoad], L1_M0) * CeilDiv<uint32_t>(d_validN[batchNumPreLoad], L1_N0);
            if(curResultBlockSumPreload + temp <= loopIdxPreLoad){    
                batchNumPreLoad++;
                curResultBlockSumPreload += temp;
            }else {
                break;
            }
        }
        uint32_t MPreLoad = 0;
        uint32_t NPreLoad = 0;
        uint32_t KPreLoad = 0;
        uint32_t fullMPreLoad = 0; 
        uint32_t fullNPreLoad = 0; 
        uint32_t fullKPreLoad = 0; 
        uint32_t strideABlockPreLoad = 0; 
        uint32_t strideAKPreLoad = 0; 
        uint32_t strideBBlockPreLoad = 0; 
        uint32_t strideBKPreLoad = 0; 
        uint32_t mLoopsPreLoad = 0;
        uint32_t nLoopsPreLoad = 0;
        uint32_t kLoopsPreLoad = 0; 
        uint32_t blockIdxPreLoad = 0;
        uint32_t blockMIdxPreLoad = 0;
        uint32_t blockNIdxPreLoad = 0;
        uint32_t gm_mActualPreLoad = 0;
        uint32_t gm_nActualPreLoad = 0;
        if(batchNumPreLoad == batchCount){
            isLastBlock = 1;
        }else{
            isLastBlock = 0;
            if(d_isAPadding[batchNumPreLoad] == 0){
                gm_tensorAPreLoad.SetGlobalBuffer( (__gm__ half*)d_APointer[batchNumPreLoad] );
            }else{
                gm_tensorAPreLoad.SetGlobalBuffer( (__gm__ half*)d_APointerPadding[batchNumPreLoad] );
            }
            if(d_isBPadding[batchNumPreLoad] == 0){
                gm_tensorBPreLoad.SetGlobalBuffer( (__gm__ half*)d_BPointer[batchNumPreLoad] );
            }else{
                gm_tensorBPreLoad.SetGlobalBuffer( (__gm__ half*)d_BPointerPadding[batchNumPreLoad] );
            }
            // Valid (unpadded) M/N/K extents of the batch that will be computed next.
            MPreLoad = d_validM[batchNumPreLoad];
            NPreLoad = d_validN[batchNumPreLoad];
            KPreLoad = d_validK[batchNumPreLoad];

            // Full (zero-padded) extents for the preload batch.
            // NOTE(review): zeroPaddingM/N/K are set earlier in this function (outside
            // this view) — presumably the padded sizes shared by all batches; confirm.
            fullMPreLoad = zeroPaddingM; 
            fullNPreLoad = zeroPaddingN; 
            fullKPreLoad = zeroPaddingK; 

            // Number of L1 tiles along each dimension for the preload batch.
            mLoopsPreLoad = CeilDiv<uint32_t>(MPreLoad, L1_M0);
            nLoopsPreLoad = CeilDiv<uint32_t>(NPreLoad, L1_N0);
            kLoopsPreLoad = CeilDiv<uint32_t>(KPreLoad, L1_K0); 

            // Select GM strides for the preload A tiles depending on whether A was
            // padded and on the padding layout (paddingDirA 0/1/2 select different
            // padded memory layouts — produced elsewhere; exact layout not visible here).
            if(d_isAPadding[batchNumPreLoad] == 0){
                // Unpadded A: rows are contiguous with the full K extent.
                strideABlockPreLoad = fullKPreLoad; 
                strideAKPreLoad = L1_K0; 
            }else{
                if(paddingDirA == 0){
                    // K dimension padded up to a multiple of L1_K0.
                    strideABlockPreLoad = RoundUp<uint32_t>(fullKPreLoad, L1_K0); 
                    strideAKPreLoad = L1_K0; 
                }else if(paddingDirA == 1){
                    // Blocked layout: consecutive K tiles are l1_blockASize apart.
                    strideABlockPreLoad = L1_K0; 
                    strideAKPreLoad = l1_blockASize; 
                }else if(paddingDirA == 2){
                    // Blocked layout with K tiles strided over all M tiles.
                    strideABlockPreLoad = L1_K0; 
                    strideAKPreLoad = mLoopsPreLoad * l1_blockASize; 
                }
            }
            // Same stride selection for the preload B tiles (B is K x N, row stride = N).
            if(d_isBPadding[batchNumPreLoad] == 0){
                strideBBlockPreLoad = fullNPreLoad; 
                strideBKPreLoad = L1_K0 * fullNPreLoad; 
            }else{
                if(paddingDirB == 0){
                    // N dimension padded up to a multiple of L1_N0.
                    strideBBlockPreLoad = RoundUp<uint32_t>(fullNPreLoad, L1_N0); 
                    strideBKPreLoad = L1_K0 * RoundUp<uint32_t>(fullNPreLoad, L1_N0); 
                }else if(paddingDirB == 1){
                    // Blocked layout: consecutive K tiles are nLoops blocks apart.
                    strideBBlockPreLoad = L1_N0; 
                    strideBKPreLoad = nLoopsPreLoad * l1_blockBSize; 
                }else if(paddingDirB == 2){
                    // Blocked layout: consecutive K tiles are one block apart.
                    strideBBlockPreLoad = L1_N0; 
                    strideBKPreLoad = l1_blockBSize; 
                }
            }

            // Map the global preload loop index to a (row, col) result-block index
            // within the preload batch, then remap via BlockSwizzle (presumably to
            // improve cache/L2 reuse across cores — see swizzle.h).
            blockIdxPreLoad = loopIdxPreLoad - curResultBlockSumPreload;
            blockMIdxPreLoad = blockIdxPreLoad / nLoopsPreLoad;
            blockNIdxPreLoad = blockIdxPreLoad % nLoopsPreLoad;
            BlockSwizzle(
                blockMIdxPreLoad, 
                blockNIdxPreLoad, 
                blockIdxPreLoad, 
                mLoopsPreLoad, 
                nLoopsPreLoad
            ); 
            // Actual (possibly tail) M/N extents of the preload result block.
            gm_mActualPreLoad = (blockMIdxPreLoad == mLoopsPreLoad-1? MPreLoad - blockMIdxPreLoad * L1_M0 : L1_M0);
            gm_nActualPreLoad = (blockNIdxPreLoad == nLoopsPreLoad-1? NPreLoad - blockNIdxPreLoad * L1_N0 : L1_N0);
        }
        
        // GM element offsets of the A-row / B-column tiles for the current block
        // and for the preload (next) block.
        uint32_t gm_offsetA = 0; 
        uint32_t gm_offsetB = 0; 
        uint32_t gm_offsetAPreLoad = 0; 
        uint32_t gm_offsetBPreLoad = 0; 

        // Offset of the current A row-block; layout depends on the padding mode
        // (mirrors the stride selection above).
        if(d_isAPadding[batchNum] == 0){
            gm_offsetA = blockMIdx * L1_M0 * fullK; 
        }else{
            if(paddingDirA == 0){
                gm_offsetA = blockMIdx * L1_M0 * RoundUp<uint32_t>(fullK, L1_K0); 
            }else if(paddingDirA == 1){
                // Each M tile owns kLoops consecutive A blocks.
                gm_offsetA = blockMIdx * kLoops * l1_blockASize; 
            }else if(paddingDirA == 2){
                // M tiles are interleaved: one block per M tile, K-major outside.
                gm_offsetA = blockMIdx * l1_blockASize; 
            }
        }

        // Offset of the current B column-block.
        if(d_isBPadding[batchNum] == 0){
            gm_offsetB = blockNIdx * L1_N0; 
        }else{
            if(paddingDirB == 0){
                gm_offsetB = blockNIdx * L1_N0; 
            }else if(paddingDirB == 1){
                // Each N tile owns kLoops consecutive B blocks... NOTE(review):
                // offset here is one block per N tile while strideBK spans nLoops
                // blocks — looks intentional (K-major blocking), but confirm
                // against the padding producer.
                gm_offsetB = blockNIdx * l1_blockBSize; 
            }else if(paddingDirB == 2){
                gm_offsetB = blockNIdx * kLoops * l1_blockBSize; 
            }
        }

        // Same offset computation for the preload block (uses the preload batch's
        // dims/loops computed above).
        if(d_isAPadding[batchNumPreLoad] == 0){
            gm_offsetAPreLoad = blockMIdxPreLoad * L1_M0 * fullKPreLoad; 
        }else{
            if(paddingDirA == 0){
                gm_offsetAPreLoad = blockMIdxPreLoad * L1_M0 * RoundUp<uint32_t>(fullKPreLoad, L1_K0); 
            }else if(paddingDirA == 1){
                gm_offsetAPreLoad = blockMIdxPreLoad * kLoopsPreLoad * l1_blockASize; 
            }else if(paddingDirA == 2){
                gm_offsetAPreLoad = blockMIdxPreLoad * l1_blockASize; 
            }
        }

        if(d_isBPadding[batchNumPreLoad] == 0){
            gm_offsetBPreLoad = blockNIdxPreLoad * L1_N0; 
        }else{
            if(paddingDirB == 0){
                gm_offsetBPreLoad = blockNIdxPreLoad * L1_N0; 
            }else if(paddingDirB == 1){
                gm_offsetBPreLoad = blockNIdxPreLoad * l1_blockBSize; 
            }else if(paddingDirB == 2){
                gm_offsetBPreLoad = blockNIdxPreLoad * kLoopsPreLoad * l1_blockBSize; 
            }
        }


        // Compute one C result block = A row-block * B column-block, accumulating
        // over K inside. The *PreLoad addresses/strides let the callee prefetch
        // next block's A/B into L1 while computing the current block (double
        // buffering via the l1a/l1b pingpong arrays).
        MatmulForResultBlock<L1_M0, L1_N0, L1_K0>(
            gm_tensorA[ gm_offsetA ], 
            gm_tensorB[ gm_offsetB ], 
            l1a_pingpongBuf, 
            l1a_pingpongNum, 
            l1b_pingpongBuf, 
            l1b_pingpongNum, 
            l0a_tensor, 
            l0b_tensor, 
            l0c_pingpongBuf,
            l0c_pingpongNum,  
            layoutA, 
            layoutB, 
            strideABlock, 
            strideAK, 
            strideBBlock, 
            strideBK, 
            gm_mActual, 
            gm_nActual, 
            K, 
            curAicoreBlock, 
            curAicoreKSplit, 
            gm_tensorAPreLoad[ gm_offsetAPreLoad ], 
            gm_tensorBPreLoad[ gm_offsetBPreLoad ], 
            strideABlockPreLoad, 
            strideAKPreLoad, 
            strideBBlockPreLoad, 
            strideBKPreLoad, 
            gm_mActualPreLoad, 
            gm_nActualPreLoad,
            KPreLoad, 
            isLastBlock
        );

        // Set-then-wait on the same M_FIX event: serializes the cube (M) pipe
        // against the fixpipe so all MMAD writes to this L0C pingpong buffer are
        // complete before the copy-out below reads it.
        AscendC::SetFlag<AscendC::HardEvent::M_FIX>((event_t)(curAicoreBlock%l0c_pingpongNum));

        AscendC::WaitFlag<AscendC::HardEvent::M_FIX>((event_t)(curAicoreBlock%l0c_pingpongNum));

        // L0C tile extents rounded up to the base-block granularity used by the
        // NZ->ND copy.
        uint32_t l1_mActualRoundBaseblock = RoundUp<uint32_t>(gm_mActual, L1L0_BASEBLOCK_M0);
        uint32_t l1_nActualRoundBaseblock = RoundUp<uint32_t>(gm_nActual, L1L0_BASEBLOCK_N0);
        if(isAlpha1Beta0){
            // Fast path (alpha==1, beta==0): C = A*B, write the result block
            // straight to GM C in ND layout.
            L0C2Gm_Nz2Nd(
                gm_tensorC[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], 
                l0c_pingpongBuf[curAicoreBlock%l0c_pingpongNum], 
                gm_mActual, 
                gm_nActual, 
                l1_mActualRoundBaseblock, 
                l1_nActualRoundBaseblock, 
                strideC
            );
        }else{
            // General path: stage the raw A*B block in a GM workspace slot for a
            // peer core (presumably the vector core applying alpha/beta — confirm).
            // Wait until the consumer has released this workspace slot (flag ids
            // WORKSPACE_NUM..2*WORKSPACE_NUM-1 are the "slot free" signals), ...
            AscendC::CrossCoreWaitFlag(WORKSPACE_NUM + curAicoreBlock % WORKSPACE_NUM);
            L0C2Gm_Nz2Nd(
                gm_workspacePingpongBuf[curAicoreBlock % WORKSPACE_NUM], 
                l0c_pingpongBuf[curAicoreBlock%l0c_pingpongNum], 
                gm_mActual, 
                gm_nActual, 
                l1_mActualRoundBaseblock, 
                l1_nActualRoundBaseblock, 
                L1_N0
            );
            // ...then signal "slot ready" once the fixpipe copy has been issued.
            AscendC::CrossCoreSetFlag<0x2, PIPE_FIX>(curAicoreBlock % WORKSPACE_NUM);
        }
        // Release this L0C pingpong buffer back to the cube pipe for reuse.
        AscendC::SetFlag<AscendC::HardEvent::FIX_M>((event_t)(curAicoreBlock % l0c_pingpongNum));
        
        // Advance per-core bookkeeping: one more result block done; K-split
        // counter advances by the number of K tiles consumed by this block.
        curAicoreBlock++;
        curAicoreKSplit += CeilDiv<uint32_t>(K, L1_K0);
    }

    // Epilogue: drain every outstanding pipeline event so no flag leaks past this
    // kernel. L1 A buffers use event ids [0, l1a_pingpongNum), ...
    #pragma unroll
    for(uint32_t i = 0; i < l1a_pingpongNum; i++ ){
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(i));
    }
    // ... L1 B buffers use ids [l1a_pingpongNum, l1a_pingpongNum + l1b_pingpongNum).
    #pragma unroll
    for(uint32_t i = 0; i < l1b_pingpongNum; i++ ){
        AscendC::WaitFlag<AscendC::HardEvent::MTE1_MTE2>((event_t)(l1a_pingpongNum + i));
    }

    // Consume the final FIX_M release flags for all L0C pingpong buffers.
    #pragma unroll
    for(uint32_t i = 0; i < l0c_pingpongNum; i++){
        AscendC::WaitFlag<AscendC::HardEvent::FIX_M>((event_t)(i));
    }
    if(!isAlpha1Beta0){
        // Consume the remaining "slot free" cross-core flags so they do not leak
        // into the next kernel launch.
        for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
            AscendC::CrossCoreWaitFlag(WORKSPACE_NUM + i);
        }
    }
}
