#pragma once
#include "./custom_type.h"
#include "./data_transfer.h"
#include "./kernel_const.h"
#include "./kernel_utils.h"
#include "kernel_operator.h"


/**
 * @brief: Batched linear multiply-add epilogue. Splits work across cores, partitions the UB
 *         cache, then computes beta*C, alpha*AB, and finally alpha*AB + beta*C.
 * @tparam L1_M0: M-dimension tile size of the A matrix loaded into L1 for compute
 * @tparam L1_N0: N-dimension tile size of the B matrix loaded into L1 for compute
 * @tparam L1_K0: K-dimension tile size of the A/B matrices loaded into L1 for compute
 * @tparam WORKSPACE_NUM: number of ping-pong workspace slots shared between AIC and AIV
 * @param [in] zeroPaddingM: M dimension of A/C matrices after zero padding
 * @param [in] zeroPaddingN: N dimension of B/C matrices after zero padding
 * @param [in] batchCount: number of batches in the batched matrix multiply
 * @param [in] d_validM: per-batch array of valid lengths along M for the A/C matrices
 * @param [in] d_validN: per-batch array of valid lengths along N for the B/C matrices
 * @param [in] alpha: scalar in alpha*AB + beta*C
 * @param [in] beta:  scalar in alpha*AB + beta*C
 * @param [in,out] d_C_pointer: per-batch array of base addresses of the zero-padded C matrices
 *                              (read for beta*C, overwritten with the final result)
 * @param [in] d_AicAivWorkspace_Pointer: array of GM base addresses used for AIC/AIV synchronization
 * @param [in] is_alpha1_beta0: whether alpha==1.0 && beta==0.0 (epilogue can be skipped entirely)
 */
 template<
 uint32_t L1_M0, 
 uint32_t L1_N0, 
 uint32_t L1_K0,
 uint32_t WORKSPACE_NUM>
[aicore] inline __attribute__((always_inline)) void BatchMatmulEpilogue(
    int64_t zeroPaddingM, 
    int64_t zeroPaddingN, 
    int64_t batchCount, 
    __gm__ int64_t*  d_validM,
    __gm__ int64_t*  d_validN,
    half alpha,
    half beta,
    __gm__ half**  d_C_pointer, 
    __gm__ half** d_AicAivWorkspace_Pointer, 
    uint8_t is_alpha1_beta0 
){

    if(is_alpha1_beta0){

        // no epilogue: alpha==1 && beta==0 means the raw AB result is already the final C

    }else{ // epilogue: compute alpha*AB + beta*C on the vector cores (AIV)

        // Claim the entire UB (Unified Buffer) as one raw byte tensor.
        // NOTE(review): the TPipe is destroyed immediately after InitBuffer — presumably the
        // LocalTensor stays valid and the buffers are managed manually below; confirm against
        // the AscendC TPipe/TBuf lifetime rules.
        AscendC::TBuf<AscendC::TPosition::VECIN> ub_buf;
        AscendC::TPipe ub_pipe;
        ub_pipe.InitBuffer(ub_buf, UB_BYTES);
        AscendC::LocalTensor<uint8_t> ub_tensor = ub_buf.Get<uint8_t>();
        ub_pipe.Destroy();

        // Size of one result tile: the largest AB tile read from workspace / C tile read from
        // the C matrix per iteration.
        static constexpr uint32_t l0c_blockSize = L1_M0 * L1_N0;// 256 * 128, 128 * 128

        // How many ping-pong buffers fit?
        // The 192KB UB must serve three roles: holding the AB tile, holding the C tile, and
        // holding the alpha*AB+beta*C result. Each role needs the same amount of space, so the
        // UB is split into three equal parts (64KB each), and each part is subdivided into
        // ping-pong buffers. Each buffer holds 1/GetSubBlockNum() of a tile — here every AIV
        // processes half of it — hence:
        //   64KB / ( L1_M0 * L1_N0 * sizeof(half) / 2 )  ping-pong buffers per role.
        static constexpr uint32_t ub_pingpongNum = UB_BYTES / 3 / (l0c_blockSize * sizeof(half) / 2);
        AscendC::LocalTensor<half> ub_cBuf[ub_pingpongNum];// ub_pingpongNum = 2, 32KB
        AscendC::LocalTensor<half> ub_abBuf[ub_pingpongNum];
        AscendC::LocalTensor<half> ub_resultBuf[ub_pingpongNum];

        // Carve the raw UB tensor into the three buffer groups.
        // Layout: [ cBuf[0..pp) | abBuf[0..pp) | resultBuf[0..pp) ], each slot ub_pingpongBytes.
        // ub_cBuf
        uint32_t ub_pingpongBytes = l0c_blockSize * sizeof(half) / 2;
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            ub_cBuf[i] = ub_tensor[                             i * ub_pingpongBytes].template ReinterpretCast<half>();
        }
        // ub_abBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            ub_abBuf[i] = ub_tensor[     (ub_pingpongNum +     i) * ub_pingpongBytes].template ReinterpretCast<half>();
        }
        // ub_resultBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            ub_resultBuf[i] = ub_tensor[ (ub_pingpongNum * 2 + i) * ub_pingpongBytes].template ReinterpretCast<half>();
        }

        // Initial Set of the pipeline flags: mark every buffer "free" so the first loop
        // iteration's WaitFlag calls do not deadlock.
        // Event-ID layout: [0, pp) = cBuf, [pp, 2pp) = abBuf, [2pp, 3pp) = resultBuf.
        // ub_cBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::SetFlag<AscendC::HardEvent::V_MTE2>((event_t)(i));
            // AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(i));// 0 1
        }
        // ub_abBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::SetFlag<AscendC::HardEvent::V_MTE2>((event_t)(ub_pingpongNum + i));
            // AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(ub_pingpongNum + i));// 2 3
        }
        // ub_resultBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::SetFlag<AscendC::HardEvent::MTE3_V>((event_t)(2 * ub_pingpongNum + i));
        }

        // GM workspace where the cube cores (AIC) deposit their A*B tile results.
        AscendC::GlobalTensor<half> gm_workspace;   
        // The pointer array has MAX_AICORE_NUM entries; each AIC/AIV pair (blockIdx/2) owns one.
        gm_workspace.SetGlobalBuffer( (__gm__ half*)d_AicAivWorkspace_Pointer[AscendC::GetBlockIdx()/2] );
        AscendC::GlobalTensor<half> gm_workspacePingpongBuf[WORKSPACE_NUM];
        for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
            gm_workspacePingpongBuf[i] = gm_workspace[ i * l0c_blockSize ];
        }

        // Initial cross-core Set: tell the AIC that all WORKSPACE_NUM slots start out free.
        // Flag IDs [WORKSPACE_NUM, 2*WORKSPACE_NUM) = "slot consumed by AIV" direction.
        for(uint32_t i = 0; i < WORKSPACE_NUM; i++){
            AscendC::CrossCoreSetFlag<0x2, PIPE_MTE2>(WORKSPACE_NUM + i);
        }

        // GM C matrix; SetGlobalBuffer is called per batch inside the loop.
        AscendC::GlobalTensor<half> gm_tensorC;

        // Index of the result tile this AI core is currently computing (drives ping-pong selection).
        uint32_t cur_aicore_block = 0;
        // Running tile-count offset of the current batch, needed for group-GEMM style indexing.
        uint32_t cur_result_blocks_sum = 0;
        // Index of the batch (matrix) currently being processed.
        // NOTE(review): -1 on uint32_t relies on wrap-around to UINT32_MAX so the first
        // `batchNum++` yields 0 — intentional, but worth a second look.
        uint32_t batchNum = -1;
        // Total number of tiles across all batches seen so far; grown inside the loop, so the
        // loop bound `loopSum+1` is re-evaluated every iteration (dynamic trip count).
        int64_t loopSum = 0;

        // Core-split compute loop: iterates over every result tile of every batch; each core
        // picks up the tiles whose index maps to it (round-robin below).
        for(uint32_t loopIdx = 0; loopIdx < loopSum+1; loopIdx++){

            // Crossing into the next batch: advance batchNum, rebind the C pointer, and extend
            // loopSum by this batch's tile count. When batches are exhausted, the `continue`
            // lets the loop condition terminate.
            if( loopIdx == loopSum ){
                batchNum++; 
                if( batchNum < batchCount ){
                    cur_result_blocks_sum = loopSum;
                    gm_tensorC.SetGlobalBuffer( (__gm__ half*)d_C_pointer[batchNum] );
                    // gm_workspace.SetGlobalBuffer( (__gm__ half*)d_AicAivWorkspace_Pointer[batchNum] );
                    loopSum += CeilDiv<int64_t>(d_validM[batchNum], L1_M0) * CeilDiv<int64_t>(d_validN[batchNum], L1_N0);
                }else{
                    continue;
                } 

            }

            // Core split. Note: AIV GetBlockIdx() returns 0-39 while AIC GetBlockIdx() returns
            // 0-19; AIV GetBlockNum() returns 40, AIC GetBlockNum() returns 20. Dividing the AIV
            // index by 2 maps both AIVs of a pair onto the same AIC's tile stream.
            if(loopIdx % AscendC::GetBlockNum() != (AscendC::GetBlockIdx() / 2)){
                continue;
            }

            // AscendC::CrossCoreWaitFlag(0x8);

            uint32_t M = d_validM[batchNum];
            uint32_t N = d_validN[batchNum];
            // Leading dimension of the current C matrix (C is row-major only).
            uint32_t strideC = zeroPaddingN;
            // Tile number within this batch handled by this iteration.
            uint32_t blockIdx = loopIdx - cur_result_blocks_sum;
            // Map the flat tile number to a (row, col) tile position (TODO: swizzle not done yet).
            uint32_t mLoops = CeilDiv<uint32_t>(M, L1_M0);
            uint32_t nLoops = CeilDiv<uint32_t>(N, L1_N0);
            uint32_t blockMIdx = blockIdx / nLoops;
            uint32_t blockNIdx = blockIdx % nLoops;
            // Actual extents of this tile (edge tiles may be smaller than L1_M0 x L1_N0).
            uint32_t gm_mActual = (blockMIdx == mLoops-1? M-blockMIdx*L1_M0 : L1_M0 );
            uint32_t gm_nActual = (blockNIdx == nLoops-1? N-blockNIdx*L1_N0 : L1_N0 );


            // task0: beta*C

            // Wait until the previous round is done with this ub_cBuf slot before MTE2 may
            // overwrite it with new GM data.
            AscendC::WaitFlag<AscendC::HardEvent::V_MTE2>((event_t)(cur_aicore_block % ub_pingpongNum));
            // AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(cur_aicore_block % ub_pingpongNum));

            // Load the C tile into ub_cBuf. The two AIVs of a pair split the tile by rows:
            // sub-block 0 takes the top half (up to L1_M0/2 rows), sub-block 1 the bottom half.
            if(AscendC::GetSubBlockIdx() == 0){ 
                Gm2Ub(
                    ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                    gm_tensorC[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], 
                    gm_mActual <= L1_M0 / 2 ? gm_mActual : L1_M0 / 2, 
                    gm_nActual, 
                    strideC
                );
            }else{// AscendC::GetSubBlockIdx() == 1
                // Bottom half only exists when the tile is taller than L1_M0/2 rows.
                if(gm_mActual > L1_M0 / 2){
                    Gm2Ub(
                        ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                        gm_tensorC[(blockMIdx * L1_M0 + L1_M0 / 2) * strideC + blockNIdx * L1_N0 ], 
                        gm_mActual - L1_M0 / 2, 
                        gm_nActual, 
                        strideC
                    );
                }    
                // Gm2Ub(
                //     ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                //     gm_tensorC[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], 
                //     gm_mActual <= L1_M0 / 2 ? gm_mActual : L1_M0 / 2, 
                //     gm_nActual, 
                //     strideC
                // );
            }

            // ub_cBuf is loaded; hand it over to the vector unit (result written back in place).
            AscendC::SetFlag<AscendC::HardEvent::MTE2_V>((event_t)(cur_aicore_block % ub_pingpongNum));
            
            // Vector unit waits for the load to complete before computing.
            AscendC::WaitFlag<AscendC::HardEvent::MTE2_V>((event_t)(cur_aicore_block % ub_pingpongNum));

            // beta*C, result written back into the same UB buffer.
            // Repeat count covers this AIV's half tile: l0c_blockSize/2 halves per repeat of
            // 8 data blocks. NOTE(review): the whole half-tile is scaled even when
            // gm_mActual/gm_nActual are smaller — harmless since only the valid region is
            // copied back to GM.
            AscendC::Muls<half, false>(
                ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                beta, 
                AscendC::MASK_PLACEHOLDER, 
                l0c_blockSize / 2 / (8 * DATABLOCK_BYTES / sizeof(half)), 
                AscendC::UnaryRepeatParams(1, 1, 8, 8)
            );

            // Vector results have a read-after-write dependency; barrier acts as in-loop pipelining.
            AscendC::PipeBarrier<PIPE_V>();
            // If a SetFlag were used here it would be SetFlag<V_V>.
            // ub_cBuf's role is done for this stage (cannot release it for the next round yet —
            // it is read again in the Add below).


            // task1: alpha*AB
            
            // Wait until the previous round is done with this ub_abBuf slot before reloading it.
            AscendC::WaitFlag<AscendC::HardEvent::V_MTE2>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));
            // AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));

            // Cross-core sync: wait for the AIC to finish writing this A*B tile into the
            // workspace slot (slots cycle with cur_aicore_block).
            // AscendC::CrossCoreWaitFlag(0x8);
            // AscendC::CrossCoreWaitFlag(0+cur_aicore_block%5);
            AscendC::CrossCoreWaitFlag(cur_aicore_block % WORKSPACE_NUM);

            // Load the AB tile from workspace into ub_abBuf; same top/bottom row split as for C.
            // The workspace tile is densely packed, so its leading dimension is L1_N0, not strideC.
            if(AscendC::GetSubBlockIdx() == 0){
                Gm2Ub(
                    ub_abBuf[cur_aicore_block % ub_pingpongNum],
                    // gm_workspace[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], 
                    gm_workspacePingpongBuf[cur_aicore_block % WORKSPACE_NUM], 
                    gm_mActual <= L1_M0 / 2 ? gm_mActual : L1_M0 / 2, 
                    gm_nActual, 
                    // strideC
                    L1_N0
                );
            }else{// AscendC::GetSubBlockIdx() == 1
                if(gm_mActual > L1_M0 / 2){
                    Gm2Ub(
                        ub_abBuf[cur_aicore_block % ub_pingpongNum], 
                        // gm_workspace[(blockMIdx * L1_M0 + L1_M0 / 2) * strideC + blockNIdx * L1_N0], 
                        gm_workspacePingpongBuf[cur_aicore_block % WORKSPACE_NUM][L1_M0 / 2 * L1_N0], 
                        gm_mActual - L1_M0 / 2, 
                        gm_nActual, 
                        // strideC
                        L1_N0
                    );
                }
            }
            // Tell the AIC this workspace slot has been consumed and may be refilled.
            AscendC::CrossCoreSetFlag<0x2, PIPE_MTE2>(WORKSPACE_NUM + cur_aicore_block % WORKSPACE_NUM);

            // ub_abBuf is loaded; hand it over to the vector unit (result written back in place).
            AscendC::SetFlag<AscendC::HardEvent::MTE2_V>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));

            // Vector unit waits for the load to complete before computing.
            AscendC::WaitFlag<AscendC::HardEvent::MTE2_V>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));

            // alpha*AB, in place in ub_abBuf.
            AscendC::Muls<half, false>(
                ub_abBuf[cur_aicore_block % ub_pingpongNum], 
                ub_abBuf[cur_aicore_block % ub_pingpongNum], 
                alpha, 
                AscendC::MASK_PLACEHOLDER, 
                l0c_blockSize / 2 / (8 * DATABLOCK_BYTES / sizeof(half)),
                AscendC::UnaryRepeatParams(1, 1, 8, 8)
            );

            // Vector results have a read-after-write dependency; barrier acts as in-loop pipelining.
            AscendC::PipeBarrier<PIPE_V>();
            // If a SetFlag were used here it would be SetFlag<V_V>.
            // ub_abBuf's role is done for this stage (cannot release it for the next round yet —
            // it is read again in the Add below).


            // task2: alpha*AB+beta*C        
            // Phase-2 BLAS note: this wait could be skipped here.
            AscendC::WaitFlag<AscendC::HardEvent::MTE3_V>((event_t)(2 * ub_pingpongNum + cur_aicore_block % ub_pingpongNum));

            // Read ub_cBuf and ub_abBuf, sum into ub_resultBuf to produce the final tile.
            AscendC::Add<half, false>(
                ub_resultBuf[cur_aicore_block % ub_pingpongNum], 
                ub_abBuf[cur_aicore_block % ub_pingpongNum], 
                ub_cBuf[cur_aicore_block % ub_pingpongNum], 
                AscendC::MASK_PLACEHOLDER, 
                l0c_blockSize / 2 / (8 * DATABLOCK_BYTES / sizeof(half)),
                AscendC::BinaryRepeatParams(1, 1, 1, 8, 8, 8)
            );
            AscendC::PipeBarrier<PIPE_V>();

            // AIV compute finished; the result may be written back to GM.
            AscendC::SetFlag<AscendC::HardEvent::V_MTE3>((event_t)(2 * ub_pingpongNum + cur_aicore_block % ub_pingpongNum));
            // ub_resultBuf's role is done.

            
            AscendC::WaitFlag<AscendC::HardEvent::V_MTE3>((event_t)(2 * ub_pingpongNum + cur_aicore_block % ub_pingpongNum));
            // Copy the result tile back to GM (same top/bottom row split between the two AIVs).
            if(AscendC::GetSubBlockIdx() == 0){ 
                Ub2Gm(
                    gm_tensorC[blockMIdx * L1_M0 * strideC + blockNIdx * L1_N0], 
                    ub_resultBuf[cur_aicore_block % ub_pingpongNum], 
                    gm_mActual <= L1_M0 / 2 ? gm_mActual : L1_M0 / 2, 
                    gm_nActual, 
                    strideC
                );
            }else{
                if(gm_mActual > L1_M0 / 2){
                    Ub2Gm(
                        gm_tensorC[(blockMIdx * L1_M0 + L1_M0 / 2) * strideC + blockNIdx * L1_N0], 
                        ub_resultBuf[cur_aicore_block % ub_pingpongNum], 
                        gm_mActual - L1_M0 / 2, 
                        gm_nActual, 
                        strideC
                    );
                } 

            }

            // Full pipeline done for this tile: return ownership of all three buffer slots
            // so the next round (same slot index) may reuse them.
            // AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(cur_aicore_block % ub_pingpongNum));
            // AscendC::SetFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));
            AscendC::SetFlag<AscendC::HardEvent::V_MTE2>((event_t)(cur_aicore_block % ub_pingpongNum));
            AscendC::SetFlag<AscendC::HardEvent::V_MTE2>((event_t)(ub_pingpongNum + cur_aicore_block % ub_pingpongNum));
            AscendC::SetFlag<AscendC::HardEvent::MTE3_V>((event_t)(2 * ub_pingpongNum + cur_aicore_block % ub_pingpongNum));

            cur_aicore_block++;
        }


        // Final Wait: drain every flag that was Set at startup (or by the last iteration) so
        // no event IDs are left pending when the kernel exits.
        // ub_cBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::WaitFlag<AscendC::HardEvent::V_MTE2>((event_t)(i));
            // AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(i));
        }
        // ub_abBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::WaitFlag<AscendC::HardEvent::V_MTE2>((event_t)(ub_pingpongNum + i));
            // AscendC::WaitFlag<AscendC::HardEvent::MTE3_MTE2>((event_t)(ub_pingpongNum + i));
        }
        // ub_resultBuf
        #pragma unroll
        for(uint32_t i = 0; i < ub_pingpongNum; i++){
            AscendC::WaitFlag<AscendC::HardEvent::MTE3_V>((event_t)(2 * ub_pingpongNum + i));
        }
    }


}

