#include "kernel_operator.h"
#include "lib/matmul_intf.h"
using namespace matmul;  // AscendC high-level matmul API (lib/matmul_intf.h)
// NOTE(review): named BLOCK_NUM but consumed by SubCompute_Pad's RoundUp as a
// UB block size in BYTES (32 B); consider renaming to BLOCK_SIZE — confirm intent.
constexpr uint32_t BLOCK_NUM = 32;
constexpr uint32_t BUFFER_NUM = 2;  // double buffering for the VECIN/VECOUT bind queue

// Integer ceiling division: smallest q such that q * divisor >= value.
// Requires divisor > 0; value + divisor - 1 must not overflow uint32_t.
__aicore__ inline uint32_t Ceiling(uint32_t value, uint32_t divisor)
{
    uint32_t quotient = (value + divisor - 1) / divisor;
    return quotient;
}

// Round `value` up to the nearest multiple of `multiple`.
// Requires multiple > 0; value + multiple - 1 must not overflow uint32_t.
__aicore__ inline uint32_t RoundUp(uint32_t value, uint32_t multiple)
{
    uint32_t rounded = ((value + multiple - 1) / multiple) * multiple;
    return rounded;
}

// Kernel computing y = matmul(x1, x2) - x3.
// Phase 1 (cube cores): x1 @ x2 is written to y in GM via the Matmul object.
// Phase 2 (vector cores): -x3 is atomically added to y in GM.
class MatMulSubKernel {
public:
    __aicore__ inline MatMulSubKernel(){};
    // Binds GM buffers, caches tiling/length parameters and applies the
    // per-core offsets computed by CalcOffset.
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y, GM_ADDR workspace,
                                const TCubeTiling &tiling, uint32_t totalLength, uint32_t tileLength);
    // Runs matmul, then the subtraction epilogue.
    // needPad == false -> SubCompute (flat tiled path);
    // needPad == true  -> SubCompute_Pad (single-row broadcast path).
    template <bool needPad> __aicore__ inline void Process();
    __aicore__ inline void SubCompute();
    __aicore__ inline void SubCompute_Pad();
    // Derives this core's element offsets into x1/x2/x3/y from its block index.
    __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling &tiling, int32_t &offsetX1, int32_t &offsetX2,
                                      int32_t &offsetX3, int32_t &offsetY);

    // ND-format GM matmul: A=x1, B=x2, C=y, bias slot typed as x3; MDL config.
    Matmul<MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_X1>, MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_X2>,
           MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_Y>, MatmulType<AscendC::TPosition::GM, CubeFormat::ND, DTYPE_X3>, CFG_MDL>
        matmulObj;

    AscendC::GlobalTensor<DTYPE_X1> x1Global;   // input A, offset per core in Init
    AscendC::GlobalTensor<DTYPE_X2> x2Global;   // input B, offset per core in Init
    AscendC::GlobalTensor<DTYPE_X3> x3Global;   // subtrahend, kept at offset 0 (indexed absolutely)
    AscendC::GlobalTensor<DTYPE_Y> yGlobal;     // matmul output view, offset per core
    AscendC::GlobalTensor<DTYPE_Y> outGlobal;   // same buffer as yGlobal, kept at offset 0 for the epilogue

    TCubeTiling tiling;
    uint32_t totalLength, tileLength;   // total y elements / elements per vector tile
    uint32_t calcLength;                // elements processed in the current tile (tail may be shorter)

    // Double-buffered in/out-bound queue used by SubCompute.
    AscendC::TQueBind<AscendC::TPosition::VECIN, AscendC::TPosition::VECOUT, BUFFER_NUM> InOutQueue; 
    // Single-buffer queues used by SubCompute_Pad.
    AscendC::TQue<AscendC::TPosition::VECIN, 1> inQueue; 
    AscendC::TQue<AscendC::TPosition::VECOUT, 1> outQueue;
};


// Bind all GM tensors, cache tiling and length parameters, and shift the
// x1/x2/y views to this core's sub-matrix. x3Global and outGlobal deliberately
// stay at offset 0: the vector epilogue indexes them with absolute offsets.
__aicore__ inline void MatMulSubKernel::Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y, GM_ADDR workspace,
                                               const TCubeTiling &tiling, uint32_t totalLength, uint32_t tileLength)
{
    this->tiling = tiling;
    this->totalLength = totalLength;
    this->tileLength = tileLength;
    x1Global.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X1 *>(x1), tiling.M * tiling.Ka);
    x2Global.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X2 *>(x2), tiling.Kb * tiling.N);
    x3Global.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X3 *>(x3), tiling.M * tiling.N);
    // yGlobal and outGlobal wrap the SAME output buffer: yGlobal is the
    // per-core matmul destination, outGlobal the whole-tensor epilogue view.
    yGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), tiling.M * tiling.N);
    outGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), tiling.M * tiling.N);

    int32_t offsetX1 = 0;
    int32_t offsetX2 = 0;
    int32_t offsetX3 = 0;  // computed by CalcOffset but intentionally unused here
    int32_t offsetY = 0;

    CalcOffset(AscendC::GetBlockIdx(), tiling, offsetX1, offsetX2, offsetX3, offsetY); 

    x1Global = x1Global[offsetX1];
    x2Global = x2Global[offsetX2];
    yGlobal = yGlobal[offsetY];

    // NOTE(review): this guard is the final statement, so the early return has
    // no effect; presumably an error path or SetSysWorkspace call was
    // intended — confirm against the framework's workspace contract.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }
}

// Full per-core pipeline:
//   1) cube phase: iterate the whole matmul and write x1 @ x2 into yGlobal;
//   2) SyncAll: every core waits until all matmul partials have landed in GM,
//      so the vector epilogue reads a complete product;
//   3) vector phase: allocate the UB queue and subtract x3 via the path
//      selected by the compile-time needPad flag.
template <bool needPad>
__aicore__ inline void MatMulSubKernel::Process()
{

    matmulObj.SetTensorA(x1Global);
    matmulObj.SetTensorB(x2Global);
    matmulObj.IterateAll(yGlobal);
    
    matmulObj.End();  // release matmul resources before the vector phase

    AscendC::SyncAll();
    // tileLength elements per buffer, BUFFER_NUM-deep for double buffering.
    GetTPipePtr()->InitBuffer(InOutQueue, BUFFER_NUM, (this->tileLength) * sizeof(DTYPE_X3)); 
    if constexpr(needPad == false)  {
        SubCompute();
    }
    else {
        SubCompute_Pad();
    }
}

// Flat tiled epilogue: y -= x3 over all totalLength elements.
// Each vector core walks tiles starting at coreIdx * tileLength with a
// stride of 2 * coreNum * tileLength (the factor 2 matches the MIX_AIC_1_2
// launch, where two vector cores run per cube core — confirm if the ratio
// changes). Per tile: GM->UB copy, negate, atomic-add back to GM.
__aicore__ inline void MatMulSubKernel::SubCompute()
{
    uint32_t coreIdx = AscendC::GetBlockIdx();
    uint32_t coreNum = AscendC::GetBlockNum();

    this->calcLength = this->tileLength;

    // Hoisted loop-invariant stride; uint32_t index avoids the original
    // signed/unsigned comparison against totalLength.
    uint32_t stride = 2 * coreNum * this->tileLength;
    for (uint32_t i = coreIdx * this->tileLength; i < this->totalLength; i += stride) {
        // Tail tile may be shorter than tileLength.
        if (i + this->tileLength > this->totalLength) {
            this->calcLength = this->totalLength - i;
        }
        AscendC::LocalTensor<DTYPE_X3> inoutLocal = InOutQueue.AllocTensor<DTYPE_X3>();

        // Byte-exact GM->UB copy of this x3 tile (no per-block padding).
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>((this->calcLength) * sizeof(DTYPE_X3)), 0, 0, 0};
        AscendC::DataCopyPadExtParams<DTYPE_X3> padParams{false, 0, 0, 0};
        AscendC::DataCopyPad(inoutLocal, x3Global[i], copyParams, padParams);

        // MTE2 -> Vector: wait for the copy-in before computing on it.
        int32_t eventIDMTE2_V = static_cast<int32_t>(GetTPipePtr()->FetchEventID(AscendC::HardEvent::MTE2_V));
        AscendC::SetFlag<AscendC::HardEvent::MTE2_V>(eventIDMTE2_V);
        AscendC::WaitFlag<AscendC::HardEvent::MTE2_V>(eventIDMTE2_V);

        // Negate so the atomic add below yields y - x3.
        AscendC::Muls(inoutLocal, inoutLocal, (DTYPE_X3)(-1), (this->calcLength));

        // Vector -> MTE3: wait for Muls before copying out.
        int32_t eventIDV_MTE3 = static_cast<int32_t>(GetTPipePtr()->FetchEventID(AscendC::HardEvent::V_MTE3));
        AscendC::SetFlag<AscendC::HardEvent::V_MTE3>(eventIDV_MTE3);
        AscendC::WaitFlag<AscendC::HardEvent::V_MTE3>(eventIDV_MTE3);

        AscendC::DataCopyExtParams outCopyParams{1, static_cast<uint32_t>((this->calcLength) * sizeof(DTYPE_X3)), 0, 0, 0};
        AscendC::SetAtomicAdd<DTYPE_X3>();  // accumulate -x3 onto the matmul result
        AscendC::DataCopyPad(outGlobal[i], inoutLocal, outCopyParams);
        AscendC::SetAtomicNone();
        InOutQueue.FreeTensor(inoutLocal);
    }
}

// Broadcast-row epilogue: x3 is a single row of tiling.N elements subtracted
// from every row of y. The row is loaded and negated once, then atomically
// added to each row this core owns (rows interleaved by 2 * coreNum, matching
// the MIX_AIC_1_2 launch's two vector cores per cube core — confirm if the
// ratio changes).
__aicore__ inline void MatMulSubKernel::SubCompute_Pad()
{
    // NOTE(review): BLOCK_NUM (=32) is used here as the UB block size in
    // BYTES, not a block count — verify against the platform constant.
    uint32_t NAlign = RoundUp(tiling.N, (BLOCK_NUM / sizeof(DTYPE_X3)));
    uint32_t coreIdx = AscendC::GetBlockIdx();
    uint32_t coreNum = AscendC::GetBlockNum();

    GetTPipePtr()->InitBuffer(inQueue, 1, NAlign * sizeof(DTYPE_X3));
    GetTPipePtr()->InitBuffer(outQueue, 1, NAlign * sizeof(DTYPE_X3));

    AscendC::LocalTensor<DTYPE_X3> inLocal = inQueue.AllocTensor<DTYPE_X3>();

    // Copy the possibly unaligned row with pad handling.
    AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>((tiling.N) * sizeof(DTYPE_X3)), 0, 0, 0};
    AscendC::DataCopyPadExtParams<DTYPE_X3> padParams{false, 0, 0, 0};
    AscendC::DataCopyPad(inLocal, x3Global, copyParams, padParams);

    // EnQue/DeQue inserts the MTE2 -> Vector synchronization.
    inQueue.EnQue(inLocal);
    inLocal = inQueue.DeQue<DTYPE_X3>();

    AscendC::LocalTensor<DTYPE_X3> outLocal = outQueue.AllocTensor<DTYPE_X3>();

    // Negate once; the same -x3 row is reused for every output row.
    AscendC::Muls(outLocal, inLocal, (DTYPE_X3)(-1), (tiling.N));

    // EnQue/DeQue inserts the Vector -> MTE3 synchronization.
    outQueue.EnQue(outLocal);
    inQueue.FreeTensor(inLocal);

    outLocal = outQueue.DeQue<DTYPE_X3>();
    // uint32_t row index avoids the original signed/unsigned loop mix; the
    // redundant "* ((1))" stride factor is dropped.
    uint32_t rowStride = 2 * coreNum;
    for (uint32_t row = coreIdx; row < static_cast<uint32_t>(tiling.M); row += rowStride) {
        AscendC::DataCopyExtParams outCopyParams{1, static_cast<uint32_t>((tiling.N) * sizeof(DTYPE_X3)), 0, 0, 0};
        AscendC::SetAtomicAdd<DTYPE_X3>();  // y[row, :] += -x3
        AscendC::DataCopyPad(outGlobal[row * tiling.N], outLocal, outCopyParams);
        AscendC::SetAtomicNone();
    }
    outQueue.FreeTensor(outLocal);
}

// Translate a linear block index into element offsets for this core's
// sub-matrices, and register tail dimensions with the matmul object when the
// core sits on the ragged edge of the M/N grid.
__aicore__ inline void MatMulSubKernel::CalcOffset(int32_t blockIdx, const TCubeTiling &tiling,
                                                    int32_t &offsetX1, int32_t &offsetX2, int32_t &offsetX3,
                                                    int32_t &offsetY)
{
    // Cores are laid out column-major over the M-direction blocks.
    auto blocksAlongM = Ceiling(tiling.M, tiling.singleCoreM);
    auto rowBlock = blockIdx % blocksAlongM;
    auto colBlock = blockIdx / blocksAlongM;

    // Row-major ND element offsets for A, B and C.
    offsetX1 = rowBlock * tiling.Ka * tiling.singleCoreM;
    offsetX2 = colBlock * tiling.singleCoreN;
    offsetY = rowBlock * tiling.N * tiling.singleCoreM + colBlock * tiling.singleCoreN;

    // Clamp edge cores to the rows/cols that actually remain.
    int tailM = tiling.M - rowBlock * tiling.singleCoreM;
    if (tailM > tiling.singleCoreM) {
        tailM = tiling.singleCoreM;
    }
    int tailN = tiling.N - colBlock * tiling.singleCoreN;
    if (tailN > tiling.singleCoreN) {
        tailN = tiling.singleCoreN;
    }

    if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
        matmulObj.SetTail(tailM, tailN);
    }
}

// Kernel entry: y = matmul(x1, x2) - x3.
// Tiling key 1 -> aligned flat epilogue; key 2 -> padded broadcast-row path.
extern "C" __global__ __aicore__ void mat_mul_sub(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    // Mixed launch: one cube (AIC) core drives two vector (AIV) cores.
    KERNEL_TASK_TYPE_DEFAULT(KERNEL_TYPE_MIX_AIC_1_2)
    AscendC::TPipe pipe;
    MatMulSubKernel OP;
    // Must register the matmul object before Init/Process use it.
    REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), OP.matmulObj, &tiling_data.cubeTilingData);
    OP.Init(x1, x2, x3, y, workspace, tiling_data.cubeTilingData, 
            tiling_data.totalLength, tiling_data.tileLength);
    if(TILING_KEY_IS(1)) {
        OP.Process<false>();   
    }
    else if (TILING_KEY_IS(2)) {
        OP.Process<true>();
    }  
}