#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
#include "lib/matmul_intf.h"

using namespace matmul;
using namespace AscendC;
// Integer ceiling division: smallest q such that q * b >= a.
// Written as quotient-plus-remainder-test instead of the classic
// (a + b - 1) / b so it cannot wrap when a + b - 1 would exceed
// UINT32_MAX. Precondition: b != 0.
__aicore__ inline uint32_t Ceiling(uint32_t a, uint32_t b) {
  return a / b + (a % b != 0u ? 1u : 0u);
}

const static int BUFFER_NUM = 2;

// Matmul-only stage of the y = x1 @ x2 - x3 operator (non-fused path).
// Each core computes its singleCoreM x singleCoreN tile of x1 @ x2 and writes
// the raw product straight to yGm; the x3 subtraction is performed afterwards
// by SubKernel (after a SyncAll in the kernel entry point).
template <typename x1Type, typename x2Type, typename x3Type, typename yType>
class MatMulSubKernel {
public:
  __aicore__ inline MatMulSubKernel(){};
  // Binds the global tensors and advances them to this core's tile offsets.
  // The matmul object itself is registered by the caller (REGIST_MATMUL_OBJ in
  // mat_mul_sub) — not here.
  // NOTE(review): x3, workspace and the x3ShapeType argument are accepted but
  // unused, and the member x3ShapeType is never assigned — confirm intentional.
  __aicore__ inline void Init(GM_ADDR __restrict x1, GM_ADDR __restrict x2,
                              GM_ADDR x3, GM_ADDR y, GM_ADDR workspace,
                              TCubeTiling &tiling, uint32_t x3ShapeType,
                              TPipe *pipeIn) {
    this->tiling = tiling;
    this->pipe = pipeIn;
    // REGIST_MATMUL_OBJ(this->pipe, GetSysWorkSpacePtr(), this->matmulObj);
    // matmulObj.Init(&this->tiling);
    x1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x1Type *>(x1),
                         tiling.M * tiling.Ka); // x1 is M x Ka
    x2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x2Type *>(x2),
                         tiling.Kb * tiling.N); // x2 is Kb x N
    yGm.SetGlobalBuffer(reinterpret_cast<__gm__ yType *>(y),
                        tiling.M * tiling.N); // y is M x N
    int offsetX1 = 0;
    int offsetX2 = 0;
    int offsetY = 0;
    int offsetX3 = 0;
    CalcOffset(AscendC::GetBlockIdx(), tiling, offsetX1, offsetX2, offsetY,
               offsetX3);
    // Advance each global tensor to this core's sub-block.
    x1Gm = x1Gm[offsetX1];
    x2Gm = x2Gm[offsetX2];
    yGm = yGm[offsetY];
    // x3Gm = x3Gm[offsetX3];
    // NOTE(review): this guard is a no-op — Init returns immediately after it
    // either way, and nothing below depends on the workspace pointer.
    if (GetSysWorkSpacePtr() == nullptr) {
      return;
    }
  }
  // Runs the complete tile multiplication for this core and releases the
  // matmul object's resources.
  __aicore__ inline void Process() {
    matmulObj.SetTensorA(x1Gm);
    matmulObj.SetTensorB(x2Gm);
    matmulObj.IterateAll(yGm); // used to verify the correctness of the multiplication
    // while (matmulObj.Iterate()) {
    //     matmulObj.template GetTensorC<false>(yGm);     //
    //     set enAtomic to 1 in the GetTensorC call
    // }
    matmulObj.End();
    return;
  }
  // Maps a linear block index onto an m-major 2D core grid and computes this
  // core's element offsets into x1, x2 and y. Also clamps the tail tile and
  // calls matmulObj.SetTail when this core's tile is smaller than
  // singleCoreM x singleCoreN.
  // NOTE(review): offsetX3 is never written by this overload — the caller's
  // initial value (0) is what survives; confirm intentional.
  __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling &tiling,
                                    int32_t &offsetX1, int32_t &offsetX2,
                                    int32_t &offsetY, int32_t &offsetX3) {
    auto mSingleBlocks = Ceiling(tiling.M, tiling.singleCoreM);
    auto mCoreIndx = blockIdx % mSingleBlocks; // row-block index (m direction)
    auto nCoreIndx = blockIdx / mSingleBlocks; // column-block index (n direction)
    // cal offset
    offsetX1 = mCoreIndx * tiling.Ka * tiling.singleCoreM;
    offsetX2 = nCoreIndx * tiling.singleCoreN;
    offsetY = mCoreIndx * tiling.N * tiling.singleCoreM +
              nCoreIndx * tiling.singleCoreN;
    // set tailBlock: clamp the last row/column block to what remains of M / N
    int tailM = tiling.M - mCoreIndx * tiling.singleCoreM;
    tailM = tailM < tiling.singleCoreM ? tailM : tiling.singleCoreM;
    int tailN = tiling.N - nCoreIndx * tiling.singleCoreN;
    tailN = tailN < tiling.singleCoreN ? tailN : tiling.singleCoreN;
    if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
      matmulObj.SetTail(tailM, tailN);
    }
  };

public:
  TCubeTiling tiling;
  TPipe *pipe;
  uint32_t x3ShapeType; // NOTE(review): declared but never assigned or read here
  Matmul<MatmulType<TPosition::GM, CubeFormat::ND, x1Type, false,
                    LayoutMode::NONE>,
         MatmulType<TPosition::GM, CubeFormat::ND, x2Type, false,
                    LayoutMode::NONE>,
         MatmulType<TPosition::GM, CubeFormat::ND, yType>,
         MatmulType<TPosition::GM, CubeFormat::ND, x3Type>, CFG_MDL>
      matmulObj;
  GlobalTensor<x1Type> x1Gm;
  GlobalTensor<x2Type> x2Gm;
  GlobalTensor<yType> yGm;
};

// Element-wise subtraction stage of the non-fused path: y = y - x3, one row
// of N elements at a time. Runs after the matmul pass has written
// y = x1 @ x2. Rows are distributed across cores: the first nBigCore cores
// process bigCoreRows rows each, the rest process smallCoreRows rows each.
template <typename x3Type, typename yType> class SubKernel {
public:
  __aicore__ inline SubKernel(){};
  // Binds x3/y global buffers at this core's row offset and sizes the
  // depth-2 queues for one row (N elements rounded up to a multiple of 32
  // elements, see InitBuffer calls below).
  __aicore__ inline void Init(GM_ADDR __restrict x3, GM_ADDR __restrict y,
                              const TCubeTiling &tiling, uint32_t x3ShapeType,
                              int nBigCore, int bigCoreRows, int smallCoreRows,
                              TPipe *pipeIn) {
    this->pipe = pipeIn;
    this->coreIdx = GetBlockIdx();
    this->tiling = tiling;
    this->x3ShapeType = x3ShapeType;
    uint32_t globalOffset; // element offset of this core's first row
    if (coreIdx < nBigCore) {
      this->rows = bigCoreRows;
      globalOffset = bigCoreRows * coreIdx * tiling.N;
      // printf("core%d globalOffset=%d\n", coreIdx, globalOffset);
    } else {
      this->rows = smallCoreRows;
      globalOffset =
          (bigCoreRows * nBigCore + (coreIdx - nBigCore) * smallCoreRows) *
          tiling.N;
    }
    if (x3ShapeType == 1) {
      // x3 is a single row of N elements, broadcast to every row of y.
      x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x3Type *>(x3), tiling.N);
    } else {
      // x3 is a full matrix laid out like y; offset it the same way.
      x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x3Type *>(x3) + globalOffset,
                           this->rows * tiling.N);
    }
    yGm.SetGlobalBuffer(reinterpret_cast<__gm__ yType *>(y) +
                            globalOffset, // note: do not add the offset to y directly — y (GM_ADDR) is a uint8 byte address
                        this->rows * tiling.N);
    // One row per buffer slot: N rounded up to a multiple of 32 elements so
    // copy lengths are 32-byte aligned for any element size; depth 2 enables
    // double buffering.
    pipe->InitBuffer(inQueueX, 2, (tiling.N + 31) / 32 * 32 * sizeof(x3Type));
    pipe->InitBuffer(inQueueY, 2, (tiling.N + 31) / 32 * 32 * sizeof(x3Type));
    pipe->InitBuffer(outQueueY, 2, (tiling.N + 31) / 32 * 32 * sizeof(x3Type));
  }
  // Row loop: broadcast variant re-reads the same x3 row; matrix variant
  // advances through x3 row by row.
  __aicore__ inline void Process() {
    if (x3ShapeType == 1) {
      for (int i = 0; i < rows; i++) {
        CopyInRow(i);
        Compute(i);
        CopyOut(i);
      }
    } else {
      for (int i = 0; i < rows; i++) {
        CopyInOFFSET(i);
        Compute(i);
        CopyOut(i);
      }
    }
  }
  // Stage row r of y plus the (single, broadcast) x3 row into local memory.
  // NOTE(review): the copy length is N rounded up to a multiple of 32
  // elements, so up to 31 elements past N are read from GM — assumed to be
  // safely readable padding; confirm.
  __aicore__ inline void CopyInRow(int r) {
    LocalTensor<x3Type> x3Local = inQueueX.AllocTensor<x3Type>();
    LocalTensor<yType> yLocal = inQueueY.AllocTensor<yType>();
    DataCopy(yLocal, yGm[r * tiling.N], (tiling.N + 31) / 32 * 32);
    DataCopy(x3Local, x3Gm, (tiling.N + 31) / 32 * 32);
    inQueueX.EnQue(x3Local);
    inQueueY.EnQue(yLocal);
  }
  // Stage row r of y and row r of the x3 matrix into local memory.
  __aicore__ inline void CopyInOFFSET(int r) {
    LocalTensor<x3Type> x3Local = inQueueX.AllocTensor<x3Type>();
    LocalTensor<yType> yLocal = inQueueY.AllocTensor<yType>();
    DataCopy(yLocal, yGm[r * tiling.N], (tiling.N + 31) / 32 * 32);
    DataCopy(x3Local, x3Gm[r * tiling.N], (tiling.N + 31) / 32 * 32);
    inQueueX.EnQue(x3Local);
    inQueueY.EnQue(yLocal);
  }
  // result = y_row - x3_row for one staged row.
  // NOTE(review): relies on AscendC LocalTensor operator overloading; the
  // assignment rebinds `result` to the expression's result tensor rather than
  // filling the tensor allocated above — confirm this matches the intended
  // queue ownership.
  __aicore__ inline void Compute(int r) {
    LocalTensor<x3Type> x3Local = inQueueX.DeQue<x3Type>();
    LocalTensor<yType> yLocal = inQueueY.DeQue<yType>();
    LocalTensor<yType> result = outQueueY.AllocTensor<yType>();
    result = yLocal - x3Local;
    outQueueY.EnQue(result);
    inQueueX.FreeTensor(x3Local);
    inQueueY.FreeTensor(yLocal);
  }
  // Write exactly N elements of row r back to y (DataCopyPad trims the
  // 32-element rounding used on the way in, so neighbouring rows are not
  // clobbered).
  __aicore__ inline void CopyOut(int r) {
    LocalTensor<yType> result = outQueueY.DeQue<yType>();
    uint32_t blockLen = tiling.N * static_cast<uint32_t>(sizeof(x3Type));
    DataCopyExtParams copyParams{1, blockLen, 0, 0, 0};
    DataCopyPad(yGm[r * tiling.N], result, copyParams);
    outQueueY.FreeTensor(result);
  }

private:
  TPipe *pipe;
  int coreIdx;       // this core's block index
  int rows;          // number of y rows this core processes
  uint32_t x3ShapeType; // 1 => x3 is one broadcast row; otherwise full matrix
  TCubeTiling tiling;
  GlobalTensor<x3Type> x3Gm;
  GlobalTensor<yType> yGm;
  TQue<QuePosition::VECIN, 2> inQueueX, inQueueY;
  TQue<QuePosition::VECOUT, 2> outQueueY;
};

// Flat element-wise subtraction y = y - x over 1-D chunks, tiled by
// iterations of nElemPerIter elements plus an optional tail.
// NOTE(review): currently unused — the call site in mat_mul_sub is commented
// out. Only even GetBlockIdx() values do work (odd ones return immediately);
// presumably one vector core per cube/vector pair — confirm before reuse.
template <typename TYPE_X, typename TYPE_Y> class SubKernelUB {
public:
  __aicore__ inline SubKernelUB(){};
  // Splits the flat element range across the active (even-indexed) cores:
  // the first nAcores cores own nElemAcore elements each, the rest own
  // nElemBcore each; per-core work is further tiled into nIter chunks.
  __aicore__ inline void Init(GM_ADDR __restrict x, GM_ADDR __restrict y,
                              uint32_t nAcores, uint32_t nIterAcore,
                              uint32_t tailNElemA, uint32_t nElemPerIter,
                              uint32_t nIterBcore, uint32_t tailNElemB,
                              uint32_t nElemAcore, uint32_t nElemBcore,
                              TPipe *pipeIn) {
    this->coreIdx = GetBlockIdx();
    if (this->coreIdx % 2 == 1)
      return; // odd cores idle (see class note)
    this->coreIdx /= 2; // logical index among the active cores
    uint32_t globalMemOffset;
    this->nElemPerIter = nElemPerIter;
    this->pipe = pipeIn;
    if (coreIdx < nAcores) {
      this->nElem = nElemAcore;
      this->nIter = nIterAcore;
      this->tailNElem = tailNElemA;
      globalMemOffset = nElemAcore * coreIdx;
    } else {
      this->nElem = nElemBcore;
      this->nIter = nIterBcore;
      this->tailNElem = tailNElemB;
      globalMemOffset = nElemAcore * nAcores + (coreIdx - nAcores) * nElemBcore;
    }
    // Buffer size for the case where a single iteration covers all elements:
    // never allocate more than one iteration's worth.
    uint32_t maxNElem = nElemAcore > nElemBcore ? nElemAcore : nElemBcore;
    uint32_t bufferNElem = maxNElem > nElemPerIter ? nElemPerIter : maxNElem;
    // Initialize buffers. outQueueY is a VECIN/VECOUT TQueBind: the y chunk
    // is staged into it, subtracted in place, and copied back out from the
    // same buffer (no separate inQueueY needed).
    xGm.SetGlobalBuffer((__gm__ TYPE_X *)x + globalMemOffset, this->nElem);
    yGm.SetGlobalBuffer((__gm__ TYPE_Y *)y + globalMemOffset, this->nElem);
    pipe->InitBuffer(inQueueX, BUFFER_NUM, bufferNElem * sizeof(TYPE_X));
    // pipe->InitBuffer(inQueueY, BUFFER_NUM, bufferNElem * sizeof(TYPE_X));
    pipe->InitBuffer(outQueueY, BUFFER_NUM, bufferNElem * sizeof(TYPE_X));
  }
  // Full-size iterations first, then one tail iteration of tailNElem
  // elements (skipped when the tail is empty).
  __aicore__ inline void Process() {
    if (GetBlockIdx() % 2 == 1)
      return; // odd cores idle (mirrors the Init guard)
    // At this data scale multiple iterations are always required.
    this->curNElem = this->nElemPerIter;
    for (int32_t i = 0; i < this->nIter - 1; i++) {
      CopyIn(i);
      Compute(i);
      CopyOut(i);
    }
    this->curNElem = this->tailNElem;
    if (this->curNElem > 0) {
      CopyIn(this->nIter - 1);
      Compute(this->nIter - 1);
      CopyOut(this->nIter - 1);
    }
  }

private:
  // Stage curNElem elements of x and y for chunk `progress`. The y chunk is
  // allocated from outQueueY (TQueBind) so the same buffer can be written
  // back out after Compute.
  __aicore__ inline void CopyIn(int32_t progress) {
    LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
    LocalTensor<TYPE_X> yLocal = outQueueY.AllocTensor<TYPE_X>();
    DataCopy(xLocal, xGm[progress * this->nElemPerIter], this->curNElem);
    DataCopy(yLocal, yGm[progress * this->nElemPerIter], this->curNElem);
    inQueueX.EnQue(xLocal);
    outQueueY.EnQue(yLocal);
  }
  // y_chunk -= x_chunk, in place in the outQueueY buffer.
  // NOTE(review): relies on AscendC LocalTensor operator overloading for the
  // element-wise subtract — confirm semantics for the bound queue buffer.
  __aicore__ inline void Compute(int32_t progress) {
    LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
    LocalTensor<TYPE_Y> yLocal = outQueueY.DeQue<TYPE_Y>();
    yLocal = yLocal - xLocal;
    outQueueY.EnQue<TYPE_Y>(yLocal);
    inQueueX.FreeTensor(xLocal);
  }
  // Write the finished chunk back to global memory.
  __aicore__ inline void CopyOut(int32_t progress) {
    LocalTensor<TYPE_Y> yLocal = outQueueY.DeQue<TYPE_Y>();
    DataCopy(yGm[progress * this->nElemPerIter], yLocal, this->curNElem);
    outQueueY.FreeTensor(yLocal);
  }

private:
  TPipe *pipe;
  TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
  TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> outQueueY;
  GlobalTensor<TYPE_X> xGm;
  GlobalTensor<TYPE_Y> yGm;
  uint32_t coreIdx;      // logical active-core index (block index / 2)
  uint32_t nElem;        // total elements owned by this core
  uint32_t nIter; // 1 => a single pass computes all nElem elements;
                  // >1 => full chunks of nElemPerIter plus a tailNElem tail
  uint32_t nElemPerIter; // elements per full iteration
  uint32_t tailNElem;    // elements in the last (tail) iteration; may be 0
  uint32_t curNElem;     // elements handled by the current iteration
};

// Fused y = x1 @ x2 - x3 for the large-matrix path: matmul base blocks are
// produced one at a time via Iterate/GetTensorC into local memory, the
// corresponding x3 block is subtracted on the vector unit, and the result is
// written to y — avoiding a full extra pass over global memory.
// NOTE(review): the Process loop hard-codes 4 base blocks in a 2x2 layout,
// i.e. it assumes singleCoreM == 2 * baseM and singleCoreN == 2 * baseN, and
// processes rows 16 at a time (rowToProc assumed divisible by 16). The local
// buffers are sized 16*256 elements, assuming baseN <= 256. Confirm the
// tiling guarantees all of these.
template <typename x1Type, typename x2Type, typename x3Type, typename yType>
class MatMulSubFuseKernel {
public:
  __aicore__ inline MatMulSubFuseKernel(){};
  // Binds all global tensors, gives the matmul object a per-core workspace
  // region for its intermediate C blocks, offsets every tensor to this core's
  // tile, and sizes the vector-unit queues.
  __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR x3, GM_ADDR y,
                              GM_ADDR workspace, const TCubeTiling &tiling,
                              uint32_t x3ShapeType, TPipe *pipeIn) {
#ifdef LOG
    printf("M=%d N=%d K=%d singleCoreM=%d singleCoreN=%d baseM=%d baseN=%d\n",
           tiling.M, tiling.N, tiling.Ka, tiling.singleCoreM,
           tiling.singleCoreN, tiling.baseM, tiling.baseN);
#endif
    this->tiling = tiling;
    this->pipe = pipeIn;
    this->x3ShapeType = x3ShapeType; // NOTE(review): stored but never read in this class
    x1Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x1Type *>(x1),
                         tiling.M * tiling.Ka); // x1 is M x Ka
    x2Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x2Type *>(x2),
                         tiling.Kb * tiling.N); // x2 is Kb x N
    x3Gm.SetGlobalBuffer(reinterpret_cast<__gm__ x3Type *>(x3),
                         tiling.M * tiling.N); // x3 is a full M x N matrix on this path
    yGm.SetGlobalBuffer(reinterpret_cast<__gm__ yType *>(y),
                        tiling.M * tiling.N);
    // Each core stages its matmul output in its own singleCoreM x singleCoreN
    // slice of the user workspace.
    workspaceGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ yType *>(workspace), tiling.M * tiling.N);
    workspaceGlobal = workspaceGlobal[GetBlockIdx() * tiling.singleCoreM * tiling.singleCoreN];
    int offsetX1 = 0;
    int offsetX2 = 0;
    int offsetY = 0;
    int offsetX3 = 0;
    CalcOffset(AscendC::GetBlockIdx(), tiling, offsetX1, offsetX2, offsetY,
               offsetX3);
    // Advance every tensor to this core's tile; x3 shares y's offset.
    x1Gm = x1Gm[offsetX1];
    x2Gm = x2Gm[offsetX2];
    yGm = yGm[offsetY];
    x3Gm = x3Gm[offsetY];
// Vector-unit staging: 16 rows x 256 columns per round (see class note),
// single-buffered; inQueueMat holds one full baseM x baseN matmul block.
pipe->InitBuffer(inQueueX, 1, 16*256*sizeof(x3Type));
pipe->InitBuffer(outQueueY, 1, 16*256*sizeof(x3Type));
pipe->InitBuffer(inQueueMat, 1, tiling.baseM * tiling.baseN *sizeof(x3Type));
  }
  // Produces the 4 base blocks of this core's tile one by one and fuses the
  // x3 subtraction into the copy-out of each block.
  __aicore__ inline void Process() {
    matmulObj.SetTensorA(x1Gm);
    matmulObj.SetTensorB(x2Gm);
    matmulObj.SetWorkspace(workspaceGlobal);
    //matmulObj.IterateAll(yGm); // used to verify the correctness of the multiplication
     matmulObj.template Iterate<false>();
    for(int i=0;i<4;i++){
        auto matLocal = inQueueMat.AllocTensor<yType>();
        // Fetch the next baseM x baseN block of C into local memory.
        matmulObj.template GetTensorC<false>(matLocal);
        int idxI = i%2; // block row within the 2x2 layout
        int idxJ = i/2; // block column within the 2x2 layout
        // Element offset of this base block inside the core's y/x3 tile.
        uint32_t blockOffset = idxI * tiling.N * tiling.baseM + idxJ * tiling.baseN;
        //PROCESS 16 ROWS PER ROUND
        // Second row/column block gets the remainder of the (possibly
        // clamped) tail tile; first block is always full-size.
        uint16_t rowToProc = idxI?(this->tailM-tiling.baseM):tiling.baseM;
        uint32_t colToProc = idxJ?(this->tailN-tiling.baseN):tiling.baseN;
        uint32_t round = rowToProc / 16;
        inQueueMat.EnQue(matLocal);
        matLocal = inQueueMat.DeQue<yType>();
        matmulObj.WaitGetTensorC();
        for(int j=0;j<round;j++){
            uint32_t roundOffset = j*16*tiling.N;
            // Columns rounded up to a multiple of 8 elements.
            // NOTE(review): /8 block strides below imply 32-byte blocks of 8
            // elements, i.e. sizeof(yType) == 4 — confirm.
            uint32_t roundN = ((colToProc+7)/8)*8;
            //COPY IN: 16 rows of the x3 block, strided by the full row width N
            auto xLocal = inQueueX.AllocTensor<yType>();
            DataCopyExtParams copyParamsX3{16, static_cast<uint32_t>(roundN*sizeof(yType)), static_cast<uint32_t>((tiling.N-roundN)*sizeof(yType)),static_cast<uint32_t>(tiling.baseN-roundN)/8,0};
             DataCopyPadExtParams<yType> padParamsX3{false,0,0,static_cast<yType>(0.0)};
            DataCopyPad(xLocal, x3Gm[blockOffset+roundOffset], copyParamsX3, padParamsX3);
            inQueueX.EnQue(xLocal);
            //compute: out = C_block_rows - x3_block_rows (16 x baseN elements)
            auto outLocal = outQueueY.AllocTensor<yType>();
            xLocal = inQueueX.DeQue<yType>();
            Sub(outLocal, matLocal[j*16*tiling.baseN], xLocal, 16*tiling.baseN);
            outQueueY.EnQue(outLocal);
            outLocal = outQueueY.DeQue<yType>(); 
            // Strides in 32-byte blocks: srcStride skips the local row
            // padding, dstStride skips to the next row of y.
            DataCopyParams copyParamsY{16, static_cast<uint16_t>(roundN/8), static_cast<uint16_t>((tiling.baseN-roundN)/8), static_cast<uint16_t>((tiling.N-roundN)/8)};
            DataCopy(yGm[blockOffset+roundOffset], outLocal, copyParamsY);
            inQueueX.FreeTensor(xLocal);
            //CopyOut
            outQueueY.FreeTensor(outLocal);
        }
        inQueueMat.FreeTensor(matLocal);
    }
    matmulObj.End();
    return;
  }
  // Maps a linear block index onto an m-major 2D core grid, computes this
  // core's element offsets, and records the (clamped) tail tile dimensions in
  // tailM/tailN for Process(); informs matmulObj via SetTail when the tile is
  // smaller than singleCoreM x singleCoreN.
  __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling &tiling,
                                    int32_t &offsetX1, int32_t &offsetX2,
                                    int32_t &offsetY, int32_t &offsetX3) {
    auto mSingleBlocks = Ceiling(tiling.M, tiling.singleCoreM);
    auto mCoreIndx = blockIdx % mSingleBlocks; // row-block index
    auto nCoreIndx = blockIdx / mSingleBlocks; // column-block index
    // cal offset
    offsetX1 = mCoreIndx * tiling.Ka * tiling.singleCoreM;
    offsetX2 = nCoreIndx * tiling.singleCoreN;
    offsetY = mCoreIndx * tiling.N * tiling.singleCoreM +
              nCoreIndx * tiling.singleCoreN;
    // set tailBlock: clamp to what remains of M / N for edge cores
    this->tailM = tiling.M - mCoreIndx * tiling.singleCoreM;
    this->tailM = this->tailM < tiling.singleCoreM ? this->tailM : tiling.singleCoreM;
    this->tailN = tiling.N - nCoreIndx * tiling.singleCoreN;
    this->tailN = this->tailN < tiling.singleCoreN ? this->tailN : tiling.singleCoreN;
    if (this->tailM < tiling.singleCoreM || this->tailN < tiling.singleCoreN) {
      matmulObj.SetTail(this->tailM,this->tailN);
    }
    // cal offset OF X3: x3 is laid out like y, so it shares y's offset
    offsetX3 = offsetY;
  };

public:
  TPipe *pipe;
  // NOTE(review): tailI and tailJ are never used.
  uint32_t x3ShapeType, tailI, tailJ,tailM, tailN;
  Matmul<MatmulType<TPosition::GM, CubeFormat::ND, x1Type>,
         MatmulType<TPosition::GM, CubeFormat::ND, x2Type>,
         MatmulType<TPosition::GM, CubeFormat::ND, yType>,
         MatmulType<TPosition::GM, CubeFormat::ND, x3Type>,CFG_MDL>
      matmulObj;
  GlobalTensor<x1Type> x1Gm;
  GlobalTensor<x2Type> x2Gm;
  GlobalTensor<x3Type> x3Gm;
  GlobalTensor<yType> yGm;
  AscendC::GlobalTensor<yType> workspaceGlobal; // per-core matmul staging area
  TQue<QuePosition::VECIN, 1> inQueueX;  
  TQue<QuePosition::VECIN, 1> inQueueMat;  
  TQue<QuePosition::VECOUT, 1> outQueueY;  
  TCubeTiling tiling;
};

// Kernel entry point for y = x1 @ x2 - x3.
// Dispatch:
//   - Fused path: when x3 is a full matrix (x3ShapeType != 1) and the problem
//     is large (M >= 1024 and N >= 1024), MatMulSubFuseKernel interleaves the
//     matmul block iteration with the vector subtraction.
//   - Split path: otherwise run the plain per-core matmul, SyncAll so every
//     core's y tile is written, then a row-wise subtraction pass (SubKernel).
// NOTE(review): the two branches order Init and REGIST_MATMUL_OBJ differently
// (fused: Init then REGIST; split: REGIST then Init) — confirm both orderings
// are valid for the Matmul object lifecycle.
extern "C" __global__ __aicore__ void mat_mul_sub(GM_ADDR x1, GM_ADDR x2,
                                                  GM_ADDR x3, GM_ADDR y,
                                                  GM_ADDR workspace,
                                                  GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  TPipe pipe;
  if (tiling_data.x3ShapeType != 1 && tiling_data.cubeTilingData.M >= 1024 &&
      tiling_data.cubeTilingData.N >= 1024) {
      MatMulSubFuseKernel<DTYPE_X1, DTYPE_X2, DTYPE_Y, DTYPE_Y> op;
      op.Init(x1, x2, x3, y, workspace, tiling_data.cubeTilingData,
          tiling_data.x3ShapeType, &pipe);
      REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), op.matmulObj,
                    &tiling_data.cubeTilingData);
      op.Process();
      return;
    // SubKernelUB<DTYPE_X3, DTYPE_Y> subOp;
    // subOp.Init(x3, y, tiling_data.nAcores, tiling_data.nIterAcore,
    //            tiling_data.tailNElemA, tiling_data.nElemPerIter,
    //            tiling_data.nIterBcore, tiling_data.tailNElemB,
    //            tiling_data.nElemAcore, tiling_data.nElemBcore, &pipe);
    // subOp.Process();
  } else {
      MatMulSubKernel<DTYPE_X1, DTYPE_X2, DTYPE_Y, DTYPE_Y> op;
      REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), op.matmulObj,
                    &tiling_data.cubeTilingData);
      op.Init(x1, x2, x3, y, workspace, tiling_data.cubeTilingData,
          tiling_data.x3ShapeType, &pipe);
      op.Process();
      // All cores must finish writing y = x1 @ x2 before any core starts
      // the subtraction pass, which uses a different row partitioning.
      SyncAll();
      SubKernel<DTYPE_X3, DTYPE_Y> subOp;
      subOp.Init(x3, y, tiling_data.cubeTilingData, tiling_data.x3ShapeType,
               tiling_data.nBigCore, tiling_data.bigCoreRows,
               tiling_data.smallCoreRows, &pipe);
      subOp.Process();
  }
}