#include "kernel_operator.h"
// tensor num for each queue
constexpr int32_t BUFFER_NUM = 2;
constexpr int32_t padding_length = 4;

// Kernel implementing ReplicationPad2d: pads each 2-D unit of the input by
// replicating its edge rows/columns.
// NOTE(review): the class is named KernelAdd but implements replication
// padding; renaming would also require touching the kernel entry point.
template<typename TYPE_X, typename TYPE_Y, typename TYPE_Z> class KernelAdd {
public:
    __aicore__ inline KernelAdd() {}
    // Binds global buffers and derives per-core state from the host tiling.
    //   x              - input tensor in global memory
    //   y              - paddings tensor (exactly 4 values: left, right, top, bottom)
    //   z              - output tensor in global memory
    //   *CoreDataNum / *TileNum / *TailDataNum / tailBlockNum
    //                  - big/small-core split produced by the host tiling func
    //   rows, columns  - logical 2-D shape of one input unit
    //   unitCount      - number of 2-D units (e.g. N*C batches)
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z, uint32_t smallCoreDataNum,
                                uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
                                uint32_t finalSmallTileNum, uint32_t tileDataNum, 
                                uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
                                uint32_t tailBlockNum, 
                                uint32_t rows, uint32_t columns, uint32_t unitCount) 
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        const uint32_t blockIdx = AscendC::GetBlockIdx();
        // Element offset of this core's slice within the flattened input.
        uint32_t globalBufferIndex = bigCoreDataNum * blockIdx;
        this->tileDataNum = tileDataNum;
        if (blockIdx < tailBlockNum) {
          // "Big" cores each process bigCoreDataNum elements.
          this->coreDataNum = bigCoreDataNum;
          this->tileNum = finalBigTileNum;
          this->tailDataNum = bigTailDataNum;
        }
        else {
          // "Small" cores process less; correct the offset for the extra
          // elements the big cores ahead of us consumed.
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = finalSmallTileNum;
          this->tailDataNum = smallTailDataNum;
          globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
        }

        this->rows = rows;
        this->columns = columns;
        this->unitCount = unitCount;

        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + globalBufferIndex, this->coreDataNum);
        // The paddings tensor holds exactly padding_length (4) values shared
        // by every core, so it must NOT be shifted by the per-core offset.
        // (The original offset it by globalBufferIndex, making every core
        // except block 0 read past the 4-element array.)
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y, padding_length);
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));

        // Paddings order: left, right, top, bottom.
        this->left = static_cast<int32_t>(yGm.GetValue(0));
        this->right = static_cast<int32_t>(yGm.GetValue(1));
        this->top = static_cast<int32_t>(yGm.GetValue(2));
        this->bottom = static_cast<int32_t>(yGm.GetValue(3));
        // Total number of output elements across all padded units.
        uint32_t resultDataNum = this->unitCount
            * (this->rows + this->top + this->bottom)
            * (this->columns + this->left + this->right);

        // NOTE(review): offsetting z by the INPUT element offset looks
        // dimensionally wrong for multi-core runs (output rows are longer
        // than input rows); kept as-is pending confirmation against the
        // host tiling — the original code carried the same TODO.
        zGm.SetGlobalBuffer((__gm__ TYPE_Z*)z + globalBufferIndex, resultDataNum);
        // NOTE(review): assumes tileDataNum >= left + columns + right so one
        // padded row fits in the output UB buffer — confirm with host tiling.
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Z));
    }
    // Walks this core's tiles; each tile is handled one input row at a time.
    // Assumes tileDataNum and tailDataNum are whole multiples of `columns`
    // (presumably guaranteed by the host tiling — TODO confirm).
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;

        for (int32_t i = 0; i < loopCount; i++) {
          if (i == this->tileNum - 1) {
            // The last tile may be short.
            this->processDataNum = this->tailDataNum;
          }
          // Number of complete input rows contained in this tile.
          uint32_t tileRows = this->processDataNum / this->columns;
          for (uint32_t j = 0; j < tileRows; j++) {
            CopyIn(i, j);
            Compute(i, j);
            CopyOut(i, j);
          }
        }
    }

private:
    // Loads one input row (`columns` elements) into UB. Element-wise
    // GetValue/SetValue is used because a row start is generally not
    // 32-byte aligned, which DataCopy would require.
    __aicore__ inline void CopyIn(int32_t progress, int32_t j)
    {
      AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
      uint32_t startIndex = progress * this->tileDataNum + j * this->columns;
      for (uint32_t i = 0; i < this->columns; i++) {
        xLocal.SetValue(i, xGm.GetValue(startIndex + i));
      }
      inQueueX.EnQue(xLocal);
    }
    // Builds one padded output row in UB:
    //   [left copies of first element][the row itself][right copies of last].
    // No arithmetic is performed, so the half and float paths are identical;
    // the original's duplicated `if constexpr` branches were merged.
    __aicore__ inline void Compute(int32_t progress, int32_t j)
    {
      AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
      AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
      // Replicate the left edge.
      for (int32_t i = 0; i < this->left; i++) {
        zLocal.SetValue(i, xLocal.GetValue(0));
      }
      // Copy the row body.
      for (uint32_t i = 0; i < this->columns; i++) {
        zLocal.SetValue(i + this->left, xLocal.GetValue(i));
      }
      // Replicate the right edge.
      for (int32_t i = 0; i < this->right; i++) {
        zLocal.SetValue(i + this->columns + this->left, xLocal.GetValue(this->columns - 1));
      }
      outQueueZ.EnQue<TYPE_Z>(zLocal);
      inQueueX.FreeTensor(xLocal);
    }
    // Stores the padded row to global memory. The first row of a unit is
    // also duplicated into the `top` padding rows and the last row into the
    // `bottom` padding rows (both when rows == 1 — the original wrote the
    // bottom padding in an else-branch that was unreachable for rows == 1).
    __aicore__ inline void CopyOut(int32_t progress, int32_t j)
    {
      AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();  
      // Padded row length and padded unit (matrix) size.
      uint32_t newLength = this->left + this->right + this->columns;
      uint32_t newMatrixLength = (this->rows + this->top + this->bottom) * newLength;
      // Absolute (per-core) input row counter for this padded row.
      uint32_t globalRow = j + (progress * this->tileDataNum) / this->columns;
      // realJ: row index within its 2-D unit; k: units already completed.
      uint32_t realJ = globalRow % this->rows;
      uint32_t k = globalRow / this->rows;

      // Output rows this padded row is replicated into.
      uint32_t firstRow = (realJ == 0) ? 0 : (realJ + this->top);
      uint32_t lastRow = (realJ == this->rows - 1) ? (realJ + this->top + this->bottom)
                                                   : (realJ + this->top);
      for (uint32_t r = firstRow; r <= lastRow; r++) {
        uint32_t startIndex = newLength * r + k * newMatrixLength;
        // Element-wise stores: output rows are generally not 32B-aligned,
        // so DataCopy cannot be used here.
        for (uint32_t l = 0; l < newLength; l++) {
          zGm.SetValue(startIndex + l, zLocal.GetValue(l));
        }
      }
      outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    // Unused inQueueY/tmp buffers from the original were removed; the
    // paddings tensor is read directly from GM via scalar GetValue.
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Y> yGm;   // 4-element paddings tensor
    AscendC::GlobalTensor<TYPE_Z> zGm;
    uint32_t coreDataNum;                // elements this core processes
    uint32_t tileNum;                    // tiles per core
    uint32_t tileDataNum;                // elements per (non-tail) tile
    uint32_t tailDataNum;                // elements in the last tile
    uint32_t processDataNum;             // elements in the current tile
    int32_t top = 0, bottom = 0, left = 0, right = 0;  // padding sizes
    uint32_t rows, columns;              // 2-D unit shape
    uint32_t unitCount;                  // number of 2-D units
};


// Kernel entry point: ReplicationPad2d over input `x` using the 4-element
// `paddings` tensor (left, right, top, bottom), writing the padded result
// into `y`. Tiling parameters are decoded from the host-provided buffer.
extern "C" __global__ __aicore__ void replication_pad2d(GM_ADDR x, GM_ADDR paddings, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(td, tiling);
    KernelAdd<DTYPE_X, DTYPE_PADDINGS, DTYPE_Y> kernel;
    kernel.Init(x, paddings, y,
                td.smallCoreDataNum, td.bigCoreDataNum,
                td.finalBigTileNum, td.finalSmallTileNum,
                td.tileDataNum, td.smallTailDataNum,
                td.bigTailDataNum, td.tailBlockNum,
                td.rows, td.columns, td.unitCount);
    kernel.Process();
}