#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2; // Double buffering: 2 buffers per in/out queue so CopyIn/Compute/CopyOut can overlap.

// LayerNorm kernel over a 16384 x 1024 float matrix, normalized per row:
//   z = (x - mean(x)) / sqrt(var(x) + eps) * gamma + beta
// Work is split across 48 cores; each core further tiles its rows into
// datablocks of tileLoop rows that are double-buffered through TQues.
class KernelLayerNorm {
 public:
  __aicore__ inline KernelLayerNorm() {}
  // Computes this core's row range and global-memory offset, binds the
  // GlobalTensors, and allocates all queue/buffer memory.
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta,
                              GM_ADDR z) {
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!"); // GetBlockNum(): number of cores configured for this task.
    /**
     * Row split: 16384 rows do not divide evenly by 48 cores, so cores with
     * GetBlockIdx() in [0, 15] process 342 rows (342*1024 elements) each and
     * cores in [16, 47] process 341 rows (341*1024 elements) each
     * (16*342 + 32*341 = 16384).
     */
    if (GetBlockIdx() < this->blockPivot) { // GetBlockIdx(): this core's index, used for multi-core offset computation.
      this->rowNum = this->rowNumSp;
    }
    this->leftRow = this->rowNum % this->tileLoop; // Remainder rows: 341 % 5 = 1 or 342 % 5 = 2; handled by an extra tail iteration.
    this->blockLength = this->rowNum * this->rowLength; // Total elements this core processes: 341*1024 or 342*1024.
    uint32_t offset = 0; // Element offset of this core's first row in global memory.
    if (GetBlockIdx() < this->blockPivot) {
      offset = this->blockLength * GetBlockIdx(); // Cores [0, 15]: all earlier cores also have 342 rows, so offset = blockIdx * blockLength.
    } else {
      /**
       * Cores [16, 47]: the first 16 cores hold 342 rows each, this core and
       * its later peers hold 341. E.g. for blockIdx = 18:
       *   342*1024*16 + 341*1024*2
       * = 1024*((341+1)*16 + 341*2) = 1024*(341*18 + 16)
       * = 341*1024*18 + 1024*16 = blockLength*blockIdx + rowLength*blockPivot.
       */
      offset = this->blockLength * GetBlockIdx() +
               this->rowLength * this->blockPivot;
    }
    
    // Bind per-core slices of the input/output GlobalTensors at this core's offset.
    xGm.SetGlobalBuffer((__gm__ float *)x + offset, this->blockLength); 
    zGm.SetGlobalBuffer((__gm__ float *)z + offset, this->blockLength);

    // gamma/beta are one row (1024 floats) shared by every core — no offset.
    gammaGm.SetGlobalBuffer((__gm__ float *)gamma, this->rowLength);
    betaGm.SetGlobalBuffer((__gm__ float *)beta, this->rowLength);

    // Allocate queue/buffer memory through the pipe.
    pipe.InitBuffer(queueX, BUFFER_NUM, this->tileLength * sizeof(float)); // Input tile: tileLoop rows of x, double buffered.
    pipe.InitBuffer(queueZ, BUFFER_NUM, this->tileLength * sizeof(float)); // Output tile: tileLoop rows of z, double buffered.

    pipe.InitBuffer(queueGamma, 1, this->rowLength * sizeof(float)); // One row of gamma (reloaded every iteration — see CopyIn).
    pipe.InitBuffer(queueBeta, 1, this->rowLength * sizeof(float)); // One row of beta.

    pipe.InitBuffer(tmpBuffer1, this->tileLength * sizeof(float)); // Scratch for intermediate per-row results.
    pipe.InitBuffer(tmpBuffer2, this->tileLength * sizeof(float)); // Second scratch (ReduceSum needs a separate work tensor).
  }
  // Pipeline driver: loopCount full tiles of tileLoop rows, plus one tail
  // iteration of leftRow rows when the core's row count is not a multiple
  // of tileLoop.
  __aicore__ inline void Process() {
    for (int32_t i = 0; i < this->loopCount; i++) { // 68 datablocks per core (equivalent to TILE_NUM = 68).
      CopyIn(i, this->tileLoop); // tileLoop = 5 rows per datablock.
      Compute(i, this->tileLoop);
      CopyOut(i, this->tileLoop);
    }
    if (this->leftRow > 0) { // Tail: rows left over because rowNum is not divisible by tileLoop.
      CopyIn(this->loopCount, this->leftRow);
      Compute(this->loopCount, this->leftRow);
      CopyOut(this->loopCount, this->leftRow);
    }
  }

 private:
  // Stage 1: copy one datablock (rowNum rows) of x — plus gamma/beta — from
  // global memory into local tensors and enqueue them for Compute.
  __aicore__ inline void CopyIn(int32_t progress, int32_t rowNum) {
    LocalTensor<float> xLocal = queueX.AllocTensor<float>();
    LocalTensor<float> gammaLocal = queueGamma.AllocTensor<float>();
    LocalTensor<float> betaLocal = queueBeta.AllocTensor<float>();
    // Datablock i covers rows [i*tileLoop, i*tileLoop + rowNum), i.e. GM
    // elements starting at progress * tileLength:
    // (block0, 0) -> (block1, 1*5*1024) -> ... -> (block67, 67*5*1024), 5*1024 elements each.
    DataCopy(xLocal, xGm[progress * this->tileLength],
             this->rowLength * rowNum);
    // NOTE(review): gamma/beta are loop-invariant but re-copied every
    // iteration; hoisting them out of the loop looks possible — verify
    // against the queue/pipe lifetime rules before changing.
    DataCopy(gammaLocal, gammaGm[0], this->rowLength);
    DataCopy(betaLocal, betaGm[0], this->rowLength);
    queueX.EnQue(xLocal);
    queueGamma.EnQue(gammaLocal);
    queueBeta.EnQue(betaLocal);
  }

  // Stage 2: LayerNorm each of the rowNum rows in the current datablock.
  // Per row j (length N = rowLength = 1024):
  //   mean = sum(x)/N, var = sum((x-mean)^2)/N,
  //   z = (x - mean) / sqrt(var + eps) * gamma + beta.
  __aicore__ inline void Compute(int32_t progress, int32_t rowNum) {
    LocalTensor<float> xLocal = queueX.DeQue<float>();
    LocalTensor<float> gammaLocal = queueGamma.DeQue<float>();
    LocalTensor<float> betaLocal = queueBeta.DeQue<float>();

    LocalTensor<float> tmpTensor1 = tmpBuffer1.Get<float>();
    LocalTensor<float> tmpTensor2 = tmpBuffer2.Get<float>();
    LocalTensor<float> zLocal = queueZ.AllocTensor<float>();

    for (size_t j = 0; j < rowNum; j++) {
      // Row-j views into the tile buffers (each row is rowLength floats).
      LocalTensor<float> xLocalj = xLocal[j * this->rowLength]; 
      LocalTensor<float> zLocalj = zLocal[j * this->rowLength];
      LocalTensor<float> tmpTensor1j = tmpTensor1[j * this->rowLength];
      LocalTensor<float> tmpTensor2j = tmpTensor2[j * this->rowLength];

      // Sum the row; tmpTensor1j is ReduceSum's work buffer, result lands in
      // tmpTensor2j[0]. E.g. x = [1,1,...,1] -> tmpTensor2j = [1024, ...].
      ReduceSum<float>(tmpTensor2j, xLocalj, tmpTensor1j, this->rowLength); 
      // Scale the sum by mfactor = -1/1024 to get -mean (count 1: only the
      // reduce result in element 0 matters). [1024,...] -> [-1,...].
      Muls(tmpTensor1j, tmpTensor2j, this->mfactor, 1);
      // Broadcast-add -mean to the row: tmpTensor2j = x - mean.
      // NOTE(review): GetValue(0) reads a value just produced by a vector op;
      // presumably the framework orders this correctly here — confirm whether
      // an explicit pipe barrier is required.
      Adds(tmpTensor2j, xLocalj, tmpTensor1j.GetValue(0), this->rowLength);
      // Square the deviations elementwise: xLocalj = (x - mean)^2.
      Mul(xLocalj, tmpTensor2j, tmpTensor2j, this->rowLength);
      // Scale by factor = 1/1024: tmpTensor1j = (x - mean)^2 / N.
      Muls(tmpTensor1j, xLocalj, this->factor, this->rowLength);
      // Reduce to the variance in xLocalj[0] (zLocalj is the work buffer).
      ReduceSum<float>(xLocalj, tmpTensor1j, zLocalj, this->rowLength);
      // var + eps (scalar, element 0 only).
      Adds(tmpTensor1j, xLocalj, this->eps, 1);
      // ln(var + eps).
      Ln(zLocalj, tmpTensor1j, 1);
      // -0.5 * ln(var + eps).
      Muls(tmpTensor1j, zLocalj, -0.5f, 1);
      // exp(-0.5 * ln(var+eps)) = 1/sqrt(var + eps) = rstd, in xLocalj[0].
      Exp(xLocalj, tmpTensor1j, 1);
      // Normalize: tmpTensor1j = (x - mean) * rstd.
      Muls(tmpTensor1j, tmpTensor2j, xLocalj.GetValue(0), this->rowLength);

      // Affine transform: z = normalized * gamma + beta.
      Mul(tmpTensor2j, tmpTensor1j, gammaLocal, this->rowLength);
      Add(zLocalj, tmpTensor2j, betaLocal, this->rowLength);
    }

    queueZ.EnQue<float>(zLocal);
    queueGamma.FreeTensor(gammaLocal);
    queueBeta.FreeTensor(betaLocal);
    queueX.FreeTensor(xLocal);
  }

  // Stage 3: copy the computed datablock back to global memory at the same
  // offset CopyIn read from.
  __aicore__ inline void CopyOut(int32_t progress, int32_t rowNum) {
    LocalTensor<float> zLocal = queueZ.DeQue<float>();

    DataCopy(zGm[progress * this->tileLength], zLocal,
             rowNum * this->rowLength);

    queueZ.FreeTensor(zLocal);
  }

 private:
  TPipe pipe;
  TBuf<QuePosition::VECCALC> tmpBuffer1, tmpBuffer2;
  TQue<QuePosition::VECIN, BUFFER_NUM> queueX;
  TQue<QuePosition::VECIN, 1> queueGamma, queueBeta;
  TQue<QuePosition::VECOUT, BUFFER_NUM> queueZ;
  GlobalTensor<float> xGm;
  GlobalTensor<float> gammaGm;
  GlobalTensor<float> betaGm;
  GlobalTensor<float> zGm;

  uint32_t blockLength = 0; // Elements this core processes (rowNum * rowLength).
  uint32_t leftRow = 0; // Tail rows not covered by the full tiles.
  uint32_t rowNum = 341; // Rows per core for the later 32 cores: 16384/48 ≈ 341.
  uint32_t rowNumSp = 342; // Rows per core for the first 16 cores; the 16
  // leftover rows (16384 - 48*341) are spread one each over cores [0, 15],
  // so 16 cores handle 342*1024 elements and 32 cores handle 341*1024.
  uint32_t rowLength = 1024; // Elements per row.
  uint32_t blockPivot = 16; // Cores below this index take 342 rows; the rest take 341.
  uint32_t loopCount = 68; // Full datablocks per core (TILE_NUM = 68).
  uint32_t tileLoop = 5; // Rows per datablock: 341/68 ≈ 342/68 ≈ 5.
  uint32_t tileLength = 5 * 1024; // Elements per datablock (tileLoop * rowLength).
  
  float factor = 0.0009765625; // 1/1024 = 1/rowLength, for the mean/variance scaling.
  float mfactor = -0.0009765625; // -1/1024, yields -mean directly from the row sum.
  float eps = 1e-5; // Numerical-stability epsilon added to the variance.
};

// Device entry point: constructs the LayerNorm kernel object on each core,
// binds the global-memory operands, then runs the tiled pipeline.
extern "C" __global__ __aicore__ void layer_norm_custom(GM_ADDR x,
                                                        GM_ADDR gamma,
                                                        GM_ADDR beta,
                                                        GM_ADDR res_out) {
  KernelLayerNorm kernel;
  kernel.Init(x, gamma, beta, res_out);
  kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// call of kernel function
// Host-side launch wrapper (NPU build only): dispatches the kernel with
// blockDim cores on the given stream. l2ctrl is forwarded untouched to the
// <<<>>> launch configuration.
void layer_norm_custom_do(uint32_t blockDim, void *l2ctrl, void *stream,
                          uint8_t *x, uint8_t *gamma, uint8_t *beta,
                          uint8_t *res_out) {
  layer_norm_custom<<<blockDim, l2ctrl, stream>>>(x, gamma, beta, res_out);
}
#endif