#include "kernel_operator.h"
using namespace AscendC;
// Number of ping-pong buffers per queue. 2 enables double buffering:
// each logical tile is split into two halves so copy-in of one half
// overlaps with compute/copy-out of the other.
constexpr int32_t BUFFER_NUM = 2;
// Tiled, double-buffered kernel computing the gradient of MSE loss:
//   y = dout * cof * (predict - label)
// where cof = 2/N for reduction 'mean' (N = totalLength) and cof = 2 for 'sum'.
class KernelMseLossGrad
{
public:
  __aicore__ inline KernelMseLossGrad() {}
  /**
   * @brief Binds global-memory buffers, records tiling parameters and
   *        allocates the in/out queues.
   * @param predict / label / dout  input GM addresses (same dtype DTYPE_PREDICT)
   * @param y                       output GM address (DTYPE_Y)
   * @param blockLength   number of elements this core processes
   * @param tileNum       number of tiles per core (before double buffering)
   * @param tileLength    elements per tile (before the /BUFFER_NUM split)
   * @param lasttileLength elements actually carried by the final tile
   * @param typeKey       dtype discriminator from tiling (stored, unused here)
   * @param reduction     0 == 'mean', 1 == 'sum'; anything else asserts
   * @param totalLength   total element count across all cores (for 'mean' cof)
   */
  __aicore__ inline void Init(GM_ADDR predict, GM_ADDR label, GM_ADDR dout, GM_ADDR y, uint32_t blockLength,
                              uint32_t tileNum, uint32_t tileLength,
                              uint32_t lasttileLength,
                              uint32_t typeKey, uint32_t reduction, uint32_t totalLength)
  {
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    this->typeKey = typeKey;
    if (reduction != 0 && reduction != 1) {
      ASSERT(false && "reduction must be in ['sum','mean']");
    }
    this->reduction = reduction;
    // d(MSE)/d(predict): 2*(p-l)/N for 'mean', 2*(p-l) for 'sum'.
    if (reduction == 0) {
      this->cof = (float)2.0 / totalLength;
    } else {
      this->cof = 2.0;
    }

    this->blockLength = blockLength;
    // FIX: original source fused this assignment with the following ASSERT
    // (missing statement separator); restore the two statements.
    this->tileNum = tileNum;
    ASSERT(tileNum != 0 && "tile num can not be zero!");
    // With double buffering each enqueued chunk is half a logical tile.
    this->tileLength = tileLength / BUFFER_NUM;
    this->lasttileLength = lasttileLength;
    predictGm.SetGlobalBuffer((__gm__ DTYPE_PREDICT *)predict,
                        this->blockLength);
    labelGm.SetGlobalBuffer((__gm__ DTYPE_PREDICT *)label,
                        this->blockLength);
    doutGm.SetGlobalBuffer((__gm__ DTYPE_PREDICT *)dout,
                        this->blockLength);
    yGm.SetGlobalBuffer((__gm__ DTYPE_Y *)y,
                        this->blockLength);

    pipe.InitBuffer(inQueueP, BUFFER_NUM, this->tileLength * sizeof(DTYPE_PREDICT));
    pipe.InitBuffer(inQueueL, BUFFER_NUM, this->tileLength * sizeof(DTYPE_PREDICT));
    pipe.InitBuffer(inQueueD, BUFFER_NUM, this->tileLength * sizeof(DTYPE_PREDICT));
    // NOTE(review): sized with sizeof(DTYPE_PREDICT) although CopyOut dequeues
    // DTYPE_Y — assumes the two dtypes are identical; confirm against tiling.
    pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(DTYPE_PREDICT));
  }
  // Drives the standard 3-stage pipeline over every half-tile chunk.
  __aicore__ inline void Process()
  {
    int32_t loopCount = this->tileNum * BUFFER_NUM;
    for (int32_t i = 0; i < loopCount; i++)
    {
      CopyIn(i);
      Compute(i);
      CopyOut(i);
    }
  }

private:
  // Stage 1: copy one chunk of predict/label/dout from GM into local queues.
  // DataCopy always moves a full tileLength; for the tail, the GM start
  // address is shifted BACK by (tileLength - lasttileLength) so the copy
  // stays in bounds while re-reading some already-processed elements.
  __aicore__ inline void CopyIn(int32_t progress)
  {
    LocalTensor<DTYPE_PREDICT> pLocal = inQueueP.AllocTensor<DTYPE_PREDICT>();
    LocalTensor<DTYPE_PREDICT> lLocal = inQueueL.AllocTensor<DTYPE_PREDICT>();
    LocalTensor<DTYPE_PREDICT> dLocal = inQueueD.AllocTensor<DTYPE_PREDICT>();

    if (BUFFER_NUM == 1) {
      if (progress == this->tileNum - 1) {
        if (progress == 0) {
          // Only one tile in total: copy from offset 0; tileLength is the
          // actual chunk size.
          DataCopy(pLocal[0], predictGm[0], this->tileLength);
          DataCopy(lLocal[0], labelGm[0], this->tileLength);
          DataCopy(dLocal[0], doutGm[0], this->tileLength);
        } else {
          // Last tile: shift the start address back by
          // (tileLength - lasttileLength) to keep the copy in bounds.
          DataCopy(
              pLocal[0],
              predictGm[(progress - 1) * this->tileLength + this->lasttileLength],
              this->tileLength);
          DataCopy(
              lLocal[0],
              labelGm[(progress - 1) * this->tileLength + this->lasttileLength],
              this->tileLength);
          DataCopy(
              dLocal[0],
              doutGm[(progress - 1) * this->tileLength + this->lasttileLength],
              this->tileLength);
        }
      } else {
        DataCopy(pLocal[0], predictGm[progress * this->tileLength],
                 this->tileLength);
        DataCopy(lLocal[0], labelGm[progress * this->tileLength],
                 this->tileLength);
        DataCopy(dLocal[0], doutGm[progress * this->tileLength],
                 this->tileLength);
      }
    }
    if (BUFFER_NUM == 2) {
      // With double buffering the input is split into 2 equal halves, so each
      // chunk is half the non-double-buffered tile; the start addresses of the
      // last TWO chunks therefore both need the back-shift treatment.
      if ((progress == (this->tileNum * BUFFER_NUM - 2)) ||
          (progress == (this->tileNum * BUFFER_NUM - 1))) {
        // Chunk size is tileLength/2 of the original tile. The second-to-last
        // chunk's start moves back by (tileLength - lasttileLength); the last
        // chunk's start is derived from it.
        DataCopy(
            pLocal[0],
            predictGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
            (this->tileLength));
        DataCopy(
            lLocal[0],
            labelGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
            (this->tileLength));
        DataCopy(
            dLocal[0],
            doutGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
            (this->tileLength));
      }
      else {
        DataCopy(pLocal[0], predictGm[progress * (this->tileLength)],
                 (this->tileLength));
        DataCopy(lLocal[0], labelGm[progress * (this->tileLength)],
                 (this->tileLength));
        DataCopy(dLocal[0], doutGm[progress * (this->tileLength)],
                 (this->tileLength));
      }
    }
    inQueueP.EnQue(pLocal);
    inQueueL.EnQue(lLocal);
    inQueueD.EnQue(dLocal);
  }
  // Stage 2: y = dout * cof * (predict - label), computed in place on the
  // label buffer to save a temporary.
  __aicore__ inline void Compute(int32_t progress)
  {
    LocalTensor<DTYPE_PREDICT> pLocal = inQueueP.DeQue<DTYPE_PREDICT>();
    LocalTensor<DTYPE_PREDICT> lLocal = inQueueL.DeQue<DTYPE_PREDICT>();
    LocalTensor<DTYPE_PREDICT> yLocal = outQueueY.AllocTensor<DTYPE_PREDICT>();
    LocalTensor<DTYPE_PREDICT> dLocal = inQueueD.DeQue<DTYPE_PREDICT>();
    // lLocal <- predict - label; then scale by cof; then multiply by dout.
    Sub(lLocal, pLocal, lLocal, this->tileLength);
    Muls(lLocal, lLocal, (DTYPE_PREDICT)this->cof, this->tileLength);
    Mul(yLocal, lLocal, dLocal, this->tileLength);
    outQueueY.EnQue<DTYPE_PREDICT>(yLocal);
    inQueueP.FreeTensor(pLocal);
    inQueueL.FreeTensor(lLocal);
    inQueueD.FreeTensor(dLocal);
  }
  // Stage 3: copy the result chunk back to GM, mirroring CopyIn's tail-chunk
  // address back-shift so output offsets line up with the shifted inputs.
  __aicore__ inline void CopyOut(int32_t progress)
  {
    LocalTensor<DTYPE_Y> yLocal = outQueueY.DeQue<DTYPE_Y>();
    if (BUFFER_NUM == 1) {
      if (progress == this->tileNum - 1) {
        if (progress == 0) {
          // Only one tile: write from offset 0; tileLength is the chunk size.
          DataCopy(yGm[0], yLocal, this->tileLength);
        } else {
          // Last tile: shift the start address back by
          // (tileLength - lasttileLength).
          DataCopy(
              yGm[(progress - 1) * this->tileLength + this->lasttileLength],
              yLocal, this->tileLength);
        }
      } else {
        DataCopy(yGm[progress * this->tileLength], yLocal,
                 this->tileLength);
      }
    }
    if (BUFFER_NUM == 2) {
      // Same last-two-chunk address adjustment as in CopyIn.
      if ((progress == (this->tileNum * BUFFER_NUM - 2)) ||
          (progress == (this->tileNum * BUFFER_NUM - 1))) {
        DataCopy(
            yGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
            yLocal, (this->tileLength));
      }

      else {
        DataCopy(yGm[progress * (this->tileLength)], yLocal,
                 (this->tileLength));
      }
    }
    outQueueY.FreeTensor(yLocal);
  }

private:
  TPipe pipe;
  TQue<QuePosition::VECIN, BUFFER_NUM> inQueueP, inQueueL, inQueueD;
  TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
  GlobalTensor<DTYPE_PREDICT> predictGm;
  GlobalTensor<DTYPE_PREDICT> labelGm;
  GlobalTensor<DTYPE_PREDICT> doutGm;
  GlobalTensor<DTYPE_Y> yGm;
  uint32_t blockLength;     // elements handled by this core
  uint32_t tileNum;         // tiles per core (pre double-buffer split)
  uint32_t tileLength;      // elements per enqueued chunk (post split)
  uint32_t lasttileLength;  // elements actually valid in the final tile
  uint32_t typeKey;         // dtype discriminator from tiling (unused here)
  uint32_t reduction;       // 0 == 'mean', 1 == 'sum'
  DTYPE_Y cof;              // gradient scale factor
};

// Device entry point: unpacks the tiling blob and runs the MSE-loss-grad
// pipeline on this core.
extern "C" __global__ __aicore__ void mse_loss_grad(GM_ADDR predict, GM_ADDR label, GM_ADDR dout, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  KernelMseLossGrad op;
  // Wire up GM buffers and tiling parameters, then execute all tiles.
  op.Init(predict, label, dout, y,
          tiling_data.blockLength, tiling_data.tileNum,
          tiling_data.tileLength, tiling_data.lasttileLength,
          tiling_data.typeKey, tiling_data.reduction,
          tiling_data.totalLength);
  op.Process();
}
#ifndef __CCE_KT_TEST__
// Host-side launch helper (excluded from kernel unit-test builds): forwards
// the argument list to the device kernel with the given block dim and stream.
// workspace/tiling are opaque device buffers prepared by the host framework.
void mse_loss_grad_do(uint32_t blockDim, void* l2ctrl, void* stream,
                       uint8_t* predict, uint8_t* label, uint8_t* dout, uint8_t* y,
                       uint8_t* workspace, uint8_t* tiling) {
  mse_loss_grad<<<blockDim, l2ctrl, stream>>>(predict, label, dout, y, workspace, tiling);
}
#endif