#include "kernel_operator.h"
using namespace AscendC;
// Number of buffers per queue (1 = single buffering; 2 would enable double buffering).
constexpr int32_t BUFFER_NUM = 1;
// MSE loss kernel: y = reduce((predict - label)^2) with reduction in
// {'mean' (0), 'sum' (1), 'none' (2)}; supports float32 (typeKey == 0)
// and float16 (typeKey != 0, computed in float32 internally).
class KernelMseLoss
{
public:
  __aicore__ inline KernelMseLoss() {}

  // Initialize GM views, queues and tiling parameters.
  //
  // predict/label/y : GM addresses of inputs and output. For fp16 reductions,
  //                   y is additionally viewed as float scratch holding the
  //                   running partial sum (only yLength elements are touched).
  // blockLength     : number of elements handled by this block.
  // tileNum         : number of tiles (must be non-zero).
  // tileLength      : elements per tile, split across BUFFER_NUM buffers.
  // lasttileLength  : valid element count of the final tile.
  // typeKey         : 0 -> float32 inputs, otherwise float16.
  // reduction       : 0 -> 'mean', 1 -> 'sum', 2 -> 'none'.
  // totalLength     : total element count (denominator for 'mean').
  __aicore__ inline void Init(GM_ADDR predict, GM_ADDR label, GM_ADDR y, uint32_t blockLength,
                              uint32_t tileNum, uint32_t tileLength,
                              uint32_t lasttileLength,
                              uint32_t typeKey, uint32_t reduction, uint32_t totalLength)
  {
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    // Fix: the original fused this assert into the tileNum assignment with a
    // missing semicolon ("this->tileNum = tileNum ASSERT(...)"); they must be
    // two separate statements.
    ASSERT(tileNum != 0 && "tile num can not be zero!");
    this->typeKey = typeKey;
    this->totalLength = totalLength;
    if (reduction != 0 && reduction != 1 && reduction != 2) {
      ASSERT(false && "reduction must be in ['sum','mean','none']");
    }
    this->reduction = reduction;

    this->blockLength = blockLength;
    this->tileNum = tileNum;
    this->tileLength = tileLength / BUFFER_NUM;
    this->lasttileLength = lasttileLength;

    // Reciprocal of the element count; kept for the 'mean' path (the current
    // code divides the scalar accumulator directly instead).
    this->divDown = (float)1.0 / this->totalLength;
    if (reduction != 2) {
      // 'mean' / 'sum': scalar result accumulated through yGm[0].
      if (this->typeKey == 0) {
        this->yLength = 8;  // one 32-byte block of floats for the scalar result
        predictGm.SetGlobalBuffer((__gm__ float *)predict,
                          this->blockLength);
        labelGm.SetGlobalBuffer((__gm__ float *)label,
                          this->blockLength);
        yGm.SetGlobalBuffer((__gm__ float *)y,
                          this->yLength);
        pipe.InitBuffer(inQueueP, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(inQueueL, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(inQueueT, BUFFER_NUM, this->yLength * sizeof(float));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->yLength * sizeof(float));
        pipe.InitBuffer(calcBuf, BUFFER_NUM * this->tileLength * sizeof(float));
      } else {
        this->yLength = 16;  // one 32-byte block of halves
        // NOTE(review): y (a half buffer) is re-viewed as float scratch to
        // carry the float32 partial sum between tiles; only yLength elements
        // are ever touched, so the oversized view length is harmless.
        yGm.SetGlobalBuffer((__gm__ float *)y,
                          this->blockLength);
        pipe.InitBuffer(inQueueP, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(inQueueL, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(inQueueT, BUFFER_NUM, this->yLength * sizeof(float));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->yLength * sizeof(float));
        pipe.InitBuffer(calcBuf, BUFFER_NUM * this->tileLength * sizeof(float));
        predictGm_h.SetGlobalBuffer((__gm__ half *)predict,
                          this->blockLength);
        labelGm_h.SetGlobalBuffer((__gm__ half *)label,
                          this->blockLength);
        yGm_h.SetGlobalBuffer((__gm__ half *)y,
                          this->yLength);
        pipe.InitBuffer(inQueueP_H, BUFFER_NUM, this->tileLength * sizeof(half));
        pipe.InitBuffer(inQueueL_H, BUFFER_NUM, this->tileLength * sizeof(half));
        pipe.InitBuffer(outQueueY_H, BUFFER_NUM, this->yLength * sizeof(half));
      }
    } else {
      // 'none': element-wise output of the same length as the inputs.
      if (this->typeKey == 0) {
        predictGm.SetGlobalBuffer((__gm__ float *)predict,
                            this->blockLength);
        labelGm.SetGlobalBuffer((__gm__ float *)label,
                          this->blockLength);
        yGm.SetGlobalBuffer((__gm__ float *)y,
                          this->blockLength);
      } else {
        predictGm_h.SetGlobalBuffer((__gm__ half *)predict,
                          this->blockLength);
        labelGm_h.SetGlobalBuffer((__gm__ half *)label,
                          this->blockLength);
        yGm_h.SetGlobalBuffer((__gm__ half *)y,
                          this->blockLength);
      }
      pipe.InitBuffer(inQueueP, BUFFER_NUM, this->tileLength * sizeof(DTYPE_Y));
      pipe.InitBuffer(inQueueL, BUFFER_NUM, this->tileLength * sizeof(DTYPE_Y));
      pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(DTYPE_Y));
    }
  }

  // Main pipeline: iterate over tiles, dispatching per dtype and reduction.
  __aicore__ inline void Process()
  {
    int32_t loopCount = this->tileNum * BUFFER_NUM;
    if (reduction != 2) {
      if (typeKey == 0) {
        // float32 'mean'/'sum': per-tile partial sums accumulated via yGm[0].
        for (int32_t i = 0; i < loopCount; i++)
        {
          CopyIn(i);
          Compute(i);
          CopyOut(i);
        }
        if (this->reduction == 0) {
          // 'mean': read back the accumulated sum, divide by totalLength,
          // and write the scalar result out again.
          // ========CopyIn======
          LocalTensor<float> tmpLocal = inQueueT.AllocTensor<float>();
          DataCopy(tmpLocal, yGm[0], this->yLength);
          inQueueT.EnQue(tmpLocal);
          // ========Compute======
          LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
          tmpLocal = inQueueT.DeQue<float>();
          yLocal.SetValue(0, tmpLocal.GetValue(0) / totalLength);
          inQueueT.FreeTensor(tmpLocal);
          outQueueY.EnQue<float>(yLocal);
          // ========CopyOut======
          yLocal = outQueueY.DeQue<float>();
          DataCopy(yGm[0], yLocal, this->yLength);
          outQueueY.FreeTensor(yLocal);
        }
      } else {
        // float16 'mean'/'sum': cast to float, reduce, cast the scalar back.
        for (int32_t i = 0; i < loopCount; i++)
        {
          CopyInFp16(i);
          ComputeFp16(i);
          CopyOut(i);
        }
        if (this->reduction == 0) {
          // 'mean': divide the float accumulator and emit a half scalar.
          // ========CopyIn======
          LocalTensor<float> tmpLocal = inQueueT.AllocTensor<float>();
          DataCopy(tmpLocal, yGm[0], this->yLength);
          inQueueT.EnQue(tmpLocal);
          // ========Compute======
          LocalTensor<half> yLocal = outQueueY_H.AllocTensor<half>();
          tmpLocal = inQueueT.DeQue<float>();
          yLocal.SetValue(0, (half)(tmpLocal.GetValue(0) / totalLength));
          inQueueT.FreeTensor(tmpLocal);
          outQueueY_H.EnQue<half>(yLocal);
          // ========CopyOut======
          yLocal = outQueueY_H.DeQue<half>();
          DataCopy(yGm_h[0], yLocal, this->yLength);
          outQueueY_H.FreeTensor(yLocal);
        }
      }
    } else {
      // 'none': per-element (p - l)^2 written tile by tile.
      if (typeKey == 0) {
        for (int32_t progress = 0; progress < loopCount; progress++)
        {
          LocalTensor<float> pLocal = inQueueP.AllocTensor<float>();
          LocalTensor<float> lLocal = inQueueL.AllocTensor<float>();

          DataCopy(pLocal[0], predictGm[progress * this->tileLength],
                    this->tileLength);
          DataCopy(lLocal[0], labelGm[progress * this->tileLength],
                    this->tileLength);

          inQueueP.EnQue(pLocal);
          inQueueL.EnQue(lLocal);

          pLocal = inQueueP.DeQue<float>();
          lLocal = inQueueL.DeQue<float>();
          LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();

          // compute: y = (p - l)^2
          Sub(lLocal, pLocal, lLocal, this->tileLength);
          Mul(yLocal, lLocal, lLocal, this->tileLength);
          outQueueY.EnQue<float>(yLocal);
          inQueueP.FreeTensor(pLocal);
          inQueueL.FreeTensor(lLocal);

          yLocal = outQueueY.DeQue<float>();
          // Fix: write each tile to its own output offset (the original wrote
          // every tile to yGm[0], so only the last tile survived).
          DataCopy(yGm[progress * this->tileLength], yLocal, this->tileLength);
          outQueueY.FreeTensor(yLocal);
        }
      } else {
        for (int32_t progress = 0; progress < loopCount; progress++)
        {
          LocalTensor<half> pLocal = inQueueP.AllocTensor<half>();
          LocalTensor<half> lLocal = inQueueL.AllocTensor<half>();

          DataCopy(pLocal[0], predictGm_h[progress * this->tileLength],
                    this->tileLength);
          DataCopy(lLocal[0], labelGm_h[progress * this->tileLength],
                    this->tileLength);

          inQueueP.EnQue(pLocal);
          inQueueL.EnQue(lLocal);

          pLocal = inQueueP.DeQue<half>();
          lLocal = inQueueL.DeQue<half>();
          LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();

          // compute: y = (p - l)^2 (half precision throughout)
          Sub(lLocal, pLocal, lLocal, this->tileLength);
          Mul(yLocal, lLocal, lLocal, this->tileLength);
          outQueueY.EnQue<half>(yLocal);
          inQueueP.FreeTensor(pLocal);
          inQueueL.FreeTensor(lLocal);

          yLocal = outQueueY.DeQue<half>();
          // Fix: per-tile output offset (original overwrote yGm_h[0] each tile).
          DataCopy(yGm_h[progress * this->tileLength], yLocal, this->tileLength);
          outQueueY.FreeTensor(yLocal);
        }
      }
    }
  }

private:
  // Stage in one float32 tile of predict/label plus the current partial sum.
  __aicore__ inline void CopyIn(int32_t progress)
  {
    LocalTensor<float> pLocal = inQueueP.AllocTensor<float>();
    LocalTensor<float> lLocal = inQueueL.AllocTensor<float>();
    LocalTensor<float> tLocal = inQueueT.AllocTensor<float>();

    DataCopy(pLocal[0], predictGm[progress * this->tileLength],
              this->tileLength);
    DataCopy(lLocal[0], labelGm[progress * this->tileLength],
              this->tileLength);

    // Partial sum so far lives at yGm[0] (written by the previous CopyOut).
    DataCopy(tLocal, yGm[0], this->yLength);
    inQueueP.EnQue(pLocal);
    inQueueL.EnQue(lLocal);
    inQueueT.EnQue(tLocal);
  }

  // Per-tile reduction: sum((p - l)^2), accumulated with the prior partial sum.
  __aicore__ inline void Compute(int32_t progress)
  {
    LocalTensor<float> pLocal = inQueueP.DeQue<float>();
    LocalTensor<float> lLocal = inQueueL.DeQue<float>();
    LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
    LocalTensor<float> tLocal = inQueueT.DeQue<float>();
    LocalTensor<float> wLocal = calcBuf.Get<float>();
    // compute: lLocal = (p - l)^2
    Sub(lLocal, pLocal, lLocal, this->tileLength);
    Mul(lLocal, lLocal, lLocal, this->tileLength);

    if (progress == this->tileNum - 1) {
      // Last tile: round the valid length up to 8 floats (32 bytes) and
      // zero-fill the padding so it does not pollute the reduction.
      int32_t lasttileLengthAlign;
      if (lasttileLength % 8 != 0) {
        lasttileLengthAlign =
            ((lasttileLength + 8 - 1) / 8) * 8;
      } else {
        lasttileLengthAlign = lasttileLength;
      }
      for (int i = lasttileLength; i < lasttileLengthAlign; i++) {
        lLocal.SetValue(i, 0.0f);
      }
      ReduceSum(yLocal, lLocal, wLocal, lasttileLengthAlign);
    } else {
      ReduceSum(yLocal, lLocal, wLocal, this->tileLength);
    }
    // Fold in the running total from previous tiles (tile 0 starts fresh).
    if (progress > 0)
        yLocal.SetValue(0, yLocal.GetValue(0) + tLocal.GetValue(0));
    outQueueY.EnQue<float>(yLocal);
    inQueueP.FreeTensor(pLocal);
    inQueueL.FreeTensor(lLocal);
    inQueueT.FreeTensor(tLocal);
  }

  // Write the running scalar partial sum back to yGm[0].
  __aicore__ inline void CopyOut(int32_t progress)
  {
    LocalTensor<float> yLocal = outQueueY.DeQue<float>();
    DataCopy(yGm[0], yLocal, this->yLength);
    outQueueY.FreeTensor(yLocal);
  }

  // Stage in one float16 tile of predict/label plus the float partial sum.
  __aicore__ inline void CopyInFp16(int32_t progress)
  {
    LocalTensor<half> pLocal = inQueueP_H.AllocTensor<half>();
    LocalTensor<half> lLocal = inQueueL_H.AllocTensor<half>();
    LocalTensor<float> tLocal_f = inQueueT.AllocTensor<float>();

    DataCopy(pLocal[0], predictGm_h[progress * this->tileLength],
              this->tileLength);
    DataCopy(lLocal[0], labelGm_h[progress * this->tileLength],
              this->tileLength);

    // Partial sum is kept as float in the yGm scratch view.
    DataCopy(tLocal_f, yGm[0], this->yLength);

    inQueueT.EnQue(tLocal_f);
    inQueueP_H.EnQue(pLocal);
    inQueueL_H.EnQue(lLocal);
  }

  // fp16 per-tile reduction: cast to float, then sum((p - l)^2) + prior total.
  __aicore__ inline void ComputeFp16(int32_t progress)
  {
    LocalTensor<half> pLocal_h = inQueueP_H.DeQue<half>();
    LocalTensor<half> lLocal_h = inQueueL_H.DeQue<half>();
    LocalTensor<float> pLocal = inQueueP.AllocTensor<float>();
    LocalTensor<float> lLocal = inQueueL.AllocTensor<float>();
    LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
    LocalTensor<float> tLocal = inQueueT.DeQue<float>();
    LocalTensor<float> wLocal = calcBuf.Get<float>();
    // Widen to float32 so the accumulation does not lose precision.
    Cast(pLocal, pLocal_h, RoundMode::CAST_NONE, this->tileLength);
    Cast(lLocal, lLocal_h, RoundMode::CAST_NONE, this->tileLength);
    // compute: lLocal = (p - l)^2
    Sub(lLocal, pLocal, lLocal, this->tileLength);
    Mul(lLocal, lLocal, lLocal, this->tileLength);
    if (progress == this->tileNum - 1) {
      // Last tile: round the valid length up to 16 halves (32 bytes) and
      // zero-fill the padding before reducing.
      int32_t lasttileLengthAlign;
      if (lasttileLength % 16 != 0) {
        lasttileLengthAlign =
            ((lasttileLength + 16 - 1) / 16) * 16;
      } else {
        lasttileLengthAlign = lasttileLength;
      }
      for (int i = lasttileLength; i < lasttileLengthAlign; i++) {
        lLocal.SetValue(i, 0.0f);
      }
      ReduceSum(yLocal, lLocal, wLocal, lasttileLengthAlign);
    } else {
      ReduceSum(yLocal, lLocal, wLocal, this->tileLength);
    }
    if (progress > 0)
      yLocal.SetValue(0, yLocal.GetValue(0) + tLocal.GetValue(0));
    outQueueY.EnQue<float>(yLocal);
    inQueueP_H.FreeTensor(pLocal_h);
    inQueueL_H.FreeTensor(lLocal_h);
    inQueueP.FreeTensor(pLocal);
    inQueueL.FreeTensor(lLocal);
    inQueueT.FreeTensor(tLocal);
  }

private:
  TPipe pipe;
  TQue<QuePosition::VECIN, BUFFER_NUM> inQueueP, inQueueL, inQueueT;
  TQue<QuePosition::VECIN, BUFFER_NUM> inQueueP_H, inQueueL_H;
  TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
  TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY_H;
  TBuf<TPosition::VECCALC> calcBuf;       // scratch work tensor for ReduceSum
  GlobalTensor<float> predictGm, labelGm, yGm;
  GlobalTensor<half> predictGm_h, labelGm_h, yGm_h;
  uint32_t blockLength;    // elements handled by this block
  uint32_t tileNum;        // number of tiles
  uint32_t tileLength;     // elements per tile (per buffer)
  uint32_t lasttileLength; // valid elements in the final tile
  uint32_t typeKey;        // 0 = float32, else float16
  uint32_t reduction;      // 0 = mean, 1 = sum, 2 = none
  uint32_t yLength;        // scalar-output element count (one 32B block)
  uint32_t totalLength;    // total element count ('mean' denominator)
  float divDown;           // 1 / totalLength (currently unused)
};

// Device entry point: unpack the tiling data and run the MSE-loss operator.
extern "C" __global__ __aicore__ void mse_loss(GM_ADDR predict, GM_ADDR label, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  KernelMseLoss op;
  op.Init(predict, label, y, tiling_data.blockLength, tiling_data.tileNum,
          tiling_data.tileLength, tiling_data.lasttileLength,
          tiling_data.typeKey, tiling_data.reduction, tiling_data.totalLength);
  op.Process();
}
#ifndef __CCE_KT_TEST__
// Host-side launcher: dispatches the mse_loss kernel onto the given stream
// (excluded from kernel unit-test builds).
void mse_loss_do(uint32_t blockDim, void* l2ctrl, void* stream,
                 uint8_t* predict, uint8_t* label, uint8_t* y,
                 uint8_t* workspace, uint8_t* tiling) {
  mse_loss<<<blockDim, l2ctrl, stream>>>(predict, label, y, workspace, tiling);
}
#endif