#include "kernel_operator.h"
// tensor num for each queue
constexpr int32_t BUFFER_NUM = 2;

// LogSumExp reduction kernel (class name kept as KernelAdd for ABI/caller
// compatibility; it was evidently cloned from an Add sample).
// Computes z = max(x) + ln(sum(exp(x - max(x)))) over strided slices of x,
// using the max-shift formulation to avoid overflow in exp().
template<typename TYPE_X, typename TYPE_Z> class KernelAdd {
    using T = TYPE_X;
public:
    __aicore__ inline KernelAdd() {}

    // Initializes per-core tiling state, binds global-memory views and
    // allocates on-chip queues/scratch buffers.
    //
    // Big/small core split: the first `tailBlockNum` blocks get the larger
    // workload (bigCoreDataNum / finalBigTileNum / bigTailDataNum), the rest
    // get the smaller one.
    //
    // @param x, z            global-memory addresses of input / output
    // @param tileDataNum     elements processed per full tile
    // @param keepDim         reduction keep-dim flag (see CopyOut)
    // @param unitCount, stepSize, totalLength  strided-reduction geometry
    //                        used to form gather indices in CopyIn
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, uint32_t smallCoreDataNum,
                                uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
                                uint32_t finalSmallTileNum, uint32_t tileDataNum, 
                                uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
                                uint32_t tailBlockNum,
                                bool keepDim,
                                uint32_t unitCount, uint32_t stepSize,
                                uint32_t totalLength) 
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        const uint32_t blockIdx = AscendC::GetBlockIdx();
        this->tileDataNum = tileDataNum;
        this->unitCount = unitCount;
        this->stepSize = stepSize;
        this->totalLength = totalLength;
        this->keepDim = keepDim;
        if (blockIdx < tailBlockNum) {
          // "Big" cores: handle the larger share of the data.
          this->coreDataNum = bigCoreDataNum;
          this->tileNum = finalBigTileNum;
          this->tailDataNum = bigTailDataNum;
        }
        else {
          // "Small" cores: handle the remaining, smaller share.
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = finalSmallTileNum;
          this->tailDataNum = smallTailDataNum;
        }

        // NOTE(review): buffers are bound at the base pointer with no per-core
        // offset; CopyIn/CopyOut build absolute strided indices from j/k, so
        // this appears intentional — confirm against the tiling design.
        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x, this->coreDataNum);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z*)z, this->coreDataNum);
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Z));
        if constexpr (std::is_same_v<T, half>) {
            // One 32B block holds 16 half values (minimum copy granularity).
            this->minLength = 16;
        } else if constexpr (std::is_same_v<T, float>) {
            // One 32B block holds 8 float values (minimum copy granularity).
            this->minLength = 8;
        }
        // Scratch buffers sized at a full tile: tmp2 doubles as the workLocal
        // area for ReduceMax/ReduceSum, which needs far fewer elements than a
        // tile, so tileDataNum elements are always sufficient.
        pipe.InitBuffer(tmp1, this->tileDataNum * sizeof(T));
        pipe.InitBuffer(tmp2, this->tileDataNum * sizeof(T));
        pipe.InitBuffer(tmp3, this->tileDataNum * sizeof(T));
    }

    // Drives the (unit, step, tile) triple loop: each (k, j) pair selects one
    // strided slice to reduce; the i loop pipelines it tile by tile.
    __aicore__ inline void Process()
    {
        const uint32_t loopCount = this->tileNum;
        for (uint32_t k = 0; k < this->unitCount; k++)
        {
          for (uint32_t j = 0; j < this->stepSize; j++)
          {
            // BUGFIX: reset the per-tile element count at the start of every
            // pass. Previously it was set once before the k/j loops, so after
            // the first pass shrank it to tailDataNum on the last tile, every
            // later pass processed tailDataNum elements for ALL its tiles.
            this->processDataNum = this->tileDataNum;
            for (uint32_t i = 0; i < loopCount; i++) {
                if (i == this->tileNum - 1) {
                  // Last tile of this pass may be partially filled.
                  this->processDataNum = this->tailDataNum;
                }
                CopyIn(i, j, k);
                Compute(i, j, k);
                CopyOut(i, j, k);
            }
          }
        }
      }

private:
    // Gathers one tile of the (j, k) slice from global memory into local
    // memory, stepping through xGm with stride `stepSize`.
    __aicore__ inline void CopyIn(int32_t progress, int j, int k)
    {
      AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
      uint32_t startIndex = j + k * this->stepSize * this->totalLength +
                          progress * this->processDataNum * this->stepSize;
      // Element-wise gather: the reduction axis is strided, so a plain
      // DataCopy cannot be used here.
      for (int i = 0; i < this->processDataNum; i++)
      {
        xLocal.SetValue(i, xGm.GetValue(startIndex + i * this->stepSize));
      }

      inQueueX.EnQue(xLocal);
    }

    // LogSumExp over one tile: max-shift, exponentiate, sum, log, un-shift.
    __aicore__ inline void Compute(int32_t progress, int j, int k)
    {
      AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
      AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
      AscendC::LocalTensor<T> tmpTensor1 = tmp1.Get<T>();
      AscendC::LocalTensor<T> workLocalTensor = tmp2.Get<T>();
      AscendC::LocalTensor<T> tmpTensor3 = tmp3.Get<T>();

    if constexpr (std::is_same_v<T, half>) {
      // Use max + log(sum(exp(x - max))) to avoid overflow in exp().
      AscendC::ReduceMax<half>(tmpTensor1, xLocal, workLocalTensor, this->processDataNum, false);
      // AI Core scalar arithmetic only supports float, hence the round-trip
      // through float for the negation.
      AscendC::Adds(tmpTensor3, xLocal, static_cast<half>(0.F - static_cast<float>(tmpTensor1.GetValue(0))), this->processDataNum);
      // tmpTensor1 must not be overwritten: its max is added back below.
      AscendC::Exp(tmpTensor3, tmpTensor3, this->processDataNum);
      AscendC::ReduceSum<half>(tmpTensor3, tmpTensor3, workLocalTensor, this->processDataNum);
      // Take the natural log directly (don't be misled by the PyTorch doc
      // formula). Broadcasting the scalar over the tile handles the
      // keep_dim=true layout.
      AscendC::Duplicate<half>(tmpTensor3, static_cast<half>(tmpTensor3.GetValue(0)), this->processDataNum);
      AscendC::Ln(tmpTensor3, tmpTensor3, this->processDataNum);
      AscendC::Adds(zLocal, tmpTensor3, static_cast<half>(tmpTensor1.GetValue(0)), this->processDataNum);
    }
    else if constexpr (std::is_same_v<T, float>) {
      // Same max-shifted LogSumExp as the half branch, in native float.
      AscendC::ReduceMax<float>(tmpTensor1, xLocal, workLocalTensor, this->processDataNum, false);
      AscendC::Adds(tmpTensor3, xLocal, (0.F - static_cast<float>(tmpTensor1.GetValue(0))), this->processDataNum);
      // tmpTensor1 must not be overwritten: its max is added back below.
      AscendC::Exp(tmpTensor3, tmpTensor3, this->processDataNum);
      AscendC::ReduceSum<float>(tmpTensor3, tmpTensor3, workLocalTensor, this->processDataNum);
      AscendC::Duplicate<float>(tmpTensor3, static_cast<float>(tmpTensor3.GetValue(0)), this->processDataNum);
      AscendC::Ln(tmpTensor3, tmpTensor3, this->processDataNum);
      AscendC::Adds(zLocal, tmpTensor3, static_cast<float>(tmpTensor1.GetValue(0)), this->processDataNum);
    }
      outQueueZ.EnQue<TYPE_Z>(zLocal);
      inQueueX.FreeTensor(xLocal);
    }

    // Writes the scalar reduction result of slice (j, k) back to global
    // memory.
    __aicore__ inline void CopyOut(int32_t progress, int j, int k)
    {
      AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();
      // NOTE(review): both keepDim branches currently perform the identical
      // scalar store — the keepDim=true layout handling looks unfinished.
      // Preserved as-is; confirm the intended keep-dim output layout.
      if(!this->keepDim) {
        zGm.SetValue(j + k * this->stepSize, zLocal.GetValue(0));
      } else {
        zGm.SetValue(j + k * this->stepSize, zLocal.GetValue(0));
      }
      outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp1, tmp2, tmp3;
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Z> zGm;
    uint32_t coreDataNum;     // elements assigned to this core
    uint32_t tileNum;         // tiles per (j, k) pass
    uint32_t tileDataNum;     // elements per full tile
    uint32_t tailDataNum;     // elements in the final (partial) tile
    uint32_t processDataNum;  // elements in the tile currently in flight
    uint32_t stepSize;        // stride between consecutive reduced elements
    uint32_t unitCount;       // outer-loop extent (k)
    uint32_t totalLength;     // span of one k-unit in elements
    bool keepDim;             // keep reduced dimension in the output
    // CopyOut may face very short results, but any copy must still satisfy
    // the minimum 32B transfer granularity (16 half / 8 float elements).
    uint32_t minLength;
};


// Kernel entry point: unpacks host-side tiling data and runs the LogSumExp
// operator on this AI Core block.
extern "C" __global__ __aicore__ void log_sum_exp(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    // Deserialize the tiling struct the host wrote into global memory.
    GET_TILING_DATA(td, tiling);
    KernelAdd<DTYPE_X, DTYPE_Y> kernel;
    kernel.Init(x, y,
                td.smallCoreDataNum, td.bigCoreDataNum,
                td.finalBigTileNum, td.finalSmallTileNum,
                td.tileDataNum, td.smallTailDataNum,
                td.bigTailDataNum, td.tailBlockNum,
                td.keepDim,
                td.unitCount, td.stepSize,
                td.totalLength);
    kernel.Process();
}

