#ifndef GELU_N_D_H
#define GELU_N_D_H

#include "kernel_operator.h"

namespace Gelu {
using namespace AscendC;

constexpr int32_t FP32_BUFFER_NUM = 2;
constexpr int32_t FP16_BUFFER_NUM = 1;
constexpr int32_t BYTE_BLOCK = 32;
constexpr int32_t FP16_TMP_BUF_COUNT = 4 * 2;
constexpr int32_t FP32_TMP_BUF_COUNT = 2;

constexpr float POS_ONE = 1.0;

constexpr float ERF_PARAM1 = -0.3512339572e-8;
constexpr float ERF_PARAM2 = 0.2645266170e-6;
constexpr float ERF_PARAM3 = -0.7929488134e-5;
constexpr float ERF_PARAM4 = 0.1106123840e-3;
constexpr float ERF_PARAM5 = 0.6518995814e-4;
constexpr float ERF_PARAM6 = -0.7266616915e-1;
constexpr float ERF_PARAM7 = -0.1595769883e1;
constexpr float ERF_MAX = 5.75;
constexpr float ERF_MIN = -13.25;

template <typename T>
class GeluND {
public:
    __aicore__ inline GeluND() = default;
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const GeluTilingData* __restrict tilingData);
    __aicore__ inline void Process();

private:
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : (a + bTemp - 1) / bTemp;
    };

    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : CeilDiv(a, bTemp) * bTemp;
    };

    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void Compute(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount);

private:
    TPipe pipe;
#if ORIG_DTYPE_X == DT_FLOAT
    TQue<QuePosition::VECIN, FP32_BUFFER_NUM> xQueue;
    TQue<QuePosition::VECOUT, FP32_BUFFER_NUM> yQueue;
#else
    TQue<QuePosition::VECIN, FP16_BUFFER_NUM> xQueue;
    TQue<QuePosition::VECOUT, FP16_BUFFER_NUM> yQueue;
#endif
    TBuf<QuePosition::VECCALC> tempValBuf;
    GlobalTensor<T> xGM, yGM;
    LocalTensor<float> tempValLT;
    int64_t blockIdx = 0;
    uint64_t perBlockCount = 0;

    // tiling params
    uint64_t ubMaxProcCount = 0;
    uint64_t totalDataCount = 0;
    uint64_t loopCount = 0;
    uint64_t tailCount = 0;
};

template <typename T>
__aicore__ inline void GeluND<T>::Init(GM_ADDR x, GM_ADDR y, const GeluTilingData* __restrict tilingData) {
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);
    ubMaxProcCount = tilingData->ubMaxProcCount;
    totalDataCount = tilingData->totalDataCount;
    loopCount = tilingData->loopCount;
    tailCount = tilingData->tailCount;

    xGM.SetGlobalBuffer((__gm__ T*)x, totalDataCount);
    yGM.SetGlobalBuffer((__gm__ T*)y, totalDataCount);

    uint64_t singleBufferSize = ubMaxProcCount * sizeof(T);
#if ORIG_DTYPE_X == DT_FLOAT
    pipe.InitBuffer(xQueue, FP32_BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(yQueue, FP32_BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(tempValBuf, singleBufferSize * FP32_TMP_BUF_COUNT);
#else
    pipe.InitBuffer(xQueue, FP16_BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(yQueue, FP16_BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(tempValBuf, singleBufferSize * FP16_TMP_BUF_COUNT);
#endif

    tempValLT = tempValBuf.Get<float>();
}

template <typename T>
__aicore__ inline void GeluND<T>::Process() {
    int64_t gmOffset = 0;
    for (int64_t i = 0; i < loopCount; i++) {
        CopyIn(gmOffset, ubMaxProcCount);
        Compute(gmOffset, ubMaxProcCount);
        CopyOut(gmOffset, ubMaxProcCount);
        gmOffset += ubMaxProcCount;
    }
    if (tailCount) {
        int64_t alignDataCount = CeilAlignA2B(tailCount, perBlockCount);
        CopyIn(gmOffset, alignDataCount);
        Compute(gmOffset, alignDataCount);
        CopyOut(gmOffset, alignDataCount);
    }
}

template <typename T>
__aicore__ inline void GeluND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    LocalTensor<T> xInLT = xQueue.AllocTensor<T>();
    DataCopy(xInLT, xGM[gmOffset], dataCount);
    xQueue.EnQue(xInLT);
}

template <typename T>
__aicore__ inline void GeluND<T>::Compute(int64_t gmOffset, int64_t dataCount) {
#if ORIG_DTYPE_X == DT_FLOAT
    LocalTensor<float> xLT = xQueue.DeQue<float>();
    LocalTensor<float> yLT = yQueue.AllocTensor<float>();
    LocalTensor<float> t1 = tempValLT;
    LocalTensor<float> xPow = tempValLT[ubMaxProcCount];
#else
    LocalTensor<half> xFP16 = xQueue.DeQue<half>();
    LocalTensor<half> yFP16 = yQueue.AllocTensor<half>();
    LocalTensor<float> xLT = tempValLT;
    LocalTensor<float> yLT = tempValLT[ubMaxProcCount];
    LocalTensor<float> t1 = tempValLT[ubMaxProcCount * 2];
    LocalTensor<float> xPow = tempValLT[ubMaxProcCount * 3];
    Cast(xLT, xFP16, RoundMode::CAST_NONE, dataCount);
#endif
    // res = x/(1+np.exp(((((((a1*x**2+a2)*x**2+a3)*x**2+a4)*x**2+a5)*x**2+a6)*x**2+a7)*x))
    Maxs(t1, xLT, ERF_MIN, dataCount);
    Mins(t1, t1, ERF_MAX, dataCount);

    Mul(xPow, t1, t1, dataCount);
    Muls(yLT, xPow, ERF_PARAM1, dataCount);

    Adds(yLT, yLT, ERF_PARAM2, dataCount);
    Mul(yLT, yLT, xPow, dataCount);

    Adds(yLT, yLT, ERF_PARAM3, dataCount);
    Mul(yLT, yLT, xPow, dataCount);

    Adds(yLT, yLT, ERF_PARAM4, dataCount);
    Mul(yLT, yLT, xPow, dataCount);

    Adds(yLT, yLT, ERF_PARAM5, dataCount);
    Mul(yLT, yLT, xPow, dataCount);

    Adds(yLT, yLT, ERF_PARAM6, dataCount);
    Mul(yLT, yLT, xPow, dataCount);

    Adds(yLT, yLT, ERF_PARAM7, dataCount);
    Mul(yLT, yLT, t1, dataCount);

    Exp(yLT, yLT, dataCount);

    Adds(yLT, yLT, POS_ONE, dataCount);
    Div(yLT, xLT, yLT, dataCount);

#if ORIG_DTYPE_X == DT_FLOAT
    yQueue.EnQue(yLT);
    xQueue.FreeTensor(xLT);
#else
    Cast(yFP16, yLT, RoundMode::CAST_RINT, dataCount);
    yQueue.EnQue(yFP16);
    xQueue.FreeTensor(xFP16);
#endif
}

template <typename T>
__aicore__ inline void GeluND<T>::CopyOut(int64_t gmOffset, int64_t dataCount) {
    LocalTensor<T> yOutLT = yQueue.DeQue<T>();
    DataCopy(yGM[gmOffset], yOutLT, dataCount);
    yQueue.FreeTensor(yOutLT);
}
}  // namespace Gelu

#endif  // GELU_N_D_H