#ifndef MISH_N_D_H
#define MISH_N_D_H

#include "kernel_operator.h"

namespace Mish {
using namespace AscendC;

// Queue depths: the fp32 path double-buffers its in/out queues; the fp16 path
// needs a larger temp workspace, so it runs single-buffered (see Init()).
constexpr int32_t FP32_BUFFER_NUM = 2;
constexpr int32_t FP16_BUFFER_NUM = 1;
// Copy-alignment unit in bytes; the tail tile is rounded up to a multiple of
// BYTE_BLOCK / sizeof(T) elements before DataCopy (see Process()).
constexpr int32_t BYTE_BLOCK = 32;
// Number of input-tile-sized slices reserved in tempValBuf.
// fp16: 4 fp32 work buffers, each twice the size of a half tile -> 4 * 2 slices.
// fp32: 2 work buffers of one tile each.
constexpr int32_t FP16_TMP_BUF_COUNT = 4 * 2;
constexpr int32_t FP32_TMP_BUF_COUNT = 2;

constexpr float NEG_ONE = -1.0;
constexpr float POS_ONE = 1.0;
constexpr float SCALER_TWO = 2.0;
// Clamp range applied before the tanh evaluation so exp(2x) stays finite;
// beyond |x| ~= 8.8, fp32 tanh has effectively saturated to +/-1.
constexpr float MAX_TANH = 8.8;
constexpr float MIN_TANH = -8.8;

// Kernel class computing mish(x) = x * tanh(softplus(x)) elementwise over a
// flat GM tensor, tile by tile, using tiling parameters supplied by the host.
template <typename T>
class MishND {
public:
    __aicore__ inline MishND() = default;
    // Binds global-memory addresses, caches tiling params, allocates UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const MishTilingData* __restrict tilingData);
    // Runs the CopyIn -> Compute -> CopyOut pipeline over all tiles.
    __aicore__ inline void Process();

private:
    // Ceiling division; returns a unchanged when b == 0 to avoid div-by-zero.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : (a + bTemp - 1) / bTemp;
    }

    // Rounds a up to the nearest multiple of b; returns a unchanged when b == 0.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : CeilDiv(a, bTemp) * bTemp;
    }

    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void Compute(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount);

    // In-place tanh on x using the (e^2x - 1) / (e^2x + 1) formulation.
    __aicore__ inline void ComputeTanh(LocalTensor<float>& x, const int64_t& realProcCount);
private:
    TPipe pipe;
#if ORIG_DTYPE_X == DT_FLOAT
    TQue<QuePosition::VECIN, FP32_BUFFER_NUM> xQueue;
    TQue<QuePosition::VECOUT, FP32_BUFFER_NUM> yQueue;
#else
    TQue<QuePosition::VECIN, FP16_BUFFER_NUM> xQueue;
    TQue<QuePosition::VECOUT, FP16_BUFFER_NUM> yQueue;
#endif
    TBuf<QuePosition::VECCALC> tempValBuf;       // fp32 scratch workspace
    GlobalTensor<T> xGM, yGM;
    LocalTensor<float> tempValLT;                // fp32 view over tempValBuf
    int64_t blockIdx = 0;                        // this core's index
    uint64_t perBlockCount = 0;                  // elements per 32-byte block

    // tiling params
    uint64_t ubMaxProcCount = 0;                 // elements per full tile
    uint64_t totalDataCount = 0;                 // total elements in x / y
    uint64_t loopCount = 0;                      // number of full tiles
    uint64_t tailCount = 0;                      // elements in the last partial tile
};

// Caches tiling parameters, binds the GM buffers, and sizes the UB queues.
// The fp32 path double-buffers; the fp16 path single-buffers but reserves a
// larger fp32 scratch area for the cast-up intermediates.
template <typename T>
__aicore__ inline void MishND<T>::Init(GM_ADDR x, GM_ADDR y, const MishTilingData* __restrict tilingData) {
    // Tiling parameters computed on the host side.
    ubMaxProcCount = tilingData->ubMaxProcCount;
    totalDataCount = tilingData->totalDataCount;
    loopCount = tilingData->loopCount;
    tailCount = tilingData->tailCount;

    // Per-core context.
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);

    // Attach input/output global-memory regions.
    xGM.SetGlobalBuffer((__gm__ T*)x, totalDataCount);
    yGM.SetGlobalBuffer((__gm__ T*)y, totalDataCount);

    const uint64_t tileBytes = ubMaxProcCount * sizeof(T);
#if ORIG_DTYPE_X == DT_FLOAT
    pipe.InitBuffer(xQueue, FP32_BUFFER_NUM, tileBytes);
    pipe.InitBuffer(yQueue, FP32_BUFFER_NUM, tileBytes);
    pipe.InitBuffer(tempValBuf, tileBytes * FP32_TMP_BUF_COUNT);
#else
    pipe.InitBuffer(xQueue, FP16_BUFFER_NUM, tileBytes);
    pipe.InitBuffer(yQueue, FP16_BUFFER_NUM, tileBytes);
    pipe.InitBuffer(tempValBuf, tileBytes * FP16_TMP_BUF_COUNT);
#endif

    // Reinterpret the scratch area as fp32 for all intermediate math.
    tempValLT = tempValBuf.Get<float>();
}

// Drives the pipeline: all full-size tiles first, then one block-aligned
// tail tile when the element count does not divide evenly.
template <typename T>
__aicore__ inline void MishND<T>::Process() {
    int64_t offset = 0;
    for (int64_t tile = 0; tile < loopCount; ++tile, offset += ubMaxProcCount) {
        CopyIn(offset, ubMaxProcCount);
        Compute(offset, ubMaxProcCount);
        CopyOut(offset, ubMaxProcCount);
    }
    if (tailCount != 0) {
        // Round the tail up to a whole number of 32-byte blocks for DataCopy.
        const int64_t tailAligned = CeilAlignA2B(tailCount, perBlockCount);
        CopyIn(offset, tailAligned);
        Compute(offset, tailAligned);
        CopyOut(offset, tailAligned);
    }
}

// Stages dataCount input elements from GM into a VECIN tensor and enqueues it.
template <typename T>
__aicore__ inline void MishND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    LocalTensor<T> inTile = xQueue.AllocTensor<T>();
    DataCopy(inTile, xGM[gmOffset], dataCount);
    xQueue.EnQue(inTile);
}

// In-place tanh over the first realProcCount elements of x, computed as
// tanh(x) = (e^2x - 1) / (e^2x + 1). x is clamped to [MIN_TANH, MAX_TANH]
// first so exp(2x) cannot overflow; tanh has saturated to +/-1 there anyway.
template <typename T>
__aicore__ inline void MishND<T>::ComputeTanh(LocalTensor<float>& x, const int64_t& realProcCount) {
    // Scratch view for the denominator.
    // NOTE(review): this view starts at tempValLT[ubMaxProcCount]; in the fp16
    // path of Compute() that region aliases yLT. That is safe only because
    // yLT is written after ComputeTanh() returns — confirm if reordering.
    LocalTensor<float> tempBuf3 = tempValLT[ubMaxProcCount];
    // compute tanh = (e^2x - 1) / (e^2x + 1)
    Mins(x, x, MAX_TANH, realProcCount);
    Maxs(x, x, MIN_TANH, realProcCount);
    Muls(x, x, SCALER_TWO, realProcCount);
    Exp(x, x, realProcCount);
    Adds(tempBuf3, x, POS_ONE, realProcCount);  // e^2x + 1
    Adds(x, x, NEG_ONE, realProcCount);         // e^2x - 1
    Div(x, x, tempBuf3, realProcCount);
}

// Computes mish(x) = x * tanh(softplus(x)) for one tile.
// gmOffset is unused here (kept for pipeline-signature symmetry with
// CopyIn/CopyOut); dataCount is the per-tile element count.
// fp16 inputs are cast up to fp32, computed, then cast back with rounding.
template <typename T>
__aicore__ inline void MishND<T>::Compute(int64_t gmOffset, int64_t dataCount) {
#if ORIG_DTYPE_X == DT_FLOAT
    LocalTensor<float> xLT = xQueue.DeQue<float>();
    LocalTensor<float> yLT = yQueue.AllocTensor<float>();
    // Scratch slice 0 holds softplus/tanh intermediates; slice 1 is used
    // internally by ComputeTanh() as the denominator buffer.
    LocalTensor<float> t1 = tempValLT;
#else
    LocalTensor<half> xFP16 = xQueue.DeQue<half>();
    LocalTensor<half> yFP16 = yQueue.AllocTensor<half>();
    // fp16 path: carve fp32 views out of the scratch buffer. The slice at
    // ubMaxProcCount * 3 is reserved by Init() but currently unused.
    LocalTensor<float> xLT = tempValLT;
    LocalTensor<float> yLT = tempValLT[ubMaxProcCount];
    LocalTensor<float> t1 = tempValLT[ubMaxProcCount * 2];
    Cast(xLT, xFP16, RoundMode::CAST_NONE, dataCount);
#endif
    // softplus(x) = log(exp(x) + 1)
    // mish(x) = x * tanh(softplus(x))

    Exp(t1, xLT, dataCount);
    Adds(t1, t1, POS_ONE, dataCount);
    Ln(t1, t1, dataCount);
    // NOTE(review): ComputeTanh scribbles on tempValLT[ubMaxProcCount], which
    // aliases yLT in the fp16 path; yLT must only be written after this call.
    ComputeTanh(t1, dataCount);
    Mul(yLT, xLT, t1, dataCount);

#if ORIG_DTYPE_X == DT_FLOAT
    yQueue.EnQue(yLT);
    xQueue.FreeTensor(xLT);
#else
    Cast(yFP16, yLT, RoundMode::CAST_RINT, dataCount);
    yQueue.EnQue(yFP16);
    xQueue.FreeTensor(xFP16);
#endif
}

// Dequeues the finished result tile and copies dataCount elements back to GM.
template <typename T>
__aicore__ inline void MishND<T>::CopyOut(int64_t gmOffset, int64_t dataCount) {
    LocalTensor<T> outTile = yQueue.DeQue<T>();
    DataCopy(yGM[gmOffset], outTile, dataCount);
    yQueue.FreeTensor(outTile);
}
}  // namespace Mish

#endif  // MISH_N_D_H