#ifndef DAWSN_N_D_H
#define DAWSN_N_D_H

#include "kernel_operator.h"

namespace Dawsn {
using namespace AscendC;

// Queue depth per TQue: the fp16 path needs extra UB for fp32 up-cast scratch
// (t5/t6), so it runs single-buffered; the fp32 path can double-buffer.
constexpr int32_t FP16_BUFFER_NUM = 1;
constexpr int32_t FP32_BUFFER_NUM = 2;
// Vector-engine granularities: one data block is 32 bytes, one repeat is 256 bytes.
constexpr int32_t BYTE_BLOCK = 32;
constexpr int32_t BYTE_REPEAT = 256;

// Scratch-region multipliers for tempValBuf, in units of one queue buffer.
// fp32: 5 float regions (t0 mask area + t1..t4). fp16: 7 float regions
// (adds t5/t6 for the up-cast), times 2 because the fp16 queue buffer is
// ubMaxProcCount * sizeof(half) while each scratch region holds floats.
constexpr int32_t FP16_TMP_BUF_COUNT = 7 * 2;
constexpr int32_t FP32_TMP_BUF_COUNT = 5;

// Scalar constants used by the piecewise evaluation.
constexpr float ZERO = 0.0;
constexpr float POS_ONE = 1.0;
constexpr float NEG_ONE = -1.0;
constexpr float POS_HALF = 0.5;

// Branch boundaries on |x|: [0, 3.25), [3.25, 6.25), [6.25, 1e9], (1e9, inf).
constexpr float THREE_P_TWO_FIVE = 3.25;
constexpr float SIX_P_TWO_FIVE = 6.25;
constexpr float ONE_E_NINE = 1.0e9;

// Element-wise Dawson integral kernel: y = dawsn(x), processed tile by tile
// through unified buffer (UB) queues. The evaluation is a piecewise rational
// approximation selected by |x| (the coefficient tables appear to match
// Cephes dawsn.c — verify against that source):
//   |x| <  3.25         : x * AN(x^2) / AD(x^2)
//   3.25 <= |x| < 6.25  : 0.5 * (1/x + w*BN(w) / (x * BD1(w))),  w = 1/x^2
//   6.25 <= |x| <= 1e9  : same form with CN / CD
//   |x| >  1e9          : 0.5 / x
// T is the I/O element type (float or half); half inputs are up-cast to
// float for the whole computation.
template <typename T>
class DawsnND {
public:
    __aicore__ inline DawsnND() = default;
    // Binds GM addresses, unpacks tiling, and carves UB scratch regions.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const DawsnTilingData* __restrict tilingData);
    // Runs the CopyIn -> Compute -> CopyOut pipeline over all tiles.
    __aicore__ inline void Process();

private:
    // Ceiling division; returns a unchanged when b == 0 (div-by-zero guard).
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : (a + bTemp - 1) / bTemp;
    };

    // Rounds a up to the next multiple of b; returns a unchanged when b == 0.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : CeilDiv(a, bTemp) * bTemp;
    };

    // Horner evaluation of a polynomial with N+1 explicit coefficients:
    // dst = ((coef[0]*src + coef[1])*src + ...) + coef[N].
    __aicore__ inline void Polevl(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT, float* coef,
                                  int64_t N, int64_t dataCount);
    // Same, but with an implicit leading coefficient of 1 (degree N, N coefficients).
    __aicore__ inline void P1evl(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT, float* coef,
                                 int64_t N, int64_t dataCount);

    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    // The four |x| branches of the approximation (see class comment).
    __aicore__ inline void SubCompute1(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                       int64_t dataCount);
    __aicore__ inline void SubCompute2(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                       int64_t dataCount);
    __aicore__ inline void SubCompute3(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                       int64_t dataCount);
    __aicore__ inline void SubCompute4(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                       int64_t dataCount);
    __aicore__ inline void Compute(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount);

private:
    TPipe pipe;
#if ORIG_DTYPE_X == DT_FLOAT
    TQue<QuePosition::VECIN, FP32_BUFFER_NUM> xQue;
    TQue<QuePosition::VECOUT, FP32_BUFFER_NUM> yQue;
#else
    TQue<QuePosition::VECIN, FP16_BUFFER_NUM> xQue;
    TQue<QuePosition::VECOUT, FP16_BUFFER_NUM> yQue;
    // fp16 only: float views of the up-cast input (t5) and result (t6).
    LocalTensor<float> t5, t6;
#endif
    TBuf<QuePosition::VECCALC> tempValBuf;  // backing storage for masks and t1..t6
    GlobalTensor<T> xGM, yGM;
    LocalTensor<float> t1, t2, t3, t4;      // float scratch tensors, each ubMaxProcCount wide
    // Bit masks: mask1 = sign of x; mask2/mask3/mask4 = the three large-|x| branches.
    LocalTensor<uint8_t> mask1, mask2, mask3, mask4;

    int64_t blockIdx = 0;
    uint64_t perBlockCount = 0;    // elements per 32-byte block
    uint64_t perRepeatCount = 0;   // elements per 256-byte repeat
    uint64_t maskTensorCount = 0;  // bytes per mask region (32-byte aligned)

    // Rational-approximation coefficient tables, highest-order term first.
    // *_COUNT is the N argument for Polevl/P1evl, not the array length.
    float COEF_AN[10] = {1.13681498971755972054E-11, 8.49262267667473811108E-10, 1.94434204175553054283E-8,
                         9.53151741254484363489E-7,  3.07828309874913200438E-6,  3.52513368520288738649E-4,
                         -8.50149846724410912031E-4, 4.22618223005546594270E-2,  -9.17480371773452345351E-2,
                         9.99999999999999994612E-1};
    int32_t COEF_AN_COUNT = 9;

    float COEF_AD[11] = {2.40372073066762605484E-11, 1.48864681368493396752E-9, 5.21265281010541664570E-8,
                         1.27258478273186970203E-6,  2.32490249820789513991E-5, 3.25524741826057911661E-4,
                         3.48805814657162590916E-3,  2.79448531198828973716E-2, 1.58874241960120565368E-1,
                         5.74918629489320327824E-1,  1.00000000000000000539E0};
    int32_t COEF_AD_COUNT = 10;

    float COEF_BN[11] = {5.08955156417900903354E-1,  -2.44754418142697847934E-1, 9.41512335303534411857E-2,
                         -2.18711255142039025206E-2, 3.66207612329569181322E-3,  -4.23209114460388756528E-4,
                         3.59641304793896631888E-5,  -2.14640351719968974225E-6, 9.10010780076391431042E-8,
                         -2.40274520828250956942E-9, 3.59233385440928410398E-11};
    int32_t COEF_BN_COUNT = 10;

    float COEF_BD[10] = {-6.31839869873368190192E-1, 2.36706788228248691528E-1,  -5.31806367003223277662E-2,
                         8.48041718586295374409E-3,  -9.47996768486665330168E-4, 7.81025592944552338085E-5,
                         -4.55875153252442634831E-6, 1.89100358111421846170E-7,  -4.91324691331920606875E-9,
                         7.18466403235734541950E-11};
    int32_t COEF_BD_COUNT = 10;

    float COEF_CN[5] = {-5.90592860534773254987E-1, 6.29235242724368800674E-1, -1.72858975380388136411E-1,
                        1.64837047825189632310E-2, -4.86827613020462700845E-4};
    int32_t COEF_CN_COUNT = 4;

    float COEF_CD[5] = {-2.69820057197544900361E0, 1.73270799045947845857E0, -3.93708582281939493482E-1,
                        3.44278924041233391079E-2, -9.73655226040941223894E-4};
    int32_t COEF_CD_COUNT = 5;

    // tiling params
    uint64_t ubMaxProcCount = 0;  // elements processed per UB tile
    uint64_t totalDataCount = 0;  // total elements handled by this core
    uint64_t loopCount = 0;       // number of full tiles
    uint64_t tailCount = 0;       // leftover elements after the full tiles
};

template <typename T>
__aicore__ inline void DawsnND<T>::Init(GM_ADDR x, GM_ADDR y, const DawsnTilingData* __restrict tilingData) {
    // Cache the core index and the per-block / per-repeat element counts.
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);
    perRepeatCount = BYTE_REPEAT / sizeof(T);

    // Unpack the host-side tiling parameters.
    ubMaxProcCount = tilingData->ubMaxProcCount;
    totalDataCount = tilingData->totalDataCount;
    loopCount = tilingData->loopCount;
    tailCount = tilingData->tailCount;

    // Bind the global-memory input/output buffers.
    xGM.SetGlobalBuffer((__gm__ T*)x, totalDataCount);
    yGM.SetGlobalBuffer((__gm__ T*)y, totalDataCount);

    const uint64_t bufBytes = ubMaxProcCount * sizeof(T);
#if ORIG_DTYPE_X == DT_FLOAT
    pipe.InitBuffer(xQue, FP32_BUFFER_NUM, bufBytes);
    pipe.InitBuffer(yQue, FP32_BUFFER_NUM, bufBytes);
    pipe.InitBuffer(tempValBuf, bufBytes * FP32_TMP_BUF_COUNT);
#else
    pipe.InitBuffer(xQue, FP16_BUFFER_NUM, bufBytes);
    pipe.InitBuffer(yQue, FP16_BUFFER_NUM, bufBytes);
    pipe.InitBuffer(tempValBuf, bufBytes * FP16_TMP_BUF_COUNT);
#endif

    // Carve tempValBuf into scratch regions of ubMaxProcCount floats each.
    // Region 0 is shared by the four byte masks; regions 1..4 (and 5..6 for
    // fp16) become the float work tensors.
    LocalTensor<float> scratch = tempValBuf.Get<float>();
    maskTensorCount = bufBytes / 4 / BYTE_BLOCK * BYTE_BLOCK;  // bytes per mask, 32B-aligned
    mask1 = scratch.template ReinterpretCast<uint8_t>();
    mask2 = mask1[maskTensorCount];
    mask3 = mask1[maskTensorCount * 2];
    mask4 = mask1[maskTensorCount * 3];
    t1 = scratch[ubMaxProcCount];
    t2 = scratch[ubMaxProcCount * 2];
    t3 = scratch[ubMaxProcCount * 3];
    t4 = scratch[ubMaxProcCount * 4];

#if ORIG_DTYPE_X == DT_FLOAT16
    // fp16 path: extra float tensors for the up-cast input and result.
    t5 = scratch[ubMaxProcCount * 5];
    t6 = scratch[ubMaxProcCount * 6];
#endif
}

template <typename T>
__aicore__ inline void DawsnND<T>::Process() {
    // Drain all full tiles, then handle the ragged tail if one exists.
    int64_t offset = 0;
    for (uint64_t loop = 0; loop < loopCount; ++loop) {
        CopyIn(offset, ubMaxProcCount);
        Compute(offset, ubMaxProcCount);
        CopyOut(offset, ubMaxProcCount);
        offset += ubMaxProcCount;
    }
    if (tailCount) {
        // DMA copies are padded to a 32-byte block; vector math is padded
        // up to a whole 256-byte repeat.
        const int64_t copyCount = CeilAlignA2B(tailCount, perBlockCount);
        const int64_t computeCount = CeilAlignA2B(tailCount, perRepeatCount);
        CopyIn(offset, copyCount);
        Compute(offset, computeCount);
        CopyOut(offset, copyCount);
    }
}

template <typename T>
__aicore__ inline void DawsnND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    // Stage the next input tile from global memory into the VECIN queue.
    LocalTensor<T> inTile = xQue.AllocTensor<T>();
    DataCopy(inTile, xGM[gmOffset], dataCount);
    xQue.EnQue(inTile);
}

template <typename T>
__aicore__ inline void DawsnND<T>::Polevl(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT, float* coef,
                                          int64_t N, int64_t dataCount) {
    // Element-wise Horner scheme over the N+1 coefficients (highest first):
    // dst = ((coef[0]*src + coef[1])*src + ...)*src + coef[N].
    Duplicate(dstLT, coef[0], dataCount);
    for (int64_t idx = 1; idx <= N; ++idx) {
        Mul(dstLT, srcLT, dstLT, dataCount);
        Adds(dstLT, dstLT, coef[idx], dataCount);
    }
}

template <typename T>
__aicore__ inline void DawsnND<T>::P1evl(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT, float* coef,
                                         int64_t N, int64_t dataCount) {
    // Horner scheme for a monic polynomial of degree N (implicit leading 1):
    // dst = ((src + coef[0])*src + coef[1])*src + ... + coef[N-1].
    Duplicate(dstLT, coef[0], dataCount);
    Add(dstLT, dstLT, srcLT, dataCount);
    for (int64_t idx = 1; idx < N; ++idx) {
        Mul(dstLT, srcLT, dstLT, dataCount);
        Adds(dstLT, dstLT, coef[idx], dataCount);
    }
}

// Small-argument branch: dst = x * AN(x^2) / AD(x^2). Computed for every
// element as the default; the larger-|x| branches are selected over it
// in Compute(). Clobbers t1 and t2.
template <typename T>
__aicore__ inline void DawsnND<T>::SubCompute1(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                               int64_t dataCount) {
    Mul(t1, srcLT, srcLT, dataCount);                   // t1 = x^2
    Polevl(t2, t1, COEF_AN, COEF_AN_COUNT, dataCount);  // t2 = AN(x^2)
    Mul(dstLT, srcLT, t2, dataCount);                   // dst = x * AN(x^2)
    Polevl(t2, t1, COEF_AD, COEF_AD_COUNT, dataCount);  // t2 = AD(x^2)
    Div(dstLT, dstLT, t2, dataCount);                   // dst = x*AN(x^2) / AD(x^2)
}

// Mid-range branch (selected via mask2 in Compute). With w = 1/x^2:
//   dst = 0.5 * (1/x + w*BN(w) / (x * BD1(w)))
// where BD1 is the monic polynomial evaluated by P1evl.
// Clobbers t1, t2 and t3.
template <typename T>
__aicore__ inline void DawsnND<T>::SubCompute2(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                               int64_t dataCount) {
    Mul(t1, srcLT, srcLT, dataCount);                     // t1 = x^2
    Duplicate(t2, POS_ONE, dataCount);                    // t2 = 1
    Div(t1, t2, t1, dataCount);                           // t1 = w = 1/x^2
    Div(t2, t2, srcLT, dataCount);                        // t2 = 1/x
    Polevl(t3, t1, COEF_BN, COEF_BN_COUNT, dataCount);    // t3 = BN(w)
    Mul(t3, t3, t1, dataCount);                           // t3 = w * BN(w)
    P1evl(dstLT, t1, COEF_BD, COEF_BD_COUNT, dataCount);  // dst = BD1(w)
    Mul(dstLT, dstLT, srcLT, dataCount);                  // dst = x * BD1(w)
    Div(dstLT, t3, dstLT, dataCount);                     // dst = w*BN(w) / (x*BD1(w))
    Add(dstLT, t2, dstLT, dataCount);                     // dst += 1/x
    Muls(dstLT, dstLT, POS_HALF, dataCount);              // dst *= 0.5
}

// Large-range branch (selected via mask3 in Compute). Same form as
// SubCompute2 but with the CN/CD tables. With w = 1/x^2:
//   dst = 0.5 * (1/x + w*CN(w) / (x * CD1(w)))
// Clobbers t1, t2 and t3.
template <typename T>
__aicore__ inline void DawsnND<T>::SubCompute3(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                               int64_t dataCount) {
    Mul(t1, srcLT, srcLT, dataCount);                     // t1 = x^2
    Duplicate(t2, POS_ONE, dataCount);                    // t2 = 1
    Div(t1, t2, t1, dataCount);                           // t1 = w = 1/x^2
    Div(t2, t2, srcLT, dataCount);                        // t2 = 1/x
    Polevl(t3, t1, COEF_CN, COEF_CN_COUNT, dataCount);    // t3 = CN(w)
    Mul(t3, t3, t1, dataCount);                           // t3 = w * CN(w)
    P1evl(dstLT, t1, COEF_CD, COEF_CD_COUNT, dataCount);  // dst = CD1(w)
    Mul(dstLT, dstLT, srcLT, dataCount);                  // dst = x * CD1(w)
    Div(dstLT, t3, dstLT, dataCount);                     // dst = w*CN(w) / (x*CD1(w))
    Add(dstLT, t2, dstLT, dataCount);                     // dst += 1/x
    Muls(dstLT, dstLT, POS_HALF, dataCount);              // dst *= 0.5
}

// Asymptotic branch for very large |x| (selected via mask4 in Compute):
// dst = 0.5 / x. Clobbers t1.
template <typename T>
__aicore__ inline void DawsnND<T>::SubCompute4(const LocalTensor<float>& dstLT, const LocalTensor<float>& srcLT,
                                               int64_t dataCount) {
    Duplicate(t1, POS_HALF, dataCount);
    Div(dstLT, t1, srcLT, dataCount);
}

// Evaluates dawsn() over one UB tile. Fix: removed four leftover debug
// dump calls (mask1.Print / xLT.Print) that printed tensor contents to the
// log on every tile in the production path.
//
// gmOffset is unused here (kept for signature symmetry with CopyIn/CopyOut);
// dataCount is the repeat-aligned element count to process.
template <typename T>
__aicore__ inline void DawsnND<T>::Compute(int64_t gmOffset, int64_t dataCount) {
    // 16-bit views of the byte masks so And() can combine them pairwise.
    LocalTensor<uint16_t> u16Mask2 = mask2.ReinterpretCast<uint16_t>();
    LocalTensor<uint16_t> u16Mask3 = mask3.ReinterpretCast<uint16_t>();
    LocalTensor<uint16_t> u16Mask4 = mask4.ReinterpretCast<uint16_t>();

#if ORIG_DTYPE_X == DT_FLOAT
    LocalTensor<float> xLT = xQue.DeQue<float>();
    LocalTensor<float> yLT = yQue.AllocTensor<float>();
#else
    // fp16 path: up-cast the input to float scratch (t5) and compute the
    // result in float scratch (t6).
    LocalTensor<half> xFP16 = xQue.DeQue<half>();
    LocalTensor<half> yFP16 = yQue.AllocTensor<half>();
    LocalTensor<float> xLT = t5;
    LocalTensor<float> yLT = t6;
    Cast(xLT, xFP16, RoundMode::CAST_NONE, dataCount);
#endif

    // dawsn is odd: remember the sign in mask1 (x < 0), then work on |x|.
    Duplicate(t1, ZERO, dataCount);
    Compare(mask1, xLT, t1, CMPMODE::LT, dataCount);
    Muls(t1, xLT, NEG_ONE, dataCount);
    Select(xLT, mask1, t1, xLT, SELMODE::VSEL_TENSOR_TENSOR_MODE, dataCount);

    // mask3 (temporary): |x| >= 3.25
    Duplicate(t1, THREE_P_TWO_FIVE, dataCount);
    Compare(mask3, xLT, t1, CMPMODE::GE, dataCount);

    // mask2: 3.25 <= |x| < 6.25 ; mask4 (temporary): |x| >= 6.25
    Duplicate(t1, SIX_P_TWO_FIVE, dataCount);
    Compare(mask2, xLT, t1, CMPMODE::LT, dataCount);
    Compare(mask4, xLT, t1, CMPMODE::GE, dataCount);
    And(u16Mask2, u16Mask2, u16Mask3, maskTensorCount / 2);

    // mask3: 6.25 <= |x| <= 1e9 ; mask4: |x| > 1e9
    Duplicate(t1, ONE_E_NINE, dataCount);
    Compare(mask3, xLT, t1, CMPMODE::LE, dataCount);
    And(u16Mask3, u16Mask3, u16Mask4, maskTensorCount / 2);
    Compare(mask4, xLT, t1, CMPMODE::GT, dataCount);

    // Evaluate the small-|x| branch everywhere, then overlay the other
    // three branches where their masks are set.
    SubCompute1(yLT, xLT, dataCount);
    SubCompute2(t4, xLT, dataCount);
    Select(yLT, mask2, t4, yLT, SELMODE::VSEL_TENSOR_TENSOR_MODE, dataCount);
    SubCompute3(t4, xLT, dataCount);
    Select(yLT, mask3, t4, yLT, SELMODE::VSEL_TENSOR_TENSOR_MODE, dataCount);
    SubCompute4(t4, xLT, dataCount);
    Select(yLT, mask4, t4, yLT, SELMODE::VSEL_TENSOR_TENSOR_MODE, dataCount);

    // Restore the sign for originally-negative inputs: dawsn(-x) = -dawsn(x).
    Muls(t1, yLT, NEG_ONE, dataCount);
    Select(yLT, mask1, t1, yLT, SELMODE::VSEL_TENSOR_TENSOR_MODE, dataCount);

#if ORIG_DTYPE_X == DT_FLOAT
    yQue.EnQue(yLT);
    xQue.FreeTensor(xLT);
#else
    Cast(yFP16, yLT, RoundMode::CAST_RINT, dataCount);
    yQue.EnQue(yFP16);
    xQue.FreeTensor(xFP16);
#endif
}

template <typename T>
__aicore__ inline void DawsnND<T>::CopyOut(int64_t gmOffset, int64_t dataCount) {
    // Pop the finished tile from the VECOUT queue and write it back to GM.
    LocalTensor<T> outTile = yQue.DeQue<T>();
    DataCopy(yGM[gmOffset], outTile, dataCount);
    yQue.FreeTensor(outTile);
}
}  // namespace Dawsn

#endif  // DAWSN_N_D_H