#ifndef CUMSUM_N_D_H
#define CUMSUM_N_D_H

#include "kernel_operator.h"

namespace Cumsum {
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;     // double buffering for the VECIN/VECOUT queues
constexpr int32_t BYTE_BLOCK = 32;    // DataCopy block granularity in bytes (perBlockCount = 32 / sizeof(T))
constexpr int32_t BYTE_REPEAT = 256;  // bytes per vector repeat; only feeds perRepeatCount, which is unused here
constexpr int32_t TMP_BUF_COUNT = 1;  // number of scratch (accumulator) buffers carved out of tempValBuf

// Fill value for the running-sum accumulator; cast to T at use sites.
constexpr float ZERO = 0.0;

// Cumulative-sum kernel over a single axis of an N-D tensor.
// The axis index is read at runtime from the `axes` GM buffer; `exclusive`
// and `reverse` tiling flags select exclusive scans and end-to-start scans.
template <typename T>
class CumsumND {
public:
    __aicore__ inline CumsumND() = default;
    // Reads tiling data and the axis scalar, binds the GM buffers, and
    // allocates UB queues/scratch. Must be called before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axes, GM_ADDR y, const CumsumTilingData* __restrict tilingData);
    // Runs the full cumsum over the bound tensors.
    __aicore__ inline void Process();

private:
    // Ceiling division; returns `a` unchanged when b == 0 (div-by-zero guard).
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : (a + bTemp - 1) / bTemp;
    };

    // Rounds `a` up to the nearest multiple of b (`a` unchanged when b == 0).
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 bTemp(b);
        return bTemp == 0 ? a : CeilDiv(a, bTemp) * bTemp;
    };

    // Stages one tile of x from GM into the input queue.
    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    // NOTE(review): defined below but never called anywhere in this file.
    __aicore__ inline void DupZero(int64_t dataCount);
    // Adds the staged tile into the accumulator; index == 0 resets it.
    __aicore__ inline void Compute(int64_t index, int64_t dataCount);
    // dataCount = block-aligned head copied in one burst; tailCount = leftover
    // elements written one by one (used when an aligned burst would overrun).
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount, int64_t tailCount = 0);

private:
    TPipe pipe;

    TQue<QuePosition::VECIN, BUFFER_NUM> xQue;   // input tiles of x
    TQue<QuePosition::VECOUT, BUFFER_NUM> yQue;  // output tiles of y
    TBuf<QuePosition::VECCALC> tempValBuf;       // backing storage for the accumulator
    GlobalTensor<T> xGM, yGM;
    LocalTensor<T> tempValLT;                    // accumulator view into tempValBuf
    int64_t blockIdx = 0;                        // set in Init(); not used elsewhere in this file
    uint64_t perBlockCount = 0;                  // elements of T per 32-byte block
    uint64_t perRepeatCount = 0;                 // elements of T per 256-byte repeat; unused here
    int64_t axesValue = 0;                       // length of the cumulated axis
    int64_t loopCount = 1;                       // product of dims before the axis (row count)
    int64_t cSzie = 1;                           // (sic: "cSize") product of dims after the axis

    // tiling params
    uint64_t ubMaxProcCount = 0;                 // max elements per UB tile
    uint64_t xDimCount = 0;                      // rank of x
    const uint64_t* xDimArray = nullptr;         // dim sizes of x
    int32_t exclusive = 0;                       // 1 => exclusive scan (emit before add)
    int32_t reverse = 0;                         // 1 => scan from the end of the axis
};

// Reads tiling data and the runtime axis scalar, binds GM buffers, derives the
// loop geometry (rows before the axis, contiguous length after it), and
// allocates the UB queues and the accumulator scratch buffer.
template <typename T>
__aicore__ inline void CumsumND<T>::Init(GM_ADDR x, GM_ADDR axes, GM_ADDR y,
                                         const CumsumTilingData* __restrict tilingData) {
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);
    perRepeatCount = BYTE_REPEAT / sizeof(T);
    ubMaxProcCount = tilingData->ubMaxProcCount;
    xDimCount = tilingData->xDimCount;
    xDimArray = tilingData->xDimArray;
    exclusive = tilingData->exclusive;
    reverse = tilingData->reverse;

    xGM.SetGlobalBuffer((__gm__ T*)x);
    yGM.SetGlobalBuffer((__gm__ T*)y);

    // The cumulation axis is a single int32 scalar read from GM at runtime.
    GlobalTensor<int32_t> axesGM;
    axesGM.SetGlobalBuffer((__gm__ int32_t*)axes, 1);
    int64_t tempDimValue = axesGM.GetValue(0);
    // Normalize a Python-style negative axis into [0, xDimCount).
    // Cast keeps the arithmetic signed instead of wrapping through uint64_t.
    tempDimValue = tempDimValue < 0 ? static_cast<int64_t>(xDimCount) + tempDimValue : tempDimValue;
    axesValue = xDimArray[tempDimValue];

    // loopCount: number of independent rows (product of dims before the axis).
    for (int64_t i = 0; i < tempDimValue; i++) {
        loopCount *= xDimArray[i];
    }

    // cSzie (sic): contiguous inner length (product of dims after the axis).
    // Signed bound avoids a signed/unsigned comparison.
    for (int64_t i = tempDimValue + 1; i < static_cast<int64_t>(xDimCount); i++) {
        cSzie *= xDimArray[i];
    }

    uint64_t singleBufferSize = ubMaxProcCount * sizeof(T);
    pipe.InitBuffer(xQue, BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(yQue, BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(tempValBuf, singleBufferSize * TMP_BUF_COUNT);
    tempValLT = tempValBuf.Get<T>();
}

// Drives the scan: for each of loopCount rows, walks the axis (forward or
// reverse) accumulating cSzie-element inner slices, tiling each slice by
// ubMaxProcCount when it does not fit in one UB buffer.
template <typename T>
__aicore__ inline void CumsumND<T>::Process() {
    int64_t gmOffset = 0;

    // Case 1: a whole inner slice (cSzie elements) fits in one UB tile.
    if (cSzie <= ubMaxProcCount) {
        int64_t cSizeAlign = CeilAlignA2B(cSzie, perBlockCount);
        // Split the slice into a block-aligned head and a scalar tail.
        int64_t tempData1 = cSzie / perBlockCount * perBlockCount;
        int64_t tempData2 = cSzie % perBlockCount;
        if (reverse == 1) {
            // Walk backwards from the end. The tail is written element-wise
            // (tempData1/tempData2), presumably because an aligned burst
            // would overwrite output already produced at higher offsets.
            gmOffset = loopCount * axesValue * cSzie;
            for (int64_t i = 0; i < loopCount; i++) {
                for (int64_t j = 0; j < axesValue; j++) {
                    gmOffset -= cSzie;
                    CopyIn(gmOffset, cSizeAlign);
                    Compute(j, cSizeAlign);
                    CopyOut(gmOffset, tempData1, tempData2);
                }
            }
        } else {
            // Forward walk: the aligned burst may spill up to
            // cSizeAlign - cSzie elements into the next slice, which is
            // rewritten on the next iteration.
            // NOTE(review): the very last slice still spills past the logical
            // end of y; assumes the GM output is padded — confirm with tiling.
            for (int64_t i = 0; i < loopCount; i++) {
                for (int64_t j = 0; j < axesValue; j++) {
                    CopyIn(gmOffset, cSizeAlign);
                    Compute(j, cSizeAlign);
                    CopyOut(gmOffset, cSizeAlign);
                    gmOffset += cSzie;
                }
            }
        }
    } else {
        // Case 2: each inner slice is split into loopTime full UB tiles plus
        // an optional tail of tailCount elements.
        int64_t loopTime = cSzie / ubMaxProcCount;
        int64_t tailCount = cSzie % ubMaxProcCount;
        int64_t tailCountAlign = CeilAlignA2B(tailCount, perBlockCount);
        int64_t tmpGmOffset = 0;
        // Aligned head / scalar tail split of the tail chunk (see CopyOut).
        int64_t tempData1 = tailCount / perBlockCount * perBlockCount;
        int64_t tempData2 = tailCount % perBlockCount;
        if (reverse == 1) {
            gmOffset = loopCount * axesValue * cSzie;
            for (int64_t i = 0; i < loopCount; i++) {
                for (int64_t j = 0; j < axesValue; j++) {
                    gmOffset -= cSzie;
                    // Within a slice the chunks are still processed
                    // front-to-back; only the axis order is reversed.
                    tmpGmOffset = gmOffset;
                    for (int64_t k = 0; k < loopTime; k++) {
                        CopyIn(tmpGmOffset, ubMaxProcCount);
                        Compute(j, ubMaxProcCount);
                        CopyOut(tmpGmOffset, ubMaxProcCount);
                        tmpGmOffset += ubMaxProcCount;
                    }
                    if (tailCount) {
                        CopyIn(tmpGmOffset, tailCountAlign);
                        Compute(j, tailCountAlign);
                        // Scalar tail write: avoids bursting past this slice.
                        CopyOut(tmpGmOffset, tempData1, tempData2);
                    }
                }
            }
        } else {
            for (int64_t i = 0; i < loopCount; i++) {
                for (int64_t j = 0; j < axesValue; j++) {
                    tmpGmOffset = gmOffset;
                    for (int64_t k = 0; k < loopTime; k++) {
                        CopyIn(tmpGmOffset, ubMaxProcCount);
                        Compute(j, ubMaxProcCount);
                        CopyOut(tmpGmOffset, ubMaxProcCount);
                        tmpGmOffset += ubMaxProcCount;
                    }
                    if (tailCount) {
                        CopyIn(tmpGmOffset, tailCountAlign);
                        Compute(j, tailCountAlign);
                        // Forward direction tolerates the aligned spill (the
                        // next slice is rewritten later).
                        CopyOut(tmpGmOffset, tailCountAlign);
                    }
                    gmOffset += cSzie;
                }
            }
        }
    }
}

// Stages one tile of x: allocates a local tensor from the input queue,
// copies dataCount elements from GM starting at gmOffset, and enqueues it
// for Compute() to consume.
template <typename T>
__aicore__ inline void CumsumND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    LocalTensor<T> inLT = xQue.AllocTensor<T>();
    DataCopy(inLT, xGM[gmOffset], dataCount);
    xQue.EnQue(inLT);
}

// Produces an all-zero output tile of dataCount elements and enqueues it on
// the output queue (not called anywhere in this file).
template <typename T>
__aicore__ inline void CumsumND<T>::DupZero(int64_t dataCount) {
    LocalTensor<T> zeroLT = yQue.AllocTensor<T>();
    Duplicate(zeroLT, T(ZERO), dataCount);
    yQue.EnQue(zeroLT);
}

// One scan step: folds the staged input tile into the running-sum
// accumulator and emits an output tile. `index` is the position along the
// cumulated axis; index == 0 marks the start of a fresh slice.
template <typename T>
__aicore__ inline void CumsumND<T>::Compute(int64_t index, int64_t dataCount) {
    LocalTensor<T> inLT = xQue.DeQue<T>();
    LocalTensor<T> outLT = yQue.AllocTensor<T>();
    LocalTensor<T> accLT = tempValLT;
    // A new slice starts from a zeroed accumulator.
    if (index == 0) {
        Duplicate(accLT, T(ZERO), dataCount);
    }
    if (exclusive == 1) {
        // Exclusive scan: emit the running sum *before* adding this element.
        DataCopy(outLT, accLT, dataCount);
        Add(accLT, inLT, accLT, dataCount);
    } else {
        // Inclusive scan: accumulate first, then emit.
        Add(accLT, inLT, accLT, dataCount);
        DataCopy(outLT, accLT, dataCount);
    }
    yQue.EnQue(outLT);
    xQue.FreeTensor(inLT);
}

// Writes one computed tile back to GM: a single burst for the block-aligned
// head of dataCount elements, then tailCount trailing elements one by one so
// the write never bursts past the intended region.
template <typename T>
__aicore__ inline void CumsumND<T>::CopyOut(int64_t gmOffset, int64_t dataCount, int64_t tailCount /*= 0*/) {
    LocalTensor<T> outLT = yQue.DeQue<T>();
    if (dataCount) {
        DataCopy(yGM[gmOffset], outLT, dataCount);
    }
    // Scalar tail copy; the loop simply does not run when tailCount == 0.
    for (int64_t i = 0; i < tailCount; i++) {
        yGM.SetValue(gmOffset + dataCount + i, outLT.GetValue(dataCount + i));
    }
    yQue.FreeTensor(outLT);
}
}  // namespace Cumsum

#endif  // CUMSUM_N_D_H