#ifndef REDUCE_SUM_N_D_H
#define REDUCE_SUM_N_D_H

#include "kernel_operator.h"

namespace ReduceSum {
using namespace AscendC;

constexpr int32_t BYTE_BLOCK = 32;
constexpr int32_t BYTE_REPEAT = 256;
constexpr int32_t TMP_BUF_COUNT = 1;

constexpr float ZERO = 0.0;

template <typename T>
class ReduceSumND {
public:
    __aicore__ inline ReduceSumND() = default;
    /// Binds GM addresses and unpacks tiling data; must run before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axes, GM_ADDR y, const ReduceSumTilingData* __restrict tilingData);
    /// Performs the sum-reduction over the configured axis and writes y.
    __aicore__ inline void Process();

private:
    // Ceiling division; returns `a` unchanged when the divisor is zero.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 divisor(b);
        if (divisor == 0) {
            return a;
        }
        return (a + divisor - 1) / divisor;
    }

    // Rounds `a` up to the next multiple of `b`; returns `a` when `b` is zero.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 step(b);
        if (step == 0) {
            return a;
        }
        return CeilDiv(a, step) * step;
    }

    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void Compute(int64_t index, int64_t dataCount);
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount, int64_t tailCount = 0);

private:
    TPipe pipe;

    TQue<QuePosition::VECIN, 2> xQue;       // double-buffered input queue
    TQue<QuePosition::VECOUT, 1> yQue;      // single-buffered output queue
    TBuf<QuePosition::VECCALC> tempValBuf;  // scratch buffer holding the running sum
    GlobalTensor<T> xGM, yGM;
    LocalTensor<T> tempValLT;      // accumulator view over tempValBuf
    int64_t blockIdx = 0;
    uint64_t perBlockCount = 0;    // elements per 32-byte block
    uint64_t perRepeatCount = 0;   // elements per 256-byte vector repeat
    int64_t axesValue = 0;         // length of the reduced dimension
    int64_t loopCount = 1;         // product of the dims before the reduced axis
    int64_t cSzie = 1;             // product of the dims after the reduced axis (sic: "cSize")

    // tiling params
    uint64_t ubMaxProcCount = 0;   // max elements processed per UB pass
    uint64_t xDimCount = 0;
    const uint64_t* xDimArray = nullptr;
    uint64_t ignoreNan = 0;        // NOTE(review): read from tiling but currently unused (see Compute)
};

template <typename T>
__aicore__ inline void ReduceSumND<T>::Init(GM_ADDR x, GM_ADDR axes, GM_ADDR y,
                                            const ReduceSumTilingData* __restrict tilingData) {
    // Unpack tiling parameters and derive the [loopCount, axesValue, cSzie]
    // view of the input used by Process().
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);
    perRepeatCount = BYTE_REPEAT / sizeof(T);
    ubMaxProcCount = tilingData->ubMaxProcCount;
    xDimCount = tilingData->xDimCount;
    xDimArray = tilingData->xDimArray;
    ignoreNan = tilingData->ignoreNan;

    xGM.SetGlobalBuffer((__gm__ T*)x);
    yGM.SetGlobalBuffer((__gm__ T*)y);

    // Only a single reduction axis is supported: read the first (and only)
    // entry of the axes tensor.
    GlobalTensor<int32_t> axesGM;
    axesGM.SetGlobalBuffer((__gm__ int32_t*)axes, 1);
    int64_t tempDimValue = axesGM.GetValue(0);
    // Normalize a negative axis index; clamp anything out of range to axis 0.
    tempDimValue = tempDimValue < 0 ? static_cast<int64_t>(xDimCount) + tempDimValue : tempDimValue;
    if (tempDimValue < 0 || tempDimValue >= static_cast<int64_t>(xDimCount)) {
        tempDimValue = 0;
    }
    axesValue = xDimArray[tempDimValue];

    // loopCount: product of the dimensions before the reduced axis.
    for (int64_t i = 0; i < tempDimValue; i++) {
        loopCount *= xDimArray[i];
    }

    // cSzie: product of the dimensions after the reduced axis.
    for (int64_t i = tempDimValue + 1; i < static_cast<int64_t>(xDimCount); i++) {
        cSzie *= xDimArray[i];
    }

    uint64_t singleBufferSize = ubMaxProcCount * sizeof(T);
    pipe.InitBuffer(xQue, 2, singleBufferSize);  // 2 buffers: double buffering on input
    pipe.InitBuffer(yQue, 1, singleBufferSize);
    pipe.InitBuffer(tempValBuf, singleBufferSize * TMP_BUF_COUNT);
    tempValLT = tempValBuf.Get<T>();
}

template <typename T>
__aicore__ inline void ReduceSumND<T>::Process() {
    // Input layout: [loopCount, axesValue, cSzie]; the middle axis is summed,
    // producing an output of layout [loopCount, cSzie].
    int64_t inGmOffset = 0, outGmOffset = 0;

    if (cSzie <= ubMaxProcCount) {
        // Whole inner slice fits in UB: accumulate axesValue slices, then
        // write the result once per outer index.
        int64_t cSizeAlign = CeilAlignA2B(cSzie, perBlockCount);
        // Aligned prefix / unaligned remainder of one output slice. The last
        // outer iteration stores the remainder element-by-element so the
        // block-aligned DataCopy cannot write past the end of the output
        // tensor; earlier iterations may spill into the next slice's range,
        // which the next iteration then overwrites with correct data (MTE3
        // copies issue in order).
        int64_t mainCount = cSzie / perBlockCount * perBlockCount;
        int64_t remCount = cSzie % perBlockCount;
        for (int64_t i = 0; i < loopCount; i++) {
            for (int64_t j = 0; j < axesValue; j++) {
                CopyIn(inGmOffset, cSizeAlign);
                Compute(j, cSizeAlign);
                inGmOffset += cSzie;
            }
            LocalTensor<T> yLT = yQue.AllocTensor<T>();
            DataCopy(yLT, tempValLT, cSizeAlign);
            yQue.EnQue(yLT);
            if (i + 1 == loopCount) {
                CopyOut(outGmOffset, mainCount, remCount);
            } else {
                CopyOut(outGmOffset, cSizeAlign);
            }
            outGmOffset += cSzie;
        }
    } else {
        // Inner slice is larger than UB: split it into loopTime full chunks
        // of ubMaxProcCount elements plus an unaligned tail.
        int64_t loopTime = cSzie / ubMaxProcCount;
        int64_t tailCount = cSzie % ubMaxProcCount;
        int64_t tailCountAlign = CeilAlignA2B(tailCount, perBlockCount);
        int64_t tmpInGmOffset = 0, tmpOutGmOffset = 0;
        int64_t tempData1 = tailCount / perBlockCount * perBlockCount;  // aligned part of the tail
        int64_t tempData2 = tailCount % perBlockCount;                  // scalar-stored remainder
        for (int64_t i = 0; i < loopCount; i++) {
            tmpOutGmOffset = outGmOffset;
            for (int64_t j = 0; j < loopTime; j++) {
                // BUGFIX: chunk j starts j * ubMaxProcCount into the slice.
                // Previously the input offset was carried over from the prior
                // chunk's reduction loop (ending axesValue * cSzie further
                // on) and the output advanced by cSzie instead of
                // ubMaxProcCount, so every chunk after the first read and
                // wrote the wrong GM locations.
                tmpInGmOffset = inGmOffset + j * static_cast<int64_t>(ubMaxProcCount);
                for (int64_t k = 0; k < axesValue; k++) {
                    CopyIn(tmpInGmOffset, ubMaxProcCount);
                    Compute(k, ubMaxProcCount);
                    tmpInGmOffset += cSzie;
                }
                LocalTensor<T> yLT = yQue.AllocTensor<T>();
                DataCopy(yLT, tempValLT, ubMaxProcCount);
                yQue.EnQue(yLT);
                CopyOut(tmpOutGmOffset, ubMaxProcCount);
                tmpOutGmOffset += ubMaxProcCount;
            }
            if (tailCount) {
                tmpInGmOffset = inGmOffset + loopTime * static_cast<int64_t>(ubMaxProcCount);
                tmpOutGmOffset = outGmOffset + loopTime * static_cast<int64_t>(ubMaxProcCount);
                for (int64_t j = 0; j < axesValue; j++) {
                    CopyIn(tmpInGmOffset, tailCountAlign);
                    Compute(j, tailCountAlign);
                    tmpInGmOffset += cSzie;
                }
                LocalTensor<T> yLT = yQue.AllocTensor<T>();
                DataCopy(yLT, tempValLT, tailCountAlign);
                yQue.EnQue(yLT);
                // Aligned bulk copy plus scalar tail keeps the write inside
                // the output tensor.
                CopyOut(tmpOutGmOffset, tempData1, tempData2);
            }
            inGmOffset += (cSzie * axesValue);
            outGmOffset += cSzie;
        }
    }
}

template <typename T>
__aicore__ inline void ReduceSumND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    // Stage dataCount elements from GM into a fresh input-queue tensor.
    LocalTensor<T> stage = xQue.AllocTensor<T>();
    DataCopy(stage, xGM[gmOffset], dataCount);
    xQue.EnQue(stage);
}

template <typename T>
__aicore__ inline void ReduceSumND<T>::Compute(int64_t index, int64_t dataCount) {
    // Accumulate one staged input slice into the running sum held in
    // tempValLT. index == 0 marks the first slice of a reduction group, so
    // the accumulator is zeroed before the add.
    LocalTensor<T> input = xQue.DeQue<T>();
    LocalTensor<T> acc = tempValLT;

    // TODO(review): the ignoreNan path is currently disabled; when enabled it
    // would zero NaN lanes before accumulation:
    //     LocalTensor<uint8_t> mask = acc.template ReinterpretCast<uint8_t>();
    //     int64_t alignRepeatCount = CeilAlignA2B(dataCount, perRepeatCount);
    //     Compare(mask, input, input, CMPMODE::EQ, alignRepeatCount);
    //     Select(input, mask, input, T(ZERO), SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);

    if (index == 0) {
        Duplicate(acc, T(ZERO), dataCount);
    }
    Add(acc, input, acc, dataCount);

    xQue.FreeTensor(input);
}

template <typename T>
__aicore__ inline void ReduceSumND<T>::CopyOut(int64_t gmOffset, int64_t dataCount, int64_t tailCount /*= 0*/) {
    // Write the accumulated result back to GM: one bulk DataCopy for the
    // first dataCount elements, then scalar stores for the tailCount elements
    // that follow, so an unaligned remainder never triggers a block-sized
    // over-write of neighbouring output.
    LocalTensor<T> result = yQue.DeQue<T>();
    if (dataCount != 0) {
        DataCopy(yGM[gmOffset], result, dataCount);
    }
    for (int64_t i = 0; i < tailCount; i++) {
        yGM.SetValue(gmOffset + dataCount + i, result.GetValue(dataCount + i));
    }

    yQue.FreeTensor(result);
}
}  // namespace ReduceSum

#endif  // REDUCE_SUM_N_D_H