#ifndef HISTOGRAM_N_D_H
#define HISTOGRAM_N_D_H

#include "kernel_operator.h"

namespace Histogram {
using namespace AscendC;

constexpr int32_t BYTE_BLOCK = 32;    // data-copy block granularity in bytes
constexpr int32_t BYTE_REPEAT = 256;  // bytes processed per vector repeat
constexpr int32_t TMP_BUF_COUNT = 2;  // number of float scratch buffers carved from tempValBuf

constexpr float POS_ONE = 1.0f;
// Sentinel for the min/max scan. The previous literal 4e38 exceeds the range of
// float (FLT_MAX ~= 3.4028235e38), so the double->float constant conversion
// overflows; use the largest finite float instead.
constexpr float FLOAT_MAX = 3.402823466e38f;
constexpr float ZERO = 0.0f;

template <typename T, int32_t BUFFER_NUM>
class HistogramND {
public:
    __aicore__ inline HistogramND() = default;
    // Caches tiling parameters and binds the GM input/output buffers and UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const HistogramTilingData* __restrict tilingData);
    // Path taken when the whole input fits into UB at once.
    __aicore__ inline void ProcessCached();
    // Path taken when the input must be streamed through UB in chunks.
    __aicore__ inline void ProcessNotCached();

private:
    // Ceiling division; returns `a` unchanged when the divisor converts to zero.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 divisor(b);
        if (divisor == 0) {
            return a;
        }
        return (a + divisor - 1) / divisor;
    };

    // Rounds `a` up to the next multiple of `b`; returns `a` when `b` converts to zero.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 divisor(b);
        if (divisor == 0) {
            return a;
        }
        return CeilDiv(a, divisor) * divisor;
    };

    // Stages one chunk of input from GM into the vector-in queue.
    __aicore__ inline void CopyInX(int64_t gmOffset, int64_t dataCount);
    // Histograms one staged chunk; accumulates into yGM when index > 0.
    __aicore__ inline void SingleProc(int64_t index, int64_t dataCount, int64_t alignRepeatCount);
    // Folds one staged chunk's min/max into minValue/maxValue.
    __aicore__ inline void GetMinMax(int64_t dataCount, int64_t alignRepeatCount);

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> xQue;
    GlobalTensor<T> xGM, yGM;
    TBuf<QuePosition::VECCALC> tempValBuf;
    LocalTensor<float> tempValLT;
    int64_t blockIdx = 0;
    uint64_t perBlockCount = 0;   // elements of T per 32-byte block
    uint64_t perRepeatCount = 0;  // elements of T per 256-byte vector repeat

    // tiling params
    uint64_t ubMaxProcCount = 0;  // max elements processed per UB chunk
    uint64_t totalDataCount = 0;
    int64_t binsValue = 0;
    float minValue = FLOAT_MAX;
    float maxValue = -FLOAT_MAX;
    int64_t isMinMaxEqual = 0;    // non-zero: min/max must be derived from the data
    uint64_t loopTime = 0;        // number of full UB chunks
    uint64_t tailCount = 0;       // elements in the final partial chunk
};

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::Init(GM_ADDR x, GM_ADDR y,
                                                        const HistogramTilingData* __restrict tilingData) {
    // Caches all tiling parameters and binds GM/UB buffers.
    //   x: input tensor in GM, totalDataCount elements of T.
    //   y: output histogram in GM, binsValue elements of T.
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);
    perRepeatCount = BYTE_REPEAT / sizeof(T);

    ubMaxProcCount = tilingData->ubMaxProcCount;
    totalDataCount = tilingData->totalDataCount;
    binsValue = tilingData->binsValue;
    minValue = tilingData->minValue;
    maxValue = tilingData->maxValue;
    isMinMaxEqual = tilingData->isMinMaxEqual;
    loopTime = tilingData->loopTime;
    tailCount = tilingData->tailCount;

    xGM.SetGlobalBuffer((__gm__ T*)x, totalDataCount);
    yGM.SetGlobalBuffer((__gm__ T*)y, binsValue);

    // Every queue/scratch buffer holds ubMaxProcCount elements at float width:
    // int32 and float16 inputs are both cast to float before the vector ops
    // (sizeof(float) replaces the former magic constant 4).
    uint64_t singleBufferSize = ubMaxProcCount * sizeof(float);
    pipe.InitBuffer(xQue, BUFFER_NUM, singleBufferSize);
    pipe.InitBuffer(tempValBuf, singleBufferSize * TMP_BUF_COUNT);
    tempValLT = tempValBuf.Get<float>();
}

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::ProcessCached() {
    // Single-shot path: the whole input fits in UB, so it is copied in once and
    // every bin is counted against the cached tensor.
    int64_t alignCount = CeilAlignA2B(totalDataCount, perRepeatCount);
    CopyInX(0, CeilAlignA2B(totalDataCount, perBlockCount));

#if ORIG_DTYPE_X == DT_INT32
    // In-place widening: the int32 buffer is reinterpreted and overwritten as float.
    LocalTensor<T> xLTInt32 = xQue.template DeQue<T>();
    LocalTensor<float> xLT = xLTInt32.template ReinterpretCast<float>();
    Cast(xLT, xLTInt32, RoundMode::CAST_NONE, totalDataCount);
#elif ORIG_DTYPE_X == DT_FLOAT16
    // half data was staged in the upper half of the buffer (see CopyInX); the
    // cast widens it into the float view starting at offset 0.
    LocalTensor<float> xLT = xQue.template DeQue<float>();
    LocalTensor<T> xLTFloat16 = xLT.ReinterpretCast<T>()[ubMaxProcCount];
    Cast(xLT, xLTFloat16, RoundMode::CAST_NONE, totalDataCount);
#else
    LocalTensor<T> xLT = xQue.template DeQue<T>();
#endif
    // t1/t2 are the two float scratch buffers; the compare mask aliases t1's bytes.
    LocalTensor<float> t1 = tempValLT;
    LocalTensor<float> t2 = tempValLT[ubMaxProcCount];
    LocalTensor<uint8_t> mask = t1.template ReinterpretCast<uint8_t>();

    if (isMinMaxEqual) {
        // Tiling signalled min == max (no usable range given): derive the real
        // min/max from the data. x == x fails only for NaN, so NaN lanes are
        // replaced by element 0 before the reductions.
        DataCopy(t2, xLT, alignCount);
        Compare(mask, t2, t2, CMPMODE::EQ, alignCount);
        Select(t2, mask, t2, t2.GetValue(0), SELMODE::VSEL_TENSOR_SCALAR_MODE, alignCount);

        ReduceMin(t1, t2, t1, totalDataCount, false);
        minValue = t1.GetValue(0);
        ReduceMax(t1, t2, t1, totalDataCount, false);
        maxValue = t1.GetValue(0);
    }

    float stride = (maxValue - minValue) / binsValue;
    float increValue = minValue;  // running lower edge of the current bin
    int64_t halfWay = binsValue / 2;
    int64_t i = 0, next = 0;

    // Bins 0 .. binsValue-2 count elements in the half-open range [edge_i, edge_{i+1}).
    for (; i < binsValue - 1; i++) {
        Duplicate(t1, increValue, alignCount);
        Compare(mask, xLT, t1, CMPMODE::GE, alignCount);
        next = i + 1;
        // Upper edge computed from the nearer endpoint (min side for the first
        // half of the bins, max side for the second), presumably to limit
        // accumulated rounding drift — TODO confirm against reference impl.
        increValue = next < halfWay ? (minValue + stride * next) : (maxValue - stride * (binsValue - next));
        // Lanes below the lower edge are forced to maxValue + 1 so they also
        // fail the upcoming "< upper edge" test.
        Select(t2, mask, xLT, maxValue + 1, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignCount);
        Duplicate(t1, increValue, alignCount);
        Compare(mask, t2, t1, CMPMODE::LT, alignCount);
        // Turn the in-range mask into 1.0/0.0 and sum. Only the first
        // totalDataCount lanes are reduced, so alignment padding is ignored.
        Duplicate(t2, POS_ONE, alignCount);
        Select(t2, mask, t2, ZERO, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignCount);
        ReduceSum(t1, t2, t1, totalDataCount);
        yGM.SetValue(i, t1.GetValue(0));
    }

    // Final bin is closed on the right: [edge, maxValue].
    Duplicate(t1, increValue, alignCount);
    Compare(mask, xLT, t1, CMPMODE::GE, alignCount);
    Select(t2, mask, xLT, maxValue + 1, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignCount);
    Duplicate(t1, maxValue, alignCount);
    Compare(mask, t2, t1, CMPMODE::LE, alignCount);
    Duplicate(t2, POS_ONE, alignCount);
    Select(t2, mask, t2, ZERO, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignCount);
    ReduceSum(t1, t2, t1, totalDataCount);
    yGM.SetValue(i, t1.GetValue(0));

#if ORIG_DTYPE_X == DT_INT32
    xQue.FreeTensor(xLTInt32);
#else
    xQue.FreeTensor(xLT);
#endif
}

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::ProcessNotCached() {
    // Streaming path: the input is consumed in ubMaxProcCount-sized chunks.
    // Pass 1 (only when tiling flagged min == max): scan every chunk to derive
    // the real min/max of the data before binning.
    int64_t offset = 0;
    if (isMinMaxEqual) {
        for (int64_t loop = 0; loop < loopTime; loop++) {
            CopyInX(offset, ubMaxProcCount);
            GetMinMax(ubMaxProcCount, ubMaxProcCount);
            offset += ubMaxProcCount;
        }
        if (tailCount) {
            CopyInX(offset, CeilAlignA2B(tailCount, perBlockCount));
            GetMinMax(tailCount, CeilAlignA2B(tailCount, perRepeatCount));
        }
    }
    // Pass 2: histogram each chunk; SingleProc accumulates into GM after the
    // first chunk (index 0 initializes the bins).
    offset = 0;
    for (int64_t loop = 0; loop < loopTime; loop++) {
        CopyInX(offset, ubMaxProcCount);
        SingleProc(loop, ubMaxProcCount, ubMaxProcCount);
        offset += ubMaxProcCount;
    }
    if (tailCount) {
        CopyInX(offset, CeilAlignA2B(tailCount, perBlockCount));
        SingleProc(loopTime, tailCount, CeilAlignA2B(tailCount, perRepeatCount));
    }
}

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::CopyInX(int64_t gmOffset, int64_t dataCount) {
    // Stages dataCount input elements from GM into the vector-in queue.
#if ORIG_DTYPE_X == DT_FLOAT16
    // The buffer is float-sized; half data lands in its upper half so the later
    // in-place Cast to float can fill the view at offset 0 without overlap.
    LocalTensor<float> stage = xQue.template AllocTensor<float>();
    LocalTensor<T> halfView = stage.ReinterpretCast<T>()[ubMaxProcCount];
    DataCopy(halfView, xGM[gmOffset], dataCount);
    xQue.EnQue(stage);
#else
    LocalTensor<T> stage = xQue.template AllocTensor<T>();
    DataCopy(stage, xGM[gmOffset], dataCount);
    xQue.EnQue(stage);
#endif
}

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::SingleProc(int64_t index, int64_t dataCount,
                                                              int64_t alignRepeatCount) {
    // Histograms one staged chunk of dataCount elements (padded to
    // alignRepeatCount for the vector ops). index 0 writes the bins; later
    // chunks read-modify-write to accumulate into yGM.
#if ORIG_DTYPE_X == DT_INT32
    // In-place widening: the int32 buffer is reinterpreted and overwritten as float.
    LocalTensor<T> xLTInt32 = xQue.template DeQue<T>();
    LocalTensor<float> xLT = xLTInt32.template ReinterpretCast<float>();
    Cast(xLT, xLTInt32, RoundMode::CAST_NONE, dataCount);
#elif ORIG_DTYPE_X == DT_FLOAT16
    // half data was staged in the upper half of the buffer (see CopyInX).
    LocalTensor<float> xLT = xQue.template DeQue<float>();
    LocalTensor<T> xLTFloat16 = xLT.ReinterpretCast<T>()[ubMaxProcCount];
    Cast(xLT, xLTFloat16, RoundMode::CAST_NONE, dataCount);
#else
    LocalTensor<T> xLT = xQue.template DeQue<T>();
#endif
    // t1/t2 are the two float scratch buffers; the compare mask aliases t1's bytes.
    LocalTensor<float> t1 = tempValLT;
    LocalTensor<float> t2 = tempValLT[ubMaxProcCount];
    LocalTensor<uint8_t> mask = t1.template ReinterpretCast<uint8_t>();

    float stride = (maxValue - minValue) / binsValue;
    float increValue = minValue;  // running lower edge of the current bin
    int64_t halfWay = binsValue / 2;
    int64_t i = 0, next = 0, oldValue = 0;

    // Bins 0 .. binsValue-2 count elements in the half-open range [edge_i, edge_{i+1}).
    for (; i < binsValue - 1; i++) {
        Duplicate(t1, increValue, alignRepeatCount);
        Compare(mask, xLT, t1, CMPMODE::GE, alignRepeatCount);
        next = i + 1;
        // Upper edge computed from the nearer endpoint (min side for the first
        // half of the bins, max side for the second), presumably to limit
        // accumulated rounding drift — TODO confirm against reference impl.
        increValue = next < halfWay ? (minValue + stride * next) : (maxValue - stride * (binsValue - next));
        // Lanes below the lower edge are forced to maxValue + 1 so they also
        // fail the upcoming "< upper edge" test.
        Select(t2, mask, xLT, maxValue + 1, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);
        Duplicate(t1, increValue, alignRepeatCount);
        Compare(mask, t2, t1, CMPMODE::LT, alignRepeatCount);
        // Turn the in-range mask into 1.0/0.0 and sum. Only the first dataCount
        // lanes are reduced, so alignment padding is ignored.
        Duplicate(t2, POS_ONE, alignRepeatCount);
        Select(t2, mask, t2, ZERO, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);
        ReduceSum(t1, t2, t1, dataCount);
        if (index == 0) {
            yGM.SetValue(i, t1.GetValue(0));
        } else {
            // Accumulate onto the count written by earlier chunks.
            oldValue = yGM.GetValue(i);
            yGM.SetValue(i, oldValue + t1.GetValue(0));
        }
    }

    // Final bin is closed on the right: [edge, maxValue].
    Duplicate(t1, increValue, alignRepeatCount);
    Compare(mask, xLT, t1, CMPMODE::GE, alignRepeatCount);
    Select(t2, mask, xLT, maxValue + 1, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);
    Duplicate(t1, maxValue, alignRepeatCount);
    Compare(mask, t2, t1, CMPMODE::LE, alignRepeatCount);
    Duplicate(t2, POS_ONE, alignRepeatCount);
    Select(t2, mask, t2, ZERO, SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);
    ReduceSum(t1, t2, t1, dataCount);
    if (index == 0) {
        yGM.SetValue(i, t1.GetValue(0));
    } else {
        oldValue = yGM.GetValue(i);
        yGM.SetValue(i, oldValue + t1.GetValue(0));
    }

#if ORIG_DTYPE_X == DT_INT32
    xQue.FreeTensor(xLTInt32);
#else
    xQue.FreeTensor(xLT);
#endif
}

template <typename T, int32_t BUFFER_NUM>
__aicore__ inline void HistogramND<T, BUFFER_NUM>::GetMinMax(int64_t dataCount, int64_t alignRepeatCount) {
    // Reduces one staged chunk to its min/max and folds the result into the
    // running minValue/maxValue members. The cast covers the full aligned
    // count; only the first dataCount lanes feed the reductions.
    float tempMinValue = 0, tempMaxValue = 0;
#if ORIG_DTYPE_X == DT_INT32
    // In-place widening: the int32 buffer is reinterpreted and overwritten as float.
    LocalTensor<T> xLTInt32 = xQue.template DeQue<T>();
    LocalTensor<float> xLT = xLTInt32.template ReinterpretCast<float>();
    Cast(xLT, xLTInt32, RoundMode::CAST_NONE, alignRepeatCount);
#elif ORIG_DTYPE_X == DT_FLOAT16
    // half data was staged in the upper half of the buffer (see CopyInX).
    LocalTensor<float> xLT = xQue.template DeQue<float>();
    LocalTensor<T> xLTFloat16 = xLT.ReinterpretCast<T>()[ubMaxProcCount];
    Cast(xLT, xLTFloat16, RoundMode::CAST_NONE, alignRepeatCount);
#else
    LocalTensor<T> xLT = xQue.template DeQue<T>();
#endif
    LocalTensor<float> t1 = tempValLT;
    LocalTensor<float> t2 = tempValLT[ubMaxProcCount];
    LocalTensor<uint8_t> mask = t1.template ReinterpretCast<uint8_t>();
    // x == x fails only for NaN, so NaN lanes are replaced by element 0
    // before reducing.
    DataCopy(t2, xLT, alignRepeatCount);
    Compare(mask, t2, t2, CMPMODE::EQ, alignRepeatCount);
    Select(t2, mask, t2, t2.GetValue(0), SELMODE::VSEL_TENSOR_SCALAR_MODE, alignRepeatCount);

    ReduceMin(t1, t2, t1, dataCount, false);
    tempMinValue = t1.GetValue(0);
    ReduceMax(t1, t2, t1, dataCount, false);
    tempMaxValue = t1.GetValue(0);

    // Fold this chunk's extrema into the running min/max.
    if (tempMinValue < minValue) {
        minValue = tempMinValue;
    }
    if (tempMaxValue > maxValue) {
        maxValue = tempMaxValue;
    }
#if ORIG_DTYPE_X == DT_INT32
    xQue.FreeTensor(xLTInt32);
#else
    xQue.FreeTensor(xLT);
#endif
}

}  // namespace Histogram

#endif  // HISTOGRAM_N_D_H