#ifndef AS_STRIDED_N_D_H
#define AS_STRIDED_N_D_H

#include "kernel_operator.h"

namespace AsStrided {
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;
constexpr int32_t BYTE_BLOCK = 32;
constexpr int32_t MAX_DIM_COUNT = 8;

template <typename T>
class AsStridedND {
public:
    __aicore__ inline AsStridedND() = default;
    /// Reads tiling params plus the size/stride/storage_offset tensors from GM
    /// and prepares the double-buffered staging queue.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR size, GM_ADDR stride, GM_ADDR storage_offset, GM_ADDR y,
                                const AsStridedTilingData* __restrict tilingData);
    /// Materializes the strided view of x into the contiguous output y.
    __aicore__ inline void ProcessNormal();

private:
    // Ceiling division; a zero divisor is treated as a no-op and returns a.
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilDiv(T1 a, T2 b) {
        T1 divisor(b);
        if (divisor == 0) {
            return a;
        }
        return (a + divisor - 1) / divisor;
    }

    // Rounds a up to the next multiple of b (b == 0 returns a unchanged).
    template <typename T1, typename T2>
    __aicore__ inline T1 CeilAlignA2B(T1 a, T2 b) {
        T1 divisor(b);
        if (divisor == 0) {
            return a;
        }
        return CeilDiv(a, divisor) * divisor;
    }

    __aicore__ inline void UpdateXPos(int64_t& xGmIndex);
    __aicore__ inline void LoopCopy(int64_t xGmOffset, int64_t yGmOffset, int64_t loopTime, int64_t tailCount);
    __aicore__ inline void CopyIn(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void Compute(int64_t gmOffset, int64_t dataCount);
    __aicore__ inline void CopyOut(int64_t gmOffset, int64_t dataCount);

private:
    TPipe pipe;
    // Single bind queue used for both VECIN and VECOUT stages of the copy.
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> dataQue;
    GlobalTensor<T> xGM, yGM;
    int64_t blockIdx = 0;                       // core index from GetBlockIdx()
    uint64_t perBlockCount = 0;                 // elements per 32-byte block
    uint64_t sizeShape[MAX_DIM_COUNT] = {0};    // output shape per dim
    uint64_t strideShape[MAX_DIM_COUNT] = {0};  // input element stride per dim
    uint64_t tempShape[MAX_DIM_COUNT] = {0};    // running multi-dim counter
    uint64_t outDataCount = 1;                  // total output element count

    // tiling params
    uint64_t ubMaxProcCount = 0;  // max elements per UB tile
    uint64_t outDimCount = 0;     // number of output dims (<= MAX_DIM_COUNT)
};

template <typename T>
__aicore__ inline void AsStridedND<T>::Init(GM_ADDR x, GM_ADDR size, GM_ADDR stride, GM_ADDR storage_offset, GM_ADDR y,
                                            const AsStridedTilingData* __restrict tilingData) {
    blockIdx = GetBlockIdx();
    perBlockCount = BYTE_BLOCK / sizeof(T);  // elements per 32-byte GM block
    ubMaxProcCount = tilingData->ubMaxProcCount;
    outDimCount = tilingData->outDimCount;

    // size/stride/storage_offset arrive as int32 tensors in global memory.
    GlobalTensor<int32_t> sizeGM, strideGM, soGM;
    sizeGM.SetGlobalBuffer((__gm__ int32_t*)size, outDimCount);
    strideGM.SetGlobalBuffer((__gm__ int32_t*)stride, outDimCount);
    soGM.SetGlobalBuffer((__gm__ int32_t*)storage_offset, 1);

    // Cache shape/stride locally and accumulate the total output element count.
    // uint64_t index avoids a signed/unsigned comparison with outDimCount.
    for (uint64_t i = 0; i < outDimCount; i++) {
        sizeShape[i] = sizeGM.GetValue(i);
        strideShape[i] = strideGM.GetValue(i);
        outDataCount *= sizeShape[i];
    }

    // All input addressing is relative to storage_offset (as_strided semantics).
    xGM.SetGlobalBuffer((__gm__ T*)x + soGM.GetValue(0));
    yGM.SetGlobalBuffer((__gm__ T*)y);

    // Double-buffered queue sized for the largest UB tile the tiling allows.
    pipe.InitBuffer(dataQue, BUFFER_NUM, ubMaxProcCount * sizeof(T));
}

template <typename T>
__aicore__ inline void AsStridedND<T>::ProcessNormal() {
    // Walk the output in row-major order, one innermost-dim "row" at a time.
    int64_t yGmIndex = 0, xGmIndex = 0, tailSize = sizeShape[outDimCount - 1],
            tailStride = strideShape[outDimCount - 1];
    if (tailStride == 1) {
        // Innermost dim is contiguous in the input: move whole rows via DataCopy.
        // NOTE(review): each row transfer is rounded up to tailSizeAlign while
        // yGmIndex only advances by tailSize, so the final row writes up to
        // (tailSizeAlign - tailSize) elements past the logical output end —
        // assumes the output GM buffer carries that alignment padding; confirm
        // against the tiling/host side.
        int64_t tailSizeAlign = CeilAlignA2B(tailSize, perBlockCount);
        if (tailSizeAlign > ubMaxProcCount) {
            // Row does not fit in one UB tile: split into UB-sized chunks.
            int64_t loopTime = tailSizeAlign / ubMaxProcCount;
            int64_t tailCount = tailSizeAlign % ubMaxProcCount;
            do {
                LoopCopy(xGmIndex, yGmIndex, loopTime, tailCount);
                yGmIndex += tailSize;
                UpdateXPos(xGmIndex);
                outDataCount -= tailSize;
            } while (outDataCount);
        } else {
            // Whole (aligned) row fits in UB: one CopyIn/CopyOut pair per row.
            do {
                CopyIn(xGmIndex, tailSizeAlign);
                CopyOut(yGmIndex, tailSizeAlign);
                yGmIndex += tailSize;
                UpdateXPos(xGmIndex);
                outDataCount -= tailSize;
            } while (outDataCount);
        }
    } else {
        // Non-unit innermost stride: gather element by element through GM
        // scalar accesses (slow path).
        do {
            for (int64_t i = 0; i < tailSize; i++) {
                yGM.SetValue(yGmIndex++, xGM.GetValue(xGmIndex));
                xGmIndex += tailStride;
            }
            UpdateXPos(xGmIndex);  // recomputes xGmIndex from the dim counters
            outDataCount -= tailSize;
        } while (outDataCount);
    }
}

template <typename T>
__aicore__ inline void AsStridedND<T>::UpdateXPos(int64_t& xGmIndex) {
    // Odometer-style advance of the outer-dim counters (all dims except the
    // innermost): bump the last outer dim and ripple the carry outward.
    int64_t dim = static_cast<int64_t>(outDimCount) - 2;
    while (dim >= 0) {
        ++tempShape[dim];
        if (tempShape[dim] < sizeShape[dim]) {
            break;
        }
        tempShape[dim] = 0;  // overflowed this dim — carry into the next-outer
        --dim;
    }
    // Recompute the absolute input offset from the counters and strides.
    int64_t offset = 0;
    for (int64_t d = 0; d < static_cast<int64_t>(outDimCount) - 1; ++d) {
        offset += tempShape[d] * strideShape[d];
    }
    xGmIndex = offset;
}

template <typename T>
__aicore__ inline void AsStridedND<T>::LoopCopy(int64_t xGmOffset, int64_t yGmOffset, int64_t loopTime,
                                                int64_t tailCount) {
    // Stream `loopTime` full UB tiles, then one partial tile of `tailCount`.
    int64_t remaining = loopTime;
    while (remaining-- > 0) {
        CopyIn(xGmOffset, ubMaxProcCount);
        CopyOut(yGmOffset, ubMaxProcCount);
        xGmOffset += ubMaxProcCount;
        yGmOffset += ubMaxProcCount;
    }

    if (tailCount != 0) {
        CopyIn(xGmOffset, tailCount);
        CopyOut(yGmOffset, tailCount);
    }
}

template <typename T>
__aicore__ inline void AsStridedND<T>::CopyIn(int64_t gmOffset, int64_t dataCount) {
    // GM -> UB: stage `dataCount` elements starting at xGM[gmOffset].
    LocalTensor<T> tile = dataQue.AllocTensor<T>();
    DataCopy(tile, xGM[gmOffset], dataCount);
    dataQue.EnQue(tile);
}

template <typename T>
__aicore__ inline void AsStridedND<T>::CopyOut(int64_t gmOffset, int64_t dataCount) {
    // UB -> GM: drain the staged tile into yGM[gmOffset] and recycle it.
    LocalTensor<T> tile = dataQue.DeQue<T>();
    DataCopy(yGM[gmOffset], tile, dataCount);
    dataQue.FreeTensor(tile);
}
}  // namespace AsStrided

#endif  // AS_STRIDED_N_D_H