#include "kernel_operator.h"
#define BUFFER_NUM 2
#define BLOCK_SIZE 32
using namespace AscendC;

template <typename T>
// Flat GM->GM copy kernel: a reshape never moves data, so the payload is
// copied unchanged in `loop` chunks of coreDataNum elements, the last chunk
// possibly shorter (tailDataNum).
class KernelReshape {
    public:
    __aicore__ inline KernelReshape() {}
    // Initialize tiling-derived chunking and bind the GM buffers.
    //   x, y        - GM addresses of input / output, viewed as T
    //   preiter     - 32-byte blocks moved per loop iteration (from tiling)
    //   totalBlocks - total number of 32-byte blocks (from tiling)
    //   totalLength - total number of T elements
    //   pipeIn      - pipe used to back the double-buffered queue
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t preiter, uint32_t totalBlocks, uint32_t totalLength, TPipe *pipeIn) {
        this->pipe = pipeIn;
        elemNum = BLOCK_SIZE / sizeof(T);   // elements per 32-byte block
        coreDataNum = preiter * elemNum;    // elements moved per full iteration
        loop = totalBlocks / preiter;       // number of full iterations
        xGm.SetGlobalBuffer((__gm__ T *)x, totalLength);
        yGm.SetGlobalBuffer((__gm__ T *)y, totalLength);
        tailDataNum = coreDataNum;
        // Fix: compare totalLength against the element count actually covered
        // by the full iterations, coreDataNum*loop. The previous code
        // multiplied by elemNum a second time (units mismatch), which made the
        // test effectively always true and appended a spurious zero-length
        // tail copy whenever the data divided evenly.
        if (totalLength != static_cast<uint32_t>(coreDataNum * loop)) {
            loop = loop + 1;
            tailDataNum = totalLength - (loop - 1) * coreDataNum;
        }
        pipe->InitBuffer(inQueue, BUFFER_NUM, coreDataNum * sizeof(T));
    }
    // Move every full chunk, then the (possibly shorter) tail chunk.
    __aicore__ inline void process() {
        // Copy-in sizes are rounded up to a whole 32-byte block; copy-out
        // writes only the exact element count. (coreDataNum is already a
        // multiple of elemNum, so rounding it is a no-op kept for safety.)
        int roundCoreDataNum = (coreDataNum + elemNum - 1) / elemNum * elemNum;
        for (int i = 0; i < loop - 1; i++) {
            copyIn(i, roundCoreDataNum);
            copyOut(i, coreDataNum);
        }
        int roundTailDataNum = (tailDataNum + elemNum - 1) / elemNum * elemNum;
        copyIn(loop - 1, roundTailDataNum);
        copyOut(loop - 1, tailDataNum);
    }
    // GM -> UB: stage `count` elements of chunk i into the queue.
    // NOTE(review): a rounded-up tail count reads up to one block past the
    // logical end of xGm; this assumes GM buffers are block-padded -- confirm.
    __aicore__ inline void copyIn(int i, int count) {
        AscendC::LocalTensor<T> srcLocal = inQueue.AllocTensor<T>();
        // Last DataCopyExtParams field is a reserved slot.
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPadExtParams<T> padParams{false, 0, 0, 0};
        AscendC::DataCopyPad(srcLocal, xGm[i * coreDataNum], copyParams, padParams);
        inQueue.EnQue<T>(srcLocal);
    }
    // UB -> GM: write `count` elements of chunk i from the queue.
    __aicore__ inline void copyOut(int i, int count) {
        AscendC::LocalTensor<T> dstLocal = inQueue.DeQue<T>();
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPad(yGm[i * coreDataNum], dstLocal, copyParams);
        inQueue.FreeTensor(dstLocal);
    }
    private:
    TPipe *pipe;
    // One bound VECIN/VECOUT queue: data passes through untouched.
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> inQueue;
    GlobalTensor<T> yGm, xGm;
    int loop;                     // total chunks, including the tail
    int coreDataNum, tailDataNum; // elements in a full chunk / in the tail
    int elemNum;                  // elements per 32-byte block
};

template <typename T>
// 64-bit variant of the flat copy kernel: the kernel entry instantiates this
// with T = int32_t for int64/uint64 inputs, doubling the counts so that every
// 64-bit element is moved as two 32-bit halves.
class KernelReshape64 {
    public:
    __aicore__ inline KernelReshape64() {}
    // Initialize tiling-derived chunking and bind the GM buffers.
    //   x, y        - GM addresses of input / output, viewed as T
    //   preiter     - 32-byte blocks per iteration (tiling, pre-doubling units)
    //   totalBlocks - total 32-byte blocks (tiling, pre-doubling units)
    //   totalLength - total element count in the ORIGINAL 64-bit elements
    //   pipeIn      - pipe used to back the double-buffered queue
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t preiter, uint32_t totalBlocks, uint32_t totalLength, TPipe *pipeIn) {
        // Re-express the tiling in 32-bit units: double the block and element
        // counts, halve the per-iteration block count. `loop` is deliberately
        // computed from the doubled block count and the ORIGINAL preiter, so
        // loop * coreDataNum equals the doubled totalLength when it divides
        // evenly.
        totalBlocks *= 2;
        totalLength *= 2;
        loop = totalBlocks / preiter;
        preiter /= 2;
        this->pipe = pipeIn;
        elemNum = BLOCK_SIZE / sizeof(T);   // elements per 32-byte block
        coreDataNum = preiter * elemNum;    // elements moved per full iteration
        xGm.SetGlobalBuffer((__gm__ T *)x, totalLength);
        yGm.SetGlobalBuffer((__gm__ T *)y, totalLength);
        tailDataNum = coreDataNum;
        // Fix: compare totalLength against the element count actually covered
        // by the full iterations, coreDataNum*loop. The previous code
        // multiplied by elemNum a second time (units mismatch), which made the
        // test effectively always true and appended a spurious zero-length
        // tail copy whenever the data divided evenly.
        if (totalLength != static_cast<uint32_t>(coreDataNum * loop)) {
            loop = loop + 1;
            tailDataNum = totalLength - (loop - 1) * coreDataNum;
        }
        pipe->InitBuffer(inQueue, BUFFER_NUM, coreDataNum * sizeof(T));
    }
    // Move every full chunk, then the (possibly shorter) tail chunk.
    __aicore__ inline void process() {
        // Copy-in sizes are rounded up to a whole 32-byte block; copy-out
        // writes only the exact element count. (coreDataNum is already a
        // multiple of elemNum, so rounding it is a no-op kept for safety.)
        int roundCoreDataNum = (coreDataNum + elemNum - 1) / elemNum * elemNum;
        for (int i = 0; i < loop - 1; i++) {
            copyIn(i, roundCoreDataNum);
            copyOut(i, coreDataNum);
        }
        int roundTailDataNum = (tailDataNum + elemNum - 1) / elemNum * elemNum;
        copyIn(loop - 1, roundTailDataNum);
        copyOut(loop - 1, tailDataNum);
    }
    // GM -> UB: stage `count` elements of chunk i into the queue.
    // NOTE(review): a rounded-up tail count reads up to one block past the
    // logical end of xGm; this assumes GM buffers are block-padded -- confirm.
    __aicore__ inline void copyIn(int i, int count) {
        AscendC::LocalTensor<T> srcLocal = inQueue.AllocTensor<T>();
        // Last DataCopyExtParams field is a reserved slot.
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPadExtParams<T> padParams{false, 0, 0, 0};
        AscendC::DataCopyPad(srcLocal, xGm[i * coreDataNum], copyParams, padParams);
        inQueue.EnQue<T>(srcLocal);
    }
    // UB -> GM: write `count` elements of chunk i from the queue.
    __aicore__ inline void copyOut(int i, int count) {
        AscendC::LocalTensor<T> dstLocal = inQueue.DeQue<T>();
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPad(yGm[i * coreDataNum], dstLocal, copyParams);
        inQueue.FreeTensor(dstLocal);
    }
    private:
    TPipe *pipe;
    // One bound VECIN/VECOUT queue: data passes through untouched.
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> inQueue;
    GlobalTensor<T> yGm, xGm;
    int loop;                     // total chunks, including the tail
    int coreDataNum, tailDataNum; // elements in a full chunk / in the tail
    int elemNum;                  // elements per 32-byte block
};

// WARNING(review): this class appears to be dead code -- the kernel entry
// below never instantiates it. It looks like a debugging variant of
// KernelReshape64 with the queue buffer initialization disabled; if process()
// is ever called, copyIn() will allocate from a queue that was never backed by
// pipe->InitBuffer. Keep disabled, or delete once confirmed unused.
template <typename T>
class KernelReshapeNULL {
    public:
    __aicore__ inline KernelReshapeNULL() {}
    // Same tiling setup as KernelReshape64 (counts doubled so 64-bit elements
    // can be moved as 32-bit halves), but WITHOUT the InitBuffer call.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,uint32_t preiter, uint32_t totalBlocks, uint32_t totalLength, TPipe *pipeIn) {

        totalBlocks *= 2;
        totalLength *= 2;
        loop = totalBlocks/preiter ;
        // NOTE(review): unlike KernelReshape64, preiter is NOT halved here, so
        // coreDataNum is twice as large -- presumably intentional for this
        // variant, but confirm against the tiling before reuse.
        coreDataNum = preiter*(BLOCK_SIZE/sizeof(T));
        this->pipe = pipeIn;
        elemNum = (BLOCK_SIZE/sizeof(T));  // elements per 32-byte block
        xGm.SetGlobalBuffer((__gm__ T *)x, totalLength);
        yGm.SetGlobalBuffer((__gm__ T *)y, totalLength);
        tailDataNum = coreDataNum;
        // NOTE(review): the extra *(BLOCK_SIZE/sizeof(T)) factor mixes units
        // (coreDataNum is already in elements) -- same suspicious comparison
        // as the sibling kernels; verify if this class is ever revived.
        if (totalLength != coreDataNum*loop*(BLOCK_SIZE/sizeof(T))) {
            loop = loop + 1;
            tailDataNum = totalLength - (loop - 1)*coreDataNum;
        }
        // Queue buffer initialization deliberately left disabled:
        // pipe->InitBuffer(inQueue, BUFFER_NUM, coreDataNum*sizeof(T));

    }
    // Move every full chunk, then the (possibly shorter) tail chunk.
    __aicore__ inline void process() {
        int roundCoreDataNum = (coreDataNum + elemNum -1)/elemNum*elemNum;
        for (int i = 0; i < loop - 1; i++) {
            copyIn(i, roundCoreDataNum);
            copyOut(i, coreDataNum);
        }
        int roundtailDataNum = (tailDataNum + elemNum -1)/elemNum*elemNum;
        copyIn(loop-1, roundtailDataNum);
        copyOut(loop-1, tailDataNum);
    }
    // GM -> UB: stage `count` elements of chunk i into the queue.
    // WARNING(review): AllocTensor on a queue whose buffer was never
    // initialized (see Init above).
    __aicore__ inline void copyIn(int i, int count) {
        AscendC::LocalTensor<T> srcLocal = inQueue.AllocTensor<T>();
        // Last DataCopyExtParams field is a reserved slot.
        AscendC::DataCopyExtParams copyParams{1,  static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPadExtParams<T> padParams{false, 0, 0, 0};
        AscendC::DataCopyPad(srcLocal, xGm[i*coreDataNum], copyParams, padParams); // copy GM -> VECIN
        inQueue.EnQue<T>(srcLocal);
    }

    // UB -> GM: write `count` elements of chunk i from the queue.
    __aicore__ inline void copyOut(int i, int count) {
        AscendC::LocalTensor<T> dstLocal = inQueue.DeQue<T>();
        AscendC::DataCopyExtParams copyParams{1,  static_cast<uint32_t>(count * sizeof(T)), 0, 0, 0};
        AscendC::DataCopyPad(yGm[i*coreDataNum], dstLocal, copyParams); // copy VECIN -> GM
        inQueue.FreeTensor(dstLocal);
    }
    private:
    TPipe *pipe;
    // One bound VECIN/VECOUT queue: data passes through untouched.
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> inQueue;
    GlobalTensor<T> yGm, xGm;
    int loop;                     // total chunks, including the tail
    int coreDataNum, tailDataNum; // elements in a full chunk / in the tail
    int elemNum;                  // elements per 32-byte block
};

/**
 * Reshape kernel entry point.
 *
 * A reshape never rearranges the payload, so this kernel is a straight
 * GM->GM copy of x into y; the `shape` and `workspace` operands are unused
 * here. For 64-bit element types the data is moved as pairs of 32-bit words
 * via KernelReshape64<int32_t>; every other dtype uses KernelReshape<DTYPE_X>.
 */
extern "C" __global__ __aicore__ void reshape(GM_ADDR x, GM_ADDR shape, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    TPipe pipe;
    constexpr bool is64Bit =
        std::is_same_v<DTYPE_X, int64_t> || std::is_same_v<DTYPE_X, uint64_t>;
    if constexpr (is64Bit) {
        // 64-bit path: copy each element as two 32-bit halves.
        KernelReshape64<int32_t> op;
        op.Init(x, y, tiling_data.preiter, tiling_data.totalBlocks, tiling_data.totalLength, &pipe);
        op.process();
    } else {
        // Generic path: copy elements at their native width.
        KernelReshape<DTYPE_X> op;
        op.Init(x, y, tiling_data.preiter, tiling_data.totalBlocks, tiling_data.totalLength, &pipe);
        op.process();
    }
}