#include "kernel_operator.h"

constexpr int32_t row = 8;                                            // number of input rows (of indices)
constexpr int32_t col = 1024;                                         // indices per row
constexpr int32_t TOTAL_LENGTH = row * col;                            // total length of data
constexpr int32_t USE_CORE_NUM = 8;                                   // num of core used
constexpr int32_t row_percore = row / USE_CORE_NUM;                  // row num of each core
constexpr int32_t BLOCK_LENGTH = row_percore * col;                 // length computed of each core
constexpr int32_t TILE_NUM = 1;                                       // tiles per core (no further tiling)
constexpr int32_t BUFFER_NUM = 1;                                     // tensor num per queue (double buffering disabled)
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM; // indices handled per tile
constexpr int32_t DEPTH = 63; // number of classes (width of each one-hot row)
constexpr int32_t DEPTH_align = ((DEPTH + 8 - 1) / 8) * 8; // DEPTH rounded up to a multiple of 8 int32 (32-byte block)
constexpr int32_t DEPTH_sub = DEPTH_align-DEPTH; // padding element count (currently unused in this file)
/// One-hot encoding kernel: for each int32 input index x[i], emits a
/// DEPTH-wide int32 row that is 1 at position x[i] and 0 elsewhere.
/// Each AI core processes its own BLOCK_LENGTH slice of the input.
class KernelOnehot {
public:
    __aicore__ inline KernelOnehot() {}

    /// Bind this core's slice of global memory and reserve local buffers.
    /// @param x     int32 class indices; BLOCK_LENGTH elements per core
    /// @param depth int32 table of class ids compared against each x element
    /// @param z     int32 output; one DEPTH-wide one-hot row per input index
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR depth, GM_ADDR z)
    {
        // global buffers — each core addresses its own contiguous slice
        xGm.SetGlobalBuffer((__gm__ int32_t *)x + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
        dGm.SetGlobalBuffer((__gm__ int32_t *)depth, DEPTH_align);
        zGm.SetGlobalBuffer((__gm__ int32_t *)z + BLOCK_LENGTH * DEPTH * AscendC::GetBlockIdx(), BLOCK_LENGTH * DEPTH);

        // scratch buffers: compare bitmask, constant-1.0f vector, float one-hot row
        pipe.InitBuffer(cmpBuf, 8 * DEPTH_align * sizeof(uint8_t));
        pipe.InitBuffer(oneBuf, DEPTH_align * sizeof(float));
        pipe.InitBuffer(zBuf, DEPTH_align * sizeof(float));

        pipe.InitBuffer(inQueueX, BUFFER_NUM, TILE_LENGTH * sizeof(int32_t));
        pipe.InitBuffer(inQueueD, BUFFER_NUM, DEPTH_align * sizeof(int32_t));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, DEPTH_align * sizeof(int32_t));
    }

    /// Emit one one-hot row per input element of this core's slice.
    __aicore__ inline void Process()
    {
        for (int32_t i = 0; i < TILE_LENGTH; i++) {
            // NOTE(review): the depth table and the x tile are identical on
            // every iteration; re-loading them each pass matches the
            // per-iteration FreeTensor in Compute(), but both loads could be
            // hoisted out of the loop with a queue-lifetime restructure.
            CopyInD();
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:

    // Load the class-id table into local memory and pad the tail with -1 so
    // the padded lanes can never match a (non-negative) input index.
    __aicore__ inline void CopyInD()
    {
        AscendC::LocalTensor<int32_t> dLocal = inQueueD.AllocTensor<int32_t>();
        AscendC::DataCopy(dLocal, dGm, DEPTH_align);
        for (int i = DEPTH; i < DEPTH_align; i++)
        {
            dLocal.SetValue(i, -1);
        }
        inQueueD.EnQue(dLocal);
    }

    // Load this core's full tile of input indices. `progress` is unused here:
    // the whole tile is copied and Compute() picks out element `progress`.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<int32_t> xLocal = inQueueX.AllocTensor<int32_t>();
        AscendC::DataCopy(xLocal, xGm, TILE_LENGTH);
        inQueueX.EnQue(xLocal);
    }

    // Build the one-hot row for input element `progress`:
    //   cmp    = (depth_table == x[progress])            -> bitmask
    //   zfloat = select(cmp, 1.0f, 0.0f)                 -> float row
    //   zLocal = (int32)zfloat                           -> output row
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<int32_t> xLocal = inQueueX.DeQue<int32_t>();
        AscendC::LocalTensor<int32_t> dLocal = inQueueD.DeQue<int32_t>();
        AscendC::LocalTensor<int32_t> zLocal = outQueueZ.AllocTensor<int32_t>();
        AscendC::LocalTensor<uint8_t> cmp = cmpBuf.AllocTensor<uint8_t>();
        AscendC::LocalTensor<float> zfloat = zBuf.AllocTensor<float>();
        AscendC::LocalTensor<float> oneLocal = oneBuf.AllocTensor<float>();
        AscendC::Duplicate(oneLocal, 1.0f, DEPTH_align);

        int32_t xVal = xLocal.GetValue(progress);
        AscendC::CompareScalar(cmp, dLocal, xVal, AscendC::CMPMODE::EQ, mask_cmp, repeat, repeatParams);
        // NOTE(review): mask_Dup is a second allocation from cmpBuf; the code
        // relies on it aliasing the same underlying buffer as `cmp`,
        // reinterpreted as uint16_t, so the CompareScalar result feeds Select.
        // Confirm against the TBuf allocation semantics of the toolchain —
        // cmpBuf.Get<uint16_t>() would make the intended aliasing explicit.
        AscendC::LocalTensor<uint16_t> mask_Dup = cmpBuf.AllocTensor<uint16_t>();
        AscendC::Select(zfloat, mask_Dup[0], oneLocal, 0.0f, AscendC::SELMODE::VSEL_TENSOR_SCALAR_MODE, DEPTH_align);
        AscendC::Cast(zLocal, zfloat, AscendC::RoundMode::CAST_CEIL, DEPTH_align);
        outQueueZ.EnQue<int32_t>(zLocal);
        inQueueX.FreeTensor(xLocal);
        inQueueD.FreeTensor(dLocal);
        cmpBuf.FreeTensor(cmp);
        oneBuf.FreeTensor(oneLocal);
        zBuf.FreeTensor(zfloat);
    }

    // Write the DEPTH valid elements of the one-hot row back to global
    // memory. DataCopyPad trims the DEPTH_align padding so consecutive rows
    // stay densely packed (stride DEPTH, not DEPTH_align).
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<int32_t> zLocal = outQueueZ.DeQue<int32_t>();
        AscendC::DataCopyExtParams copyParams{static_cast<uint16_t>(1), static_cast<uint32_t>(DEPTH * sizeof(int32_t)), 0, 0, 0};
        AscendC::DataCopyPad(zGm[progress * DEPTH], zLocal, copyParams);
        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueD;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueZ;

    AscendC::TBuf<AscendC::QuePosition::VECCALC> cmpBuf;  // compare bitmask scratch
    AscendC::TBuf<AscendC::QuePosition::VECCALC> zeroBuf; // unused (its InitBuffer was removed) — candidate for deletion
    AscendC::TBuf<AscendC::QuePosition::VECCALC> oneBuf;  // constant 1.0f vector
    AscendC::TBuf<AscendC::QuePosition::VECCALC> zBuf;    // float one-hot row before cast

    AscendC::GlobalTensor<int32_t> xGm;
    AscendC::GlobalTensor<int32_t> dGm;
    AscendC::GlobalTensor<int32_t> zGm;

    // CompareScalar parameters: a single repeat covering DEPTH_align elements.
    uint64_t mask_cmp = DEPTH_align;
    int repeat = 1;
    AscendC::UnaryRepeatParams repeatParams = { 1, 1, 8, 8 };
};

// Device kernel entry point: construct the one-hot operator, bind the
// global buffers for this core, and run the full processing pipeline.
extern "C" __global__ __aicore__ void onehot_custom(GM_ADDR x, GM_ADDR depth, GM_ADDR z)
{
    KernelOnehot kernel;
    kernel.Init(x, depth, z);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launch wrapper (NPU build only; excluded from the CPU-debug
// build). Launches the kernel on `stream` across `blockDim` AI cores.
void onehot_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *depth, uint8_t *z)
{
    onehot_custom<<<blockDim, nullptr, stream>>>(x, depth, z);
}
#endif
