#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;      // Ascend double-buffering (ping-pong) depth

// Element-wise division kernel (y = x1 / x2) built on the standard Ascend C
// CopyIn -> Compute -> CopyOut pipeline with double buffering (BUFFER_NUM = 2).
// Narrow integer / half inputs are promoted to a wider float type before the
// divide, then cast back to the output dtype with truncation (CAST_TRUNC).
class KernelDiv {
public:
    __aicore__ inline KernelDiv() {}
    // Bind global-memory base addresses and record this core's tiling layout.
    //   x1, x2, y     : global-memory addresses of the two inputs and the output
    //   CoreDataNum   : total element count handled by this core
    //   finalTileNum  : number of tiles (pipeline loop iterations) on this core
    //   tileDataNum   : elements in a full tile
    //   TailDataNum   : elements in the last (possibly shorter) tile
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y,  uint32_t CoreDataNum, uint32_t finalTileNum, uint32_t tileDataNum, uint32_t TailDataNum)
    {

        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->coreDataNum = CoreDataNum;
        this->tileNum = finalTileNum;
        this->tileDataNum = tileDataNum;
        this->tailDataNum = TailDataNum;

        xGm1.SetGlobalBuffer((__gm__ DTYPE_X1*)x1, this->coreDataNum);
        xGm2.SetGlobalBuffer((__gm__ DTYPE_X2*)x2, this->coreDataNum);
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y*)y, this->coreDataNum);

        // Double-buffered queues, each buffer sized for one full tile.
        pipe.InitBuffer(inQueueX1, BUFFER_NUM, (this->tileDataNum)  * sizeof(DTYPE_X1));  
        pipe.InitBuffer(inQueueX2, BUFFER_NUM, (this->tileDataNum)  * sizeof(DTYPE_X2)); 
        pipe.InitBuffer(outQueueY, BUFFER_NUM, (this->tileDataNum) * sizeof(DTYPE_Y));   

        // Scratch buffers for the dtype-promotion paths in Compute():
        // int8/int16 inputs are widened to half; int32/half inputs to float.
        // Any other dtype (e.g. float) divides directly and needs no scratch.
        if constexpr (std::is_same_v<DTYPE_X1, int8_t> || std::is_same_v<DTYPE_X1, int16_t>)
        {
            pipe.InitBuffer(QueueY, (this->tileDataNum)  * sizeof(half));   
            pipe.InitBuffer(QueueY2, (this->tileDataNum)  * sizeof(half));     
        }
        else if constexpr (std::is_same_v<DTYPE_X1, int32_t> || std::is_same_v<DTYPE_X1, half>)
        {
            pipe.InitBuffer(QueueY, (this->tileDataNum)  * sizeof(float));   
            pipe.InitBuffer(QueueY2, (this->tileDataNum)  * sizeof(float));
        }
        else
        {}
    }
    // Drain every tile assigned to this core; the final iteration switches to
    // the tail length.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;   // tile count already accounts for double buffering
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
                this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        } 
    }

private:
    // Stage one tile of both inputs from global memory into the VECIN queues.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        LocalTensor<DTYPE_X1> xLocal1 = inQueueX1.AllocTensor<DTYPE_X1>();
        LocalTensor<DTYPE_X2> xLocal2 = inQueueX2.AllocTensor<DTYPE_X2>();
        // NOTE(review): DataCopy element counts must satisfy the API's 32-byte
        // alignment rule; assumes host tiling guarantees this for both
        // tileDataNum and tailDataNum — TODO confirm against the tiling func.
        DataCopy(xLocal1, xGm1[progress * this->tileDataNum], this->processDataNum);  // tail tile is shorter than a full tile, so no extra case split is needed here
        DataCopy(xLocal2, xGm2[progress * this->tileDataNum], this->processDataNum);
        inQueueX1.EnQue(xLocal1);
        inQueueX2.EnQue(xLocal2);
    }
    // Divide one tile. Integer/half inputs are widened, divided in the wider
    // type, then cast back down toward zero (CAST_TRUNC).
    __aicore__ inline void Compute(int32_t progress)
    {
        
        LocalTensor<DTYPE_X1> xLocal1 = inQueueX1.DeQue<DTYPE_X1>();
        LocalTensor<DTYPE_X2> xLocal2 = inQueueX2.DeQue<DTYPE_X2>();
        LocalTensor<DTYPE_Y>   yLocal = outQueueY.AllocTensor<DTYPE_Y>();

        if constexpr (std::is_same_v<DTYPE_X1, int8_t> || std::is_same_v<DTYPE_X1, int16_t>)
        {
            // int8/int16: promote to half so Div operates on a float type.
            auto m_y   = QueueY.Get<half>();  
            auto m_y2  = QueueY2.Get<half>();  
            Cast(m_y,  xLocal1, RoundMode::CAST_NONE, this->processDataNum);      // widen input dtype
            Cast(m_y2, xLocal2, RoundMode::CAST_NONE, this->processDataNum); 
            Div(m_y, m_y, m_y2,  this->processDataNum);
            Cast(yLocal, m_y, RoundMode::CAST_TRUNC, this->processDataNum); 
        }
        // NOTE(review): a commented-out int16_t-only branch identical to the
        // one above was left here during development; removed for clarity.
        else if constexpr (std::is_same_v<DTYPE_X1, int32_t> || std::is_same_v<DTYPE_X1, half>)
        {
            // int32/half: promote to float for precision, then cast back.
            auto m_y    = QueueY.Get<float>();
            auto m_y2   = QueueY2.Get<float>();
            Cast(m_y, xLocal1, RoundMode::CAST_NONE, this->processDataNum);      // widen input dtype
            Cast(m_y2,xLocal2, RoundMode::CAST_NONE, this->processDataNum); 
            Div(m_y, m_y, m_y2,  this->processDataNum);
            Cast(yLocal, m_y, RoundMode::CAST_TRUNC, this->processDataNum); 
        }
        else
        {
            // Directly supported dtype (e.g. float): divide without promotion.
            Div(yLocal, xLocal1, xLocal2,  this->processDataNum);
        }

        outQueueY.EnQue<DTYPE_Y>(yLocal);
        inQueueX1.FreeTensor(xLocal1);
        inQueueX2.FreeTensor(xLocal2);

    }
    // Write one computed tile back to global memory.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        LocalTensor<DTYPE_Y> yLocal = outQueueY.DeQue<DTYPE_Y>();
        DataCopy(yGm[progress * this->tileDataNum], yLocal, this->processDataNum);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    //create queue for input, in this case depth is equal to buffer num
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX1, inQueueX2;
    //create queue for output, in this case depth is equal to buffer num
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<DTYPE_X1> xGm1;
    GlobalTensor<DTYPE_X2> xGm2;
    GlobalTensor<DTYPE_Y> yGm;
    // Scratch buffers used only by the dtype-promotion paths in Compute().
    TBuf<QuePosition::VECCALC> QueueY, QueueY2;


    // Tiling state, set once in Init().
    uint32_t coreDataNum;     // total elements handled by this core
    uint32_t tileNum;         // number of tiles (loop iterations)
    uint32_t tileDataNum;     // elements in a full tile
    uint32_t tailDataNum;     // elements in the final tile
    uint32_t processDataNum;  // elements in the tile currently being processed
};

// Device kernel entry point: element-wise y = x1 / x2.
// Decodes the tiling blob on each core and drives the KernelDiv pipeline.
extern "C" __global__ __aicore__ void div(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelDiv kernel;
    kernel.Init(x1, x2, y,
                tiling_data.CoreDataNum,
                tiling_data.finalTileNum,
                tiling_data.tileDataNum,
                tiling_data.TailDataNum);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher: forwards the raw device pointers to the `div` kernel.
// Excluded from CPU-debug builds, where the <<<...>>> launch syntax is absent.
void div_do(uint32_t blockDim, void *l2ctrl, void *stream, uint8_t *x1, uint8_t *x2, uint8_t *y, 
                   uint8_t *workspace, uint8_t *tiling)
{
    div<<<blockDim, l2ctrl, stream>>>(x1, x2, y, workspace, tiling);
}
#endif
