#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;                                     // tensor num for each queue

template<typename TYPE_START, typename TYPE_END, typename TYPE_WEIGHT, typename TYPE_Y> class KernelLerpBroadcast {
public:
    __aicore__ inline KernelLerpBroadcast() {}
    // Binds the global buffers and caches the shape/stride metadata used to
    // map a flat output index back to (possibly broadcast) input indices.
    //   *_ndarray    : per-dimension extents of y / start / end / weight
    //   *_sumndarray : per-dimension element strides used for index
    //                  decomposition; y_sumndarray[y_dimensional] is the total
    //                  output element count iterated by Process().
    // The tiling values (CoreDataNum, finalTileNum, ...) are stored only for
    // interface parity with KernelLerp; the broadcast path does not tile.
    __aicore__ inline void Init(GM_ADDR start, GM_ADDR end, GM_ADDR weight, GM_ADDR y,
                                int32_t CoreDataNum, int32_t finalTileNum, int32_t tileDataNum, int32_t TailDataNum,
                                int32_t y_dimensional, 
                                int32_t* y_ndarray, int32_t* start_ndarray, int32_t* end_ndarray, int32_t* weight_ndarray,
                                int32_t* y_sumndarray, int32_t* start_sumndarray, int32_t* end_sumndarray, int32_t* weight_sumndarray) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->tileDataNum = tileDataNum;
        this->coreDataNum = CoreDataNum;
        this->tileNum = finalTileNum;
        this->tailDataNum = TailDataNum;

        this->y_dimensional = y_dimensional;

        this->y_ndarray = y_ndarray;
        this->start_ndarray = start_ndarray;
        this->end_ndarray = end_ndarray;
        this->weight_ndarray = weight_ndarray;

        this->y_sumndarray = y_sumndarray;
        this->start_sumndarray = start_sumndarray;
        this->end_sumndarray = end_sumndarray;
        this->weight_sumndarray = weight_sumndarray;

        // NOTE(review): the declared buffer length is 1 even though GetValue /
        // SetValue below access indices up to y_sumndarray[y_dimensional] - 1;
        // consider passing the real element counts from the tiling data.
        startGm.SetGlobalBuffer((__gm__ TYPE_START*)start, 1);
        endGm.SetGlobalBuffer((__gm__ TYPE_END*)end, 1);
        weightGm.SetGlobalBuffer((__gm__ TYPE_WEIGHT*)weight, 1);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y, 1);
    }
    // Scalar lerp with NumPy-style broadcasting:
    //   y[j] = start[js] + weight[jw] * (end[je] - start[js])
    // Each input's flat index drops the contribution of every dimension whose
    // extent is 1 (that dimension is broadcast across the output).
    __aicore__ inline void Process() {

        int dim = this->y_dimensional;

        for(int j=0; j<this->y_sumndarray[dim]; j++)
        {
            int start_start = 0, end_start = 0, weight_start = 0;
            for(int k=0; k<dim; k++)
            {
                // Coordinate of output element j along dimension k; computed
                // once instead of once per input.
                int idx = j / this->y_sumndarray[k] % this->y_ndarray[k];
                if(this->start_ndarray[k] != 1){
                    start_start += this->start_sumndarray[k] * idx;
                }
                if(this->end_ndarray[k] != 1){
                    end_start += this->end_sumndarray[k] * idx;
                }
                if(this->weight_ndarray[k] != 1){
                    weight_start += this->weight_sumndarray[k] * idx;
                }
            }
            // Compute in float regardless of storage type, then cast back.
            float start = startGm.GetValue(start_start);
            float end = endGm.GetValue(end_start);
            float weight = weightGm.GetValue(weight_start);
            yGm.SetValue(j, (TYPE_Y)(start + weight * (end - start)));
        }
    }
private:
    GlobalTensor<TYPE_START> startGm;
    GlobalTensor<TYPE_END> endGm;
    GlobalTensor<TYPE_WEIGHT> weightGm;
    GlobalTensor<TYPE_Y> yGm;
    int32_t coreDataNum;
    int32_t tileNum;
    int32_t tileDataNum;
    int32_t tailDataNum;

    int32_t y_dimensional;
    int32_t *y_ndarray;
    int32_t *start_ndarray;
    int32_t *end_ndarray;
    int32_t *weight_ndarray;

    int32_t *y_sumndarray;
    int32_t *start_sumndarray;
    int32_t *end_sumndarray;
    int32_t *weight_sumndarray;
};

template<typename TYPE_START, typename TYPE_END, typename TYPE_WEIGHT, typename TYPE_Y> class KernelLerp {
    using T = TYPE_START;
public:
    __aicore__ inline KernelLerp() {}
    // Binds global memory for this core's data slice, stores the tiling state,
    // and allocates the double-buffered in/out queues. For half inputs an
    // additional float scratch area is reserved so the arithmetic can run in
    // float precision (see Compute()).
    __aicore__ inline void Init(GM_ADDR start, GM_ADDR end, GM_ADDR weight, GM_ADDR y,
                                int32_t CoreDataNum, int32_t finalTileNum, int32_t tileDataNum, int32_t TailDataNum) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->tileDataNum = tileDataNum;
        this->coreDataNum = CoreDataNum;
        this->tileNum = finalTileNum;
        this->tailDataNum = TailDataNum;

        startGm.SetGlobalBuffer((__gm__ TYPE_START*)start, this->coreDataNum);
        endGm.SetGlobalBuffer((__gm__ TYPE_END*)end, this->coreDataNum);
        weightGm.SetGlobalBuffer((__gm__ TYPE_WEIGHT*)weight, this->coreDataNum);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y, this->coreDataNum);

        // pipe alloc memory to queue, the unit is Bytes
        pipe.InitBuffer(inQueueSTART, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_START));
        pipe.InitBuffer(inQueueEND, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_END));
        pipe.InitBuffer(inQueueWEIGHT, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_WEIGHT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Y));

        // Float scratch for the half path; must use the same condition that
        // Compute() uses, otherwise Compute() would Get() uninitialized TBufs.
        if constexpr (std::is_same_v<T, half>)
        {
            pipe.InitBuffer(tmp1, this->tileDataNum * sizeof(float));
            pipe.InitBuffer(tmp2, this->tileDataNum * sizeof(float));
            pipe.InitBuffer(tmp3, this->tileDataNum * sizeof(float));
        }
    }
    // Classic tiled pipeline: every tile but the last processes tileDataNum
    // elements; the final tile processes the (smaller or equal) tail.
    __aicore__ inline void Process() {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
                this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Stage 1: copy one tile of each input from global to local memory.
    // NOTE(review): DataCopy with a raw element count assumes processDataNum
    // is properly aligned for the datatype — presumably guaranteed by the
    // host-side tiling; confirm against the tiling function.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        LocalTensor<TYPE_START> startLocal = inQueueSTART.AllocTensor<TYPE_START>();
        LocalTensor<TYPE_END> endLocal = inQueueEND.AllocTensor<TYPE_END>();
        LocalTensor<TYPE_WEIGHT> weightLocal = inQueueWEIGHT.AllocTensor<TYPE_WEIGHT>();

        DataCopy(startLocal, startGm[progress * this->tileDataNum], this->processDataNum);
        DataCopy(endLocal, endGm[progress * this->tileDataNum], this->processDataNum);
        DataCopy(weightLocal, weightGm[progress * this->tileDataNum], this->processDataNum);

        inQueueSTART.EnQue(startLocal);
        inQueueEND.EnQue(endLocal);
        inQueueWEIGHT.EnQue(weightLocal);
    }
    // Stage 2: y = start + weight * (end - start).
    // half inputs are first widened to float so the interpolation keeps full
    // precision, then narrowed back into the output tile.
    __aicore__ inline void Compute(int32_t progress)
    {
        LocalTensor<TYPE_START> startLocal = inQueueSTART.DeQue<TYPE_START>();
        LocalTensor<TYPE_END> endLocal = inQueueEND.DeQue<TYPE_END>();
        LocalTensor<TYPE_WEIGHT> weightLocal = inQueueWEIGHT.DeQue<TYPE_WEIGHT>();
        LocalTensor<TYPE_Y> yLocal = outQueueY.AllocTensor<TYPE_Y>();

        // Same condition as the allocation in Init().
        if constexpr (std::is_same_v<T, half>)
        {
            auto p1 = tmp1.Get<float>();
            auto p2 = tmp2.Get<float>();
            auto p3 = tmp3.Get<float>();

            Cast(p1, startLocal, RoundMode::CAST_NONE, this->processDataNum);
            Cast(p2, endLocal, RoundMode::CAST_NONE, this->processDataNum);
            Cast(p3, weightLocal, RoundMode::CAST_NONE, this->processDataNum);
            Sub(p2, p2, p1, this->processDataNum);  //(end − start)
            Mul(p2, p2, p3, this->processDataNum);   //weight × (end − start)
            Add(p2, p2, p1, this->processDataNum);    //start + weight × (end − start)
            Cast(yLocal, p2, RoundMode::CAST_NONE, this->processDataNum);   
        }
        else
        {
            Sub(yLocal, endLocal, startLocal, this->processDataNum);  //(end − start)
            Mul(yLocal, yLocal, weightLocal, this->processDataNum);   //weight × (end − start)
            Add(yLocal, yLocal, startLocal, this->processDataNum);    //start + weight × (end − start)
        }
        outQueueY.EnQue<TYPE_Y>(yLocal);
        inQueueSTART.FreeTensor(startLocal);
        inQueueEND.FreeTensor(endLocal);
        inQueueWEIGHT.FreeTensor(weightLocal);
    }
    // Stage 3: copy the finished tile back to global memory.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        LocalTensor<TYPE_Y> yLocal = outQueueY.DeQue<TYPE_Y>();
        DataCopy(yGm[progress * this->tileDataNum], yLocal, this->processDataNum);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueSTART, inQueueEND, inQueueWEIGHT;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> tmp1, tmp2, tmp3;  // float scratch, half path only

    GlobalTensor<TYPE_START> startGm;
    GlobalTensor<TYPE_END> endGm;
    GlobalTensor<TYPE_WEIGHT> weightGm;
    GlobalTensor<TYPE_Y> yGm;
    int32_t coreDataNum;
    int32_t tileNum;
    int32_t tileDataNum;
    int32_t tailDataNum;
    int32_t processDataNum;  // elements in the tile currently in flight
};
// Kernel entry: dispatch on the tiling key chosen by the host.
//   key 1 — all inputs share one shape: tiled, vectorized element-wise lerp.
//   key 2 — shapes differ: scalar kernel with broadcasting index math.
extern "C" __global__ __aicore__ void lerp(GM_ADDR start, GM_ADDR end, GM_ADDR weight, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    if(TILING_KEY_IS(2))
    {
        KernelLerpBroadcast<DTYPE_START, DTYPE_END, DTYPE_WEIGHT, DTYPE_Y> kernel;
        kernel.Init(start, end, weight, y,
                    tiling_data.CoreDataNum, tiling_data.finalTileNum,
                    tiling_data.tileDataNum, tiling_data.TailDataNum,
                    tiling_data.y_dimensional,
                    tiling_data.y_ndarray, tiling_data.start_ndarray,
                    tiling_data.end_ndarray, tiling_data.weight_ndarray,
                    tiling_data.y_sumndarray, tiling_data.start_sumndarray,
                    tiling_data.end_sumndarray, tiling_data.weight_sumndarray);
        kernel.Process();
    }
    else if(TILING_KEY_IS(1))
    {
        KernelLerp<DTYPE_START, DTYPE_END, DTYPE_WEIGHT, DTYPE_Y> kernel;
        kernel.Init(start, end, weight, y,
                    tiling_data.CoreDataNum, tiling_data.finalTileNum,
                    tiling_data.tileDataNum, tiling_data.TailDataNum);
        kernel.Process();
    }
}