#include "kernel_operator.h"
#include <type_traits>
#include <cmath>
// #include "tiling/tiling_api.h"

using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;

// Scalar fallback softmax kernel: walks the softmax axis one element at a
// time through global memory, using a 1-element unified-buffer scratch only
// to evaluate Exp(). Selected by tiling key 1 (see the kernel entry point).
template<typename T> class KernelSoftmax {
public:
    __aicore__ inline KernelSoftmax() {}
    /// Set up global-memory views and the Exp() scratch buffer.
    /// @param x          input tensor in global memory
    /// @param y          output tensor in global memory
    /// @param cycles     number of elements along the softmax axis per row
    /// @param interval   element stride between consecutive softmax-axis
    ///                   entries of one row (rows are interleaved)
    /// @param loopCount  number of softmax rows this kernel processes
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                            int32_t cycles, int32_t interval, int32_t loopCount) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->cycles = cycles;
        this->interval = interval;
        this->loopCount = loopCount;

        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X *>(x), 1);
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), 1);

        // Minimal scratch (one 32-byte block) used to run Exp() on a single
        // element at a time.
        pipe.InitBuffer(QueueTmp1, 32 * sizeof(DTYPE_X));
    }
    /// Compute softmax row by row:
    ///   y[i] = exp(x[i] - max(x)) / sum_j exp(x[j] - max(x))
    /// Subtracting the row maximum is mathematically equivalent to the plain
    /// exp(x)/sum(exp(x)) form but cannot overflow exp() for large inputs.
    __aicore__ inline void Process() {
        auto tmp1 = QueueTmp1.Get<float>();
        for (int z = 0; z < this->loopCount; z++) {
            // Map the row counter to the global index of the row's first
            // element: rows are grouped in bundles of `interval`, each bundle
            // occupying cycles*interval consecutive elements.
            int32_t x_num = z / this->interval;
            x_num = x_num * this->cycles * this->interval + z % this->interval;

            if (this->cycles <= 0) {
                continue;  // empty row: nothing to normalize
            }

            // Pass 1: row maximum (numerical stabilizer).
            float rowMax = (float)xGm.GetValue(x_num);
            for (int i = 1; i < this->cycles; i++) {
                float v = (float)xGm.GetValue(x_num + i * this->interval);
                if (v > rowMax) {
                    rowMax = v;
                }
            }

            // Pass 2: sum of shifted exponentials.
            float rowSum = 0.0f;
            for (int i = 0; i < this->cycles; i++) {
                int32_t temp_num = x_num + i * this->interval;
                tmp1.SetValue(0, (float)xGm.GetValue(temp_num) - rowMax);
                Exp(tmp1, tmp1, 1);
                rowSum += tmp1.GetValue(0);
            }

            // Pass 3: normalize and write out.
            for (int i = 0; i < this->cycles; i++) {
                int32_t temp_num = x_num + i * this->interval;
                tmp1.SetValue(0, (float)xGm.GetValue(temp_num) - rowMax);
                Exp(tmp1, tmp1, 1);
                yGm.SetValue(temp_num, (DTYPE_Y)(tmp1.GetValue(0) / rowSum));
            }
        }
    }
private:
    TPipe pipe;
    // Single scratch buffer for scalar Exp() evaluation. (The unused
    // CopyIn/Compute/CopyOut helpers and their never-initialized queues from
    // the original draft were removed: they were never called, and calling
    // them would have used queues without InitBuffer.)
    TBuf<QuePosition::VECCALC> QueueTmp1;

    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;

    int32_t cycles;
    int32_t interval;
    int32_t loopCount;

};

// Vectorized softmax kernel: double-buffered DataCopy pipeline feeding the
// AscendC SoftMax API, processing tiles of up to 32 rows at a time.
// Selected by tiling key 2 (see the kernel entry point).
template<typename T> class KernelSoftmax_1 {
public:
    __aicore__ inline KernelSoftmax_1() {}
    /// Set up global-memory views, the double-buffered in/out queues, and the
    /// scratch buffers required by the SoftMax API.
    /// @param x                 input tensor in global memory
    /// @param y                 output tensor in global memory
    /// @param cycles            row length (softmax axis size)
    /// @param interval          repurposed here as the SoftMax local
    ///                          workspace size in bytes (host-computed)
    /// @param loopCount         total number of rows to process
    /// @param softmaxTilingData host-side tiling for the SoftMax API
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                            int32_t cycles, int32_t interval, int32_t loopCount, SoftMaxTiling softmaxTilingData) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->cycles = cycles;
        this->localworkspaceSize = interval;
        this->loopCount = loopCount;
        this->softmaxTilingData = softmaxTilingData;

        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X *>(x), 1);
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), 1);

        // Queues sized for one full 32-row tile, double buffered so copy-in,
        // compute and copy-out of consecutive tiles can overlap.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, 32 * this->cycles * sizeof(DTYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, 32 * this->cycles * sizeof(DTYPE_Y));

        // float working copy of the tile, plus the per-row max/sum tensors
        // produced by SoftMax (half-tile sized, matching the original layout).
        pipe.InitBuffer(QueueTmp1, 32 * this->cycles * sizeof(float));
        pipe.InitBuffer(QueueTmpMax, 16 * this->cycles * sizeof(float));
        pipe.InitBuffer(QueueTmpSum, 16 * this->cycles * sizeof(float));

        // Byte workspace handed to the SoftMax API.
        pipe.InitBuffer(QueueTmpBuffer, this->localworkspaceSize * sizeof(uint8_t));
    }
    /// Drive the pipeline: full 32-row tiles first, then one partial tile.
    __aicore__ inline void Process() {
        // NOTE: the loop condition must be `i + 32 <= loopCount`, not
        // `i < loopCount`. With the latter, a non-multiple-of-32 loopCount
        // made the last iteration process 32 rows past the valid range AND
        // the tail branch below then reprocessed the same rows a second time.
        int32_t i = 0;
        for (; i + 32 <= this->loopCount; i += 32) {
            CopyIn(i, 32);
            Compute(i, 32);
            CopyOut(i, 32);
        }
        int32_t tailRows = this->loopCount - i;
        if (tailRows > 0) {
            CopyIn(i, tailRows);
            Compute(i, tailRows);
            CopyOut(i, tailRows);
        }
    }
private:
    /// Stage rowNum*cycles input elements starting at row `progress` into a
    /// VECIN tensor and enqueue it for Compute.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t rowNum) {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();
        DataCopy(xLocal, xGm[progress * this->cycles], rowNum * this->cycles);
        inQueueX.EnQue(xLocal);
    }
    /// Cast the tile to float, run the SoftMax API in place, cast back to the
    /// output dtype and enqueue the result.
    __aicore__ inline void Compute(int32_t progress, uint32_t rowNum) {
        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        LocalTensor<DTYPE_Y> yLocal = outQueueY.AllocTensor<DTYPE_Y>();

        auto tmpMax = QueueTmpMax.Get<float>();
        auto tmpSum = QueueTmpSum.Get<float>();
        auto tmpBuffer = QueueTmpBuffer.Get<uint8_t>();
        auto tmp1 = QueueTmp1.Get<float>();

        // Convert only the rows actually staged by CopyIn; a partial tail
        // tile must not push uninitialized buffer contents through SoftMax.
        Cast(tmp1, xLocal, RoundMode::CAST_NONE, rowNum * this->cycles);
        AscendC::SoftMaxShapeInfo srcShape = { rowNum, (uint32_t)this->cycles, rowNum, (uint32_t)this->cycles };
        // isReuseSource=false; tmp1 serves as both src and dst.
        AscendC::SoftMax<float, false>(tmp1, tmpSum, tmpMax, tmp1, tmpBuffer, softmaxTilingData, srcShape);
        Cast(yLocal, tmp1, RoundMode::CAST_NONE, rowNum * this->cycles);

        inQueueX.FreeTensor(xLocal);
        outQueueY.EnQue<DTYPE_Y>(yLocal);
    }
    /// Drain the computed tile back to global memory.
    __aicore__ inline void CopyOut(int32_t progress, uint32_t rowNum) {
        LocalTensor<DTYPE_Y> yLocal = outQueueY.DeQue<DTYPE_Y>();
        DataCopy(yGm[progress * this->cycles], yLocal, rowNum * this->cycles);
        outQueueY.FreeTensor(yLocal);
    }
private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<AscendC::QuePosition::VECCALC> QueueTmpBuffer;
    TBuf<QuePosition::VECCALC> QueueTmp1, QueueTmpMax, QueueTmpSum;

    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;

    int32_t cycles;
    int32_t localworkspaceSize;
    int32_t loopCount;

    SoftMaxTiling softmaxTilingData;

};
// Kernel entry point: unpack host tiling data and dispatch to the kernel
// variant selected by the tiling key.
//   key 1 -> KernelSoftmax   (scalar, element-by-element fallback)
//   key 2 -> KernelSoftmax_1 (vectorized, SoftMax-API pipeline)
extern "C" __global__ __aicore__ void softmax(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);

    if (TILING_KEY_IS(1)) {
        KernelSoftmax<DTYPE_X> kernel;
        kernel.Init(x, y,
                    tiling_data.cycles, tiling_data.interval, tiling_data.loopCount);
        kernel.Process();
        return;
    }
    if (TILING_KEY_IS(2)) {
        KernelSoftmax_1<DTYPE_X> kernel;
        kernel.Init(x, y,
                    tiling_data.cycles, tiling_data.interval, tiling_data.loopCount,
                    tiling_data.softmaxTilingData);
        kernel.Process();
    }
}