#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;    


/*
 * Row-wise softmax kernel: for each of the Z * M rows of length N,
 * computes y = exp(x) / sum(exp(x)) along the last axis.
 *
 * half inputs are promoted to fp32 for the exp/sum/div sequence and
 * demoted on output; float inputs are processed in place.
 */
template<typename TYPE_X, typename TYPE_Y> class KernelSoftmax {
    using T = TYPE_X;
public:
    __aicore__ inline KernelSoftmax() {}

    /// Bind the global-memory tensors and allocate local queues/buffers.
    /// @param x    input tensor in global memory, logical shape Z x M x N
    /// @param y    output tensor in global memory, same shape as x
    /// @param M    rows per batch
    /// @param N    row length (the softmax axis)
    /// @param Z    batch count (outer loop)
    /// @param dim  requested softmax axis from tiling — currently unused:
    ///             the kernel always normalizes along the last axis N
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, int64_t M, int64_t N, int64_t Z, int64_t dim) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->M = M; // rows per batch
        this->N = N; // row length (softmax axis)
        this->Z = Z; // batch count (outer loop)
        // Row length rounded up to a multiple of 32 elements so the DataCopy
        // length meets the 32-byte granularity for both half and float.
        this->row32 = (N + 31) / 32 * 32;

        this->dim = dim;

        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x, M * N * Z);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y, M * N * Z);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->row32 * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->row32 * sizeof(TYPE_Y));

        // fp32 scratch is only needed on the half path, where the input is
        // promoted before exp/sum/div to limit precision loss.
        if constexpr (std::is_same_v<TYPE_X, half>) {
            pipe.InitBuffer(QueueBuff0, N * sizeof(float));
            pipe.InitBuffer(QueueBuff1, N * sizeof(float));
        }
    }

    /// Process every row sequentially: stage in, compute, stage out.
    __aicore__ inline void Process() {
        for (int32_t i = 0; i < Z; i++) {
            for (int32_t j = 0; j < M; j++) {
                CopyIn(i, j);
                Compute(i, j);
                CopyOut(i, j);
            }
        }
    }

private:
    /// Copy one row (padded to row32 elements) from global to local memory.
    /// NOTE(review): the padded copy touches up to row32 - N elements past the
    /// logical row; for the very last row of the last batch that is beyond the
    /// end of the tensor — confirm the host allocates the padded tail.
    __aicore__ inline void CopyIn(int32_t i, int32_t j)
    {
        LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        DataCopy(xLocal, xGm[i * M * N + j * N], this->row32);
        inQueueX.EnQue(xLocal);
    }

    /// Compute softmax over the first N elements of the staged row.
    __aicore__ inline void Compute(int32_t i, int32_t j)
    {
        LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        LocalTensor<TYPE_Y> yLocal = outQueueY.AllocTensor<TYPE_Y>();

        if constexpr (std::is_same_v<TYPE_X, half>) {
            // Promote to fp32, do the math, then demote the result.
            auto work = QueueBuff0.Get<float>(); // fp32 copy of the row
            auto sums = QueueBuff1.Get<float>(); // reduction scratch / broadcast sum

            Cast(work, xLocal, RoundMode::CAST_NONE, N);
            Exp(work, work, N);
            ReduceSum(sums, work, sums, N);
            // Broadcast the scalar row sum across the whole row.
            // NOTE(review): GetValue right after ReduceSum assumes the
            // reduction has completed; verify no explicit sync is needed.
            Duplicate(sums, sums.GetValue(0), N);
            Div(work, work, sums, N);
            Cast(yLocal, work, RoundMode::CAST_NONE, N);
        } else {
            // float path: exponentiate in place and use yLocal both as the
            // reduction scratch and as the broadcast divisor buffer.
            Exp(xLocal, xLocal, N);
            ReduceSum(yLocal, xLocal, yLocal, N);
            Duplicate(yLocal, yLocal.GetValue(0), N);
            Div(yLocal, xLocal, yLocal, N);
        }

        outQueueY.EnQue<TYPE_Y>(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    /// Copy one finished row (padded to row32 elements) back to global memory.
    /// The pad bleeds into the next row's start, which the next iteration
    /// rewrites; see the CopyIn note about the final row.
    __aicore__ inline void CopyOut(int32_t i, int32_t j)
    {
        LocalTensor<TYPE_Y> yLocal = outQueueY.DeQue<TYPE_Y>();
        DataCopy(yGm[i * M * N + j * N], yLocal, this->row32);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> QueueBuff0, QueueBuff1; // fp32 scratch (half path only)

    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;

    int64_t M;     // rows per batch
    int64_t N;     // row length (softmax axis)
    int64_t Z;     // batch count

    int64_t dim;   // requested axis from tiling (unused)

    int64_t row32; // N rounded up to a multiple of 32 elements
};

/// Kernel entry point: unpacks the host-side tiling data and runs the
/// templated softmax operator over the whole Z x M x N input.
/// `workspace` is accepted for the standard kernel ABI but not used here.
extern "C" __global__ __aicore__ void softmax(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    // Shape and axis come entirely from the tiling struct (M, N, Z, dim).

    KernelSoftmax<DTYPE_X,DTYPE_Y> op;
    op.Init(x, y, tiling_data.M, tiling_data.N, tiling_data.Z, tiling_data.dim);
    op.Process();
}