#include "kernel_operator.h"
using namespace AscendC;

// Single-core kernel for NLLLoss with "sum" reduction:
//   y = sum_i( -weight[target[i]] * x[i, target[i]] )
// Each core processes `bn` rows of the [N, C] float32 input and atomically
// accumulates its partial sum into the scalar output.
class KernelNLLLossSum {
public:
    __aicore__ inline KernelNLLLossSum() {}

    // Sets up global-memory views and UB queue/buffer allocations for this core.
    //   xAddr      : float32 logits / log-probabilities, layout [N, C]
    //   targetAddr : int32 class indices, length N
    //   weightAddr : float32 per-class weights, length C
    //   yAddr      : float32 output; 8 elements (32 B) are zeroed/written, so
    //                the output GM allocation must be at least 32 B
    //   workAddr   : unused here; presumably the sync/workspace buffer —
    //                TODO(review) confirm against the host-side tiling
    //   N, C       : batch size and number of classes
    //   bn         : rows assigned to this core
    //   fbn        : rows per full core — used only for this core's row offset
    //   fn         : unused; kept for interface compatibility
    //
    // NOTE(review): the zero-init of yGm by block 0 below is not synchronized
    // with the atomic adds issued by the other cores in Process(); without a
    // cross-core barrier (e.g. SyncAll using workAddr) this looks racy —
    // confirm the launch ordering guarantees on the target SoC.
    __aicore__ inline void Init(TPipe* pipe, 
                            GM_ADDR xAddr,GM_ADDR targetAddr,GM_ADDR weightAddr,GM_ADDR yAddr,GM_ADDR workAddr,
                            const uint32_t& N, const uint32_t& C, const uint32_t& bn, const uint32_t& fbn
                            , const uint32_t& fn
                            )
    {
        this->pipe = pipe;  // was previously dropped; keep the handle stored
        this->N = N;
        this->C = C;
        this->bn = bn;
        this->fbn = fbn;
        // Offset of this core's first row in the flattened [N, C] input.
        const uint32_t lenOffset = GetBlockIdx()*fbn*C;
        xGm.SetGlobalBuffer((__gm__ float*)xAddr + lenOffset, bn*C);
        targetGm.SetGlobalBuffer((__gm__ int32_t*)targetAddr, N);
        weightGm.SetGlobalBuffer((__gm__ float*)weightAddr, C);
        // 8 floats (one 32 B block): DataCopy granularity is 32 B, and
        // InitGlobalMemory below clears all 8 slots. (Was declared as 1,
        // which under-reported the region actually accessed.)
        yGm.SetGlobalBuffer((__gm__ float*)yAddr, 8);

        if(GetBlockIdx()==0){
            InitGlobalMemory(yGm, 8, (float)(0));
        }

        // All UB sizes are rounded up to whole 32 B blocks: (n+7)/8*32 for
        // 4-byte element counts n.
        pipe->InitBuffer(xIn, BUFFER_NUM, (C*bn+7)/8*32);
        const uint32_t calcSize = (N+7)/8*32;
        pipe->InitBuffer(tIn, BUFFER_NUM, calcSize);
        pipe->InitBuffer(tBuf, calcSize);
        pipe->InitBuffer(wBuf, calcSize);
        pipe->InitBuffer(wIn, BUFFER_NUM, (C+7)/8*32);
        pipe->InitBuffer(yQueOut, BUFFER_NUM, 32);
    }

    // Computes this core's partial -sum(w[t_i] * x[i, t_i]) and atomically
    // adds it into yGm[0].
    __aicore__ inline void Process()
    {
        auto x = xIn.AllocTensor<float>();
        auto t = tIn.AllocTensor<int32_t>();
        auto w = wIn.AllocTensor<float>();

        // Copy lengths are rounded up to 32 B blocks (8 four-byte elements).
        DataCopy(t,targetGm[GetBlockIdx()*this->fbn],(this->bn+7)/8*8);
        DataCopy(w,weightGm,(this->C+7)/8*8);
        DataCopy(x,xGm, (this->bn*this->C+7)/8*8);
        xIn.EnQue(x);
        tIn.EnQue(t);
        wIn.EnQue(w);
        xIn.DeQue<float>();
        tIn.DeQue<int32_t>();  // fixed: target queue holds int32, not float
        wIn.DeQue<float>();

        auto tc = tBuf.AllocTensor<int32_t>();
        auto wc = wBuf.AllocTensor<float>();
        // Gather takes byte offsets, hence the reinterpret casts and the
        // *4 scaling (sizeof(float) == sizeof(int32_t) == 4) below.
        LocalTensor<uint32_t> t_u32 = t.template ReinterpretCast<uint32_t>();
        LocalTensor<uint32_t> tc_u32 = tc.template ReinterpretCast<uint32_t>();
        ArithProgression<int32_t>(tc, 0,C*4,this->bn); // tc[i] = byte offset of row i in x
        Muls(t,t,4,this->bn);                          // t[i]  = byte offset of target[i] in w
        Gather(wc,w,t_u32,0,this->bn);                 // wc[i] = w[target[i]]
        Add(tc,t,tc,this->bn);                         // tc[i] = byte offset of x[i, target[i]]
        Gather(x,x,tc_u32,0,this->bn);                 // x[i]  = x[i, target[i]]
        Muls(wc,wc,float(-1),this->bn);
        Mul(x,x,wc,this->bn);                          // x[i]  = -w[t_i] * x[i, t_i]
        // Single-repeat reduction into x[0].
        // NOTE(review): mask = bn assumes bn fits one fp32 repeat (<= 64
        // elements) — confirm against the host tiling.
        WholeReduceSum<float>(x, x, this->bn, 1, 1, 1, 8);

        auto out = yQueOut.AllocTensor<float>();
        // Zero the whole 32 B block first: all 8 lanes are atomically added
        // into yGm below, so lanes 1..7 must not carry uninitialized garbage.
        Duplicate(out, float(0), 8);
        out.SetValue(0,x.GetValue(0));
        xIn.FreeTensor(x);
        wIn.FreeTensor(w);
        tIn.FreeTensor(t);
        yQueOut.EnQue(out);yQueOut.DeQue<float>();
        // Accumulate this core's partial sum into the shared scalar output.
        SetAtomicAdd<float>();
        DataCopy(yGm,out,8);
        SetAtomicNone();
        yQueOut.FreeTensor(out);

    }
private:
    TPipe* pipe;
    GlobalTensor<float> xGm;
    GlobalTensor<int32_t> targetGm;
    GlobalTensor<float> weightGm;
    GlobalTensor<float> yGm;

    TQue<QuePosition::VECIN, BUFFER_NUM> xIn;   // [bn, C] input slice
    TQue<QuePosition::VECIN, BUFFER_NUM> tIn;   // int32 targets for this core
    TQue<QuePosition::VECIN, BUFFER_NUM> wIn;   // per-class weights

    TQue<QuePosition::VECOUT, BUFFER_NUM> yQueOut;

    TBuf<QuePosition::VECCALC> tBuf;  // gather byte offsets
    TBuf<QuePosition::VECCALC> wBuf;  // gathered weights

    uint32_t N;    // total batch size
    uint32_t C;    // number of classes
    uint32_t bn;   // rows handled by this core
    uint32_t fbn;  // rows per full core (offset granularity)

};
