#ifndef EXAMPLES_NLLOSSFAST_KERNEL_H
#define EXAMPLES_NLLOSSFAST_KERNEL_H
#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
#include <type_traits>
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 1;
// Fast NLLLoss kernel for Ascend AI cores.
//
// Each core processes a contiguous slice of `loop_num` samples: it gathers
// x[i][target[i]] from global memory, multiplies by -weight[target[i]],
// reduces its partial sum locally, and atomically adds the scalar into the
// single-element output yGm. This computes an un-normalized (sum-reduction)
// negative log-likelihood loss; division by the weight sum, if required, is
// presumably done by the host or a follow-up kernel — TODO confirm.
class KernelNLLLoss_fast {
public:
    __aicore__ inline KernelNLLLoss_fast() {}
    // Binds global buffers, zeroes the output (block 0 only), pre-loads this
    // core's target slice and the full weight table into UB, and kicks off the
    // CopyIn of the selected x elements.
    //
    //   x      - input, laid out as (N, C) float32 in global memory (indexing
    //            below assumes row stride C — TODO confirm against caller)
    //   target - int32 class indices, one per sample
    //   weight - float32 per-class weights, length C
    //   y      - single-float global accumulator for the loss
    //   pipeIn - TPipe used to carve UB buffers (note: the `pipe` member below
    //            is never assigned; all allocation goes through pipeIn)
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR target, GM_ADDR weight, GM_ADDR y, TPipe* pipeIn) {
        
        


        
        
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(y), 1);
auto block_idx_user = GetBlockIdx();
        // yGm.SetL2CacheHint(CacheMode::CACHE_MODE_DISABLE);
        // NOTE(review): block 0 zeroes yGm here while every block later
        // atomic-adds into it in CopyOut; no cross-core barrier is visible
        // between the two — confirm ordering is guaranteed elsewhere.
        if(block_idx_user == 0)
        {
            InitGlobalMemory(yGm, 1, (float)0);
        }
        pipeIn->InitBuffer(outQueueY, BUFFER_NUM, 32 * sizeof(float));
        
        // LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
        // Duplicate(yLocal, (float)0, 1);
        // DataCopyExtParams copyParams {1, static_cast<uint32_t>(1 * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) slot
        // if(block_idx_user == 0)
        // {
        //     DataCopyPad(yGm, yLocal, copyParams);
        // }
        // outQueueY.FreeTensor(yLocal);

        
        // Absolute index of the first sample handled by this core.
        num_calcu = block_idx_user * loop_num;

        // SetPadValue((float)(0));


        
        

        // target is offset in bytes (4 == sizeof(int32_t)) to this core's slice.
        // NOTE(review): buffer size is declared as C but only loop_num entries
        // are ever read — looks copy-pasted from the weight setup; confirm.
        targetGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t *>(target+num_calcu*4), this->C); 
        pipeIn->InitBuffer(inQueueTarget, 1, (loop_num) * sizeof(int32_t));
        LocalTensor<int32_t> targetLocal = inQueueTarget.AllocTensor<int32_t>();
        DataCopy(targetLocal, targetGm, loop_num);
        inQueueTarget.EnQue(targetLocal);

        

        // Full weight table (all C classes) is loaded on every core.
        weightGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(weight), this->C);
        pipeIn->InitBuffer(inQueueWeight, 1, (this->C) * sizeof(float));
        LocalTensor<float> weightLocal = inQueueWeight.AllocTensor<float>();
        DataCopy(weightLocal, weightGm, this->C);
        inQueueWeight.EnQue(weightLocal);

        // x is offset in bytes to this core's first row (C floats per row).
        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(x+num_calcu*C*4));
        // Each gathered sample is padded out to 8 floats (one 32B UB block),
        // hence loop_num*8 floats of staging space.
        pipeIn->InitBuffer(inQueueX, BUFFER_NUM, (loop_num*8) * sizeof(float));
        CopyIn();

        // Scratch for the gathered -weight[target[i]] values.
        pipeIn->InitBuffer(QueueTmp1, (loop_num) * sizeof(float));
    }

    // Pipeline driver: CopyIn was already issued from Init, so only the
    // compute and copy-out stages remain.
    __aicore__ inline void Process() {
        // CopyIn1();
        
        Compute();
        CopyOut();
    }
private:
     // Gathers x[i][target[i]] for this core's loop_num samples into UB.
     // Rows are processed two at a time: one DataCopyPad moves 2 blocks of a
     // single float each (x[i][t_i] and x[i+1][t_{i+1}]), padding each with 7
     // zeros so every sample lands on its own 32B block. srcStride is the byte
     // gap in GM between the end of the first element and the start of the
     // second: (C + t_{i+1} - t_i - 1) * sizeof(float).
     __aicore__ inline void CopyIn()
    {
        DataCopyExtParams copyParams1 {2, 1*sizeof(float), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) slot
        DataCopyPadExtParams<float> padParams1 {true, 0, 7, 0};
        
        uint32_t i_8 = 0, i_c = 0;

        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        // for(int i=0;i<16;i+=2)
        // {
        //     uint32_t start = targetGm.GetValue(i);
        //     uint32_t end = targetGm.GetValue(i+1);
        //     copyParams1.srcStride = (C+ end - start - 1)*sizeof(float);
        //     DataCopyPad(xLocal[i_8], xGm[i_c + start], copyParams1, padParams1);
        //     i_8 += 16;
        //     i_c += C + C;
        // }
        // LocalTensor<int32_t> targetLocal = inQueueTarget.DeQue<int32_t>();
        // for(int i=16;i<48;i+=2)
        // {
        //     uint32_t start = targetLocal.GetValue(i);
        //     uint32_t end = targetLocal.GetValue(i+1);
        //     copyParams1.srcStride = (C+ end - start - 1)*sizeof(float);
        //     DataCopyPad(xLocal[i_8], xGm[i_c + start], copyParams1, padParams1);
        //     i_8 += 16;
        //     i_c += C + C;
        // }
        // NOTE(review): targetGm.GetValue reads target straight from GM on the
        // scalar path, per pair of rows — presumably cheaper than waiting on
        // the UB copy queued in Init; confirm this was the intent.
        for(int i=0;i<loop_num;i+=2)
        {
            uint32_t start = targetGm.GetValue(i);
            // uint32_t end = targetGm.GetValue(i+1);
            copyParams1.srcStride = (C+ targetGm.GetValue(i+1) - start - 1)*sizeof(float);
            DataCopyPad(xLocal[i_8], xGm[i_c + start], copyParams1, padParams1);
            i_8 += 16;                 // 2 samples * 8 padded floats each
            i_c += C<<1;               // advance two rows in GM (2*C floats)
        }
        // inQueueTarget.EnQue(targetLocal);
        // for(int i=48;i<64;i+=2)
        // {
        //     uint32_t start = targetGm.GetValue(i);
        //     uint32_t end = targetGm.GetValue(i+1);
        //     copyParams1.srcStride = (C+ end - start - 1)*sizeof(float);
        //     DataCopyPad(xLocal[i_8], xGm[i_c + start], copyParams1, padParams1);
        //     i_8 += 16;
        //     i_c += C + C;
        // }
        inQueueX.EnQue(xLocal);
    }

    // Computes this core's partial loss:
    //   partial = sum_i( x[i][target[i]] * -weight[target[i]] )
    // and enqueues the scalar for CopyOut.
    __aicore__ inline void Compute()
    {
        // LocalTensor<float> yLocal = outQueueY.DeQue<float>();
        LocalTensor<int32_t> targetLocal = inQueueTarget.DeQue<int32_t>();
        LocalTensor<float> weightLocal = inQueueWeight.DeQue<float>();
        
        LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
        LocalTensor<float> tmp1 = QueueTmp1.Get<float>();
        uint64_t rsvdCnt;
        // Scale indices to byte offsets, as required by Gather.
        Muls(targetLocal, targetLocal, (int32_t)(sizeof(float)), loop_num);
        // tmp1[i] = weight[target[i]], then negate: tmp1[i] = -weight[target[i]].
        Gather(tmp1, weightLocal, targetLocal.ReinterpretCast<uint32_t>(), 0, loop_num);
        Muls(tmp1, tmp1, (float)(-1), loop_num);
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();

        // Compact the padded CopyIn layout down to loop_num contiguous floats
        // using built-in GatherMask patterns (3, then 1). NOTE(review): the
        // pattern ids and repeat params are hardware-defined; the trailing
        // size comments are from the original author — verify against the
        // GatherMask documentation before touching this pair of calls.
        GatherMask(xLocal, xLocal, 3, false, 0, { 1, 8, 8, 8 }, rsvdCnt); //64*8
        GatherMask(xLocal, xLocal, 1, false, 0, { 1, 2, 8, 8 }, rsvdCnt); //64*2

        // xLocal[i] = x[i][target[i]] * -weight[target[i]], then reduce.
        Mul(xLocal, xLocal, tmp1, loop_num);
        // Add(xLocal, xLocal, xLocal[loop_num/2], loop_num/2);
        WholeReduceSum(yLocal, xLocal, loop_num, 1, 1, 1, 8);
        // Muls(yLocal, yLocal, (float)(-1), 1);

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
        inQueueWeight.FreeTensor(weightLocal);
        inQueueTarget.FreeTensor(targetLocal);
    }
    // Atomically accumulates this core's partial sum into the one-float yGm.
    __aicore__ inline void CopyOut() 
    {
        DataCopyExtParams copyParams {1, static_cast<uint32_t>(1 * sizeof(float)), 0, 0, 0}; // the last field of DataCopyExtParams is a reserved (rsv) slot
        LocalTensor<float> yLocal = outQueueY.DeQue<float>();

        // NOTE(review): atomic-add mode is enabled here but never reset (no
        // SetAtomicNone() after the copy). Any later GM store on this core
        // would also be atomic — harmless only if this is the final copy of
        // the kernel; confirm.
        SetAtomicAdd<float>();
        DataCopyPad(yGm, yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }
private:
    TPipe* pipe;                                      // unused: Init uses pipeIn instead
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;    // padded gathered x values
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueTarget; // this core's target slice
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueWeight; // full weight table
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;  // the scalar partial sum

    // Only QueueTmp1 is initialized/used; Tmp2 and Tmp3 are dead declarations.
    TBuf<QuePosition::VECCALC> QueueTmp1,QueueTmp2,QueueTmp3;
    
    GlobalTensor<float> xGm;
    GlobalTensor<int32_t> targetGm;
    GlobalTensor<float> weightGm;
    GlobalTensor<float> yGm;
    
    // Number of classes. int16_t holds 1024 but leaves little headroom if C grows.
    static constexpr int16_t C = 1024;

    // First sample index handled by this core (block_idx * loop_num).
    uint32_t num_calcu;
    // Samples per core: 1024/16 = 64 — hard-codes a total of 1024 samples
    // split over 16 cores; TODO confirm against launch configuration.
    static constexpr int32_t loop_num = 1024/16;
};
#endif
