#include "kernel_operator.h"
constexpr int32_t BUFFER_NUM = 2;
// AscendC (Ascend NPU) kernel implementing the backward pass of binary cross
// entropy for a MindSpore custom op. Literally, Compute produces:
//     outgrad = (logits - labels) * s * weight
// where s = grad[0] (upstream gradient scalar), additionally divided by
// totalLength when the reduction code read from GM equals 2 (presumably the
// 'mean' reduction mode — TODO confirm against the host-side op definition).
// Work is split across cores with a big-core/small-core tiling scheme and
// double-buffered (BUFFER_NUM = 2) VECIN/VECOUT queues.
template<typename TYPE> class KernelBinaryCrossEntropyGradMS{
    using T = TYPE;
public:
    __aicore__ inline KernelBinaryCrossEntropyGradMS() {}
    // Binds GM addresses and tiling parameters to this core's slice of work.
    // logits/labels/weight/outgrad are element-wise tensors partitioned across
    // cores; grad and reduction are scalars (copied as 16-element chunks,
    // presumably for 32-byte DataCopy alignment — verify against DataCopy
    // granularity requirements).
    //
    // Tiling contract (as used below):
    //   - cores with index < tailBlockNum process bigCoreDataNum elements in
    //     finalBigTileNum tiles (last tile = bigTailDataNum elements);
    //   - remaining cores process smallCoreDataNum elements in
    //     finalSmallTileNum tiles (last tile = smallTailDataNum elements);
    //   - tileDataNum is the per-tile element count for all non-tail tiles.
    //
    // NOTE(review): totalLength (public, used by Compute for the mean
    // reduction) is never set here — it is assumed to be assigned by the
    // kernel entry function before Process() is called; confirm at call site.
    __aicore__ inline void Init(GM_ADDR logits, 
        GM_ADDR labels, 
        GM_ADDR grad, 
        GM_ADDR weight, 
        GM_ADDR reduction,
        GM_ADDR outgrad, 
        uint32_t smallCoreDataNum,
        uint32_t bigCoreDataNum, 
        uint32_t finalBigTileNum, 
        uint32_t finalSmallTileNum, 
        uint32_t tileDataNum, 
        uint32_t smallTailDataNum, 
        uint32_t bigTailDataNum, 
        uint32_t tailBlockNum)
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // NOTE(review): despite the name, coreNum holds this core's block
        // *index* (GetBlockIdx), not a count of cores.
        uint32_t coreNum = AscendC::GetBlockIdx();
        // Provisional GM offset assuming every preceding core is a big core;
        // corrected below for small cores.
        uint32_t globalBufferIndex = bigCoreDataNum * AscendC::GetBlockIdx();
        this->tileDataNum = tileDataNum;
        if (coreNum < tailBlockNum) { 
          this->coreDataNum = bigCoreDataNum;
          this->tileNum = finalBigTileNum;
          this->tailDataNum = bigTailDataNum;
        }
        else { 
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = finalSmallTileNum;
          this->tailDataNum = smallTailDataNum;
          // Subtract the over-count for each small core that precedes this one.
          globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (AscendC::GetBlockIdx() - tailBlockNum);
        }
        // Element-wise tensors: each core views only its own slice.
        logitsGm.SetGlobalBuffer((__gm__ TYPE *)logits + globalBufferIndex, this->coreDataNum);
        labelsGm.SetGlobalBuffer((__gm__ TYPE *)labels + globalBufferIndex, this->coreDataNum);
        // Scalar inputs: every core reads the same 16-element region from offset 0.
        gradGm.SetGlobalBuffer((__gm__ TYPE *)grad , 16);
        weightGm.SetGlobalBuffer((__gm__ TYPE *)weight + globalBufferIndex, this->coreDataNum);
        reductionGm.SetGlobalBuffer((__gm__ TYPE *)reduction, 16);
        outgradGm.SetGlobalBuffer((__gm__ TYPE *)outgrad + globalBufferIndex, this->coreDataNum);
        // Double-buffered UB queues, one tile's worth of data each
        // (16 elements for the scalar queues).
        pipe.InitBuffer(inQueueLogits, BUFFER_NUM, this->tileDataNum * sizeof(TYPE));
        pipe.InitBuffer(inQueueLabels, BUFFER_NUM, this->tileDataNum * sizeof(TYPE));
        pipe.InitBuffer(inQueueGrad, BUFFER_NUM, 16 * sizeof(TYPE));
        pipe.InitBuffer(inQueueWeight, BUFFER_NUM, this->tileDataNum * sizeof(TYPE));   
        pipe.InitBuffer(inQueueReduction, BUFFER_NUM, 16*sizeof(TYPE));
        pipe.InitBuffer(outQueueOutGrad, BUFFER_NUM, this->tileDataNum * sizeof(TYPE));
    }

    // Tile loop: CopyIn -> Compute -> CopyOut for each tile; the last tile
    // processes tailDataNum elements instead of a full tileDataNum.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
              this->processDataNum = this->tailDataNum;
            } 
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }
    // Total element count of the whole tensor, used as the divisor for the
    // mean reduction. Public: expected to be assigned by the kernel entry
    // before Process() — TODO confirm; it is not initialized in Init.
    uint32_t totalLength;
private:
    // Stage 1: GM -> UB. Copies one tile of logits/labels/weight plus the
    // 16-element grad/reduction scalar chunks, then enqueues all five tensors.
    // NOTE(review): grad/reduction are loop-invariant but are re-copied from
    // GM on every tile iteration.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE> logitsLocal = inQueueLogits.AllocTensor<TYPE>();
        AscendC::LocalTensor<TYPE> labelsLocal = inQueueLabels.AllocTensor<TYPE>();
        AscendC::LocalTensor<TYPE> gradLocal = inQueueGrad.AllocTensor<TYPE>();
        AscendC::LocalTensor<TYPE> weightLocal = inQueueWeight.AllocTensor<TYPE>();
        AscendC::LocalTensor<TYPE> reductionLocal = inQueueReduction.AllocTensor<TYPE>();

        AscendC::DataCopy(logitsLocal, logitsGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(labelsLocal, labelsGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(gradLocal, gradGm, 16);
        AscendC::DataCopy(weightLocal, weightGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(reductionLocal, reductionGm, 16);

        inQueueLogits.EnQue(logitsLocal);
        inQueueLabels.EnQue(labelsLocal);
        inQueueGrad.EnQue(gradLocal);
        inQueueWeight.EnQue(weightLocal);
        inQueueReduction.EnQue(reductionLocal);
    }

    // Stage 2: vector compute on one tile.
    //   scalar  = grad[0]            (divided by totalLength when mean mode)
    //   outgrad = (logits - labels) * scalar * weight
    // logitsLocal is reused in place as scratch for the intermediate results.
     __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE> logitsLocal = inQueueLogits.DeQue<TYPE>();
        AscendC::LocalTensor<TYPE> labelsLocal = inQueueLabels.DeQue<TYPE>();
        AscendC::LocalTensor<TYPE> gradLocal = inQueueGrad.DeQue<TYPE>();
        AscendC::LocalTensor<TYPE> weightLocal = inQueueWeight.DeQue<TYPE>();
        AscendC::LocalTensor<TYPE> reductionLocal = inQueueReduction.DeQue<TYPE>();
        AscendC::LocalTensor<TYPE> outgradLocal = outQueueOutGrad.AllocTensor<TYPE>();

        TYPE reductionScalar = reductionLocal.GetValue(0);
        TYPE scalar = gradLocal.GetValue(0);
        // Scale in float to avoid precision loss when TYPE is half.
        float scalarf = 0;
        float redcf = static_cast<float>(reductionScalar);
        // NOTE(review): float equality against an enum-like code; value 2
        // presumably selects the 'mean' reduction (divide by element count) —
        // confirm against the host-side reduction encoding.
        if(redcf == 2)  scalarf = static_cast<float>(scalar)/this->totalLength;
        else scalarf = static_cast<float>(scalar);
        scalar = static_cast<TYPE>(scalarf);

        AscendC::Sub(logitsLocal, logitsLocal, labelsLocal, this->processDataNum);
        AscendC::Muls(logitsLocal, logitsLocal, scalar, this->processDataNum);
        AscendC::Mul(outgradLocal, logitsLocal, weightLocal, this->processDataNum);

        outQueueOutGrad.EnQue<TYPE>(outgradLocal);
        // Release all input tiles back to their queues for reuse.
        inQueueLogits.FreeTensor(logitsLocal);
        inQueueLabels.FreeTensor(labelsLocal);
        inQueueGrad.FreeTensor(gradLocal);
        inQueueWeight.FreeTensor(weightLocal);
        inQueueReduction.FreeTensor(reductionLocal);
    }

    // Stage 3: UB -> GM. Writes one computed tile back to the output slice.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE> outgradLocal = outQueueOutGrad.DeQue<TYPE>();
        AscendC::DataCopy(outgradGm[progress * this->tileDataNum], outgradLocal, this->processDataNum);
        outQueueOutGrad.FreeTensor(outgradLocal);
    }
    
private:
    AscendC::TPipe pipe;
    // Input queues (VECIN) and output queue (VECOUT), double-buffered.
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueLogits;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueLabels;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueGrad;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueWeight;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueReduction;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueOutGrad;

    // Global-memory views bound in Init.
    AscendC::GlobalTensor<TYPE> logitsGm;
    AscendC::GlobalTensor<TYPE> labelsGm;
    AscendC::GlobalTensor<TYPE> gradGm;
    AscendC::GlobalTensor<TYPE> weightGm;
    AscendC::GlobalTensor<TYPE> reductionGm;
    AscendC::GlobalTensor<TYPE> outgradGm;

    uint32_t coreDataNum;    // elements this core processes in total
    uint32_t tileNum;        // number of tiles for this core
    uint32_t tileDataNum;    // elements per full tile
    uint32_t tailDataNum;    // elements in this core's last tile
    uint32_t processDataNum; // elements in the tile currently in flight
      
};


