#include "kernel_operator.h"
// tensor num for each queue
constexpr int32_t BUFFER_NUM = 2;

// Vector kernel for the SyncBatchNorm training update:
//   updated_mean = momentum * global_mean + (1 - momentum) * local_mean
//   updated_var  = momentum * global_var  + (1 - momentum) * local_var + epsilon
// Element-wise over float32 tensors, tiled across cores with a big-core /
// small-core split and double-buffered (BUFFER_NUM = 2) on-chip queues.
class KernelSyncBNTrainingUpdate {
public:
    __aicore__ inline KernelSyncBNTrainingUpdate() {}
    // Configures this core's tiling, binds the six global-memory tensors at
    // this core's element offset, and initializes the on-chip queues.
    //
    // Cores with index < tailBlockNum are "big" cores handling bigCoreDataNum
    // elements (finalBigTileNum tiles); the rest handle smallCoreDataNum
    // elements (finalSmallTileNum tiles). All tiling scalars come from the
    // host tiling function via GET_TILING_DATA.
    //
    // local_mean/local_var     : this device's batch statistics (inputs)
    // global_mean/global_var   : running statistics to blend with (inputs)
    // updated_mean/updated_var : blended results (outputs)
    // momentum                 : blend weight applied to the GLOBAL statistics
    //                            (NOTE(review): opposite of PyTorch's convention,
    //                            where momentum weights the batch stats — confirm
    //                            against the host-side op definition)
    // epsilon                  : constant added to the updated variance
    __aicore__ inline void Init(GM_ADDR local_mean, GM_ADDR local_var, 
                               GM_ADDR global_mean, GM_ADDR global_var,
                               GM_ADDR updated_mean, GM_ADDR updated_var, uint32_t smallCoreDataNum,
                                uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
                                uint32_t finalSmallTileNum, uint32_t tileDataNum, 
                                uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
                                uint32_t tailBlockNum,float momentum,float epsilon) 
    {
        this->momentum = momentum;
        this->epsilon = epsilon;
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // NOTE(review): despite the name, coreNum holds this core's INDEX
        // (GetBlockIdx), not a core count.
        uint32_t coreNum = AscendC::GetBlockIdx();
        // Start offset assuming every preceding core were a big core; corrected
        // below for cores in the small-core region.
        uint32_t globalBufferIndex = bigCoreDataNum * AscendC::GetBlockIdx();
        this->tileDataNum = tileDataNum;
        if (coreNum < tailBlockNum) { 
          this->coreDataNum = bigCoreDataNum;
          this->tileNum = finalBigTileNum;
          this->tailDataNum = bigTailDataNum;
        }
        else { 
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = finalSmallTileNum;
          this->tailDataNum = smallTailDataNum;
          // Subtract the over-count: preceding small cores hold fewer elements
          // than assumed by the big-core-only offset above.
          globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (AscendC::GetBlockIdx() - tailBlockNum);
        }
        // Bind the global-memory buffers at this core's offset.
        localMeanGm.SetGlobalBuffer((__gm__ float *)local_mean + globalBufferIndex, this->coreDataNum);
        localVarGm.SetGlobalBuffer((__gm__ float *)local_var + globalBufferIndex, this->coreDataNum);
        globalMeanGm.SetGlobalBuffer((__gm__ float *)global_mean + globalBufferIndex, this->coreDataNum);
        globalVarGm.SetGlobalBuffer((__gm__ float *)global_var + globalBufferIndex, this->coreDataNum);
        updatedMeanGm.SetGlobalBuffer((__gm__ float *)updated_mean + globalBufferIndex, this->coreDataNum);
        updatedVarGm.SetGlobalBuffer((__gm__ float *)updated_var + globalBufferIndex, this->coreDataNum);

        // Initialize the on-chip queue buffers (double buffered, one tile each).
        pipe.InitBuffer(inQueueLocalMean, BUFFER_NUM, this->tileDataNum * sizeof(float));
        pipe.InitBuffer(inQueueLocalVar, BUFFER_NUM, this->tileDataNum * sizeof(float));
        pipe.InitBuffer(inQueueGlobalMean, BUFFER_NUM, this->tileDataNum * sizeof(float));
        pipe.InitBuffer(inQueueGlobalVar, BUFFER_NUM, this->tileDataNum * sizeof(float));
        pipe.InitBuffer(outQueueUpdatedMean, BUFFER_NUM, this->tileDataNum * sizeof(float));
        pipe.InitBuffer(outQueueUpdatedVar, BUFFER_NUM, this->tileDataNum * sizeof(float));
    }
    // Drives the CopyIn -> Compute -> CopyOut pipeline over all tiles assigned
    // to this core. The last tile processes tailDataNum elements instead of a
    // full tileDataNum.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
              this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Stage 1: allocate local tensors, copy tile `progress` of each input from
    // global memory, and enqueue for Compute.
    // NOTE(review): DataCopy with a plain element count requires 32-byte-aligned
    // sizes — assumed guaranteed by the host tiling for processDataNum; confirm.
    __aicore__ inline void CopyIn(int32_t progress)
    {
         // Allocate local tensors and copy the input data in from global memory.
        AscendC::LocalTensor<float> localMeanLocal = inQueueLocalMean.AllocTensor<float>();
        AscendC::LocalTensor<float> localVarLocal = inQueueLocalVar.AllocTensor<float>();
        AscendC::LocalTensor<float> globalMeanLocal = inQueueGlobalMean.AllocTensor<float>();
        AscendC::LocalTensor<float> globalVarLocal = inQueueGlobalVar.AllocTensor<float>();

        // Copy this tile's slice of each input.
        AscendC::DataCopy(localMeanLocal, localMeanGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(localVarLocal, localVarGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(globalMeanLocal, globalMeanGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(globalVarLocal, globalVarGm[progress * this->tileDataNum], this->processDataNum);

        // Enqueue the local tensors (publishes MTE2 -> vector dependency).
        inQueueLocalMean.EnQue(localMeanLocal);
        inQueueLocalVar.EnQue(localVarLocal);
        inQueueGlobalMean.EnQue(globalMeanLocal);
        inQueueGlobalVar.EnQue(globalVarLocal);
    }
    // Stage 2: blend the local and global statistics in place and enqueue the
    // results. Input tensors are overwritten by the scaled intermediates and
    // freed once the outputs are enqueued.
    __aicore__ inline void Compute(int32_t progress)
    {
        // Dequeue the input tensors produced by CopyIn.
        AscendC::LocalTensor<float> localMeanLocal = inQueueLocalMean.DeQue<float>();
        AscendC::LocalTensor<float> localVarLocal = inQueueLocalVar.DeQue<float>();
        AscendC::LocalTensor<float> globalMeanLocal = inQueueGlobalMean.DeQue<float>();
        AscendC::LocalTensor<float> globalVarLocal = inQueueGlobalVar.DeQue<float>();

        // Allocate the output tensors.
        AscendC::LocalTensor<float> updatedMeanLocal = outQueueUpdatedMean.AllocTensor<float>();
        AscendC::LocalTensor<float> updatedVarLocal = outQueueUpdatedVar.AllocTensor<float>();


        // updated_mean = momentum * global_mean + (1 - momentum) * local_mean
        AscendC::Muls(globalMeanLocal, globalMeanLocal, this->momentum, this->processDataNum);  // global_mean *= momentum
        AscendC::Muls(localMeanLocal, localMeanLocal, 1 - this->momentum, this->processDataNum);  // local_mean *= (1 - momentum)
        AscendC::Add(updatedMeanLocal, globalMeanLocal, localMeanLocal, this->processDataNum);  // sum -> updated_mean


        // updated_var = momentum * global_var + (1 - momentum) * local_var + epsilon
        // NOTE(review): epsilon is folded into the stored running variance here,
        // not just used for normalization — confirm this matches the op spec.
        AscendC::Muls(globalVarLocal, globalVarLocal, this->momentum, this->processDataNum);  // global_var *= momentum
        AscendC::Muls(localVarLocal, localVarLocal, 1 - this->momentum, this->processDataNum);  // local_var *= (1 - momentum)
        AscendC::Add(updatedVarLocal, globalVarLocal, localVarLocal, this->processDataNum);  
        AscendC::Adds(updatedVarLocal, updatedVarLocal, this->epsilon, this->processDataNum);


       // Enqueue the output tensors for CopyOut.
        outQueueUpdatedMean.EnQue<float>(updatedMeanLocal);
        outQueueUpdatedVar.EnQue<float>(updatedVarLocal);

        // Release the input tensors back to their queues.
        inQueueLocalMean.FreeTensor(localMeanLocal);
        inQueueLocalVar.FreeTensor(localVarLocal);
        inQueueGlobalMean.FreeTensor(globalMeanLocal);
        inQueueGlobalVar.FreeTensor(globalVarLocal);

    }
    // Stage 3: dequeue the computed outputs, write tile `progress` back to
    // global memory, and release the local tensors.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        // Dequeue the output tensors produced by Compute.
        AscendC::LocalTensor<float> updatedMeanLocal = outQueueUpdatedMean.DeQue<float>();
        AscendC::LocalTensor<float> updatedVarLocal = outQueueUpdatedVar.DeQue<float>();

        
        // Copy the results back to global memory.
        AscendC::DataCopy(updatedMeanGm[progress * this->tileDataNum], updatedMeanLocal, this->processDataNum);
        AscendC::DataCopy(updatedVarGm[progress * this->tileDataNum], updatedVarLocal, this->processDataNum);

        // Release the output tensors back to their queues.
        outQueueUpdatedMean.FreeTensor(updatedMeanLocal);
        outQueueUpdatedVar.FreeTensor(updatedVarLocal);
    }

private:
    AscendC::TPipe pipe;
    // Double-buffered input queues (one per input tensor).
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueLocalMean, inQueueLocalVar, inQueueGlobalMean, inQueueGlobalVar;
    // Double-buffered output queues.
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueUpdatedMean, outQueueUpdatedVar;
    AscendC::GlobalTensor<float> localMeanGm, localVarGm, globalMeanGm, globalVarGm, updatedMeanGm, updatedVarGm;
    uint32_t coreDataNum;    // total elements assigned to this core
    uint32_t tileNum;        // number of tiles this core iterates over
    uint32_t tileDataNum;    // elements per full tile
    uint32_t tailDataNum;    // elements in the final (possibly short) tile
    uint32_t processDataNum; // elements processed by the current tile
    float momentum;
    float epsilon;
};
/**
 * Kernel entry point for the SyncBatchNorm training-statistics update.
 * Unpacks the host-produced tiling data, then initializes and runs the
 * per-core update operator.
 *
 * local_mean/local_var     : this device's batch statistics (inputs)
 * global_mean/global_var   : running statistics (inputs)
 * updated_mean/updated_var : blended statistics (outputs)
 * workspace                : unused by this kernel
 * tiling                   : serialized tiling parameters
 */
extern "C" __global__ __aicore__ void sync_bn_training_update_custom(
    GM_ADDR local_mean, GM_ADDR local_var,
    GM_ADDR global_mean, GM_ADDR global_var,
    GM_ADDR updated_mean, GM_ADDR updated_var,
    GM_ADDR workspace, GM_ADDR tiling)
{
    // Deserialize the host-side tiling parameters.
    GET_TILING_DATA(tilingData, tiling);

    KernelSyncBNTrainingUpdate kernel;
    kernel.Init(local_mean, local_var, global_mean, global_var,
                updated_mean, updated_var,
                tilingData.smallCoreDataNum, tilingData.bigCoreDataNum,
                tilingData.finalBigTileNum, tilingData.finalSmallTileNum,
                tilingData.tileDataNum, tilingData.smallTailDataNum,
                tilingData.bigTailDataNum, tilingData.tailBlockNum,
                tilingData.momentum, tilingData.epsilon);
    kernel.Process();
}
