/**
 * @file syncbntrainingupdate_custom.cpp
 *
 * Copyright (C) 2025. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "syncbntrainingupdate_custom_tiling.h"
#include "kernel_operator.h"



constexpr int32_t BUFFER_NUM = 2; // buffers per queue: double buffering so DataCopy and vector compute can overlap
class KernelSyncBNTrainingUpdate {
public:
    __aicore__ inline KernelSyncBNTrainingUpdate() {}

    /**
     * @brief Set per-core tiling parameters, bind global-memory buffers and
     *        allocate the on-chip (UB) queue storage.
     *
     * @param local_mean   GM address of this device's batch mean (input).
     * @param local_var    GM address of this device's batch variance (input).
     * @param global_mean  GM address of the running mean (input).
     * @param global_var   GM address of the running variance (input).
     * @param updated_mean GM address of the updated running mean (output).
     * @param updated_var  GM address of the updated running variance (output).
     * @param tilingData   Host-side tiling description (split strategy, tile
     *                     sizes, momentum, epsilon).
     */
    __aicore__ inline void Init(GM_ADDR local_mean, GM_ADDR local_var,
                               GM_ADDR global_mean, GM_ADDR global_var,
                               GM_ADDR updated_mean, GM_ADDR updated_var,
                               SyncBNTrainingUpdateCustomTilingData tilingData)
    {
        uint32_t blockIdx = AscendC::GetBlockIdx();
        uint32_t globalOffset = 0;

        // Pick this core's slice parameters and its starting offset in GM.
        // The offset is derived in the same branch as the lengths so the two
        // can never disagree.
        if (tilingData.isEvenCore == 1U) {
            // Even split: every core processes the same amount of data.
            this->blockLength = tilingData.blockLength;
            this->tileNum = tilingData.tileNum;
            this->tileLength = tilingData.tileLength;
            this->lastTileLength = tilingData.lasttileLength;
            globalOffset = blockIdx * tilingData.blockLength;
        } else if (blockIdx < tilingData.formerNum) {
            // Uneven split, "former" cores (the ones carrying the larger share).
            this->blockLength = tilingData.formerLength;
            this->tileNum = tilingData.formerTileNum;
            this->tileLength = tilingData.formerTileLength;
            this->lastTileLength = tilingData.formerLastTileLength;
            globalOffset = blockIdx * tilingData.formerLength;
        } else {
            // Uneven split, "tail" cores: offset skips past all former cores.
            this->blockLength = tilingData.tailLength;
            this->tileNum = tilingData.tailTileNum;
            this->tileLength = tilingData.tailTileLength;
            this->lastTileLength = tilingData.tailLastTileLength;
            globalOffset = tilingData.formerNum * tilingData.formerLength +
                           (blockIdx - tilingData.formerNum) * tilingData.tailLength;
        }

        // EMA / numerical-stability parameters.
        this->momentum = tilingData.momentum;
        this->epsilon = tilingData.epsilon;

        // Bind each global tensor to this core's slice.
        localMeanGm.SetGlobalBuffer((__gm__ float *)local_mean + globalOffset, blockLength);
        localVarGm.SetGlobalBuffer((__gm__ float *)local_var + globalOffset, blockLength);
        globalMeanGm.SetGlobalBuffer((__gm__ float *)global_mean + globalOffset, blockLength);
        globalVarGm.SetGlobalBuffer((__gm__ float *)global_var + globalOffset, blockLength);
        updatedMeanGm.SetGlobalBuffer((__gm__ float *)updated_mean + globalOffset, blockLength);
        updatedVarGm.SetGlobalBuffer((__gm__ float *)updated_var + globalOffset, blockLength);

        // Allocate double-buffered on-chip queues, one tile per buffer.
        pipe.InitBuffer(inQueueLocalMean, BUFFER_NUM, tileLength * sizeof(float));
        pipe.InitBuffer(inQueueLocalVar, BUFFER_NUM, tileLength * sizeof(float));
        pipe.InitBuffer(inQueueGlobalMean, BUFFER_NUM, tileLength * sizeof(float));
        pipe.InitBuffer(inQueueGlobalVar, BUFFER_NUM, tileLength * sizeof(float));
        pipe.InitBuffer(outQueueUpdatedMean, BUFFER_NUM, tileLength * sizeof(float));
        pipe.InitBuffer(outQueueUpdatedVar, BUFFER_NUM, tileLength * sizeof(float));
    }

    /**
     * @brief Run the CopyIn -> Compute -> CopyOut pipeline over every tile of
     *        this core's slice. Only the final tile may be shorter than
     *        tileLength.
     */
    __aicore__ inline void Process()
    {
        for (uint32_t i = 0; i < tileNum; i++) {
            uint32_t currentTileLength = (i == tileNum - 1) ? lastTileLength : tileLength;
            CopyIn(i, currentTileLength);
            Compute(i, currentTileLength);
            CopyOut(i, currentTileLength);
        }
    }

private:
    /**
     * @brief Stage one tile of each of the four inputs from GM into UB queues.
     * @param progress          Tile index within this core's slice.
     * @param currentTileLength Element count of this tile.
     */
    __aicore__ inline void CopyIn(uint32_t progress, uint32_t currentTileLength)
    {
        AscendC::LocalTensor<float> localMeanLocal = inQueueLocalMean.AllocTensor<float>();
        AscendC::LocalTensor<float> localVarLocal = inQueueLocalVar.AllocTensor<float>();
        AscendC::LocalTensor<float> globalMeanLocal = inQueueGlobalMean.AllocTensor<float>();
        AscendC::LocalTensor<float> globalVarLocal = inQueueGlobalVar.AllocTensor<float>();

        // All full tiles have length tileLength, so the tile start is a simple multiple.
        uint32_t globalOffset = progress * tileLength;

        AscendC::DataCopy(localMeanLocal, localMeanGm[globalOffset], currentTileLength);
        AscendC::DataCopy(localVarLocal, localVarGm[globalOffset], currentTileLength);
        AscendC::DataCopy(globalMeanLocal, globalMeanGm[globalOffset], currentTileLength);
        AscendC::DataCopy(globalVarLocal, globalVarGm[globalOffset], currentTileLength);

        inQueueLocalMean.EnQue(localMeanLocal);
        inQueueLocalVar.EnQue(localVarLocal);
        inQueueGlobalMean.EnQue(globalMeanLocal);
        inQueueGlobalVar.EnQue(globalVarLocal);
    }

    /**
     * @brief Blend local and running statistics for one tile:
     *        updated_mean = momentum * global_mean + (1 - momentum) * local_mean
     *        updated_var  = momentum * global_var  + (1 - momentum) * local_var + epsilon
     * @param progress          Tile index (unused; kept for pipeline symmetry).
     * @param currentTileLength Element count of this tile. Note: the vector
     *        ops below must use this value, not tileLength — the last tile may
     *        be shorter, and processing tileLength elements there reads past
     *        the valid data (and past the UB buffer if lastTileLength differs).
     */
    __aicore__ inline void Compute(uint32_t progress, uint32_t currentTileLength)
    {
        AscendC::LocalTensor<float> localMeanLocal = inQueueLocalMean.DeQue<float>();
        AscendC::LocalTensor<float> localVarLocal = inQueueLocalVar.DeQue<float>();
        AscendC::LocalTensor<float> globalMeanLocal = inQueueGlobalMean.DeQue<float>();
        AscendC::LocalTensor<float> globalVarLocal = inQueueGlobalVar.DeQue<float>();

        AscendC::LocalTensor<float> updatedMeanLocal = outQueueUpdatedMean.AllocTensor<float>();
        AscendC::LocalTensor<float> updatedVarLocal = outQueueUpdatedVar.AllocTensor<float>();

        // updated_mean = momentum * global_mean + (1 - momentum) * local_mean
        AscendC::Muls(globalMeanLocal, globalMeanLocal, momentum, currentTileLength);
        AscendC::Muls(localMeanLocal, localMeanLocal, 1 - momentum, currentTileLength);
        AscendC::Add(updatedMeanLocal, globalMeanLocal, localMeanLocal, currentTileLength);

        // updated_var = momentum * global_var + (1 - momentum) * local_var + epsilon
        AscendC::Muls(globalVarLocal, globalVarLocal, momentum, currentTileLength);
        AscendC::Muls(localVarLocal, localVarLocal, 1 - momentum, currentTileLength);
        AscendC::Add(updatedVarLocal, globalVarLocal, localVarLocal, currentTileLength);
        AscendC::Adds(updatedVarLocal, updatedVarLocal, this->epsilon, currentTileLength);

        outQueueUpdatedMean.EnQue<float>(updatedMeanLocal);
        outQueueUpdatedVar.EnQue<float>(updatedVarLocal);

        inQueueLocalMean.FreeTensor(localMeanLocal);
        inQueueLocalVar.FreeTensor(localVarLocal);
        inQueueGlobalMean.FreeTensor(globalMeanLocal);
        inQueueGlobalVar.FreeTensor(globalVarLocal);
    }

    /**
     * @brief Write one tile of both outputs from UB back to GM.
     * @param progress          Tile index within this core's slice.
     * @param currentTileLength Element count of this tile.
     */
    __aicore__ inline void CopyOut(uint32_t progress, uint32_t currentTileLength)
    {
        AscendC::LocalTensor<float> updatedMeanLocal = outQueueUpdatedMean.DeQue<float>();
        AscendC::LocalTensor<float> updatedVarLocal = outQueueUpdatedVar.DeQue<float>();

        uint32_t globalOffset = progress * tileLength;

        AscendC::DataCopy(updatedMeanGm[globalOffset], updatedMeanLocal, currentTileLength);
        AscendC::DataCopy(updatedVarGm[globalOffset], updatedVarLocal, currentTileLength);

        outQueueUpdatedMean.FreeTensor(updatedMeanLocal);
        outQueueUpdatedVar.FreeTensor(updatedVarLocal);
    }

private:
    AscendC::TPipe pipe;
    // Input queues (four inputs).
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueLocalMean, inQueueLocalVar, inQueueGlobalMean, inQueueGlobalVar;
    // Output queues (two outputs).
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueUpdatedMean, outQueueUpdatedVar;
    // Global tensors (float32).
    AscendC::GlobalTensor<float> localMeanGm, localVarGm, globalMeanGm, globalVarGm, updatedMeanGm, updatedVarGm;
    // Per-core tiling parameters.
    uint32_t blockLength;    // total elements this core processes
    uint32_t tileNum;        // number of tiles on this core
    uint32_t tileLength;     // elements per full tile
    uint32_t lastTileLength; // elements in the final tile
    float momentum;          // EMA weight applied to the running statistics
    float epsilon;           // smoothing term added to the updated variance
};

/**
 * @brief Kernel entry point: updates the running BatchNorm statistics from the
 *        per-device batch statistics according to the host-provided tiling.
 */
extern "C" __global__ __aicore__ void syncbntrainingupdate_custom(GM_ADDR local_mean, GM_ADDR local_var,
    GM_ADDR global_mean, GM_ADDR global_var,
    GM_ADDR updated_mean, GM_ADDR updated_var,
    SyncBNTrainingUpdateCustomTilingData tiling)
{
    KernelSyncBNTrainingUpdate kernel;
    kernel.Init(local_mean, local_var, global_mean, global_var, updated_mean, updated_var, tiling);
    kernel.Process();
}