/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "coll_custom_small_all_reduce_mesh_executor.h"

namespace hccl {
// Constructor: all initialization is delegated to the CollCommExecutor base
// class; this executor keeps no additional state of its own.
CollCustomSmallAllReduceMeshExecutor::CollCustomSmallAllReduceMeshExecutor(const HcclDispatcher dispatcher,
                                                                           std::unique_ptr<TopoMatcher> &topoMatcher)
    : CollCommExecutor(dispatcher, topoMatcher)
{
    // DMAReduceFlag_ = true; // template option left disabled by the authors
}

HcclResult CollCustomSmallAllReduceMeshExecutor::CalcScratchMemSize(u64 &scratchMemSize)
{
    // Report the amount of Scratch memory this executor needs.
    // This algorithm reduces directly through the CCL/user buffers, so no
    // scratch memory is requested.
    // TODO: contestants may adjust this to match their algorithm.
    scratchMemSize = 0U;
    // %llu: scratchMemSize is u64 — the template's %u would misread the
    // 64-bit vararg on this printf-style logging macro.
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcScratchMemSize] scratchMemSize: %llu",
                 scratchMemSize);
    return HCCL_SUCCESS;
}

HcclResult CollCustomSmallAllReduceMeshExecutor::CalcStreamNum(u32 &streamNum)
{
    // Report the number of slave streams this executor needs: one per layer
    // of the binary-tree reduce, i.e. ceil(log2(deviceNumPerAggregation)).
    // TODO: contestants may adjust this to match their algorithm.
    //
    // Computed with integers instead of ceil(log2(...)): the floating-point
    // form invokes UB when the device count is 0 (cast of -inf to u32) and
    // depends on exact double rounding; the loop below is exact for all n.
    const u64 deviceCount = topoAttr_.deviceNumPerAggregation;
    u32 layers = 0U;
    while ((1ULL << layers) < deviceCount) {
        ++layers;
    }
    streamNum = layers; // 0 when deviceCount <= 1 (nothing to reduce with)
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcStreamNum] streamNum: %u", streamNum);
    return HCCL_SUCCESS;
}

HcclResult CollCustomSmallAllReduceMeshExecutor::CalcNotifyNum(u32 streamNum, u32 &notifyNum)
{
    // Report the number of notifies this executor needs. Each slave stream
    // uses a pair: one for the master->slave kick-off (notifiesAux) and one
    // for the slave->master completion signal (notifiesMain).
    // TODO: contestants may adjust this to match their algorithm.
    const u32 notifiesPerStream = 2U;
    notifyNum = notifiesPerStream * streamNum;
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcNotifyNum] notifyNum: %u", notifyNum);
    return HCCL_SUCCESS;
}

HcclResult CollCustomSmallAllReduceMeshExecutor::CalcCommInfo(std::vector<LevelNSubCommTransport> &opTransport)
{
    // Describe the communication-domain resources this algorithm needs.
    // TODO: contestants may adjust this to match their algorithm.
    // Fixed log tag: the template logged "[CalcNotifyNum]" from this function.
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcCommInfo]");

    // Data path for the transports: CCL_Input -> CCL_Output.
    TransportMemType inputType = TransportMemType::CCL_INPUT;
    TransportMemType outputType = TransportMemType::CCL_OUTPUT;
    // Request a full-mesh link set on the level-0 communication plane.
    CommParaInfo commParaLevel0(COMM_LEVEL0, CommType::COMM_TAG_MESH);
    // Build the level-0 transport resource request; this ultimately calls
    // CalcMeshTransportReq::CalcTransportRequest().
    CHK_RET(CalcCommPlaneInfo(tag_, commParaLevel0, opTransport[COMM_LEVEL0], inputType, outputType));
    return HCCL_SUCCESS;
}

u64 CollCustomSmallAllReduceMeshExecutor::CalcLoopMaxCount(const u64 cclBuffSize, const u32 unitSize)
{
    // Maximum number of elements one loop iteration can process: how many
    // elements of unitSize bytes fit into the CCL buffer.
    // TODO: contestants may adjust this to match their algorithm.
    if (unitSize == 0U) {
        // Guard against division by zero on an unexpected data type.
        HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcLoopMaxCount] unitSize is 0");
        return 0;
    }
    u64 maxCountPerLoop = cclBuffSize / unitSize;
    // %llu: maxCountPerLoop is u64 — the template's %u would misread the
    // 64-bit vararg on this printf-style logging macro.
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][CalcLoopMaxCount] maxCountPerLoop: %llu",
                 maxCountPerLoop);
    return maxCountPerLoop;
}

HcclResult CollCustomSmallAllReduceMeshExecutor::Orchestrate(OpParam &param, AlgResourceResponse &algRes)
{
    // Top-level orchestration entry point: splits the user buffer into chunks
    // no larger than the CCL buffer and runs KernelRun() once per chunk.
    // TODO: contestants may adjust this to match their algorithm.
    //
    // Fixes vs. the template: %llu for the u64 element count (was %u), and an
    // explicit error return when the chunk size is zero — the original loop
    // would spin forever because curCount would be 0 every iteration.
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][Orchestrate] count: %llu", param.DataDes.count);
    tag_ = param.tag;
    algResResp_ = &algRes;

    // User-side input/output pointers supplied by the caller.
    u8 *userInputPtr = static_cast<u8 *>(param.inputPtr);
    u8 *userOutputPtr = static_cast<u8 *>(param.outputPtr);
    CHK_PTR_NULL(userInputPtr);
    CHK_PTR_NULL(userOutputPtr);

    u32 unitSize = SIZE_TABLE[param.DataDes.dataType];
    u64 maxCountPerLoop = CalcLoopMaxCount(algRes.cclInputMem.size(), unitSize);
    if (maxCountPerLoop == 0) {
        HCCL_ERROR("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][Orchestrate] maxCountPerLoop is 0");
        return HCCL_E_PARA;
    }

    // Process the payload chunk by chunk.
    u64 inputOffset = 0;
    u64 outputOffset = 0;
    for (u64 countLeft = param.DataDes.count; countLeft > 0;) {
        u64 curCount = (countLeft > maxCountPerLoop) ? maxCountPerLoop : countLeft;
        u64 curSize = curCount * unitSize; // bytes this iteration (contest sizes: 512K/2M/64M)

        // Memory description for this iteration.
        ExecMem execMem;
        execMem.count = curCount;                         // element count for this iteration
        execMem.inputPtr = userInputPtr + inputOffset;    // user input slice for this iteration
        execMem.outputPtr = userOutputPtr + outputOffset; // user output slice for this iteration
        execMem.inputMem = algRes.cclInputMem;            // local CCL_Input memory
        execMem.outputMem = algRes.cclOutputMem;          // local CCL_Output memory
        execMem.scratchMem = algRes.scratchMem;           // local scratch memory

        // Execute this iteration.
        CHK_RET(KernelRun(param, execMem));

        // Advance the offsets.
        countLeft -= curCount;
        inputOffset += curSize;
        outputOffset += curSize;
    }
    return HCCL_SUCCESS;
}
// Partner of localRank in a binary-tree exchange at the given layer: the rank
// whose id differs from localRank only in bit `layer` (stride 2^layer).
// Returns static_cast<u32>(-1) — the same sentinel value the original
// `return -1` produced on u32 — when no valid partner exists.
//
// Fixes vs. the template: `layer < 0`, `localRank < 0` and `partnerRank < 0`
// were tautologically false on unsigned types, `totalRanks <= 0` only means
// `== 0` for u32, and `1 << layer` is undefined behavior for layer >= 32.
u32 GetPartnerRank(u32 layer, u32 localRank, u32 totalRanks) {
    constexpr u32 invalidRank = static_cast<u32>(-1);
    if (totalRanks == 0U || layer >= 32U) {
        return invalidRank; // empty communicator, or shift would be UB
    }
    u32 step = 1U << layer; // stride = 2^layer
    u32 partnerRank = (localRank & step) ? (localRank - step) : (localRank + step);
    // Reject partners that fall outside the communicator.
    if (partnerRank >= totalRanks) {
        return invalidRank;
    }
    return partnerRank;
}
HcclResult CollCustomSmallAllReduceMeshExecutor::KernelRun(const OpParam &param, ExecMem &execMem)
{
    // Process one chunk of the all-reduce.
    // Algorithm: binary-tree inline reduce over the level-0 mesh. For each
    // tree layer (highest first), the partner rank's CCL_Input is reduced into
    // the local user output on a per-layer slave stream, then the partial
    // result is copied back into the local CCL_Input so the next layer's
    // partner reads the updated value.
    // NOTE(review): GetPartnerRank can return the (u32)-1 sentinel for
    // non-power-of-two rank counts; this loop indexes links[dstRank] without
    // checking for it — confirm deployed rank counts are powers of two.
    // TODO: contestants may modify as needed.

    u32 unitSize = SIZE_TABLE[param.DataDes.dataType]; // bytes per element of this data type
    u64 curSize = execMem.count * unitSize; // bytes handled this loop (contest sizes: 512K/2M/64M)
    hccl::Stream &masterStream = const_cast<hccl::Stream &>(param.stream); // master stream

    // Stage local data: user input -> local CCL_Input (readable by peers) and
    // user input -> user output (the local contribution to the reduction).
    DeviceMem UserInput = DeviceMem::create(execMem.inputPtr, curSize); // user input
    DeviceMem UserOutput = DeviceMem::create(static_cast<char *>(execMem.outputPtr), curSize); // user output
    DeviceMem CCLInput = DeviceMem::create(execMem.inputMem.ptr(), curSize);  // CCL_Input (read-only to peers)
    // DeviceMem CCLOutput = DeviceMem::create(execMem.outputMem.ptr(), curSize);  // CCL_Output (writable)
    CHK_RET(HcclD2DMemcpyAsync(dispatcher_, CCLInput, UserInput, masterStream));  // user input -> CCL_Input
    CHK_RET(HcclD2DMemcpyAsync(dispatcher_, UserOutput, UserInput, masterStream)); // user input -> user output


    // Stream synchronization (template code kept for reference; the live
    // per-layer synchronization is done inside the loop below):
    // The master stream kicks off the slave streams.
    // algResResp_->slaveStreams are the framework-created slave streams.
    // The framework creates streamCount * 2 notifies; half of them
    // (algResResp_->notifiesAux) are waited on from the slave streams.
    // Post-type tasks would be enqueued on the master stream here.
    // for (u32 signalIndex = 0; signalIndex < algResResp_->slaveStreams.size(); signalIndex++) {
    //     CHK_RET(LocalNotify::Post(masterStream, dispatcher_, algResResp_->notifiesAux[signalIndex],
    //         PROF_STAGE_1));
    // }

    // // Slave streams wait for the master stream's notification.
    // // Wait-type tasks would be enqueued on the slave streams here.
    // for (u32 streamIndex = 0; streamIndex < algResResp_->slaveStreams.size(); streamIndex++) {
    //     CHK_RET(LocalNotify::Wait(algResResp_->slaveStreams[streamIndex], dispatcher_,
    //         algResResp_->notifiesAux[streamIndex], PROF_STAGE_1));
    // }

    // Fetch sub-communicator 0 of level 0 (the full mesh).
    CHK_RET(CheckCommSize(COMM_LEVEL0, COMM_INDEX_0 + 1));
    SubCommInfo level0CommInfo = GetSubCommInfo(COMM_LEVEL0, COMM_INDEX_0);
    HCCL_WARNING("[HCCLContest][CollCustomSmallAllReduceMeshExecutor][KernelRun] localRank: %u, localRankSize: %u",
                 level0CommInfo.localRank, level0CommInfo.localRankSize);
 
    // Reduce remote data into the local user output.
    // There are localRankSize - 1 remote ranks; one slave stream per layer.
    //-------- tree-structured reduce --------//
    u32 LayerNum = static_cast<u32>(ceil(log2(level0CommInfo.localRankSize)));
    for (int32_t layer = LayerNum - 1; layer >= 0; layer--){
        // // (earlier offset-based scheme, kept for reference)
        // if(level0CommInfo.localRank + round >= level0CommInfo.localRankSize) continue; 
        // u32 round = 1 << layer;
        // u32 dstRank = (level0CommInfo.localRank + round) % (2<<LayerNum);
        // u32 streamIndex = (level0CommInfo.localRank + round - 1) % (2<<LayerNum);
        // Stream& subStream = algResResp_->slaveStreams[streamIndex];
        // Select the slave stream and partner rank for this layer.
        u32 round = 1 << layer; // NOTE(review): unused — only the commented-out scheme above referenced it
        u32 dstRank = GetPartnerRank(layer, level0CommInfo.localRank, level0CommInfo.localRankSize);
        // if(dstRank >= level0CommInfo.localRankSize ) continiue;
        // u32 streamIndex = (level0CommInfo.localRank + round - 1) % level0CommInfo.localRankSize;
        u32 streamIndex = layer; // one dedicated slave stream per tree layer
        Stream& subStream = algResResp_->slaveStreams[streamIndex];

        // Master -> slave handoff: master posts, this layer's slave stream waits.
        CHK_RET(LocalNotify::Post(masterStream, dispatcher_, algResResp_->notifiesAux[streamIndex],
            PROF_STAGE_1));
        CHK_RET(LocalNotify::Wait(algResResp_->slaveStreams[streamIndex], dispatcher_,
            algResResp_->notifiesAux[streamIndex], PROF_STAGE_1));

        // Pre-sync with the partner rank before touching its memory.
        CHK_RET(level0CommInfo.links[dstRank]->TxAck(subStream));
        CHK_RET(level0CommInfo.links[dstRank]->RxAck(subStream));
        // Resolve the partner rank's remote CCL_Input address.
        void *remMemPtr = nullptr;
        CHK_RET(level0CommInfo.links[dstRank]->GetRemoteMem(UserMemType::INPUT_MEM, &remMemPtr));
        CCLInput = DeviceMem::create(static_cast<char *>(remMemPtr), curSize);
        // Inline-reduce the partner's data into the local user output.
        CHK_RET(HcclReduceAsync(
            dispatcher_,
            CCLInput.ptr(),             // source: the remote rank's data
            execMem.count,              // number of elements
            param.DataDes.dataType,     // data type
            param.reduceType,           // reduction op (e.g. sum)
            subStream,                  // stream that executes the op
            UserOutput.ptr(),           // destination: local memory
            dstRank,                    // remote rank id
            level0CommInfo.links[dstRank]->GetLinkType(), // link type
            INLINE_REDUCE_BIT           // reduce attribute
        ));
        // Post-sync: both sides confirm the data exchange finished.
        CHK_RET(level0CommInfo.links[dstRank]->TxDataSignal(subStream));
        CHK_RET(level0CommInfo.links[dstRank]->RxDataSignal(subStream));  

        // Slave -> master handoff: the slave posts, the master stream waits.
        CHK_RET(LocalNotify::Post(algResResp_->slaveStreams[streamIndex], dispatcher_,
            algResResp_->notifiesMain[streamIndex], PROF_STAGE_1));
        CHK_RET(LocalNotify::Wait(masterStream, dispatcher_, algResResp_->notifiesMain[streamIndex],
            PROF_STAGE_1));

        // Publish this layer's partial result: user output -> local CCL_Input,
        // so the next layer's partner reads the reduced value.
        CCLInput = DeviceMem::create(execMem.inputMem.ptr(), curSize); // local CCL_Input
        CHK_RET(HcclD2DMemcpyAsync(dispatcher_, CCLInput, UserOutput, masterStream)); // user output -> CCL_Input
    }

    // (template code kept for reference) Slave streams notify the master that
    // their tasks finished; the other half of the notifies
    // (algResResp_->notifiesMain) are waited on from the master stream.
    // Post-type tasks would be enqueued on the slave streams here.
    // for (u32 streamIndex = 0; streamIndex < algResResp_->slaveStreams.size(); streamIndex++) {
    //     CHK_RET(LocalNotify::Post(algResResp_->slaveStreams[streamIndex], dispatcher_,
    //         algResResp_->notifiesMain[streamIndex], PROF_STAGE_1));
    // }

    // // The master stream waits for each slave's notification.
    // // Wait-type tasks would be enqueued on the master stream here.
    // for (u32 signalIndex = 0; signalIndex < algResResp_->slaveStreams.size(); signalIndex++) {
    //     CHK_RET(LocalNotify::Wait(masterStream, dispatcher_, algResResp_->notifiesMain[signalIndex],
    //         PROF_STAGE_1));
    // }

    return HCCL_SUCCESS;
}

// Register this executor with the algorithm framework under the
// "CustomSmallAllReduceMeshExecutor" name and CustomSmallAllReduceMesh id.
REGISTER_EXEC("CustomSmallAllReduceMeshExecutor", CustomSmallAllReduceMesh, CollCustomSmallAllReduceMeshExecutor);
} // namespace hccl
