/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "coll_custom_huge_all_reduce_mesh_executor.h"

namespace hccl {
// Constructor: forwards the dispatcher and topology matcher to the base
// CollCommExecutor; no additional state is initialized here.
CollCustomHugeAllReduceMeshExecutor::CollCustomHugeAllReduceMeshExecutor(const HcclDispatcher dispatcher,
                                                                         std::unique_ptr<TopoMatcher> &topoMatcher)
    : CollCommExecutor(dispatcher, topoMatcher)
{
}

HcclResult CollCustomHugeAllReduceMeshExecutor::CalcScratchMemSize(u64 &scratchMemSize)
{
    // Calculate the Scratch memory size that needs to be allocated.
    // TODO: contestants may modify this according to the algorithm's needs.
    scratchMemSize = 0U;
    // Fix: scratchMemSize is u64, so the format specifier must be %llu, not %u —
    // a mismatched varargs specifier reads the wrong width and garbles the log.
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][CalcScratchMemSize] scratchMemSize: %llu",
                 scratchMemSize);
    return HCCL_SUCCESS;
}

HcclResult CollCustomHugeAllReduceMeshExecutor::CalcStreamNum(u32 &streamNum)
{
    // Calculate the number of streams that need to be allocated
    // (one slave stream per peer device in the aggregation; the master stream is provided by the caller).
    // TODO: contestants may modify this according to the algorithm's needs.
    u32 totalStreamNum = topoAttr_.deviceNumPerAggregation;
    // Fix: guard against unsigned underflow — if deviceNumPerAggregation were 0,
    // "totalStreamNum - 1U" would wrap to UINT32_MAX and request ~4 billion streams.
    streamNum = (totalStreamNum > 0U) ? (totalStreamNum - 1U) : 0U;
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][CalcStreamNum] streamNum: %u", streamNum);
    return HCCL_SUCCESS;
}

HcclResult CollCustomHugeAllReduceMeshExecutor::CalcNotifyNum(u32 streamNum, u32 &notifyNum)
{
    // Calculate the number of notify resources that need to be allocated.
    // TODO: contestants may modify this according to the algorithm's needs.
    // Two notifies are requested per slave stream.
    constexpr u32 notifyPerStream = 2U;
    notifyNum = streamNum * notifyPerStream;
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][CalcNotifyNum] notifyNum: %u", notifyNum);
    return HCCL_SUCCESS;
}

HcclResult CollCustomHugeAllReduceMeshExecutor::CalcCommInfo(std::vector<LevelNSubCommTransport> &opTransport)
{
    // Calculate the communication-domain (transport) resource requests.
    // TODO: contestants may modify this according to the algorithm's needs.
    // Fix: the log tag previously said [CalcNotifyNum] — a copy-paste error that
    // misattributes this log line to the wrong function.
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][CalcCommInfo]");

    // Data flows CCL_Input -> CCL_Output.
    TransportMemType inputType = TransportMemType::CCL_INPUT;
    TransportMemType outputType = TransportMemType::CCL_OUTPUT;
    // Build full-mesh links within level 0.
    CommParaInfo commParaLevel0(COMM_LEVEL0, CommType::COMM_TAG_MESH);
    // Construct the level-0 communication-domain resource request;
    // this ultimately invokes CalcMeshTransportReq::CalcTransportRequest().
    CHK_RET(CalcCommPlaneInfo(tag_, commParaLevel0, opTransport[COMM_LEVEL0], inputType, outputType));
    return HCCL_SUCCESS;
}

u64 CollCustomHugeAllReduceMeshExecutor::CalcLoopMaxCount(const u64 cclBuffSize, const u32 unitSize)
{
    // Calculate the maximum element count processed per loop iteration,
    // i.e. how many elements of `unitSize` bytes fit in the CCL buffer.
    // TODO: contestants may modify this according to the algorithm's needs.

    // Fix: guard against division by zero (UB) if an unknown dtype yields unitSize == 0.
    u64 maxCountPerLoop = (unitSize > 0U) ? (cclBuffSize / unitSize) : 0U;
    // Fix: maxCountPerLoop is u64, so the format specifier must be %llu, not %u.
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][CalcLoopMaxCount] maxCountPerLoop: %llu",
                 maxCountPerLoop);
    return maxCountPerLoop;
}

HcclResult CollCustomHugeAllReduceMeshExecutor::Orchestrate(OpParam &param, AlgResourceResponse &algRes)
{
    // Top-level entry for algorithm orchestration: splits the user buffer into
    // CCL-buffer-sized chunks and runs KernelRun once per chunk.
    // TODO: contestants may modify this according to the algorithm's needs.

    // Fix: param.DataDes.count is u64, so the format specifier must be %llu, not %u.
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][Orchestrate] count: %llu", param.DataDes.count);
    tag_ = param.tag;
    algResResp_ = &algRes;

    // User_Input and User_Output pointers.
    u8 *userInputPtr = static_cast<u8 *>(param.inputPtr);
    u8 *userOutputPtr = static_cast<u8 *>(param.outputPtr);
    CHK_PTR_NULL(userInputPtr);
    CHK_PTR_NULL(userOutputPtr);

    u32 unitSize = SIZE_TABLE[param.DataDes.dataType];
    u64 maxCountPerLoop = CalcLoopMaxCount(algRes.cclInputMem.size(), unitSize);

    // Loop over the data in chunks of at most maxCountPerLoop elements.
    for (u64 countLeft = param.DataDes.count, curCount = 0, inputOffset = 0, outputOffset = 0; countLeft > 0;) {
        curCount = (countLeft > maxCountPerLoop) ? maxCountPerLoop : countLeft;
        u64 curSize = curCount * unitSize; // curSize takes one of three data volumes: 512K / 2M / 64M

        // Build the memory description used by this iteration.
        ExecMem execMem;
        execMem.count = curCount;                         // element count handled this iteration
        execMem.inputPtr = userInputPtr + inputOffset;    // User_Input pointer for this iteration
        execMem.outputPtr = userOutputPtr + outputOffset; // User_Output pointer for this iteration
        execMem.inputMem = algRes.cclInputMem;            // local CCL_Input memory
        execMem.outputMem = algRes.cclOutputMem;          // local CCL_Output memory
        execMem.scratchMem = algRes.scratchMem;           // local Scratch memory

        // Process this chunk.
        CHK_RET(KernelRun(param, execMem));

        // Advance the offsets.
        // Fix: the offsets must ACCUMULATE (+=). The original assigned (=), so from
        // the third iteration onward the same region of the user buffer was
        // re-read/re-written while the remaining data was never touched.
        countLeft -= curCount;
        inputOffset += curSize;
        outputOffset += curSize;
    }
    return HCCL_SUCCESS;
}

// Processes a single loop iteration's worth of data.
// param:   collective-op parameters (data type, stream, tag, ...).
// execMem: memory description for this iteration (count, user/CCL/scratch buffers).
// Returns HCCL_SUCCESS on success.
// NOTE(review): this is a contest skeleton — unitSize/curSize/masterStream are
// computed as placeholders for the TODO sections below and are not yet used.
HcclResult CollCustomHugeAllReduceMeshExecutor::KernelRun(const OpParam &param, ExecMem &execMem)
{
    // Process the data for a single loop iteration.
    // TODO: contestants may modify this according to the algorithm's needs.

    u32 unitSize = SIZE_TABLE[param.DataDes.dataType]; // byte size of one element of this data type
    u64 curSize = execMem.count * unitSize; // bytes to process this iteration; three data volumes: 512K/2M/64M
    // const_cast: param is const but downstream stream APIs presumably need a
    // mutable Stream reference — TODO confirm against the Stream API.
    hccl::Stream &masterStream = const_cast<hccl::Stream &>(param.stream); // master stream

    // TODO: stream synchronization

    CHK_RET(CheckCommSize(COMM_LEVEL0, COMM_INDEX_0 + 1));
    SubCommInfo level0CommInfo = GetSubCommInfo(COMM_LEVEL0, COMM_INDEX_0);
    HCCL_WARNING("[HCCLContest][CollCustomHugeAllReduceMeshExecutor][KernelRun] localRank: %u, localRankSize: %u",
                 level0CommInfo.localRank, level0CommInfo.localRankSize);

    // TODO: data movement

    return HCCL_SUCCESS;
}

// Register this executor with the framework under its algorithm name so it can be
// looked up and instantiated at runtime.
REGISTER_EXEC("CustomHugeAllReduceMeshExecutor", CustomHugeAllReduceMesh, CollCustomHugeAllReduceMeshExecutor);
} // namespace hccl
