/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved.
 * Description: 集合通信算子图优化头文件
 * Author: lilianlin
 * Create: 2019-12-7
 */

#ifndef HCOM_GRAPH_OPTIMIZER_H
#define HCOM_GRAPH_OPTIMIZER_H

#include <cstdint>
#include <map>
#include <string>
#include <unordered_set>
#include "common/optimizer/graph_optimizer.h"
#include "common/optimizer/graph_optimizer_types.h"
#include "graph/compute_graph.h"
#include "hccl/hccl_types.h"
#include "base.h"
#include "comm.h"
#include "es_private.h"
#include "hcom_pub.h"

namespace hccl {
// Name under which this optimizer is registered with GE (reported via GetAttributes).
// Qualified as std::string: the previous unqualified `string` relied on a
// transitive using-declaration from another header.
const std::string HCCL_GRAPH_OPTIMIZER_NAME = "hccl_graph_optimizer";
// Paired input/output tensor count whose formats must be identical.
constexpr std::int64_t HCCL_FORMAT_PAIRED_INPUT_OUTPUT = 2;

// Key/value string pairs carrying per-group parallel labels
// (exact key semantics defined by the users of SetHcomOpParallelLabel).
using GroupParaLabel = std::map<std::string, std::string>;

// Lookup table from an HCCL kernel op-type string (as attached to graph nodes)
// to the corresponding HCCL command type used to dispatch the collective.
// An op type absent from this table is not a recognized HCCL collective.
const std::map<std::string, HcclCMDType> HCCL_OPTYPE_NAME_MAP = {
    {HCCL_KERNEL_OP_TYPE_BROADCAST, HcclCMDType::HCCL_CMD_BROADCAST},
    {HCCL_KERNEL_OP_TYPE_SCATTER, HcclCMDType::HCCL_CMD_SCATTER},
    {HCCL_KERNEL_OP_TYPE_ALLREDUCE, HcclCMDType::HCCL_CMD_ALLREDUCE},
    {HCCL_KERNEL_OP_TYPE_REDUCE, HcclCMDType::HCCL_CMD_REDUCE},
    {HCCL_KERNEL_OP_TYPE_SEND, HcclCMDType::HCCL_CMD_SEND},
    {HCCL_KERNEL_OP_TYPE_RECEIVE, HcclCMDType::HCCL_CMD_RECEIVE},
    {HCCL_KERNEL_OP_TYPE_ALLGATHER, HcclCMDType::HCCL_CMD_ALLGATHER},
    {HCCL_KERNEL_OP_TYPE_ALLGATHERV, HcclCMDType::HCCL_CMD_ALLGATHER_V},
    {HCCL_KERNEL_OP_TYPE_REDUCESCATTER, HcclCMDType::HCCL_CMD_REDUCE_SCATTER},
    {HCCL_KERNEL_OP_TYPE_REDUCESCATTERV, HcclCMDType::HCCL_CMD_REDUCE_SCATTER_V},
    {HCCL_KERNEL_OP_TYPE_ALLTOALLV, HcclCMDType::HCCL_CMD_ALLTOALLV},
    {HCCL_KERNEL_OP_TYPE_ALLTOALLVC, HcclCMDType::HCCL_CMD_ALLTOALLVC},
    {HCCL_KERNEL_OP_TYPE_ALLTOALL, HcclCMDType::HCCL_CMD_ALLTOALL},
    {HCCL_KERNEL_OP_TYPE_GATHER, HcclCMDType::HCCL_CMD_GATHER}
};

/**
 * Graph optimizer for HCCL collective-communication operators.
 *
 * Registered with GE's graph-compilation pipeline; across the optimization
 * stages it fuses compatible HCCL nodes (AllReduce/Broadcast/Reduce), sets
 * operator attributes and formats, and computes workspace/output memory
 * sizes for HCCL kernels.
 */
class HcomGraphOptimizer : public ge::GraphOptimizer {
public:
    HcomGraphOptimizer();
    ~HcomGraphOptimizer() override;
    // Initialize the optimizer with GE options; called once before any optimize pass.
    ge::Status Initialize(const map<std::string, std::string>& options,
                          ge::OptimizeUtility *const optimizeUtility) override;
    // close graphOptimizer
    ge::Status Finalize() override;
    // optimize original graph for FE quant optimize
    ge::Status OptimizeGraphPrepare(ge::ComputeGraph& graph) override;
    // optimize original graph, using in graph preparation stage
    ge::Status OptimizeOriginalGraph(ge::ComputeGraph& graph) override;
    // optimize fused graph
    ge::Status OptimizeFusedGraph(ge::ComputeGraph& graph) override;
    // optimize whole graph, using after graph merged stage
    ge::Status OptimizeWholeGraph(ge::ComputeGraph& graph) override;
    // get attribute of graph optimizer
    ge::Status GetAttributes(ge::GraphOptimizerAttribute& attrs) const override;

    // Op types given special handling by this optimizer.  NOTE(review):
    // presumably the embedding-service op set (cf. es_private.h) — confirm
    // against the definition in the implementation file.
    static const std::unordered_set<std::string> ES_OPS;

protected:
    // Virtual so derived/test optimizers can override per-op calculations.
    virtual HcclResult CheckSupportedOP(const std::string &sCollectiveType) const;
    virtual HcclResult CalcOpRunningParam(ge::Node& node, bool unknownShapeGraph);
    virtual HcclResult SetOpOutputMemSize(ge::Node& node, const std::string &sCollectiveType);
    virtual HcclResult CalcHCCLOutputMemSize(const std::string &sCollectiveType, int64_t& memSize);
    virtual HcclResult SetOpMemAttr(ge::Node& node, const std::string &sCollectiveType, const u64 &opMemSize);
    HcclResult HcomGraphOptimizeInitialize(const map<std::string, std::string>& options,
                            ge::OptimizeUtility *const optimizeUtility);
    HcclResult HcomOptimizeOriginalGraph(ge::ComputeGraph& graph, bool& unknownShapeGraph);
    HcclResult OriginalGraphShapeTypeCfg(ge::ComputeGraph &graph, bool &unknownShapeGraph);
    HcclResult SetUnknownShapeAttr(ge::ComputeGraph &graph, bool unknownShapeGraph);
    HcclResult UpdateFusionTensorSizeLimit(bool unknownShape, u64 &fusionTensorSize);
private:
    // Node-fusion passes for the individual collective types.
    HcclResult FuseHcomAllReduceNode(ge::ComputeGraph& graph);
    HcclResult FuseHcomBroadcastNode(ge::ComputeGraph& graph, u64 fusionTensorSize);
    HcclResult FuseHcomReduceNode(ge::ComputeGraph& graph);
    HcclResult SetHcomOpAttrs(ge::OpDescPtr &opDescPtr);
    HcclResult SetHcomOpFormat(ge::OpDescPtr &opDescPtr);
    HcclResult SetHcomOpParallelLabel(ge::Node &node, std::string groupLabel);
    HcclResult GetOpWorkspaceMemSize(ge::Node& node, const std::string &sCollectiveType, u64 &opMemSize);
    HcclResult GetCountFromOpDesc(const ge::OpDescPtr &op, const std::string &sCollectiveType,
        HcclDataType dataType, u64 &count);
    HcclResult GetCommFromOpDesc(const ge::OpDescPtr &op, int64_t hcomComm, std::string &sGroup);
    HcclResult SetOpAtomicInputIndex(ge::Node& node, const std::string &sCollectiveType);
    HcclResult GetHcomReceiveOpOutputSize(const ge::OpDescPtr &op, u32 dataTypeSize, u64 &outputSize);
    HcclResult HcomCalcOpRunningParam(ge::Node& node, bool unknownShapeGraph);
    HcclResult MarkRemoteAccessMemoryType(ge::Node& node);
    HcclResult GetOriginalGraphShapeTypeFromDesc(const ge::OpDescPtr &op, u32 &shapeType);
    HcclResult GetLookupUpdateWorkspace(ge::Node& node, u64 &opMemSize, s32 flags);
    HcclResult GetTaskNumAndCheckForceUnknown(const ge::Node &node, const std::string &sCollectiveType,
        const u64 &streamNum);
    HcclResult CheckForceUnknown(const ge::Node &node, u32 &taskNum);
    HcclResult CalTaskNum(const ge::Node &node, const std::string &sCollectiveType, const u64 &streamNum,
        u32 &masterTaskNum, u32 &slaveTaskNum, u32 &piplineTaskNum, u32 &taskNum);
    HcclResult CalAndSetOpWorkerSpaceForKnowShape(ge::Node& node, const std::string &sCollectiveType, u64 &opMemSize);

    bool IsSubgraphMultiBatch(ge::ComputeGraph& graph);
    // Read integer attribute `attr` from `op` into `output`.
    // Returns HCCL_E_NOT_FOUND when the attribute is absent OR cannot be read
    // as an integer.  The previous implementation ignored GetInt's return
    // value, so a read failure left `output` untouched yet reported
    // HCCL_SUCCESS, letting callers consume stale/uninitialized data.
    inline HcclResult GetOpIntAttr(const ge::OpDescPtr &op, const string &attr, s32 &output)
    {
        if (!ge::AttrUtils::HasAttr(op, attr)) {
            return HCCL_E_NOT_FOUND;
        }
        if (!ge::AttrUtils::GetInt(op, attr, output)) {
            return HCCL_E_NOT_FOUND;
        }
        return HCCL_SUCCESS;
    }
    HcclResult GetOpDataSize(const ge::Node &node, u64 &opDataSize);
    // Upper bound on fused tensor size; refreshed by UpdateFusionTensorSizeLimit.
    uint64_t fusionTensorSizeLimit_;
    int32_t hcomMultiMode_;
    int32_t optionFeatureBaseRefreshable_;
};
}
#endif