# Copyright 2020 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Yet Another onnx Dummper """

from __future__ import division
from __future__ import print_function

import argparse
import copy
import csv
import enum
import logging
import os
import sys
from collections import Counter
from collections import defaultdict
from collections import deque

import onnx
from google.protobuf import text_format

# pylint: disable=missing-docstring

#logging.basicConfig(stream=sys.stdout, format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.INFO)
logging.basicConfig(stream=sys.stdout, format='%(message)s', level=logging.INFO)


def get_file_name(path):
    """Return the final component of *path* (file name with extension)."""
    _head, tail = os.path.split(path)
    return tail


def get_file_name_without_ext(path):
    """Return the base file name of *path* with its last extension stripped.

    A name without any '.' yields the empty string, matching the original
    split/join behaviour.
    """
    base = os.path.basename(path)
    stem, _sep, _ext = base.rpartition('.')
    return stem


def replace_file_extension(path, ext):
    """Replace the extension of *path* with *ext*, tagging the stem with '_out'.

    e.g. replace_file_extension('model.pbtxt', 'onnx') -> 'model_out.onnx'.
    Returns just *ext* when *path* contains no '.' to strip.
    """
    tokens = path.split('.')[:-1]
    if len(tokens) > 0:
        # Bug fix: append '_out' to the stem (the last token before the
        # extension), not to the first token — paths containing extra dots,
        # e.g. 'v1.2/model.pbtxt', were mangled to 'v1_out.2/model.onnx'.
        tokens[-1] += '_out'
    tokens.append(ext)
    return '.'.join(tokens)


def append_file_name_suffix(path, suffix):
    """Insert '_<suffix>' before the last extension of *path*.

    e.g. append_file_name_suffix('g.pbtxt', 'sub') -> 'g_sub.pbtxt'.
    """
    tokens = path.split('.')
    if len(tokens) < 2:
        # Robustness fix: a path without any extension used to raise
        # IndexError on tokens[-2]; just append the suffix instead.
        return path + '_' + suffix
    tokens[-2] += '_' + suffix
    return '.'.join(tokens)


def get_file_directory(path):
    """Return the directory part of *path* (may be an empty string)."""
    head, _tail = os.path.split(path)
    return head


def get_file_directory_name(path):
    """Return the name of the directory that directly contains *path*."""
    return os.path.basename(os.path.dirname(path))


def create_directory(path):
    """Create *path* (including parents) if it does not already exist.

    `exist_ok=True` already makes os.makedirs idempotent, so the previous
    `os.path.isdir` pre-check was redundant and race-prone (LBYL).
    """
    os.makedirs(path, exist_ok=True)


def load_graph_def_from_pb(path):
    """Parse an ONNX ModelProto from a protobuf *text* format file; return its graph.

    NOTE(review): despite the "_pb" name, the content is parsed with
    text_format.Parse (i.e. .pbtxt-style dumps), while the file is opened in
    binary mode and raw bytes are handed to the parser — confirm the installed
    protobuf version accepts bytes input here.
    """
    with open(path, "rb") as f:
        data = f.read()
        model = onnx.ModelProto()
        text_format.Parse(data, model)
    return model.graph


def save_graph_def(graph_def, path, as_text=False):
    """Wrap *graph_def* in a ModelProto and write it to *path*.

    When *as_text* is true the model is written in protobuf text format,
    otherwise as a regular binary ONNX file.
    """
    model_def = onnx.helper.make_model(graph_def, producer_name='onnx-subgraph')
    if not as_text:
        onnx.save(model_def, path)
        return
    with open(path, "w") as f:
        f.write(text_format.MessageToString(model_def))


def get_node_name(tensor_name):
    """Strip the control marker '^' or a ':index' suffix from a tensor name."""
    if tensor_name[:1] == "^":
        return tensor_name[1:]
    name, _sep, _index = tensor_name.partition(":")
    return name


def get_node_index_and_name(tensor_name):
    """Split a tensor name into (output_index, node_name).

    Control inputs ('^name') and unconnected anchors ('') report index 0.
    Robustness fix: a plain name without any ':index' suffix now also reports
    index 0 instead of raising ValueError from the old two-value unpack.
    """
    if tensor_name.startswith("^"):
        return 0, tensor_name[1:]
    # when a node has an input anchor but not connected, the input tensor
    # name will be an empty string
    if len(tensor_name) == 0:
        return 0, ""
    name, sep, index = tensor_name.rpartition(":")
    if not sep:
        # No explicit output index on the anchor name.
        return 0, tensor_name
    return int(index), name


def find_attribute(node, attr_name):
    """Return the attribute of *node* named *attr_name*, or None if absent."""
    matches = (attr for attr in node.attribute if attr.name == attr_name)
    return next(matches, None)


def get_node_shape(node):
    """Return the shape stored in the node's 'output_desc_shape:0' attribute.

    Bug fix: the attribute lives in node.attribute and must be located with
    find_attribute() — getattr() can never resolve a proto attribute whose
    name contains ':'.  The .ints field already holds plain integers, so the
    previous '.size' access was wrong as well (mirrors get_node_origin_shape).
    Raises AttributeError when the attribute is missing; callers guard with
    try/except.
    """
    shape_attr = find_attribute(node, "output_desc_shape:0")
    shape = [d for d in shape_attr.ints]
    return shape


def get_node_origin_shape(node, index=0):
    """Return the origin shape recorded for output *index* of *node*."""
    shape_attr = find_attribute(node, f"output_desc_origin_shape:{index}")
    return list(shape_attr.ints)


def get_graph_def_io_nodes(graph_def):
    """Collect the graph's inputs and outputs.

    Returns (inputs, outputs, input_shapes):
      inputs       - names of Const/Data/Placeholder nodes, in graph order
      outputs      - names of nodes no other node consumes (excluding inputs)
      input_shapes - one shape list per input ([] when unavailable)
    """
    consumed = set()
    inputs = []
    outputs = []
    input_shapes = []
    for node in graph_def.node:
        for i in node.input:
            consumed.add(get_node_name(i))
        if node.op_type in ("ge:Const", "ge:Data", "ge:Placeholder"):
            inputs.append(node.name)
            shape = []
            try:
                shape = get_node_shape(node)
            except Exception:  # best effort: the shape attribute may be absent
                pass
            input_shapes.append(shape)

    input_set = set(inputs)  # O(1) membership instead of scanning the list
    for node in graph_def.node:
        if node.name not in consumed and node.name not in input_set:
            outputs.append(node.name)

    return inputs, outputs, input_shapes


class GraphSummary:
    """Pre-computed indexes over a graph: nodes, edges, types and order."""

    def __init__(self):
        # Per-node input name lists (all / data-only / control-only).
        self.names_to_input_names = {}
        self.names_to_data_input_names = {}
        self.names_to_ctrl_input_names = {}
        # Per-node consumer name lists (all / data-only).
        self.names_to_output_names = defaultdict(list)
        self.names_to_data_output_names = defaultdict(list)
        self.names_to_node = {}
        self.types_to_nodes = defaultdict(list)
        # Keeps track of node sequences. It is important to still output the
        # operations in the original order.
        self.names_to_seq_num = {}

    def get_node_type_from_name(self, node_name):
        """Return the op_type of *node_name*, or None when it is unknown."""
        node = self.names_to_node.get(node_name)
        return None if node is None else node.op_type

    def get_node_type_from_anchor_name(self, io_name):
        """Like get_node_type_from_name, but accepts anchor names ('n:0', '^n')."""
        return self.get_node_type_from_name(get_node_name(io_name))
    

def build_graph_summary(graph_def):
    """Build a GraphSummary index over *graph_def*.

    Data and control inputs are distinguished by the anchor index returned by
    get_node_index_and_name (a negative index marks a control edge).
    """
    gs = GraphSummary()
    # enumerate replaces the manual `seq` counter.
    for seq, node in enumerate(graph_def.node):
        node_name = get_node_name(node.name)
        gs.names_to_node[node_name] = node
        input_indexes_and_names = [get_node_index_and_name(input_name) for input_name in node.input]
        gs.names_to_data_input_names[node_name] = \
            [name for index, name in input_indexes_and_names if index >= 0]
        gs.names_to_ctrl_input_names[node_name] = \
            sorted(name for index, name in input_indexes_and_names if index < 0)
        gs.names_to_input_names[node_name] = \
            gs.names_to_data_input_names[node_name] + gs.names_to_ctrl_input_names[node_name]
        if len(node.output) == 0 and len(gs.names_to_output_names[node_name]) != 0:
            # Consistency fix: use logging like the rest of the file, and fix
            # the grammar of the diagnostic.
            logging.warning("The node %s has no output record, but other nodes list it as an input", node_name)
        for input_name in node.input:
            gs.names_to_output_names[get_node_name(input_name)].append(node_name)
            index, _ = get_node_index_and_name(input_name)
            if index >= 0:
                gs.names_to_data_output_names[get_node_name(input_name)].append(node_name)
        gs.names_to_seq_num[node_name] = seq
        gs.types_to_nodes[node.op_type].append(node)
    return gs


# Bit flags selecting the traversal direction for the dump-node lookup
# (FORWARD | BACKWARD == ALL).
LOOKUP_FORWARD = 1
LOOKUP_BACKWARD = 2
LOOKUP_ALL = 3


def lookup_dump_nodes(gs:GraphSummary, start_node_names, stop_names, stop_types, stop_leaves_count, layer_num, lookup_directions) -> set:
    """BFS from *start_node_names* and return the set of visited node names.

    Each queue entry is (node_name, direction, depth).  Expansion stops at
    nodes in *stop_names*, nodes whose type is in *stop_types*, at *layer_num*
    depth, and whenever the fan-out/fan-in reaches *stop_leaves_count*.
    """
    # deque.popleft() is O(1); the previous list.pop(0) was O(n) per step.
    dump_tasks = deque()
    for start_node_name in start_node_names:
        if lookup_directions & LOOKUP_FORWARD:
            dump_tasks.append((start_node_name, 'forward', 0))
        if lookup_directions & LOOKUP_BACKWARD:
            dump_tasks.append((start_node_name, 'backward', 0))

    dump_node_names = set()
    while dump_tasks:
        name, direction, depth = dump_tasks.popleft()
        if len(name) == 0:
            continue
        dump_node_names.add(name)

        if name in stop_names:
            continue
        node = gs.names_to_node.get(name)
        if node and node.op_type in stop_types:
            continue

        if depth >= layer_num:
            continue

        if direction == 'forward':
            next_names = gs.names_to_output_names.get(name, [])
        else:
            next_names = gs.names_to_input_names.get(name, [])
        if len(next_names) < stop_leaves_count:
            for next_name in next_names:
                dump_tasks.append((next_name, direction, depth + 1))
    return dump_node_names


def generate_graph(gs:GraphSummary, backbone_names, output_path, without_leaves):
    """Write the subgraph spanned by *backbone_names* to *output_path* (text).

    Unless *without_leaves* is set, each backbone node's direct producers and
    consumers are included as well.  Nodes are emitted in original order.
    """
    dump_names = set()
    for backbone in backbone_names:
        if len(backbone) == 0:
            continue
        dump_names.add(backbone)
        if not without_leaves:
            for input_name in gs.names_to_input_names.get(backbone, []):
                if input_name not in gs.names_to_node:
                    logging.debug("The leaf node {} does not exists, the graph may not complete".format(input_name))
                    continue
                dump_names.add(input_name)
            for output_name in gs.names_to_output_names.get(backbone, []):
                if output_name not in gs.names_to_node:
                    # Bug fix: log the offending output_name here (this branch
                    # previously logged the unrelated input_name variable).
                    logging.debug("The leaf node {} does not exists, the graph may not complete".format(output_name))
                    continue
                dump_names.add(output_name)

    dump_nodes = []
    for dump_name in dump_names:
        dump_node = gs.names_to_node.get(dump_name, None)
        if dump_node is None:
            logging.warning("The node {} does not exists".format(dump_name))
        else:
            dump_nodes.append(dump_node)
    # Keep the original topological emission order.
    dump_nodes.sort(key=lambda node: gs.names_to_seq_num[node.name])

    out = onnx.GraphProto()
    for dump_node in dump_nodes:
        out.node.extend([copy.deepcopy(dump_node)])

    logging.info("save to %s", output_path)
    logging.info("total node = %s", len(out.node))
    save_graph_def(out, output_path, as_text=True)


def lookup_names_method1(gs:GraphSummary, name=None, stop_names=None, stop_types=None, stop_leaves_count=None,
                         layer_number=1, only_forward=False, only_backward=False) -> set:
    """Lookup method 1: breadth-first expansion around the given node names.

    Returns an empty set when no names are given or when both direction
    restrictions are requested simultaneously.
    """
    if name is None:
        return set()

    lookup_directions = LOOKUP_ALL
    if only_forward:
        lookup_directions &= ~LOOKUP_BACKWARD
    if only_backward:
        lookup_directions &= ~LOOKUP_FORWARD
    if lookup_directions == 0:
        logging.error("the --only_forward and --only_backward can not exists at the same time")
        return set()

    start_nodes = [candidate for candidate in name if candidate in gs.names_to_node]
    logging.info("Begin to find dump nodes, start nodes {}".format(start_nodes))
    return lookup_dump_nodes(gs, start_nodes, stop_names, stop_types, stop_leaves_count, layer_number, lookup_directions)


def lookup_names_method2(gs: "GraphSummary", start_names=None, end_names=None) -> set:
    """Lookup method 2: all nodes on paths from start_names[i] to end_names[i].

    For each (start, end) pair: collect everything that can reach *end*
    (backward pass), then walk forward from *start* restricted to that set.
    Returns the union over all pairs; *end* itself is always included.
    """
    if start_names is None and end_names is None:
        return set()
    if start_names is None or end_names is None:
        # Bug fix: this case used to print the count-mismatch message; the
        # actual problem is that one of the two options is missing entirely.
        logging.error("Both --start_name and --end_name must be provided")
        return set()
    if len(start_names) != len(end_names):
        logging.error("The number of --start_name and the --end_name must be the same")
        return set()

    logging.info("Begin to lookup nodes by start and end nodes...")
    dump_nodes = set()
    for start, end in zip(start_names, end_names):
        # Backward pass: every node that can reach `end`.
        nodes_to_end = set()
        que = deque((end,))
        while que:
            node_name = que.popleft()  # O(1); list.pop(0) was O(n)
            if len(node_name) == 0 or node_name in nodes_to_end:
                continue
            nodes_to_end.add(node_name)
            if node_name == start:
                continue
            que.extend(gs.names_to_input_names.get(node_name, []))

        # Forward pass from `start`, restricted to the backward-reachable set.
        dump_nodes_this_pattern = set()
        que = deque((start,))
        while que:
            node_name = que.popleft()
            if node_name not in nodes_to_end:
                continue
            if node_name == end or node_name in dump_nodes_this_pattern:
                continue
            dump_nodes_this_pattern.add(node_name)
            que.extend(gs.names_to_output_names.get(node_name, []))

        dump_nodes_this_pattern.add(end)
        dump_nodes.update(dump_nodes_this_pattern)

    return dump_nodes


def lookup_names_method3(gs: "GraphSummary", name_prefix) -> set:
    """Lookup method 3: all node names starting with any prefix in *name_prefix*."""
    if name_prefix is None:
        return set()
    logging.info("Begin to lookup nodes by name prefixes...")
    # Idiom: str.startswith accepts a tuple of prefixes, which replaces the
    # inner per-prefix loop (an empty tuple matches nothing, as before).
    prefixes = tuple(name_prefix)
    return {name for name in gs.names_to_node if name.startswith(prefixes)}



def add_single_names(gs, single_name):
    """Validate every --single_name entry exists in the graph; return them as a set."""
    dump_nodes = set()
    for name in single_name or ():
        if name not in gs.names_to_node:
            raise Exception(f"The node specified by --single_name {name} does not exists")
        dump_nodes.add(name)
    return dump_nodes


def extract_sub_graph(input_path, output_path=None,
                      name=None, single_name=None, stop_name=None, stop_type=None, stop_leaves_count=None, layer_number=1,  # extract by node names
                      only_forward=False, only_backward=False,   # extract by node names
                      start_node=None, end_node=None,  # extract by start and end nodes
                      name_prefix=None,  # extract by name prefix
                      without_leaves=False):
    """Extract a subgraph from *input_path* and save it as text protobuf.

    The node set is the union of four lookup methods: BFS around names,
    start/end path search, name-prefix match and explicit single names.
    Returns 0 on success, -1 when no node matched.
    """
    if not output_path:
        output_path = append_file_name_suffix(input_path, "sub")

    logging.info("Begin to read in graph from file {}".format(input_path))
    graph_def = load_graph_def_from_pb(input_path)
    gs = build_graph_summary(graph_def)

    stop_name = set(stop_name) if stop_name else set()
    stop_type = set(stop_type) if stop_type else set()

    dump_node_names = lookup_names_method1(gs, name, stop_name, stop_type, stop_leaves_count, layer_number, only_forward, only_backward)
    dump_node_names |= lookup_names_method2(gs, start_node, end_node)
    dump_node_names |= lookup_names_method3(gs, name_prefix)
    dump_node_names |= add_single_names(gs, single_name)

    if not dump_node_names:
        logging.error("No nodes to dump")
        return -1
    generate_graph(gs, dump_node_names, output_path, without_leaves)
    return 0

# Op types categorized as vector (OpCate.VEC): elementwise, reduce, slice,
# gather and similar data-movement operators.
VECTOR_OP_TYPES = {
    "ge:Select",
    "ge:Fill",
    "ge:StridedSliceD", "ge:StridedSlice",
    "ge:ConcatV2D", "ge:ConcatV2", "ge:Concat",
    "ge:ReduceSum","ge:ReduceSumD",
    "ge:ReduceMax","ge:ReduceMaxD",
    "ge:ReduceMean", "ge:ReduceMeanD",
    "ge:ReduceProd",  "ge:ReduceProdD",
    "ge:Transpose", "ge:TransposeD", "ge:ConfusionTransposeD",
    "ge:Add", "ge:Sub", "ge:Mul", "ge:Div",
    "ge:Repeat",
    "ge:TileD", "ge:Tile",
    "ge:GatherV2", "ge:Gather",
    "ge:Pack", "ge:Unpack",
    "ge:AddN", "ge:Add",  # NOTE(review): "ge:Add" appears twice (harmless in a set literal)
    "ge:Cast",
    "ge:DynamicPartition",
    "ge:DynamicStitch",
    "ge:Equal",
    "ge:Exp",
    "ge:IsNan",
    "ge:LogicalAnd", "ge:LogicalNot", "ge:LogicalOr",
    "ge:Maximum", "ge:Minimum",
    "ge:Neg",
    "ge:Pow",
    "ge:RealDiv",
    "ge:Relu",
    "ge:Sigmoid",
    "ge:Slice", "ge:Split", "ge:SplitD", "ge:SplitV", "ge:SplitVD",
    "ge:Square", "ge:SquaredDifference",
    "ge:Tanh",
    "ge:ZerosLike",
    "ge:Rsqrt",
    "ge:ClipByValue",
    "ge:Greater",
    "ge:Sign",
    "ge:NotEqual"
}
# Op types categorized as cube (OpCate.CUBE): the matmul family.
CUBE_OP_TYPES = {
    "ge:MatMul", "ge:MatMulV2", "ge:BatchMatMul", "ge:BatchMatMulV2"
}
# View-like ops (OpCate.VIEW): Reshape, Squeeze, Shape and friends.
VIEW_TYPES = {
    "ge:Reshape", 
    "ge:Squeeze", "ge:SqueezeV2", "ge:SqueezeV3", "ge:Unsqueeze", 
    "ge:ExpandDims", "ge:Shape"
}
# Graph I/O ops (OpCate.IO): data feeds, constants and the net output.
IO_OP_TYPES = {
    "ge:Data", "ge:Const", "ge:Constant", "ge:NetOutput"
}
# Autofuse backend op types (OpCate.AUTOFUSE).
AUTOFUSE_TYPES = {
    "ge:AscBc", "ge:FusedAscBc", "ge:AscBackend", "ge:FusedAscBackend"
}
# Remaining TF-origin op types handled specially (OpCate.TF_OTHER).
TF_OTHER_TYPES = {
    "ge:FunctionOp",
    "ge:MultisliceConcat",
}

class OpCate(enum.Enum):
    """Coarse operator categories used by the fusion analysis."""
    IO = enum.auto()        # like Data, Const, NetOutput..
    VEC = enum.auto()       # vector
    CUBE = enum.auto()      # cube
    VIEW = enum.auto()      # like Reshape, Squeeze, Shape...
    AUTOFUSE = enum.auto()
    TF_OTHER = enum.auto()
    UNKNOWN = enum.auto()

    @staticmethod
    def get_op_cate(op_type):
        """Map an op_type string to its category; UNKNOWN when unclassified."""
        for type_set, category in (
                (IO_OP_TYPES, OpCate.IO),
                (VECTOR_OP_TYPES, OpCate.VEC),
                (CUBE_OP_TYPES, OpCate.CUBE),
                (VIEW_TYPES, OpCate.VIEW),
                (AUTOFUSE_TYPES, OpCate.AUTOFUSE),
                (TF_OTHER_TYPES, OpCate.TF_OTHER),
        ):
            if op_type in type_set:
                return category
        return OpCate.UNKNOWN

class StringUnionFind:
    """Union-find over strings that also tracks each root's member set."""

    def __init__(self):
        # name -> representative name (compressed lazily in find()).
        self.names_to_key = {}
        # representative -> set of every name in that group.
        self.keys_to_possibilities = defaultdict(set)

    def find(self, name):
        """Return the representative of *name*, registering it on first use."""
        key = self.names_to_key.get(name)
        if key is None:
            self.names_to_key[name] = name
            self.keys_to_possibilities[name].add(name)
            return name
        if key != name:
            key = self.find(key)
            self.names_to_key[name] = key  # path compression
        return key

    def join(self, name1, name2):
        """Merge the groups of *name1* and *name2*; return the surviving root."""
        root1 = self.find(name1)
        root2 = self.find(name2)
        if root1 == root2:
            return root1

        # Fold root2's group into root1 and retire root2.
        members2 = self.keys_to_possibilities.pop(root2)
        for member in members2:
            self.names_to_key[member] = root1
        self.keys_to_possibilities[root1] |= members2
        return root1

    def get_group(self, name):
        """Return all elements in the same set as *name*.

        If *name* has never been seen, it is registered first and the set
        containing only itself is returned.
        """
        return self.keys_to_possibilities[self.find(name)]


def find_all_shape_calc_ops(gs: GraphSummary):
    """Return the names of all nodes participating in shape computation.

    Walks forward along data edges starting from every ge:Shape node.  The
    walk deliberately skips Repeat/Fill/Reshape nodes — presumably because
    they consume a shape but emit real data (TODO confirm).
    """
    ops = set()
    ops_types = set()
    # deque.popleft() is O(1); list.pop(0) was O(n) per visited node.
    nodes = deque(node.name for node in gs.types_to_nodes.get("ge:Shape", []))
    while nodes:
        node_name = nodes.popleft()
        if node_name in ops:
            continue

        node_type = gs.names_to_node[node_name].op_type
        if node_type in {"ge:Repeat", "ge:Fill", "ge:Reshape"}:
            continue

        ops.add(node_name)
        ops_types.add(node_type)
        nodes.extend(gs.names_to_data_output_names.get(node_name, []))

    # Sanity check: a cube or IO op classified as shape calculation means the
    # walk definitely went wrong — dump the set for manual inspection.
    if len(ops_types & CUBE_OP_TYPES) > 0:
        generate_graph(gs, ops, "shape_calc_ops.pbtxt", True)
        raise Exception(f"Shape calculation ops may not correct, cube ops in it {ops_types & CUBE_OP_TYPES}, see shape_calc_ops.pbtxt in current dir")
    if len(ops_types & IO_OP_TYPES) > 0:
        generate_graph(gs, ops, "shape_calc_ops.pbtxt", True)
        raise Exception(f"Shape calculation ops may not correct, IO ops in it {ops_types & IO_OP_TYPES}, see shape_calc_ops.pbtxt in current dir")

    return ops


def find_all_embedding_id_calc_ops(gs: GraphSummary):
    """Return the names of all nodes feeding embedding-id computation.

    Walks backward along data edges from the partition input (input[1]) of
    every ge:DynamicPartition node and the condition input (input[0]) of
    every ge:Select node, minus a hard-coded allowlist of Select nodes.
    """
    ops = set()
    ops_types = set()
    # deque.popleft() is O(1); list.pop(0) was O(n) per visited node.
    nodes = deque()

    for node in gs.types_to_nodes.get('ge:DynamicPartition', []):
        nodes.append(get_node_name(node.input[1]))
    for node in gs.types_to_nodes.get('ge:Select', []):
        # TODO: special-case handling for the zi2 network
        if node.name in {"xd_logits_output_layer/Select",
                         "pay_logits_output_layer/Select",
                         "pclk_logits_output_layer/Select",
                         "lt_pay_logits_output_layer/Select",
                         "shopping_logits_output_layer/Select"}:
            continue

        nodes.append(get_node_name(node.input[0]))

    while nodes:
        node_name = nodes.popleft()
        if node_name in ops:
            continue
        node = gs.names_to_node.get(node_name, None)
        if node is None:
            continue

        ops.add(node_name)
        ops_types.add(node.op_type)

        for in_name in gs.names_to_data_input_names.get(node_name, []):
            if len(in_name) > 0:
                nodes.append(in_name)

    # Sanity check: a cube op classified as embedding-id calculation means the
    # walk definitely went wrong — dump the set for manual inspection.
    if len(ops_types & CUBE_OP_TYPES) > 0:
        generate_graph(gs, ops, "embedding_id_calc_ops.pbtxt", True)
        raise Exception(f"embedding id calculation ops may not correct, cube ops in it {ops_types & CUBE_OP_TYPES}, see embedding_id_calc_ops.pbtxt in current dir")

    return ops


def find_all_fuse(gs: GraphSummary, stop_forward, stop_concat_forward, ignore_unknown):
    """Group fusable nodes with a union-find and return the candidate groups.

    Returns (all_possibilities, shape_calc_ops, embedding_id_calc_ops) where
    all_possibilities is a list of node-name sets each having >= 2 members.
    Raises on unknown op types unless *ignore_unknown* is set.
    """
    stop_forward_names = set(stop_forward or [])
    shape_calc_ops = find_all_shape_calc_ops(gs)
    embedding_id_calc_ops = find_all_embedding_id_calc_ops(gs)

    fusion_possibilities = StringUnionFind()

    unknown_ops = set()

    def op_support(name):
        op_type = gs.get_node_type_from_name(name)
        # When working on an already-split graph, a node's input may have
        # been cut away; such dangling names are simply ignored.
        if op_type is None:
            return False
        op_cate = OpCate.get_op_cate(op_type)
        if op_cate == OpCate.UNKNOWN:
            unknown_ops.add(op_type)
            return False
        return op_cate in {OpCate.VEC, OpCate.VIEW, OpCate.AUTOFUSE}

    def _get_concat_dim(node):
        # Resolve a possibly-negative concat axis against the origin shape;
        # an unknown-rank shape ([-2]) keeps the raw (negative) axis.
        dim = find_attribute(node, "concat_dim")
        if not dim:
            logging.error(f"no concat dim attribute for concat node {node.name}")
            return None
        dim = dim.i
        if dim >= 0:
            return dim
        shape = get_node_origin_shape(node)
        if shape == [-2]:
            return dim
        return len(shape) + dim

    def can_fuse(src, dst):
        src = gs.names_to_node[src]
        dst = gs.names_to_node[dst]
        if stop_concat_forward:
            # A concat may only fuse forward into another concat on the same axis.
            concat_types = {"ge:Concat", "ge:ConcatV2", "ge:ConcatV2D"}
            if src.op_type in concat_types:
                if dst.op_type not in concat_types:
                    return False
                return _get_concat_dim(src) == _get_concat_dim(dst)

        return True

    for name in gs.names_to_node:
        if ((not op_support(name))
            or (name in shape_calc_ops)
            or (name in embedding_id_calc_ops)):
            continue

        for input_name in gs.names_to_data_input_names[name]:
            if (op_support(input_name)
                and (input_name not in stop_forward_names)
                and (input_name not in shape_calc_ops)
                and (input_name not in embedding_id_calc_ops)
                and can_fuse(input_name, name)):
                # Removed the unused `nodes = ...` lookup that followed here.
                fusion_possibilities.join(input_name, name)

    if len(unknown_ops) > 0:
        if ignore_unknown:
            # Bug fix: this called logging.wraning (AttributeError at runtime).
            logging.warning(f"Unknown op type(s) {','.join(unknown_ops)}")
        else:
            raise Exception(f"Unknown op type(s) {','.join(unknown_ops)}")

    all_possibilities = []
    for key, possibilities in fusion_possibilities.keys_to_possibilities.items():
        if len(possibilities) < 2:
            logging.info(f"Isolating node {gs.get_node_type_from_name(key)}({key})")
            continue
        all_possibilities.append(possibilities)
    return all_possibilities, shape_calc_ops, embedding_id_calc_ops

def print_warning_out_degree_abnormal(gs: GraphSummary, names, path):
    """Warn about nodes in *names* (a set) with >= 8 data consumers inside the group."""
    out_abnormals = []
    for name in names:
        out_names = gs.names_to_data_output_names.get(name, [])
        if len(out_names) < 8:
            continue
        # Only consumers that are themselves part of the group count.
        out_degree_num = len(set(out_names) & names)
        if out_degree_num >= 8:
            out_abnormals.append(f"{name}: {out_degree_num}")
    if out_abnormals:
        logging.warning(f"Path {path}, nodes have huge out degree: {','.join(out_abnormals)}")


def sum_duration(nodes):
    """Sum 'marked-Task Duration(us)' over all AI_VECTOR_CORE nodes in *nodes*."""
    total = 0.0
    for node in nodes:
        task_type = find_attribute(node, "marked-Task Type")
        if task_type is None or task_type.s.decode('utf-8') != "AI_VECTOR_CORE":
            continue
        duration = find_attribute(node, "marked-Task Duration(us)")
        if duration is None:
            raise Exception(f"Matched Task Type = AI_VECTOR_CORE, but can not find Task Duration(us) in node {node.name}")
        total += duration.f
    return total


def find_fuse(input_path, name=None, type=None, stop_forward=None, stop_concat_forward=None, without_leaves=None, ignore_unknown=False):
    """Find autofuse possibilities in the graph and dump each as a subgraph.

    *name*/*type* filter the reported groups; bare type names get a 'ge:'
    prefix.  The shape-calc and embedding-id-calc node sets are dumped too.
    (The parameter name `type` shadows the builtin but is kept for
    backward compatibility with existing callers.)
    """
    names = set(name or [])
    types = set([(t if t.startswith("ge:") else f"ge:{t}") for t in (type or [])])

    # Typo fix in the user-facing message: "Detact" -> "Detect".
    print(f"""Detect autofuse possibilities from {input_path} """)
    graph_def = load_graph_def_from_pb(input_path)
    gs = build_graph_summary(graph_def)

    possibilities, shape_calc_ops, embedding_id_calc_ops = find_all_fuse(gs, stop_forward, stop_concat_forward, ignore_unknown)

    def _matches(possibility):
        # Renamed from `filter` to stop shadowing the builtin.
        if len(names) == 0 and len(types) == 0:
            return True
        if len(names) > 0 and len(possibility & names) > 0:
            return True
        if len(types) > 0:
            p_types = {gs.get_node_type_from_name(n) for n in possibility}
            if len(p_types & types) > 0:
                return True
        return False

    filted = [p for p in possibilities if _matches(p)]

    logging.info(f"Found autofuse possibilities, total num {len(possibilities)}, matched num {len(filted)}")
    for i, po in enumerate(filted):
        output_path = append_file_name_suffix(input_path, f"{i}")
        print_warning_out_degree_abnormal(gs, po, output_path)
        duration = sum_duration([gs.names_to_node[n] for n in po])
        if duration > 0:
            logging.info(f"File {output_path}, duration {duration} us")
        generate_graph(gs, po, output_path, without_leaves)

    if len(shape_calc_ops) > 0:
        generate_graph(gs, shape_calc_ops, append_file_name_suffix(input_path, "shape_calc"), without_leaves)
    if len(embedding_id_calc_ops) > 0:
        generate_graph(gs, embedding_id_calc_ops, append_file_name_suffix(input_path, "embedding_id_calc"), without_leaves)

def find(input_path, node_type=None):
    """Log every node name in the graph grouped by the requested op types."""
    logging.info("Begin to read in graph from file {}".format(input_path))
    graph_def = load_graph_def_from_pb(input_path)

    node_types_to_names = defaultdict(list)
    if node_type is not None:
        wanted = set(node_type)
        for node in graph_def.node:
            if node.op_type in wanted:
                node_types_to_names[node.op_type].append(node.name)
    for op_type, node_names in node_types_to_names.items():
        logging.info("Node type {}:".format(op_type))
        for node_name in sorted(node_names):
            logging.info("  {}".format(node_name))
    return


def get_node_id(node):
    """Return the integer value of the node's 'id' attribute, or None if absent."""
    return next((attr.i for attr in node.attribute if attr.name == 'id'), None)


def detect_circle(input_path):
    """Detect possible cycles in the graph.

    Performs a Kahn-style forward sweep: a node becomes visitable once all of
    its inputs have been seen.  Any node left unvisited afterwards sits on or
    behind a cycle; edges crossing from the seen region into the unseen region
    are printed as "Maybe circle" candidates.
    """
    logging.info("Begin to read in graph from file {}".format(input_path))
    graph_def = load_graph_def_from_pb(input_path)
    gs = build_graph_summary(graph_def)
    logging.info("Load graph end")

    # Collect start nodes: those with no input at all, or whose inputs are all
    # `outer nodes`.  An `outer node` is a name that appears in some input
    # field but does not exist on the graph (typical when analysing a split
    # subgraph); outer names are pre-marked as seen so they never block their
    # consumers.
    nodes = []
    seen_nodes = set()
    for name in gs.names_to_node:
        input_names = gs.names_to_input_names[name]
        if len(input_names) == 0:
            nodes.append(name)
            continue
        has_input = False
        all_input_not_in_graph = True
        for input_name in input_names:
            if len(input_name) > 0:
                has_input = True
                if input_name not in gs.names_to_node:
                    seen_nodes.add(input_name)
                else:
                    all_input_not_in_graph = False
        if not has_input or all_input_not_in_graph:
            nodes.append(name)
            continue
    print("Start nodes count {}: {}".format(len(nodes), nodes))

    # Forward sweep: mark the node as seen, then enqueue each consumer whose
    # inputs have now all been seen.
    while len(nodes) > 0:
        node = nodes.pop(0)
        if len(node) == 0:
            continue
        seen_nodes.add(node)
        for out_name in set(gs.names_to_output_names.get(node, [])):
            if len(out_name) == 0:
                continue
            all_input_seen = True
            for out_input_name in set(gs.names_to_input_names.get(out_name, [])):
                if len(out_input_name) == 0:
                    continue
                if out_input_name not in seen_nodes:
                    all_input_seen = False
                    break
            if all_input_seen:
                if out_name in seen_nodes:
                    # Should not happen: a consumer completed before this node.
                    print("Error: node {} in seen node".format(out_name))
                    continue
                nodes.append(out_name)
    print("total num {}, seen num {}".format(len(gs.names_to_node), len(seen_nodes)))

    # Report edges from seen nodes into the never-seen (cycle-suspect) region.
    dump_node_names = set()
    for node_name in gs.names_to_node:
        if node_name in seen_nodes:
            continue
        dump_node_names.add(node_name)
    #    for out_name in gs.names_to_output_names.get(node_name, []):
    #        if out_name in seen_nodes:
    #            print("Maybe circle: {} -> {}".format(node_name, out_name))
        for in_name in gs.names_to_input_names.get(node_name, []):
            if in_name in seen_nodes:
                print("Maybe circle: {} -> {}".format(in_name, node_name))

    #output_path = r'E:\code\python\dump-analyse-sn\dump-analyse\example\ge_onnx_00612_graph_14_PreRunAfterBuild_sub_not_seen.pbtxt'
    #generate_graph(gs, dump_node_names, output_path, True)

def find_by_attr(input_path, key=None, value=None):
    """Print nodes whose attribute key[i] has the string value value[i]."""
    logging.info("Begin to read in graph from file {}".format(input_path))
    if key is None or value is None:
        logging.info("No attribute keys or values")
        return
    if len(key) != len(value):
        logging.info("the count of key and value are different")
        return
    keys_to_values = dict(zip(key, value))
    graph_def = load_graph_def_from_pb(input_path)
    for node in graph_def.node:
        for attr in node.attribute:
            expected = keys_to_values.get(attr.name)
            if expected is None:
                continue
            if attr.type == 2:  # AttributeProto INT
                if str(attr.i) == expected:
                    print(node.name, node.op_type)
            else:
                print("Does not support attr type {} yet".format(attr.type))


def get_output_node_names(gs):
    """Return names of sink nodes (no consumers); prefer 'Node_Output' if present."""
    node_names = []
    for node_name, output_names in gs.names_to_output_names.items():
        if not output_names:
            node_names.append(node_name)
        if node_name == "Node_Output":
            # A designated output node wins over all heuristic sinks.
            return ["Node_Output", ]
    return node_names


def _print_diff_nodes(nodes1, nodes2, graph1_name, graph2_name):
    """Print the node names that appear in only one of the two collections."""
    names_1_only = set(nodes1) - set(nodes2)
    if len(names_1_only) > 0:
        print("    Only in graph {}:".format(graph1_name))
        for name in names_1_only:
            print("        " + name)
    # Bug fix: this difference was computed as nodes1 - nodes2 again, so the
    # "only in graph 2" section could never list anything.
    names_2_only = set(nodes2) - set(nodes1)
    if len(names_2_only) > 0:
        print("    Only in graph {}:".format(graph2_name))
        for name in names_2_only:
            print("        " + name)


def _print_diff_input_num(node1, node2, gs1, gs2, graph1_name, graph2_name):
    """Report differing data/control input counts between two matched nodes."""
    name1 = node1.name
    name2 = node2.name
    if name1 == name2:
        node_desc = "node {}".format(name1)
    else:
        node_desc = "node {} in graph {} and {} in graph {}".format(name1, graph1_name, name2, graph2_name)

    data1 = gs1.names_to_data_input_names[name1]
    data2 = gs2.names_to_data_input_names[name2]
    if len(data1) != len(data2):
        print("The data input count are different({}/{}) for {}".format(
            len(data1), len(data2),
            node_desc
        ))
        _print_diff_nodes(data1, data2, graph1_name, graph2_name)

    ctrl1 = gs1.names_to_ctrl_input_names[name1]
    ctrl2 = gs2.names_to_ctrl_input_names[name2]
    if len(ctrl1) != len(ctrl2):
        print("The control input count are different({}/{}) for {}".format(
            len(ctrl1), len(ctrl2),
            node_desc
        ))
        _print_diff_nodes(ctrl1, ctrl2, graph1_name, graph2_name)


def _get_key_by_type_name(type_no):
    """Map an AttributeProto.AttributeType enum to its value-field name.

    Repeated types (FLOATS, INTS, STRINGS, ...) store their values in a field
    whose name is the lower-cased enum name; simple singular types use the
    first letter of the name (f/i/s/t/g), per onnx.proto.
    """
    type_name = onnx.AttributeProto.AttributeType.Name(type_no)
    if type_name.endswith('S'):
        return type_name.lower()
    # fix: multi-word singular types collapsed to a colliding first letter
    # (SPARSE_TENSOR -> 's', which is the STRING field). Map them to their
    # actual field names from onnx.proto: sparse_tensor and tp.
    special = {'SPARSE_TENSOR': 'sparse_tensor', 'TYPE_PROTO': 'tp'}
    if type_name in special:
        return special[type_name]
    return type_name.lower()[0]


def _compare_node(node1, node2, graph1_name, graph2_name, ignore_attr):
    def _print_head_once(node1, node2, graph1, graph2):
        if not getattr(_print_head_once, "printed", False):
            if node1 == node2:
                print("Node name {} in graph {} and graph {}".format(node1, graph1, graph2))
                return
            print("Node name {} in graph {} and node name {} in graph {}".format(node1, graph1, node2, graph2))
            _print_head_once.printed = True

    if node1.op_type != node2.op_type:
        print("The node type are different {}/{} for node {} in graph {} and node {} in graph {}".format(
            node1.op_type, node2.op_type,
            node1.name, graph1_name,
            node2.name, graph2_name
        ))
        return

    # compare attrs
    keys_to_attr1 = dict([(attr.name, attr) for attr in node1.attribute])
    keys_to_attr2 = dict([(attr.name, attr) for attr in node2.attribute])
    keys = list((set(keys_to_attr1.keys()) | set(keys_to_attr2.keys())) - ignore_attr)
    keys.sort()
    for key in keys:
        attr1 = keys_to_attr1.get(key, None)
        attr2 = keys_to_attr2.get(key, None)
        if attr1 is None:
            _print_head_once(node1.name, node2.name, graph1_name, graph2_name)
            print("    The attr {} does not exists on graph {}".format(key, graph1_name))
            continue
        if attr2 is None:
            _print_head_once(node1.name, node2.name, graph1_name, graph2_name)
            print("    The attr {} does not exists on graph {}".format(key, graph2_name))
            continue
        value1 = getattr(attr1, _get_key_by_type_name(attr1.type))
        value2 = getattr(attr2, _get_key_by_type_name(attr2.type))
        if value1 != value2:
            _print_head_once(node1.name, node2.name, graph1_name, graph2_name)
            print("    The value of attr {} are different {}/{}".format(key, value1, value2))


def compare(graph1, graph2, ignore_node=None, ignore_attr=None):
    """Compare two pbtxt graphs node by node, walking back from the outputs.

    graph1/graph2: paths of the two pbtxt files to compare.
    ignore_node: iterable of node names to skip (matched in either graph).
    ignore_attr: iterable of attribute names to skip when comparing nodes.
    Differences are printed to stdout; nothing is returned.
    """
    logging.info("Begin to read in graph from file {}".format(graph1))
    ignore_attr = set() if ignore_attr is None else set(ignore_attr)
    ignore_node = set() if ignore_node is None else set(ignore_node)
    g1 = load_graph_def_from_pb(graph1)
    gs1 = build_graph_summary(g1)
    logging.info("Begin to read in graph from file {}".format(graph2))
    g2 = load_graph_def_from_pb(graph2)
    gs2 = build_graph_summary(g2)

    # get nodes without output; the names of output nodes are expected to
    # match between the two graphs
    output_names1 = get_output_node_names(gs1)
    output_names2 = get_output_node_names(gs2)
    output_names1.sort()
    output_names2.sort()
    if output_names1 != output_names2:
        print("The output nodes of the two graph are different.")
        _print_diff_nodes(output_names1, output_names2, graph1, graph2)
        return
    if len(output_names1) == 0:
        print("Can not find output nodes from the graph, do not support the scene now")
        return
    node_names = list(zip(output_names1, output_names2))
    compared_node = set()

    # iterate the graph back to front, compare all node's attrs
    while len(node_names) > 0:
        name1, name2 = node_names.pop(0)
        if name1 in compared_node:
            continue
        compared_node.add(name1)
        if name1 in ignore_node or name2 in ignore_node:
            # fix: the format string had one placeholder but two arguments,
            # so name2 was silently dropped
            print("Skip to compare node {}/{}".format(name1, name2))
            continue
        node1 = gs1.names_to_node.get(name1, None)
        node2 = gs2.names_to_node.get(name2, None)
        if node1 is None or node2 is None:
            continue
        _compare_node(node1, node2, graph1, graph2, ignore_attr)
        if len(gs1.names_to_input_names[name1]) != len(gs2.names_to_input_names[name2]):
            _print_diff_input_num(node1, node2, gs1, gs2, graph1, graph2)

        # Add nodes for the next iteration; only pairable (equal-length)
        # input lists can be zipped together for further comparison
        if len(gs1.names_to_data_input_names[name1]) == len(gs2.names_to_data_input_names[name2]):
            node_names += list(zip(gs1.names_to_data_input_names[name1], gs2.names_to_data_input_names[name2]))
        else:
            # TODO: find the input node that has the same name/type
            pass

        if len(gs1.names_to_ctrl_input_names[name1]) == len(gs2.names_to_ctrl_input_names[name2]):
            node_names += list(zip(gs1.names_to_ctrl_input_names[name1], gs2.names_to_ctrl_input_names[name2]))
        else:
            # TODO: find the input node that has the same name/type
            pass


def strip(input_path, output_path=None):
    """Copy a pbtxt file, dropping oversized string-constant lines.

    Lines that look like a string attribute value (prefix `      s:`) and
    exceed 10240 characters are removed so huge weight blobs do not prevent
    the file from being opened.

    input_path: source pbtxt path.
    output_path: destination path; defaults to input with a "_sub" suffix.
    """
    if not output_path:
        output_path = append_file_name_suffix(input_path, "sub")
    # fix: the message read "Begin to in file ..." (missing the verb)
    logging.info("Begin to strip file {} and output to {}".format(input_path, output_path))
    line_count = drop_count = 0
    c_count = drop_c_count = 0
    with open(output_path, "w") as fw, open(input_path, "r") as fr:
        for line in fr:
            line_count += 1
            c_count += len(line)
            if line.startswith("      s:") and len(line) > 10240:
                drop_count += 1
                drop_c_count += len(line)
                continue
            fw.write(line)
    logging.info("Dropped lines count {}, char size {}; total lines count {} char size {}".
                 format(drop_count, drop_c_count, line_count, c_count))



def find_common_tree(input_path, name=None, layer_number=10, only_forward=None, only_backward=None):
    """Find the nodes shared by the expansion trees of the given node names.

    For every requested name, the full reachable tree is expanded (no layer
    or leaf limits in practice), the pairwise intersections are collected,
    and the result is dumped as a "_common_tree" graph next to the input.
    """
    graph_def = load_graph_def_from_pb(input_path)
    gs = build_graph_summary(graph_def)
    trees_by_name = {}
    for node_name in set(name or []):
        trees_by_name[node_name] = lookup_names_method1(
            gs, [node_name, ],
            stop_names=set(), stop_types=set(), stop_leaves_count=1000000,
            layer_number=100000000, only_forward=only_forward, only_backward=only_backward)
    logging.info(f"Found {len(trees_by_name)} trees, {trees_by_name}")

    common = set()
    trees = list(trees_by_name.values())
    for idx, tree in enumerate(trees):
        for other in trees[idx + 1:]:
            common.update(tree & other)

    output_path = append_file_name_suffix(input_path, "common_tree")
    generate_graph(gs, common, output_path, True)

def is_float(s):
    """Return True when *s* parses as a float, False on ValueError."""
    try:
        float(s)
    except ValueError:
        return False
    return True
    
def mark_by_csv(graph_path, csv_path, name_col, select_col, output_path=None, map_file_path=None, print_unmatched=None):
    """Attach csv column values as extra attributes on matching graph nodes.

    graph_path: input pbtxt graph.
    csv_path: csv file with one row per node.
    name_col: csv column holding the node name.
    select_col: list of csv columns; each value is added to the matching
        node as an attribute named "marked-<col>".
    output_path: output pbtxt path; defaults to input with a "_marked" suffix.
    map_file_path: optional mapping file, one "pb_name: csv_name" per line.
    print_unmatched: which unmatched side to print: 'all', 'pb', 'csv', 'none'.
    Raises ValueError when name_col/select_col are missing or absent from the csv.
    """
    print_unmatched = print_unmatched or 'none'
    if name_col is None:
        raise ValueError("name_col can not be None")
    if select_col is None:
        raise ValueError("select_col can not be None")

    # read in pb
    graph_def = load_graph_def_from_pb(graph_path)
    gs = build_graph_summary(graph_def)

    # read in the optional csv-name -> pb-name mapping
    csv_to_pb = {}
    if map_file_path is not None:
        with open(map_file_path, 'r') as f:
            for line in f:
                line = line.strip()
                if len(line) == 0:
                    continue
                pb_key, csv_key = line.split(':')
                csv_to_pb[csv_key.strip()] = pb_key.strip()

    # read in csv rows keyed by the (mapped) node name; every selected value
    # starts out typed as STRING and may be promoted to FLOAT below
    with open(csv_path, 'r') as f:
        reader = csv.DictReader(f)
        if name_col not in reader.fieldnames:
            raise ValueError(f"Can not find name_col {name_col} in csv file")
        for one_select_col in select_col:
            if one_select_col not in reader.fieldnames:
                raise ValueError(f"Can not find select_col {one_select_col} in csv file")

        name_to_select = {}
        for row in reader:
            key = row[name_col]
            key = csv_to_pb.get(key, key)
            if key in name_to_select:
                logging.warning(f"Duplicate key {key} in csv file, previous value will be replaced")
            name_to_select[key] = [[f"marked-{one_select_col}", row[one_select_col], onnx.AttributeProto.AttributeType.STRING]
                                   for one_select_col in select_col]

    # decide one type per column: FLOAT only when every non-empty value
    # parses as a float; any non-numeric value pins the column to STRING
    col_selects_types = {}
    for selects in name_to_select.values():
        for col_name, value, _ in selects:
            if len(value) == 0:
                continue
            if is_float(value):
                # never overwrite a STRING verdict from another row
                if col_name not in col_selects_types:
                    col_selects_types[col_name] = onnx.AttributeProto.AttributeType.FLOAT
            else:
                col_selects_types[col_name] = onnx.AttributeProto.AttributeType.STRING

    # convert the values of FLOAT columns; empty cells become 0.0
    for col_name, dtype in col_selects_types.items():
        if dtype != onnx.AttributeProto.AttributeType.FLOAT:
            continue
        for selects in name_to_select.values():
            for select in selects:
                if select[0] == col_name:
                    select[2] = dtype
                    select[1] = float(select[1]) if len(select[1]) > 0 else 0.0

    # mark matching nodes; consume entries so the leftovers are the
    # csv rows with no corresponding pb node
    matched_num = 0
    unmatched_num_in_pb = 0
    for name, node in gs.names_to_node.items():
        if name in name_to_select:
            matched_num += 1
            for col_name, select_value, select_type in name_to_select.pop(name):
                attr = onnx.AttributeProto(name=col_name, type=select_type)
                if select_type == onnx.AttributeProto.AttributeType.FLOAT:
                    attr.f = select_value
                else:
                    attr.s = select_value.encode('utf-8')
                node.attribute.append(attr)
        else:
            unmatched_num_in_pb += 1
            if print_unmatched in ('all', 'pb'):
                print(f"Can not find node {node.op_type}({name}) in csv file which in pb")

    # fix: the counter was bumped by exactly 1, and only when csv printing
    # was requested; count every leftover csv row unconditionally
    unmatched_num_in_csv = len(name_to_select)
    if print_unmatched in ('all', 'csv'):
        for name in name_to_select:
            print(f"Can not find node {name} in pb file which exists in csv")

    logging.info(f"Marked {matched_num} nodes, unmatched num in pb {unmatched_num_in_pb}, unmatched num in csv {unmatched_num_in_csv}")

    if output_path is None:
        output_path = append_file_name_suffix(graph_path, 'marked')
    generate_graph(gs, gs.names_to_node.keys(), output_path, True)

def sum_on_file(input_path):
    """Load a pbtxt graph and return the summed duration of all its nodes."""
    summary = build_graph_summary(load_graph_def_from_pb(input_path))
    return sum_duration(summary.names_to_node.values())

def print_summary(input_path, sort=False):
    """Print per-file task durations for a pbtxt file or a directory tree.

    input_path: a single .pbtxt file, or a directory searched recursively.
    sort: in directory mode, print files ordered by descending duration.
    """
    if os.path.isfile(input_path):
        total = sum_on_file(input_path)  # renamed from `sum`: don't shadow the builtin
        print(f"{input_path}: {total} us")

    if os.path.isdir(input_path):
        all_sum = []
        for root, _dirs, files in os.walk(input_path):
            for file_name in files:
                if not file_name.endswith(".pbtxt"):
                    continue
                file_path = os.path.join(root, file_name)
                # fix: the duration was computed twice per file in sort mode
                value = sum_on_file(file_path)
                if sort:
                    all_sum.append((file_path, value))
                else:
                    print(f"{file_path}: {value} us")
        if sort:
            for file_path, value in sorted(all_sum, key=lambda x: x[1], reverse=True):
                print(f"{file_path}: {value} us")

class main(object):
    @staticmethod
    def print_graph_stat(input_path):
        """Load a graph pbtxt and log how many nodes exist per op type."""
        logging.info("load from %s", input_path)
        graph_def = load_graph_def_from_pb(input_path)

        op_stat = Counter(node.op_type for node in graph_def.node)

        logging.info("graph stat:")
        for op, count in sorted(op_stat.items(), key=lambda x: x[0]):
            logging.info("\t%s = %s", op, count)


if __name__ == "__main__":
    # Command-line entry point: one subcommand per tool operation, each
    # bound to its implementation function via set_defaults(func=...).
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    # stat: print per-op-type node counts
    subparser = subparsers.add_parser("stat", help="print stat")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.set_defaults(func=main.print_graph_stat)

    # strip: drop oversized weight lines from a pbtxt
    subparser = subparsers.add_parser("strip", help="strip the weight data which have a huge size. This could be useful if the pbtxt has a huge size that can not even be loaded by the yad")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--output", dest="output_path", help="output pb path")
    subparser.set_defaults(func=strip)

    # find: locate nodes by type (handler defined elsewhere in this file)
    subparser = subparsers.add_parser("find", help="find")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--node_type", nargs='+', help="指要查找node的类型")
    subparser.set_defaults(func=find)

    # find by attr: locate nodes by attribute key/value pairs
    subparser = subparsers.add_parser("find-by-attr", help="find by attr")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--key", default=None, action="append",
                           help="attribute key")
    subparser.add_argument("--value", default=None, action="append",
                           help="attribute key")
    subparser.set_defaults(func=find_by_attr)

    # detect circle: find cyclic dependencies in the graph
    subparser = subparsers.add_parser("circle-detect", help="detect circle dependencies on the graph")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.set_defaults(func=detect_circle)

    # compare: diff two graphs node by node
    subparser = subparsers.add_parser("compare", help="Compare two graphs")
    subparser.add_argument("graph1", help="The first graph pbtxt path")
    subparser.add_argument("graph2", help="The second graph pbtxt path")
    subparser.add_argument("--ignore_node", help="Skip to compare nodes by name in the first graph", default=None, action="append")
    subparser.add_argument("--ignore_attr", help="Skip to compare attributes by name", default=None, action="append")
    subparser.set_defaults(func=compare)

    # extract: dump a sub-graph, either by expanding outward from chosen
    # nodes or by walking between start/end node pairs
    subparser = subparsers.add_parser("extract", help="""抽取子图，子图有两种抽取方式，分别为：中心扩散，起始-终止。
    中心扩散方式为，指定一个到多个结点作为扩散中心，向上或向下扩散多层结点后做dump。
    起始-终止方式为，指定一组到多组起始结点和终止结点，dump出起始到终止结点中间的所有结点。""")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--output", dest="output_path", help="output pb path")
    subparser.add_argument("--start_node", help="dest nodes", default=None, action="append")
    subparser.add_argument("--end_node", help="source nodes", default=None, action="append")
    subparser.add_argument("--name", default=None, action="append",
                           help="指定dump的结点名，此选项可以重复使用来指定多个结点")
    subparser.add_argument("--single_name", default=None, action="append",
                           help="指定dump的结点名，此选项可以重复使用来指定多个结点，与 name 不同，single_name指定的节点不做任何扩散")
    subparser.add_argument("--layer_number", type=int, default=1,
                           help="front and back layers")
    subparser.add_argument("--only_forward", action='store_true',
                           help="only dump nodes forward")
    subparser.add_argument("--only_backward", action='store_true',
                           help="only dump nodes backward")
    subparser.add_argument("--name_prefix", default=None, action="append",
                           help="指定dump的结点名前缀，此选项可以重复使用来指定多个结点")
    subparser.add_argument("--without_leaves", action='store_true',
                           help="Without leaves when generate the result graph")
    subparser.add_argument("--stop_name", default=None, action="append",
                           help="Specify a node name which stop the extract iteration")
    subparser.add_argument("--stop_type", default=None, action="append",
                           help="Specify a node type which stop the extract iteration")
    subparser.add_argument("--stop_leaves_count", type=int, default=20,
                           help="Specify a node type which stop the extract iteration")
    subparser.set_defaults(func=extract_sub_graph)

    # find-fuse: search for auto-fusion opportunities with optional filters
    subparser = subparsers.add_parser("find-fuse", help="""查找自动融合(autofuse)机会，并将结果保存在 input 的相同路径。
    并可以基于结点名、类型过滤等条件过滤结果，如果不指定任何过滤条件，则将所有融合机会保存下来。""")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--name", default=None, action="append",
                           help="基于结点过滤融合机会，此选项可以重复使用来指定多个结点")
    subparser.add_argument("--type", default=None, action="append",
                           help="基于结点类型过滤融合机会，此选项可以重复使用来指定多种结点类型")
    subparser.add_argument("--stop_forward", default=None, action="append",
                           help="遇到该节点时，停止向前（输出方向）融合")
    subparser.add_argument("--stop_concat_forward", action='store_true',
                           help="遇到Concat，停止向前（输出方向）融合，但是如果是同一个concat dim的Concat级联，是允许的")
    subparser.add_argument("--without_leaves", action='store_true',
                           help="Without leaves when generate the result graph")
    subparser.add_argument("--ignore_unknown", action='store_true',
                           help="遇到不识别的结点类型时，是否忽略，默认为不忽略，报错退出")
    subparser.set_defaults(func=find_fuse)

    # find-common-tree: intersect the expansion trees of several nodes
    subparser = subparsers.add_parser("find-common-tree", help="查找两个节点的公共子树")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path")
    subparser.add_argument("--name", default=None, action="append", help="节点名")
    subparser.add_argument("--layer_number", type=int, default=1000000, help="front and back layers")
    subparser.add_argument("--only_forward", action='store_true', help="仅向前（输出方向）查找")
    subparser.add_argument("--only_backward", action='store_true', help="仅向后（输入方向）查找")
    subparser.set_defaults(func=find_common_tree)

    # mark-by-csv: annotate graph nodes with values from a csv file
    subparser = subparsers.add_parser("mark-by-csv", help="根据csv的信息，为pbtxt打额外的标记")
    subparser.add_argument("--graph", dest="graph_path", required=True, help="input pb path")
    subparser.add_argument("--output", dest="output_path", help="output pb path")
    subparser.add_argument("--csv", dest="csv_path", required=True, help="input csv path")
    subparser.add_argument("--name_col", dest="name_col", required=True, help="which column to find node name")
    subparser.add_argument("--select_col", default=None, action="append", help="该列的值被标记到图上对应节点上，可以重复使用")
    subparser.add_argument("--map_file", dest="map_file_path", help="pb与cvs的映射文件")
    subparser.add_argument("--print_unmatched", choices=['all', 'pb', 'csv', 'none'], default='none', help="打印为匹配到的节点和csv行")
    subparser.set_defaults(func=mark_by_csv)

    # print: sum task durations for a file or directory
    subparser = subparsers.add_parser("print", help="Print Task Duration")
    subparser.add_argument("--input", dest="input_path", required=True, help="input pb path or dir path")
    subparser.add_argument("--sort", action='store_true', help="排序后输出")

    subparser.set_defaults(func=print_summary)

    # NOTE(review): "<= 2" also shows help for a bare subcommand with one
    # extra token; presumably every subcommand needs at least two argv
    # entries to be useful — confirm before changing
    if len(sys.argv) <= 2:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    # Pull out the handler chosen by set_defaults, then pass the remaining
    # parsed options to it as keyword arguments.
    func = args.func
    del args.func

    # Drop falsy values (None, False, 0, "") so handlers only receive
    # explicitly-set options and rely on their own defaults otherwise.
    # NOTE(review): this also drops legitimate falsy inputs such as
    # --layer_number 0 — verify no subcommand needs a falsy value.
    args = dict(filter(lambda x: x[1], vars(args).items()))
    func(**args)
