# -*- coding: UTF-8 -*-
import logging
import argparse
import os
import matplotlib.pyplot as plt
import pandas as pd
import multiprocessing

# @todo draw with plotly
# Number of worker processes used by the multiprocessing pool
PROCESS_NUM = os.cpu_count()
INIT_ADDR_VALUE = 0.055
logger = logging.getLogger()
COMMUNICATION_NODES = ["AllReduce", "AllGather", "Broadcast", "ReduceScatter", "Send", "Receive", "StreamRecv",
                       "StreamSend"]


def SetLogger(logger):
    """Configure *logger* to emit INFO-level records to both a log file and
    the console.

    The file ./MemoryAnalyze.log is truncated on every run (mode='w').
    Both handlers share the same timestamp/filename/line/level format.
    """
    logger.setLevel(logging.INFO)  # master switch for the logger
    formatter = logging.Formatter(
        "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    file_handler = logging.FileHandler("./MemoryAnalyze.log", mode='w')
    console_handler = logging.StreamHandler()
    # File handler is registered first, then the console handler.
    for handler in (file_handler, console_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)


class Tensor:
    """One tensor of the graph, with its SOMAS-assigned address range,
    the runtime task-reported address range, and lifetime bookkeeping.

    Fix over the original: the legacy class-level attribute "declarations"
    (type objects such as ``id = int`` and shared mutable ``[]`` defaults)
    were removed; every attribute is now initialized per instance.  In
    particular ``lifelong``/``life_start``/``life_end`` previously fell back
    to class attributes (``lifelong`` was the *type* ``bool``, which is
    truthy) when a parser did not set them; they now default to
    ``False``/0/0.
    """

    def __init__(self, id):
        """Create an empty tensor record for *id*.

        Addresses start at the sentinel INIT_ADDR_VALUE until a parser
        fills them in; ids/indices default to -1 meaning "unknown".
        """
        self.id = id
        self.size = 0
        self.align_size = 0
        self.offset = 0
        self.start_addr = INIT_ADDR_VALUE
        self.end_addr = INIT_ADDR_VALUE
        self.tensor_type = ""
        self.source_node_name = ""
        self.source_node_out_index = -1
        self.source_node_id = -1
        self.task_size = 0
        self.task_start_addr = INIT_ADDR_VALUE
        self.task_end_addr = INIT_ADDR_VALUE
        # NOTE: attribute name "destionations" is misspelled but referenced
        # elsewhere in this file (dataframe export) — kept for compatibility.
        self.destionations = []
        self.source_stream_id = -1
        self.dst_stream_ids = []
        self.start_tick = -1
        self.end_tick = -1
        # Lifetime defaults (previously only provided via class attributes).
        self.lifelong = False
        self.life_start = 0
        self.life_end = 0

    def info(self):
        """Log a one-line summary of this tensor (addresses in hex)."""
        logger.info(
            "Tensor id: {} Start Address: {} Size: {} End Address: {}, source node:{} node id: {}".format(
                self.id, hex(int(self.start_addr)),
                self.size, hex(int(self.end_addr)), self.source_node_name, self.source_node_id))


class Parameter:
    """A graph input parameter (or value node) and its static address range.

    Fix over the original: the class-level attribute placeholders (type
    objects like ``id = int`` and the shared mutable ``destionations = []``)
    were removed — ``__init__`` assigns every attribute per instance, so the
    class attributes were dead weight and a latent shared-state hazard.
    """

    def __init__(self, id):
        """Create an empty parameter record for *id*; addresses start at the
        sentinel INIT_ADDR_VALUE until a parser fills them in."""
        self.id = id
        self.size = 0
        self.start_addr = INIT_ADDR_VALUE
        self.end_addr = INIT_ADDR_VALUE
        self.source_node_name = ""
        self.source_node_out_index = 0
        # NOTE: misspelled attribute name kept for compatibility with the
        # dataframe export code elsewhere in this file.
        self.destionations = []

    def info(self):
        """Log a one-line summary of this parameter (addresses in hex)."""
        logger.info(
            "Parameter id: {} Start Address: {} Size: {} End Address: {}, source node:{} node out index: {}".format(
                self.id, hex(int(self.start_addr)),
                self.size, hex(int(self.end_addr)), self.source_node_name, self.source_node_out_index))


class Node:
    """A graph node (operator) with its tensor/parameter/stream bookkeeping.

    Fixes over the original:
    - ``add_tensor`` stored ids as ``"<id>T"`` but checked membership with
      the raw id, so the duplicate check never matched and duplicates were
      appended; the check now uses the stored key.
    - The dead class-level attribute placeholders were removed — every
      attribute is assigned per instance in ``__init__``.
    """

    def __init__(self, id, op_id, name):
        """Create a node with graph index *id*, operator id *op_id* and
        display *name*; stream/tick fields default to -1 meaning unknown."""
        self.id = id
        self.op_id = op_id
        self.name = name
        self.tensors = []
        self.parameters = []
        self.inputs = []
        self.outputs = []
        self.workspaces = []
        self.cur_total_size = 0
        self.cur_alive_tensor_size = 0
        self.stream = -1
        self.stream_group_id = -1
        self.up_stream = []
        self.down_stream = []
        self.start_tick = -1
        self.end_tick = -1
        self.tick_memory = 0
        self.is_communication_node = False

    def add_tensor(self, tensor_id):
        """Record that this node uses tensor *tensor_id* (stored as
        '<id>T'), skipping duplicates."""
        key = str(tensor_id) + "T"
        if key not in self.tensors:
            self.tensors.append(key)

    def add_parameter(self, parameter_id):
        """Record that this node uses parameter *parameter_id*, once."""
        if parameter_id not in self.parameters:
            self.parameters.append(parameter_id)

    def add_inputs(self, id):
        """Append an input id (tensor or parameter key) in call order."""
        self.inputs.append(id)

    def add_outputs(self, tensor_id):
        """Append an output tensor id in call order."""
        self.outputs.append(tensor_id)

    def add_workspaces(self, tensor_id):
        """Append a workspace tensor id in call order."""
        self.workspaces.append(tensor_id)


class Graph:
    """Container for a parsed computation graph (nodes, tensors, parameters)
    plus the pandas DataFrame views that back the CSV exports, statistics
    and plots.

    NOTE(review): the class-level attributes below are legacy declarations;
    several are bound to *type objects* (e.g. ``min_tensor_addr = float``)
    rather than values.  ``__init__`` re-assigns all of them per instance
    except ``task_addr_avaliable`` (sic), which keeps the class default
    ``False`` until a task-info dump has been parsed.
    """
    nodes = {}
    tensors = {}
    parameters = {}
    nodes_num = 0
    tensors_num = 0
    parameters_num = 0
    upper_bound = 0
    lower_bound = 0
    lifelong = 0
    parameter_total_size = 0
    nodes_dataframe = None
    parameters_dataframe = None
    tensors_dataframe = None
    min_tensor_addr = float
    max_tensor_addr = float
    min_param_addr = float
    max_param_addr = float
    task_addr_avaliable = False

    def __init__(self):
        """Initialize empty containers; dataframes stay None until the
        trans_*_to_dataframes methods are called."""
        self.nodes = {}
        self.tensors = {}
        self.parameters = {}
        self.stream_groups = {}  # key: stream_group_id, value: streams
        # NOTE(review): attribute name has a typo ("steam"); kept as-is for
        # compatibility with any external users.
        self.steam_map_to_group_id = {}  # key: stream, value: stream_group_id
        self.main_stream = 0
        self.stream_nodes = {}  # key: stream, value: nodes
        self.stream_group_nodes = {}  # key: stream_group_id, value: nodes
        self.nodes_num = 0
        self.tensors_num = 0
        self.parameters_num = 0
        self.upper_bound = 0
        self.lower_bound = 0
        self.lifelong = 0
        self.parameter_total_size = 0
        self.nodes_dataframe = None
        self.parameters_dataframe = None
        self.tensors_dataframe = None
        self.min_tensor_addr = INIT_ADDR_VALUE
        self.max_tensor_addr = INIT_ADDR_VALUE
        self.min_param_addr = INIT_ADDR_VALUE
        self.max_param_addr = INIT_ADDR_VALUE

    def add_node(self, node):
        """Register *node* keyed by node.id; logs (but still overwrites) on
        duplicate ids."""
        key = node.id
        if key in self.nodes.keys():
            logger.error("Already Exist in node dict" + str(key))
        self.nodes[key] = node

    def add_tensor(self, tensor):
        """Register *tensor* keyed by tensor.id; logs (but still overwrites)
        on duplicate ids."""
        key = tensor.id
        if key in self.tensors.keys():
            logger.error("Already Exist in tensor dict" + str(key))
        self.tensors[key] = tensor

    def add_parameter(self, parameter):
        """Register *parameter* keyed by parameter.id; logs (but still
        overwrites) on duplicate ids."""
        key = parameter.id
        if key in self.parameters.keys():
            logger.error("Already Exist in parameter dict" + str(key))
        self.parameters[key] = parameter

    def trans_nodes_to_dataframes(self):
        """Flatten self.nodes into a DataFrame indexed by node name and
        export it to graph_nodes.csv.

        "InUseMemory"/"TotalMemory" columns include the static parameter
        size on top of the tensor sizes.
        """
        nodes_list = []
        index_name = []
        param_size = self.parameter_total_size
        for key, node in self.nodes.items():
            nodes_list.append(
                [key, node.op_id, node.stream_group_id, node.stream, node.tensors, node.parameters, node.inputs,
                 node.outputs,
                 node.workspaces,
                 node.cur_alive_tensor_size, node.cur_total_size, node.cur_alive_tensor_size + param_size,
                 node.cur_total_size + param_size, node.up_stream, node.down_stream, node.start_tick, node.end_tick,
                 node.tick_memory])
            index_name.append(node.name)
        name = ["Index", "OpId", "Stream_Group", "Stream", "Tensors", "Parameters", "Inputs", "Outputs", "Workspaces",
                "AliveTensorsSize",
                "TotalTensorSize", "InUseMemory", "TotalMemory", "UpStream", "DownStream", "StartTick", "EndTick",
                "TickMemory"]
        self.nodes_dataframe = pd.DataFrame(columns=name, index=index_name, data=nodes_list)
        self.nodes_dataframe.to_csv("graph_nodes.csv")

    def trans_parameters_to_dataframes(self):
        """Flatten self.parameters into a DataFrame indexed by parameter id
        and export it to graph_parameters.csv (addresses both raw and hex)."""
        parameters_list = []
        index_name = []
        for key, param in self.parameters.items():
            parameters_list.append(
                [param.source_node_name, param.source_node_out_index, param.start_addr, hex(int(param.start_addr)),
                 param.size, param.end_addr, hex(int(param.end_addr)), param.destionations])
            index_name.append(key)
        name = ["SourceNodeName", "OutputIndex", "StartAddr", "StartAddrHex", "Size", "EndAddr", "EndAddrHex",
                "Destinations"]
        self.parameters_dataframe = pd.DataFrame(columns=name, index=index_name, data=parameters_list)
        self.parameters_dataframe.to_csv("graph_parameters.csv")

    def trans_tensors_to_dataframes(self):
        """Flatten self.tensors into a DataFrame indexed by tensor id and
        export it to graph_tensors.csv (SOMAS and task addresses, raw+hex)."""
        tensors_list = []
        index_name = []
        for key, tensor in self.tensors.items():
            tensors_list.append(
                [tensor.source_node_id, tensor.source_node_name, tensor.source_node_out_index, tensor.source_stream_id,
                 tensor.start_addr, hex(int(tensor.start_addr)), tensor.size, tensor.align_size, tensor.end_addr,
                 hex(int(tensor.end_addr)), tensor.life_start, tensor.life_end, tensor.lifelong, tensor.tensor_type,
                 tensor.task_start_addr, hex(int(tensor.task_start_addr)), tensor.task_size, tensor.task_end_addr,
                 hex(int(tensor.task_end_addr)), list(set(tensor.dst_stream_ids)), tensor.destionations,
                 tensor.start_tick, tensor.end_tick]
            )
            index_name.append(key)
        name = ["SourceNodeId", "SourceNodeName", "OutputIndex", "SourceStream", "StartAddr", "StartAddrHex", "Size",
                "AlignSize",
                "EndAddr", "EndAddrHex", "LifeStart", "LifeEnd", "LifeLong", "TensorType", "TaskStartAddr",
                "TaskStartAddrHex", "TaskSize", "TaskEndAddr", "TaskEndAddrHex", "DestinationStreams", "Destinations",
                "StartTick", "EndTick"]
        self.tensors_dataframe = pd.DataFrame(columns=name, index=index_name, data=tensors_list)
        self.tensors_dataframe.to_csv("graph_tensors.csv")

    def trans_to_dataframes(self):
        """Build all three DataFrame views (parameters, tensors, nodes)."""
        self.trans_parameters_to_dataframes()
        self.trans_tensors_to_dataframes()
        self.trans_nodes_to_dataframes()

    def gen_statistic_info(self):
        """Compute overview statistics, write them to StatisticInfo/overview.txt,
        export peak-memory frames and save memory-usage plots.

        Requires trans_to_dataframes() to have been called first.
        """
        folder_name = "StatisticInfo"
        # NOTE(review): os.makedirs(folder_name, exist_ok=True) would be
        # portable and quieter; kept as-is.
        os.system("mkdir " + folder_name)
        giga = 1024 * 1024 * 1024
        # DataFrame.min()/max() reduce column-wise; we then pick one column.
        self.min_tensor_addr = self.tensors_dataframe.min().StartAddr
        self.max_tensor_addr = self.tensors_dataframe.max().EndAddr
        tensor_total_size = (self.max_tensor_addr - self.min_tensor_addr) / giga
        self.min_param_addr = self.parameters_dataframe.min().StartAddr
        self.max_param_addr = self.parameters_dataframe.max().EndAddr
        param_total_size = (self.max_param_addr - self.min_param_addr) / giga
        min_task_addr = self.tensors_dataframe.min().TaskStartAddr
        max_task_addr = self.tensors_dataframe.max().TaskEndAddr
        min_avaliable_addr = min(self.min_tensor_addr, self.min_param_addr)
        # Assumes a 32 GB device memory window — TODO confirm for the target
        # hardware.
        max_avaliable_addr = min_avaliable_addr + 32 * giga

        if (self.max_tensor_addr > max_avaliable_addr or self.max_param_addr > max_avaliable_addr):
            logger.error("Invalid memory addr: max tensor addr {} max parameter addr {} max avaliable addr {}".format(
                hex(int(self.max_tensor_addr)), hex(int(self.max_param_addr)), int(max_avaliable_addr)))
        logger.info("==========================Statistic Info===========================")
        stat_info = []
        stat_info.append("MindSpore VM HBM Address Range {} - {}".format(
            hex(int(min_avaliable_addr)), hex(int(max_avaliable_addr))))
        stat_info.append("Task Tensor Address: Min {}, Max  {}".format(
            hex(int(min_task_addr)), hex(int(max_task_addr))))
        stat_info.append("SOMAS Tensor Address: Min {}，Max {}, Total Size {:.4}GB".format(
            hex(int(self.min_tensor_addr)), hex(int(self.max_tensor_addr)),
            tensor_total_size))
        stat_info.append("Parameter & ValueNode : Min {}, Max {}, Total Size {:.4}GB".format(
            hex(int(self.min_param_addr)), hex(int(self.max_param_addr)), param_total_size))
        stat_info.append("Graph Total Input Static Memory Size {:.4}GB ({})".format(self.parameter_total_size / giga,
                                                                                    self.parameter_total_size))
        # Max In Use Size
        max_inuse_node = self.nodes_dataframe['InUseMemory'].idxmax()
        max_inuse_df = self.nodes_dataframe.loc[max_inuse_node, :]
        self.get_node_memory_frame(max_inuse_node, prefix=folder_name + os.sep + 'MaxAliveMemory_')
        stat_info.append(
            "Max In Use Memory Node {}, index {}, Alive Tensor Size {:.4}GB ({}), In Use Memory size: {:.4}GB ({})".format(
                max_inuse_node,
                max_inuse_df.Index,
                max_inuse_df.AliveTensorsSize / giga,
                max_inuse_df.AliveTensorsSize,
                max_inuse_df.InUseMemory / giga,
                max_inuse_df.InUseMemory))
        # Max Total Size
        stat_info.append("Max Total Use Memory Size {:.4}GB ({})".format(self.nodes_dataframe.max().TotalMemory / giga,
                                                                         self.nodes_dataframe.max().TotalMemory))
        stat_info.append(
            "Max Total Tensor Memory Size {:.4}GB ({})".format(self.nodes_dataframe.max().TotalTensorSize / giga,
                                                               self.nodes_dataframe.max().TotalTensorSize))
        # Max address without lifelong tensor
        condition = self.tensors_dataframe['LifeLong'] == False
        max_addr_index = self.tensors_dataframe[condition]['EndAddr'].idxmax()
        max_addr_df = self.tensors_dataframe.loc[max_addr_index, :]
        stat_info.append(
            "Max Addr without LifeLong Tensor: Node {}, Node Id {}, Output Index {}, End Address {}".format(
                max_addr_df['SourceNodeName'], max_addr_df['SourceNodeId'], max_addr_df['OutputIndex'],
                max_addr_df['EndAddrHex']))
        self.get_node_memory_frame(max_addr_df['SourceNodeName'], prefix=folder_name + os.sep + 'MaxAddress_')
        # Reuse Ratio
        stat_info.append("Memory Reuse Ratio {:.2%}".format(
            1 - self.nodes_dataframe.max().AliveTensorsSize / self.nodes_dataframe.max().TotalTensorSize))

        # Plot in-use memory per node (titles keep the original spelling).
        alive_df = self.nodes_dataframe[['InUseMemory', 'AliveTensorsSize']]
        alive_df.index = self.nodes_dataframe['Index'].values
        alive_df.plot()
        plt.grid(color="k", linestyle=":")
        plt.title("In Use Memeory")
        plt.xlabel("Node Index")
        plt.ylabel('Memory Size')
        # arrowprops = dict(
        #     arrowstyle="->",
        #     connectionstyle="angle,angleA=0,angleB=90,rad=10")
        # offset = max_inuse_df.InUseMemory / 4
        # plt.annotate(max_inuse_node + " " + str(max_inuse_df.InUseMemory), xy=(max_inuse_df.Index, max_inuse_df.InUseMemory), xytext=(max_inuse_df.Index, max_inuse_df.InUseMemory+offset), arrowprops=arrowprops)
        plt.savefig(folder_name + os.sep + "InUseMemory.png", dpi=500)

        # Plot total (non-reused) memory per node.
        total_df = self.nodes_dataframe[['TotalMemory', 'TotalTensorSize']]
        total_df.index = self.nodes_dataframe['Index'].values
        total_df.plot()
        plt.grid(color="k", linestyle=":")
        plt.title("Total Memeory")
        plt.xlabel("Node Index")
        plt.ylabel('Memory Size')
        plt.savefig(folder_name + os.sep + "TotalMemory.png", dpi=500)
        # plt.show()

        with open(folder_name + os.sep + "overview.txt", "w") as f:
            for info in stat_info:
                f.write(info + "\n")
                logger.info(info)
        logger.info("===================================================================")

    def get_node_memory_frame(self, node_name, prefix=''):
        """Export (to CSV) all tensors whose lifetime spans *node_name*'s
        execution index.

        NOTE(review): ``new_node_name`` is always "" so the output file is
        just "<prefix>_mem_frame.csv" — the node name never reaches the
        filename; looks unintentional, confirm before relying on the names.
        """
        if node_name not in self.nodes_dataframe.index:
            logger.error("Can't find node {}".format(node_name))
            return
        node_info_df = self.nodes_dataframe.loc[node_name, :]
        node_id = node_info_df.Index
        condition1 = self.tensors_dataframe['LifeStart'] <= node_id
        condition2 = self.tensors_dataframe['LifeEnd'] >= node_id
        node_memory_df = self.tensors_dataframe[condition1 & condition2]
        new_node_name = ""
        node_memory_df.to_csv(prefix + new_node_name + "_mem_frame.csv")

    def check_node(self, node_name):
        """Run check_tensor over every output, workspace and (non-parameter)
        input of *node_name*, writing the per-tensor CSVs into a directory
        named after the node."""
        logger.info("===========================Check Node {}==============================".format(node_name))
        if node_name not in self.nodes_dataframe.index:
            logger.error("Can't find node {}".format(node_name))
            return
        os.system("mkdir " + node_name)
        self.get_node_memory_frame(node_name, prefix=node_name + os.sep)
        df = self.nodes_dataframe.loc[node_name, :]
        index = 0
        for tensor in df.Outputs:
            self.check_tensor(tensor, prefix=node_name + os.sep + "Output" + str(index) + "_", save_csv=True)
            index = index + 1
        index = 0
        for tensor in df.Workspaces:
            self.check_tensor(tensor, prefix=node_name + os.sep + "Workspace" + str(index) + "_", save_csv=True)
            index = index + 1
        index = 0
        for input in df.Inputs:
            # Inputs ending in "P" are parameters (static memory) — skip.
            if input[-1] == "P":
                continue
            # Strip the trailing type suffix (e.g. "T") to get the raw id.
            input_id = input[:-1]
            self.check_tensor(input_id, prefix=node_name + os.sep + "Input" + str(index) + "_", save_csv=True)
            index = index + 1
        logger.info("===================================================================")

    def check_tensor(self, tensor_id, prefix="CheckTensor_", save_csv=False):
        """Check whether any other tensor's address range overlaps
        *tensor_id*'s range while their lifetimes also overlap.

        Returns "Success" when no overwrite is found, an error message
        string when one is, and None (implicitly) for an unknown id —
        callers comparing against "Success" will treat None as a failure.
        """
        ret = "Success"
        logger.info("===========================Check Tensor {}============================".format(tensor_id))
        tensor_id = int(tensor_id)
        if tensor_id not in self.tensors_dataframe.index:
            logger.error("Can't find tensor {}".format(tensor_id))
            return
        relate_node_index = []
        for index in self.nodes_dataframe.index:
            df = self.nodes_dataframe.loc[index, :]
            tensors = df.Tensors
            inputs = df.Inputs  # NOTE(review): unused
            # Tensors are stored on nodes as "<id>T" keys.
            if str(tensor_id) + "T" in tensors:
                relate_node_index.append(index)
        relate_nodes_df = self.nodes_dataframe.loc[relate_node_index, :]
        if save_csv:
            node_file_name = prefix + "Tensor" + str(tensor_id) + "_relate_nodes.csv"
            relate_nodes_df.to_csv(node_file_name)
            logger.info('The nodes using tensor {} is export to {}'.format(tensor_id, node_file_name))

        tensor_df = self.tensors_dataframe.loc[tensor_id, :]
        # NOTE(review): "== INIT_ADDR_VALUE" looks inverted — TaskStartAddr
        # equal to the INIT sentinel means the task address was NEVER filled
        # in, so this branch seems to pick task addresses exactly when they
        # are absent; likely should be "!=". Confirm before changing.
        if self.task_addr_avaliable and tensor_df.TaskStartAddr == INIT_ADDR_VALUE:
            logger.info("Check with Task Address")
            start_addr = tensor_df.TaskStartAddr
            end_addr = tensor_df.TaskEndAddr
            # condition1: other tensor fully contains this range;
            # condition2: other tensor fully contained by this range;
            # condition3/4: partial overlap at either end.
            condition1 = (self.tensors_dataframe['TaskStartAddr'] <= start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] >= end_addr)
            condition2 = (self.tensors_dataframe['TaskStartAddr'] >= start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] <= end_addr)
            condition3 = (self.tensors_dataframe['TaskStartAddr'] > start_addr) & (
                    self.tensors_dataframe['TaskStartAddr'] < end_addr)
            condition4 = (self.tensors_dataframe['TaskEndAddr'] > start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] < end_addr)
        else:
            logger.info("Check with SOMAS Address")
            start_addr = tensor_df.StartAddr
            end_addr = tensor_df.EndAddr
            condition1 = (self.tensors_dataframe['StartAddr'] <= start_addr) & (
                    self.tensors_dataframe['EndAddr'] >= end_addr)
            condition2 = (self.tensors_dataframe['StartAddr'] >= start_addr) & (
                    self.tensors_dataframe['EndAddr'] <= end_addr)
            condition3 = (self.tensors_dataframe['StartAddr'] > start_addr) & (
                    self.tensors_dataframe['StartAddr'] < end_addr)
            condition4 = (self.tensors_dataframe['EndAddr'] > start_addr) & (
                    self.tensors_dataframe['EndAddr'] < end_addr)
        relate_tensors_df = self.tensors_dataframe[condition1 | condition2 | condition3 | condition4]
        if save_csv:
            tensor_file_name = prefix + "Tensor" + str(tensor_id) + "_relate_tensors.csv"
            relate_tensors_df.to_csv(tensor_file_name)
            logger.info('The tensors memory share with tensor {} is export to {}'.format(tensor_id, tensor_file_name))
        life_start = tensor_df.LifeStart
        life_end = tensor_df.LifeEnd
        condition1 = relate_tensors_df['LifeStart'] <= life_start
        condition2 = relate_tensors_df['LifeEnd'] >= life_end
        condition3 = relate_tensors_df['AlignSize'] != 0
        overwrite_tensor_df = relate_tensors_df[condition1 & condition2 & condition3]
        # > 1 because the tensor under test always matches its own range.
        if len(overwrite_tensor_df.index) > 1:
            overwrite_file_name = prefix + "ERROR_Tensor" + str(tensor_id) + "_overwrite_tensors.csv"
            overwrite_tensor_df.to_csv(overwrite_file_name)
            logger.error(
                "The tensors' memory overwrite with tensor {} is export to {}".format(tensor_id, overwrite_file_name))
            ret = "The tensors' memory overwrite with tensor {} is export to {}".format(tensor_id, overwrite_file_name)
        return ret

    def check_address(self, address):
        """Export all tensors whose range covers *address* (a hex string,
        parsed with float.fromhex) to a CSV."""
        logger.info("==========================Check Address============================")
        addr = float.fromhex(address)
        if addr > self.max_tensor_addr:
            logger.info("Address {} is a static address, No Memory Reuse.".format(addr))
            return
        if self.task_addr_avaliable:
            logger.info("Check with Task Address")
            condition1 = self.tensors_dataframe['TaskStartAddr'] <= addr
            condition2 = self.tensors_dataframe['TaskEndAddr'] > addr
        else:
            logger.info("Check with SOMAS Address")
            condition1 = self.tensors_dataframe['StartAddr'] <= addr
            condition2 = self.tensors_dataframe['EndAddr'] > addr
        addr_tensor_df = self.tensors_dataframe[condition1 & condition2]
        file_name = "CheckAddress_" + str(address) + "_tensors_list.csv"
        addr_tensor_df.to_csv(file_name)
        logger.info('The tensors using address {} is export to {}'.format(address, file_name))
        logger.info("===================================================================")

    def check_address_range(self, address_range):
        """Export all tensors overlapping the range "start-end" (hex strings
        separated by '-') to a CSV."""
        logger.info("==========================Check Address Range============================")
        start_addr = float.fromhex(address_range.split("-")[0])
        end_addr = float.fromhex(address_range.split("-")[1])
        if start_addr > self.max_tensor_addr:
            logger.info("Address {} is a static address, No Memory Reuse.".format(start_addr))
            return
        if end_addr > self.max_tensor_addr:
            logger.info("Address {} is a static address, No Memory Reuse.".format(end_addr))
            return
        if self.task_addr_avaliable:
            logger.info("Check with Task Address")
            condition1 = (self.tensors_dataframe['TaskStartAddr'] <= start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] >= end_addr)
            condition2 = (self.tensors_dataframe['TaskStartAddr'] >= start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] <= end_addr)
            condition3 = (self.tensors_dataframe['TaskStartAddr'] > start_addr) & (
                    self.tensors_dataframe['TaskStartAddr'] < end_addr)
            condition4 = (self.tensors_dataframe['TaskEndAddr'] > start_addr) & (
                    self.tensors_dataframe['TaskEndAddr'] < end_addr)
        else:
            logger.info("Check with SOMAS Address")
            condition1 = (self.tensors_dataframe['StartAddr'] <= start_addr) & (
                    self.tensors_dataframe['EndAddr'] >= end_addr)
            condition2 = (self.tensors_dataframe['StartAddr'] >= start_addr) & (
                    self.tensors_dataframe['EndAddr'] <= end_addr)
            condition3 = (self.tensors_dataframe['StartAddr'] > start_addr) & (
                    self.tensors_dataframe['StartAddr'] < end_addr)
            condition4 = (self.tensors_dataframe['EndAddr'] > start_addr) & (
                    self.tensors_dataframe['EndAddr'] < end_addr)
        addr_tensor_df = self.tensors_dataframe[condition1 | condition2 | condition3 | condition4]
        file_name = "CheckAddressRange_" + str(address_range) + "_tensors_list.csv"
        addr_tensor_df.to_csv(file_name)
        logger.info('The tensors using address {} is export to {}'.format(address_range, file_name))
        logger.info("===================================================================")

    def check_all_tensors(self):
        """Run check_tensor on every tensor in parallel and log any failures.

        NOTE(review): each apply_async pickles the bound method — and hence
        the whole Graph (dataframes included) — per task; expensive for
        large graphs. task.get(300) enforces a 300 s per-task timeout.
        """
        pool = multiprocessing.Pool(processes=PROCESS_NUM)
        logger.info('Check {} tenors with {} process.'.format(len(self.tensors), PROCESS_NUM))
        task_list = []
        for id, tensor in self.tensors.items():
            task_future = pool.apply_async(self.check_tensor, (id,))
            task_list.append(task_future)
        pool.close()
        pool.join()
        for task in task_list:
            ret = task.get(300)
            if ret == "Success":
                continue
            else:
                logger.error(ret)


def parse_task_info(filename, graph):
    """Parse a task-info dump and attach runtime (task) addresses to the
    tensors in *graph*.

    The file is a sequence of blank-line-separated records containing
    "op_name:", "task_index:", "input address:", "output address" and
    "workspace address:" lines.  Records are matched positionally against
    graph.nodes; op ids and output/workspace/input counts are cross-checked.

    On success sets graph.task_addr_avaliable and returns True; on any
    mismatch logs an error and returns False.

    Fix over the original: the file handle was opened and never closed —
    it is now read inside a context manager.  Local "worksapce" misspellings
    were also corrected.
    """
    with open(filename, "r") as file:
        lines = file.readlines()
    task_list = []
    task_info = {}
    for line in lines:
        if "op_name:" in line:
            task_info["op_name"] = line.split(":")[-1].replace("\n", "")
        elif "task_index:" in line:
            task_info["task_index"] = int(line.split("\t")[0].split(":")[-1])
        elif "input address:" in line:
            # Entries are tab-separated "addr(size)" tokens with a trailing tab.
            task_info["input"] = line.split(":")[-1].replace("\n", "").split("\t")[:-1]
        elif "output address" in line:
            task_info["output"] = line.split(":")[-1].replace("\n", "").split("\t")[:-1]
        elif "workspace address:" in line:
            task_info["workspace"] = line.split(":")[-1].replace("\n", "").split("\t")[:-1]
        elif line == "\n":
            # Blank line terminates a record.
            task_list.append(task_info)
            task_info = {}

    tensor_num = 0
    if len(task_list) != len(graph.nodes):
        logger.error("Mismatch Node num: graph {} vs task info {} ".format(len(graph.nodes), len(task_list)))
        return False
    for index, task in enumerate(task_list):
        # Node name is the last path segment, e.g. ".../Conv2D-op123".
        node_name = task["op_name"].split("/")[-1]
        node_name = node_name.replace("-NonTask", "")
        op_id = int(node_name.split("-op")[-1])
        node = graph.nodes[index]
        if node.op_id != op_id:
            logger.error(
                "Mismatch op id: graph {} vs task info {} for node {} vs {}".format(node.op_id, op_id, node.name,
                                                                                    node_name))
            return False
        if "output" in task:
            outputs = task["output"]
            if len(node.outputs) != len(outputs):
                logger.error("Mismatch output size : graph {} vs task info {} for node {}".format(len(node.outputs),
                                                                                                  len(outputs),
                                                                                                  node_name))
                return False
            for output_index, output in enumerate(outputs):
                # Each entry is "<hex addr>(<size>)".
                start_addr = float.fromhex(output.split("(")[0])
                size = int(output.split("(")[1][:-1])
                tensor = graph.tensors[node.outputs[output_index]]
                tensor.task_start_addr = start_addr
                tensor.task_end_addr = start_addr + size
                tensor.task_size = size
                tensor_num += 1

        if "workspace" in task:
            workspaces = task["workspace"]
            if len(node.workspaces) != len(workspaces):
                logger.error(
                    "Mismatch workspaces size : graph {} vs task info {} for node {}".format(len(node.workspaces),
                                                                                             len(workspaces),
                                                                                             node_name))
                return False
            for workspace_index, workspace in enumerate(workspaces):
                start_addr = float.fromhex(workspace.split("(")[0])
                size = int(workspace.split("(")[1][:-1])
                tensor = graph.tensors[node.workspaces[workspace_index]]
                tensor.task_start_addr = start_addr
                tensor.task_end_addr = start_addr + size
                tensor.task_size = size
                tensor_num += 1

        if "input" in task:
            # Inputs are only count-checked; their addresses are not applied.
            inputs = task["input"]
            if len(node.inputs) != len(inputs):
                logger.error(
                    "Mismatch inputs size : graph {} vs task info {} for node {}".format(len(node.inputs), len(inputs),
                                                                                         node_name))
                return False
    logger.info("Parse Task Info Done. tasks num: {} tensors num: {}".format(len(task_list), tensor_num))
    graph.task_addr_avaliable = True
    return True


def parse_somas_info(file_name: str, graph: Graph):
    """Parse a SOMAS allocation-info dump and populate ``graph``.

    Pass 1 scans the file once to record summary totals (upper/lower bound,
    lifelong size) and the line ranges of the "All Parameters:",
    "All Tensors:", "All Nodes:" and "All Stream Groups:" sections.
    Subsequent passes parse those ranges into Parameter/Tensor/Node objects
    registered on ``graph``, then build the stream-group lookup tables.

    Returns True when parsing completes.
    """
    # NOTE(review): the file handle is never closed — consider ``with open(...)``.
    file = open(file_name, "r")
    lines = file.readlines()
    index = 0
    # Pass 1: summary values and section boundaries. The +3/-2 offsets skip
    # the section headers and separator lines of the dump format.
    # NOTE(review): the *_start/*_end variables stay unbound if a header line
    # is missing from the file, which would raise NameError at the slices below.
    for line in lines:
        if "Total Dynamic Size (Upper Bound)" in line:
            graph.upper_bound = int(line.split(":")[-1])
        elif "Theoretical Optimal Size (Lower Bound)" in line:
            graph.lower_bound = int(line.split(":")[-1])
        elif "Total LifeLong All Tensor Size" in line:
            graph.lifelong = int(line.split(":")[-1])
        elif line == "All Parameters:\n":
            param_start = index + 3
        elif line == "All Tensors:\n":
            param_end = index - 2
            tensor_start = index + 3
        elif line == "All Nodes:\n":
            tensor_end = index - 2
            node_start = index + 2
        elif line == "All Stream Groups:\n":
            stream_group_start = index + 2
        elif "All Merged Blocks:" in line:
            stream_group_end = index - 2
            break
        elif "All Union Tensors Info" in line:
            node_end = index - 2
        index = index + 1
    parameters = lines[param_start:param_end]
    tensors = lines[tensor_start:tensor_end]
    nodes = lines[node_start:node_end]
    stream_groups = lines[stream_group_start:stream_group_end]
    # Used as the life_end for lifelong tensors below.
    max_node_id = len(nodes)

    # Parse "All Parameters": tab-separated rows of
    # "[id]\t[size]\tsource_node_name\tout_index".
    for parameter in parameters:
        infos = parameter.split("\t")
        id = int(infos[0][1:-1])
        param = Parameter(id)
        param.size = int(infos[1][1:-1])
        param.start_addr = 0
        param.end_addr = 0
        param.source_node_name = infos[2]
        param.source_node_out_index = int(infos[3])
        graph.add_parameter(param)
        graph.parameter_total_size = graph.parameter_total_size + param.size

    # Parse "All Tensors"; control-edge tensors occupy no memory and are skipped.
    for tensor in tensors:
        infos = tensor.split("\t")
        if infos[4] == "Control":
            continue
        id = int(infos[0][1:-1])
        new_tensor = Tensor(id)
        new_tensor.align_size = int(infos[1][1:-1])
        new_tensor.size = int(infos[2][1:-1])
        new_tensor.offset = int(infos[3][1:])
        # SOMAS addresses are graph-relative: start_addr equals the offset.
        new_tensor.start_addr = int(infos[3][1:])
        new_tensor.end_addr = int(infos[3][1:]) + new_tensor.size
        new_tensor.tensor_type = infos[4]
        new_tensor.lifelong = bool(infos[5] == "LifeLongGraphAll")
        new_tensor.life_start = int(infos[6])
        new_tensor.life_end = int(infos[7])
        new_tensor.source_node_name = infos[8][0:-1]
        # Lifelong tensors span the whole graph; the Start/End variants clamp
        # only one side of the lifetime.
        if new_tensor.lifelong:
            new_tensor.life_start = 0
            new_tensor.life_end = max_node_id
        if infos[5] == "LifeLongGraphStart":
            new_tensor.life_start = 0
        if infos[5] == "LifeLongGraphEnd":
            new_tensor.life_end = max_node_id
        # These tensor types alias other memory, so they add no size of their own.
        if new_tensor.tensor_type == "OutputOnly" or new_tensor.tensor_type == "RefNodeOutput":
            new_tensor.align_size = 0
        graph.add_tensor(new_tensor)

    cur_total_size = 0
    alive_tensors = []
    # Parse "All Nodes": each tab-separated row lists the node and its
    # input/output/workspace tensor-id lists plus its stream.
    for node in nodes:
        node_id, name, node_type, inputs, outputs, workspaces, ctrl_inputs, ctrl_outputs, stream_str = node.split("\t")
        op_id = name.split("-op")[-1]
        node_type = name.split("-op")[0]
        is_communication_node = False
        if node_type in COMMUNICATION_NODES:
            is_communication_node = True
        node = Node(int(node_id[1:]), int(op_id), name)
        node.stream = int(stream_str[10:-2])
        node.is_communication_node = is_communication_node
        output_tensors = outputs[8:-1].replace(" ", "")
        if len(output_tensors):
            index = 0
            for output in output_tensors.split(",")[:-1]:
                tensor_id = int(output[1:-1])
                if tensor_id not in graph.tensors:
                    logger.error("Can't find tensor {} in graph".format(tensor_id))
                tensor = graph.tensors[tensor_id]
                node.add_tensor(tensor_id)
                node.add_outputs(tensor_id)
                if tensor_id not in alive_tensors: # if tensor is not registered yet
                    tensor.source_node_id = node.id
                    tensor.source_node_out_index = index
                    tensor.source_stream_id = node.stream
                    # Communication ops appear to carry 512B of padding on the
                    # first/last output that is stripped from the bookkeeping.
                    if is_communication_node:
                        if index == 0:
                            tensor.align_size = tensor.align_size - 512
                            tensor.end_addr = tensor.end_addr - 512
                        # NOTE(review): output_tensors is a *string* here, so
                        # len(output_tensors) is a character count, not the
                        # number of outputs — confirm this comparison's intent.
                        if index == len(output_tensors) - 1:
                            tensor.align_size = tensor.align_size - 512
                            tensor.end_addr = tensor.end_addr - 512
                    alive_tensors.append(tensor_id)
                    cur_total_size = cur_total_size + tensor.align_size
                index = index + 1
        workspace_tensors = workspaces[10:-1].replace(" ", "")
        if len(workspace_tensors):
            index = 0
            for workspace in workspace_tensors.split(",")[:-1]:
                tensor_id = int(workspace[1:-1])
                if tensor_id not in graph.tensors:
                    logger.error("Can't find tensor {} in graph".format(tensor_id))
                tensor = graph.tensors[tensor_id]
                node.add_tensor(tensor_id)
                node.add_workspaces(tensor_id)
                if tensor_id not in alive_tensors: # if tensor is not registered yet
                    tensor.source_node_id = node.id
                    tensor.source_node_out_index = index
                    tensor.source_stream_id = node.stream
                    alive_tensors.append(tensor_id)
                    cur_total_size = cur_total_size + tensor.align_size
                index = index + 1
        input_tensors = inputs[7:-1].replace(" ", "")
        if len(input_tensors):
            index = 0
            for input in input_tensors.split(",")[:-1]:
                # Trailing tag character: "P" marks a parameter input,
                # anything else is treated as a tensor input.
                type = input[-1]
                if type == "P":
                    parameter_id = int(input[1:-1])
                    if parameter_id not in graph.parameters:
                        logger.error("Can't find parameter {} in graph".format(parameter_id))
                    param = graph.parameters[parameter_id]
                    param.destionations.append([node.id, node.name])
                    node.parameters.append(param.id)
                else:
                    tensor_id = int(input[1:-1])
                    tensor = graph.tensors[tensor_id]
                    tensor.destionations.append([node.id, node.name])
                    tensor.dst_stream_ids.append(node.stream)
                    node.add_tensor(tensor_id)
                    # Link producer -> consumer to build the dependency DAG.
                    if tensor.source_node_id != -1:
                        src_node = graph.nodes[tensor.source_node_id]
                        src_node.down_stream.append(node.id)
                        node.up_stream.append(src_node.id)
                    # Strip the communication padding only once per (tensor, node).
                    processed = [x[1] for x in tensor.destionations].count(node.name) > 1
                    if is_communication_node and not processed:
                        if index == 0:
                            tensor.align_size = tensor.align_size - 512
                            tensor.end_addr = tensor.end_addr - 512
                        # NOTE(review): this compares against len(output_tensors)
                        # (a string) inside the *input* loop — looks like it may
                        # have been meant to be the input count; confirm.
                        if index == len(output_tensors) - 1:
                            tensor.align_size = tensor.align_size - 512
                            tensor.end_addr = tensor.end_addr - 512

                node.inputs.append(input[1:])
        node.cur_total_size = cur_total_size
        # Retire tensors whose lifetime ended before this node, and sum the
        # aligned sizes of the tensors still alive at this point.
        remove_tensor_list = []
        cur_alive_tensor_size = 0
        for tensor_id in alive_tensors:
            tensor = graph.tensors[tensor_id]
            if tensor.life_end < node.id:
                remove_tensor_list.append(tensor_id)
            else:
                cur_alive_tensor_size = cur_alive_tensor_size + tensor.align_size
        alive_tensors = [i for i in alive_tensors if i not in remove_tensor_list]
        node.cur_alive_tensor_size = cur_alive_tensor_size
        graph.add_node(node)

    # Parse "All Stream Groups": each line is a space-separated "stmN" list.
    group_index = 0
    for group in stream_groups:
        stream_ids = group.replace(" \n", "").replace("stm", "").split(" ")
        graph.stream_groups[group_index] = [int(id) for id in stream_ids]
        group_index = group_index + 1

    if len(stream_groups) != 0:
        # The main stream is the group containing the most streams.
        max_group_len = 0
        for key, value in graph.stream_groups.items():
            graph.stream_group_nodes[key] = []
            group_len = len(value)
            if group_len > max_group_len:
                max_group_len = group_len
                graph.main_stream = key

        # Map every stream id to the id of the group it belongs to.
        for key, value in graph.stream_groups.items():
            for stm in value:
                graph.steam_map_to_group_id[stm] = key
        # NOTE(review): this re-assignment of the main stream's streams looks
        # redundant with the loop just above — confirm whether it is needed.
        for stm in graph.stream_groups[graph.main_stream]:
            graph.steam_map_to_group_id[stm] = graph.main_stream

        # Bucket ordinary compute nodes by stream and by stream group;
        # AtomicAddrClean and communication nodes are excluded.
        for key, node in graph.nodes.items():
            if "AtomicAddrClean" in node.name or node.is_communication_node:
                continue
            stream_id = node.stream
            group_id = graph.steam_map_to_group_id[stream_id]
            node.stream_group_id = group_id
            graph.stream_group_nodes[group_id].append(node.id)
            if stream_id in graph.stream_nodes.keys():
                graph.stream_nodes[stream_id].append(node.id)
            else:
                graph.stream_nodes[stream_id] = [node.id]

    graph.nodes_num = len(graph.nodes)
    graph.tensors_num = len(graph.tensors)
    graph.parameters_num = len(graph.parameters)

    logger.info("Process Graph Done. nodes num: {} tensors num: {} parameters num: {}".format(graph.nodes_num,
                                                                                              graph.tensors_num,
                                                                                              graph.parameters_num))
    return True


def update_up_streams_by_exec_order(graph: Graph):
    """Add implicit execution-order dependencies to the graph.

    Within every stream, each node is made dependent on the node scheduled
    immediately before it; within every stream group, the first node of each
    stream is made dependent on the last node of the preceding stream.
    """
    # Chain consecutive nodes inside each stream.
    for _, node_ids in graph.stream_nodes.items():
        for prev_id, next_id in zip(node_ids, node_ids[1:]):
            graph.nodes[prev_id].down_stream.append(next_id)
            graph.nodes[next_id].up_stream.append(prev_id)
    # Chain the streams of each group: tail of one stream -> head of the next.
    for _, stream_ids in graph.stream_groups.items():
        for prev_stm, next_stm in zip(stream_ids, stream_ids[1:]):
            tail_id = graph.stream_nodes[prev_stm][-1]
            head_id = graph.stream_nodes[next_stm][0]
            graph.nodes[tail_id].down_stream.append(head_id)
            graph.nodes[head_id].up_stream.append(tail_id)


class StreamExecutor:
    """Simulates in-order execution of the nodes of one stream group.

    Tracks which node ids are currently running and which have not started
    yet; the global Executor drives it one node at a time via ``run``.
    """

    def __init__(self, nodes):
        """``nodes``: iterable of node ids belonging to this stream group."""
        self.all_nodes = list(nodes)
        # Nodes that have started but have not been marked finished yet.
        self.current_nodes = set()
        # Nodes that have not started yet.
        self.un_run_nodes = set(nodes)

    def run(self, tick, graph: Graph, node_id: int):
        """Start ``node_id`` at ``tick`` and retire ops it supersedes.

        Returns ``(run_end_nodes, successors)``: the ids of nodes whose
        end_tick was set at this tick, and the downstream node ids that
        become scheduling candidates.
        """
        # Start the current op.
        node = graph.nodes[node_id]
        node.start_tick = tick
        self.current_nodes.add(node_id)
        self.un_run_nodes.remove(node_id)

        run_end_nodes = []
        # Any op in this executor that started on an earlier tick is now done.
        for cur_id in self.current_nodes:
            cur_node = graph.nodes[cur_id]
            if cur_node.start_tick != tick:
                cur_node.end_tick = tick
                run_end_nodes.append(cur_id)

        # Upstream producers that have not finished yet finish now.
        # (Loop variable renamed from ``node_id`` to avoid shadowing the
        # parameter, which still refers to the node being started.)
        for up_id in node.up_stream:
            input_node = graph.nodes[up_id]
            if input_node.end_tick == -1:
                input_node.end_tick = tick
                run_end_nodes.append(input_node.id)

        for end_id in run_end_nodes:
            if end_id in self.current_nodes:
                self.current_nodes.remove(end_id)
        return run_end_nodes, node.down_stream


class Executor:
    """Tick-based simulator that replays the graph across its stream groups.

    Drives one StreamExecutor per stream group, recording which nodes start
    and finish at every tick, then derives per-tensor lifetimes and per-tick
    memory usage from the resulting schedule.
    """

    def __init__(self):
        self.cur_tick = 0               # next tick number to simulate
        self.active_executor = set()    # stream-group ids that still have work
        self.finished_nodes = set()     # ids of nodes already executed
        self.candidate_nodes = set()    # ids of nodes eligible for scheduling
        self.tick_run_start_nodes = {}  # tick -> node ids started at that tick
        self.tick_run_end_nodes = {}    # tick -> node ids finished at that tick
        self.stream_executor = {}       # stream-group id -> StreamExecutor

    def init_executor(self, graph: Graph):
        """Build one StreamExecutor per stream group and seed the candidates."""
        for group_id, nodes in graph.stream_group_nodes.items():
            stm_executor = StreamExecutor(nodes)
            self.stream_executor[group_id] = stm_executor
            # Nodes with no execution dependencies become initial candidates.
            for node_id in nodes:
                node = graph.nodes[node_id]
                if not len(node.up_stream):
                    self.candidate_nodes.add(node_id)
                    self.active_executor.add(group_id)

    def execute_tick(self, tick, graph: Graph):
        """Simulate one tick: run every candidate whose dependencies are met."""
        # Check all candidate ops and pick the ones ready to run.
        nodes_to_run = []
        new_candidates = set()
        for node_id in self.candidate_nodes:
            up_stream = set(graph.nodes[node_id].up_stream)
            un_ready = up_stream.difference(self.finished_nodes)
            if not len(un_ready):
                # All execution dependencies of this op are satisfied.
                nodes_to_run.append(node_id)

        # Run the selected ops on their stream-group executors.
        self.tick_run_start_nodes[tick] = []
        self.tick_run_end_nodes[tick] = []
        for node_id in nodes_to_run:
            stm_grp_id = graph.steam_map_to_group_id[graph.nodes[node_id].stream]
            stream_executor = self.stream_executor[stm_grp_id]
            run_end_nodes, candidate = stream_executor.run(tick, graph, node_id)
            for id in candidate:
                new_candidates.add(id)
            for id in run_end_nodes:
                self.tick_run_end_nodes[tick].append(id)
            self.tick_run_start_nodes[tick].append(node_id)
            self.finished_nodes.add(node_id)
            self.candidate_nodes.remove(node_id)

        # Update the candidate set and the set of active executors.
        for node_id in new_candidates:
            self.candidate_nodes.add(node_id)
            stm_grp_id = graph.steam_map_to_group_id[graph.nodes[node_id].stream]
            self.active_executor.add(stm_grp_id)
        finish_executor = set()
        for exec_id in self.active_executor:
            if not len(self.stream_executor[exec_id].un_run_nodes):
                # Every op of this executor has finished.
                finish_executor.add(exec_id)
        for exec_id in finish_executor:
            # Drop finished stream executors from the active set.
            self.active_executor.remove(exec_id)

    def execute(self, graph: Graph):
        """Tick until no stream executor has runnable nodes left."""
        while len(self.active_executor):
            self.execute_tick(self.cur_tick, graph)
            self.cur_tick = self.cur_tick + 1

    def _get_input_tensor_ids(self, inputs):
        """Collect ids of entries tagged with "T" (tensor inputs) from a
        node's mixed input id list; parameter entries are ignored."""
        input_tensor_ids = []
        for tensor in inputs:
            if "T" in tensor:
                input_tensor_ids.append(int(tensor.replace("T", "")))
        return input_tensor_ids

    def update_none_main_stream_node_tick(self, graph: Graph):
        """Back-fill start/end ticks for nodes on non-main streams.

        The main stream is assumed to never wait: the ticks of its
        StreamSend/StreamRecv ops are projected onto the matching
        StreamRecv/StreamSend ops of the other streams, and the ops between
        two sync points inherit the enclosing tick window.
        """
        # Project the main stream's StreamSend/StreamRecv start/end ticks
        # onto the paired StreamRecv/StreamSend ops of the other streams.
        for group_id, nodes in graph.stream_group_nodes.items():
            if group_id == graph.main_stream:
                for node_id in graph.stream_group_nodes[group_id]:
                    node = graph.nodes[node_id]
                    if "StreamSend" in node.name:
                        for next_node_id in node.down_stream:
                            next_node = graph.nodes[next_node_id]
                            if "StreamRecv" in next_node.name:
                                next_node.end_tick = node.start_tick
                                next_node.start_tick = node.start_tick - 1
                    elif "StreamRecv" in node.name:
                        for pre_node_id in node.up_stream:
                            pre_node = graph.nodes[pre_node_id]
                            if "StreamSend" in pre_node.name:
                                pre_node.start_tick = node.start_tick
                                pre_node.end_tick = node.start_tick + 1
                break
        # Non-main streams synchronize with the main stream through
        # StreamSend/StreamRecv; the start/end ticks of the other ops on a
        # non-main stream are derived from the surrounding sync points.
        for group_id, nodes in graph.stream_group_nodes.items():
            if group_id == graph.main_stream:
                continue
            left_node_index = 0
            while left_node_index < len(nodes):
                left_node = graph.nodes[nodes[left_node_index]]
                if "StreamRecv" in left_node.name or "StreamSend" in left_node.name:
                    # Found the opening StreamRecv/StreamSend of a window.
                    right_node_index = left_node_index + 1
                    left_node_index = left_node_index + 1
                    while right_node_index < len(nodes):
                        right_node = graph.nodes[nodes[right_node_index]]
                        if "StreamSend" in right_node.name or "StreamRecv" in right_node.name:
                            # Found the closing sync point; the nodes in
                            # between inherit the enclosing tick window.
                            start_tick = left_node.end_tick
                            end_tick = right_node.start_tick
                            for i in range(left_node_index, right_node_index):
                                common_node = graph.nodes[nodes[i]]
                                common_node.start_tick = start_tick
                                common_node.end_tick = end_tick
                            left_node_index = right_node_index
                            break
                        else:
                            right_node_index = right_node_index + 1
                else:
                    left_node_index = left_node_index + 1

    def update_tensor_tick(self, graph: Graph):
        """Derive each tensor's live tick range from its producer/consumers."""
        for _, node in graph.nodes.items():
            output_tensors = node.outputs
            wk_tensors = node.workspaces
            input_tensors = self._get_input_tensor_ids(node.inputs)
            start_tick = node.start_tick
            end_tick = node.end_tick
            # Outputs come alive when their producer starts; lifelong tensors
            # span the entire simulated schedule.
            for tensor_id in output_tensors:
                graph.tensors[tensor_id].start_tick = start_tick
                if graph.tensors[tensor_id].lifelong:
                    graph.tensors[tensor_id].start_tick = 0
                    graph.tensors[tensor_id].end_tick = self.cur_tick - 1
            # Workspaces live exactly as long as their node runs.
            for tensor_id in wk_tensors:
                graph.tensors[tensor_id].start_tick = start_tick
                graph.tensors[tensor_id].end_tick = end_tick
            # Inputs stay alive until their last consumer finishes.
            for tensor_id in input_tensors:
                tensor_end_tick = graph.tensors[tensor_id].end_tick
                if end_tick > tensor_end_tick:
                    graph.tensors[tensor_id].end_tick = end_tick

    def update_tick_mem_usage(self, graph: Graph):
        """Replay the tick schedule to compute per-node memory usage and
        report the peak tick-level memory size."""
        max_size = 0
        # NOTE(review): assumes a node with id 0 exists in graph.nodes.
        max_node = graph.nodes[0]
        in_use_memory_size = 0
        for tick, end_nodes in self.tick_run_end_nodes.items():
            # Release input and workspace memory of ops finishing at this tick.
            for node_id in end_nodes:
                node = graph.nodes[node_id]
                wk_tensors = node.workspaces
                input_tensors = self._get_input_tensor_ids(node.inputs)
                for tensor_id in input_tensors:
                    input_tensor = graph.tensors[tensor_id]
                    # Only release an input if this node is its last consumer.
                    if input_tensor.end_tick == node.end_tick:
                        in_use_memory_size = in_use_memory_size - input_tensor.align_size
                for tensor_id in wk_tensors:
                    tensor_size = graph.tensors[tensor_id].align_size
                    in_use_memory_size = in_use_memory_size - tensor_size
            # Charge output and workspace memory of ops starting at this tick.
            start_nodes = self.tick_run_start_nodes[tick]
            for node_id in start_nodes:
                node = graph.nodes[node_id]
                wk_tensors = node.workspaces
                output_tensors = node.outputs
                for tensor_id in output_tensors:
                    tensor_size = graph.tensors[tensor_id].align_size
                    in_use_memory_size = in_use_memory_size + tensor_size
                for tensor_id in wk_tensors:
                    tensor_size = graph.tensors[tensor_id].align_size
                    in_use_memory_size = in_use_memory_size + tensor_size
            # Record the resulting memory usage on the ops that just started.
            for node_id in start_nodes:
                node = graph.nodes[node_id]
                node.tick_memory = in_use_memory_size
                if in_use_memory_size > max_size:
                    max_size = in_use_memory_size
                    max_node = node
        logger.info(
            "Tick Max Size is {:.4}GB at node {}, start_tick:{}, end_tick:{}".format(max_size / (1024 * 1024 * 1024),
                                                                                     max_node.name, max_node.start_tick,
                                                                                     max_node.end_tick))

        # Handle atomic-clean ops: they inherit ticks and memory from the op
        # that follows them.
        # NOTE(review): assumes node.id + 1 is always a valid node id here.
        for _, node in graph.nodes.items():
            if "AtomicAddrClean" in node.name:
                next_node = graph.nodes[node.id + 1]
                node.start_tick = next_node.start_tick
                node.end_tick = next_node.end_tick
                node.tick_memory = next_node.tick_memory


def sim_executor(graph: Graph):
    """Drive the full tick-based execution simulation for ``graph``.

    Optionally augments the dependency DAG with per-stream execution order,
    then runs an Executor to completion and back-fills tick and memory
    statistics on nodes and tensors.
    """
    logger.info(f"start to simulate executor graph, node num is {len(graph.nodes)}")
    depend_execution_order = True
    if depend_execution_order:
        update_up_streams_by_exec_order(graph)
    executor = Executor()
    # The simulation pipeline: schedule, then derive ticks and memory usage.
    for stage in (executor.init_executor,
                  executor.execute,
                  executor.update_none_main_stream_node_tick,
                  executor.update_tensor_tick,
                  executor.update_tick_mem_usage):
        stage(graph)
    logger.info(f"finish simulate executor graph, max tick is {executor.cur_tick}")


if __name__ == '__main__':
    '''
    usage:
    python MemAnalyzer.py -s somas_allocate_info_1.ir -t task_info_graph_1.ir -cat -cn FusedMulApplyMomentum-op3558 -ct 62 -ca 0x108830de2a00  -car 0x108830de2a20-0x108830de4a20
    or
    python MemAnalyzer.py --somas_info_file somas_allocate_info_1.ir --task_info_file task_info_graph_1.ir --check_all_tensors --check_node_name FusedMulApplyMomentum-op3558 --check_tensor_id 62 --check_address 0x108830de2a00 --check_address_range 0x108830de2a20-0x108830de4a20 
    '''
    SetLogger(logger)
    parser = argparse.ArgumentParser(description='Task Info Parser')
    parser.add_argument('-s', '--somas_info_file', type=str, default=None,
                        help='Somas info file path, such as somas_initial_info_1.ir, somas_pre_processed_info_1.ir or somas_allocate_info_1.ir')
    parser.add_argument('-t', '--task_info_file', type=str, default=None,
                        help='Task info file path, such as task_info_graph_1.ir, if specify, the analyzer will use task address to check')
    parser.add_argument('-cat', '--check_all_tensors', action='count', default=0,
                        help='Enable Check all tensors feature')
    parser.add_argument('-cn', '--check_node_name', type=str, default=None,
                        help='Check node name, such as GetNext-op62')
    parser.add_argument('-ct', '--check_tensor_id', type=str, default=None,
                        help='Check tensor id, such as 62')
    parser.add_argument('-ca', '--check_address', type=str, default=None,
                        help='Check address, such as 0x108830de2a20')
    parser.add_argument('-car', '--check_address_range', type=str, default=None,
                        help='Check address range, such as 0x108830de2a20-0x108830de4a20')
    args_opt = parser.parse_args()

    somas_info_file = args_opt.somas_info_file
    task_info_file = args_opt.task_info_file
    check_all_tensors = args_opt.check_all_tensors
    check_node_name = args_opt.check_node_name
    check_tensor_id = args_opt.check_tensor_id
    check_address = args_opt.check_address
    check_address_range = args_opt.check_address_range

    graph = Graph()
    if not somas_info_file:
        logger.error("--somas_info_file are required.")
    else:
        # Parse the mandatory SOMAS dump first. (The original re-checked
        # somas_info_file here, which is always truthy in this branch.)
        ret = parse_somas_info(somas_info_file, graph)
        if not ret:
            logger.error("Parse SOMAS file failed")
        # sim_executor(graph)
        # Bug fix: only overlay task-info addresses when the SOMAS parse
        # succeeded. Previously a failed SOMAS parse could be masked by a
        # successful task parse resetting ``ret`` to True, and the checks
        # below would run on a half-built graph.
        if ret and task_info_file:
            ret = parse_task_info(task_info_file, graph)
            if not ret:
                logger.error("Parse Task info file failed")
        if ret:
            graph.trans_to_dataframes()
            graph.gen_statistic_info()
            # Run only the checks the user asked for on the command line.
            if check_all_tensors:
                graph.check_all_tensors()
            if check_node_name:
                graph.check_node(check_node_name)
            if check_tensor_id:
                graph.check_tensor(check_tensor_id, save_csv=True)
            if check_address:
                graph.check_address(check_address)
            if check_address_range:
                graph.check_address_range(check_address_range)
            logger.info("Process Done, Please Check the log")