from typing import Dict, List, Tuple
#from loguru import logger
import logger
import pyparsing as pp
#from sklearn import metrics
import metrics
import torch
from graphviz import Digraph
import json
import os
from model.net import CLSNet, CLSNetV3, CLSNetV6, CLSNetV7
from my_utils.graph_pattern import GraphPattern
from tvm_graph.graph import Graph
from tvm_graph.node import Node

from model.correct_model import CorrectGNN, CorrectGNNWithShapeDim

# Keras-level operator types emitted by the coarse classifier (infer_func_type).
func_type_list = [
    'LSTM', 'UpSampling2D', 'UpSampling3D', 'Cropping2D', 'ZeroPadding2D', 'ZeroPadding3D', 'SeparableConv2D', 'Conv2D',
    'DepthwiseConv2D', 'ReLU', 'ThresholdedReLU', 'LeakyReLU', 'Softmax', 'ELU', 'Conv3D', 'Dense', 'Reshape',
    'Flatten', 'Concatenate', 'Average', 'Maximum', 'Minimum', 'Add', 'Subtract', 'Multiply', 'Dot', 'MaxPooling2D',
    'MaxPooling3D', 'AveragePooling2D', 'AveragePooling3D', 'GlobalMaxPooling2D', 'GlobalMaxPooling3D',
    'GlobalAveragePooling2D', 'GlobalAveragePooling3D', 'BatchNormalization'
]

# TVM/Relay atomic operator types; the order must match the classifier's
# output logits (indexed directly in infer_func_type_and_param*).
atom_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
    'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
    'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
    'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d'
]

# Operators whose input rank is validated in check_in_shape.
in_shape_check_list = ['avg_pool3d', 'avg_pool2d', 'max_pool2d', 'max_pool3d']

# Operators that carry no trainable weights.
# (Fixed: 'batch_flatten' was listed twice; membership semantics unchanged.)
no_weights_list = [
    'abs', 'expand_dims', 'upsampling', 'global_max_pool2d', 'minimum', 'concatenate', 'clip', 'upsampling3d',
    'max_pool3d', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'transpose', 'adaptive_avg_pool3d',
    'batch_flatten', 'avg_pool2d', 'adaptive_max_pool3d', 'softmax'
]

# Operators that must have trainable weights attached.
ness_weight_list = ['conv2d', 'conv3d', 'dense', 'bias_add']

# ---------------------------------------------------------------------------
# Checkpoint paths. The commented alternatives document previously used
# training runs; the active path below is the CLSNetV3 [ARM] checkpoint.
# ---------------------------------------------------------------------------
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-14-23-45-21-(train_one_for_all.py)/checkpoints/model_ckpt_2.pth"
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-15-21-34-33-(train_one_for_all.py)/checkpoints/model_ckpt_6.pth"
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-10-20-07-31-(train_one_for_all_dist.py)/checkpoints/model_ckpt_8.pth"
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-11-13-01-52-(train_one_for_all_class_fix.py)/checkpoints/model_ckpt_6.pth"

# CLSNetV3 [value-seq]
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-17-14-49-38-(train_one_for_all_class_fix.py)/checkpoints/model_ckpt_40.pth"

# CLSNetV3 Checkpoint
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-22-22-07-08-(train_one_for_all_class_fix.py)/checkpoints/model_ckpt_20.pth"

# CLSNetV7 [ARM]
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2022-03-05-18-58-19-(train_one_for_all_class_fix_arm.py)/checkpoints/model_ckpt_18.pth"

# CLSNetV3 [ARM]
TYPE_CLASSIFIER_PATH = "/Users/mhli/workspace/tvm-reversion/model/clsnetv3-arm.pth"

# CLSNetV6 [mean all]
# TYPE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2021-11-26-15-12-41-(train_one_for_all_class_fix.py)/checkpoints/model_ckpt_30.pth"

# NOTE: template has a single '{}' placeholder; see get_checkpoint_path below.
CLASSIFIER_PATH = "/Users/mhli/workspace/tvm-reversion/model/model_ckpt_{}.pth"

CORRECT_GCN_CKPT = "/Users/mhli/workspace/tvm-reversion/model/gcn_model.pth"
# CORRECT_GCN_CKPT = "/home/kxp/workspace/tvm-reversion/results/2021-12-22-20-48-06-(train_correct_gnn.py)/checkpoints/model_ckpt_52.pth"
# CORRECT_GCN_CKPT = "/home/kxp/workspace/tvm-reversion/results/2022-01-11-19-36-19-(train_correct_gcn_shape.py)/checkpoints/model_ckpt_70.pth"
# CORRECT_GCN_CKPT = "/home/kxp/workspace/tvm-reversion/results/2022-02-24-16-49-38-(train_correct_gnn.py)/checkpoints/model_ckpt_50.pth"

# Parameter-slot layout of the CLSNetV3 output vector: SLOT_COUNT slots of
# SLOT_LENGTH binary entries each, appended after the atom-type logits.
SLOT_LENGTH = 15
SLOT_COUNT = 11

# Candidate label sets for each decoded parameter slot.
PADDING_LABEL = ["same", "valid"]
ACTIVATION_LABEL = [
    'softmax', 'softplus', 'sigmoid', 'elu', 'softsign', 'linear', 'tanh', 'relu', 'selu', None, 'hard_sigmoid'
]
FILTERS_LABEL = [0] + [2**i for i in range(13)]
FILTERS_LABEL_LARGE = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]
STRIDES_0_LABEL = [1, 3, 5, 7, 9]
STRIDES_1_LABEL = [1, 3, 5, 7, 9]
STRIDE_LABELS = [i for i in range(1, 8)]
KERNEL_SIZE_LARGE = [i for i in range(1, 20)]
KERNEL_SIZE = [i for i in range(1, 20)]
DILATION_RATE_0_LABEL = [1, 3, 5, 7]
DILATION_RATE_1_LABEL = [1, 3, 5, 7]
DEPTH_MULTIPLIER = [i for i in range(1, 11)]

SAMPLING_SIZE = [i for i in range(1, 11)]
PADDING_SIZE = [i for i in range(1, 11)]
POOLING_SIZE = [i for i in range(1, 11)]

CROPPING_SIZE = [i for i in range(1, 11)]

INTERPOLATION_LABEL = ["bilinear", "nearest"]

UNITS_LABEL = [2**i for i in range(20)]


class NetStorage:
    """Process-wide cache for the lazily loaded operator-type classifier.

    The network is expensive to construct and load, so the infer_* functions
    build it once and stash it here for reuse.
    """

    # Shared classifier network; None until first populated by an infer_* call.
    func_type_net = None

    @classmethod
    def get_func_type_net(cls):
        """Return the cached classifier network, or None if not loaded yet.

        Fixed: this was declared ``@staticmethod`` with a ``self`` parameter,
        which forced callers to pass an explicit instance/class argument.
        """
        return cls.func_type_net


def get_checkpoint_path(date: str, epoch: int) -> str:
    """Return an absolute path of the checkpoint file

    :param date: The date string
    :type date: str
    :param epoch: The number of epochs
    :type epoch: int
    :return: A path string of corresponding checkpoint file
    :rtype: str

    NOTE(review): ``CLASSIFIER_PATH`` contains a single ``{}`` placeholder, so
    ``format(date, file, epoch)`` substitutes only ``date`` and silently
    ignores the remaining arguments — every candidate path tested in the loop
    below is therefore the same string, and ``epoch`` is effectively unused.
    Confirm the intended template before relying on this function.
    """
    file_name = ["train_param.py", "train_param_value.py"]
    for file in file_name:
        # All iterations build an identical path (extra format args ignored).
        if os.path.exists(CLASSIFIER_PATH.format(date, file, epoch)):
            return CLASSIFIER_PATH.format(date, file, epoch)
    # Fallback reuses the loop variable's last value; same string as above.
    return CLASSIFIER_PATH.format(date, file, epoch)


def decode(vector, label_list):
    """Decode a 15-entry slot vector into an entry of *label_list*.

    The first 9 entries are masked to zero; the remaining 6 are read as a
    big-endian binary index into *label_list*. An out-of-range index maps
    to -1 for numeric label lists and to the string 'None' otherwise.
    """
    if torch.cuda.is_available():
        dev = torch.device("cuda:{}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    else:
        dev = torch.device("cpu")
    # Only the trailing 6 positions carry index bits.
    mask = torch.tensor([0] * 9 + [1] * 6, device=dev)
    bits = ''.join(str(int(b)) for b in mask * vector)
    index = int(bits, 2)
    if index >= len(label_list):
        return -1 if isinstance(label_list[0], int) else 'None'
    return label_list[index]


def infer_func_type_and_param(trace_data: List, has_shape=True, has_value=True) -> Tuple[str, Dict]:
    """Using the neural network to infer the type and parameter of operators

    :param trace_data: A list of basic blocks, each element is a list of tokens (e.g. mov, rax, r12, 0x12)
    :type trace_data: List
    :param has_shape: A flag to control the shape info, defaults to True
    :type has_shape: bool, optional
    :param has_value: A flag to control the value sequence info, defaults to True
    :type has_value: bool, optional
    :return: operator type and operator param
    :rtype: Tuple[str, Dict]
    """
    # NOTE(review): the caller-supplied flag is overridden here, so value
    # sequences are never fed to the network — confirm this is intentional.
    has_value = False
    device = torch.device("cuda:{}".format(os.environ["CUDA_VISIBLE_DEVICES"]) if torch.cuda.is_available() else "cpu")
    # Build and cache the classifier on first use (see NetStorage).
    if NetStorage.func_type_net is None:
        # 11 for the number of slots, each slot is a binary vector
        net = CLSNetV3(
            in_d=200,
            out_classes=len(atom_list) + SLOT_LENGTH * SLOT_COUNT,
            # out_classes=SLOT_LENGTH * SLOT_COUNT,
            hiden=200,
            num_layers=2,
            # has_shape=has_shape,
            has_shape=False,
            has_value=has_value)
        net.to(device)
        # Checkpoints may come from DataParallel; strip the 'module.' prefix.
        net.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(TYPE_CLASSIFIER_PATH).items()})
        net.eval()
        NetStorage.func_type_net = net

    net = NetStorage.func_type_net
    pred = None
    pred = net([trace_data[0]],
               device,
               shape_info=trace_data[2] if has_shape else None,
               value_data=[trace_data[1]] if has_value else None)
    pred = pred[0]
    # The first len(atom_list) outputs are type logits; the type is the argmax.
    functype = atom_list[pred[:len(atom_list)].topk(1)[1][0]]

    pred_min, pred_max = torch.min(pred[:len(atom_list)]), torch.max(pred[:len(atom_list)])

    # Top-10 candidate types with min-max-normalized scores, for logging and
    # for the 'topk' entry of the returned param dict.
    topk = 10
    topk_pred = [(atom_list[x[1]], (x[0] - pred_min) / (pred_max - pred_min))
                 for x in zip(*pred[:len(atom_list)].topk(topk))]

    logger.debug("Topk: {}".format(["{}[{}]".format(x[0], x[1]) for x in topk_pred]))

    # Binarize the slot outputs (0/1 per entry) for parameter decoding.
    pred = pred.round()
    pred = pred.clip(0, 1)
    # We need a magic threshold
    # thres = 0.5
    # logger.debug(pred[:SLOT_LENGTH])
    # pred[pred > thres] = 1
    # pred[pred <= thres] = 0
    # functype = decode(pred[:SLOT_LENGTH], atom_list)

    # Per-type attribute layout: each (name, labels) pair consumes one
    # SLOT_LENGTH-wide slot of the prediction vector, in order.
    param = {}
    attr_list = []
    if functype == "conv2d":
        attr_list = [('padding', PADDING_LABEL), ('filters', FILTERS_LABEL), ('strides', STRIDE_LABELS),
                     ('strides', STRIDE_LABELS), ('kernel_size', KERNEL_SIZE), ('depth_multiplier', DEPTH_MULTIPLIER)]
    elif functype == "conv3d":
        attr_list = [('padding', PADDING_LABEL), ('filters', FILTERS_LABEL_LARGE), ('strides', STRIDE_LABELS),
                     ('strides', STRIDE_LABELS), ('strides', STRIDE_LABELS), ('kernel_size', KERNEL_SIZE_LARGE)]
    elif functype == "upsampling":
        attr_list = [('size', SAMPLING_SIZE), ('interpolation', INTERPOLATION_LABEL)]
    elif functype == "upsampling3d":
        attr_list = [('size', SAMPLING_SIZE)]
    elif functype == "pad":
        attr_list = [('padding', PADDING_SIZE)]
    elif functype == "max_pool2d" or functype == "max_pool3d" or functype == "avg_pool2d" or functype == "avg_pool3d":
        attr_list = [('pool_size', POOLING_SIZE), ('padding', PADDING_LABEL)]
    elif functype == "strided_slice":
        attr_list = [('cropping', CROPPING_SIZE), ('cropping', CROPPING_SIZE), ('cropping', CROPPING_SIZE),
                     ('cropping', CROPPING_SIZE)]

    # for i, (attr, label_list) in enumerate(attr_list):
    #     if attr not in param:
    #         param[attr] = decode(pred[(i + 1) * SLOT_LENGTH:(i + 2) * SLOT_LENGTH], label_list)
    #     else:
    #         if not isinstance(param[attr], List):
    #             param[attr] = [param[attr]]
    #         param[attr].append(decode(pred[(i + 1) * SLOT_LENGTH:(i + 2) * SLOT_LENGTH], label_list))

    for i, (attr, label_list) in enumerate(attr_list):
        if attr not in param:
            # NOTE(review): slots are sliced at a +1 offset from the slot
            # boundary (i * SLOT_LENGTH + 1); confirm against the layout used
            # during training (the commented variant above uses no offset).
            param[attr] = decode(pred[i * SLOT_LENGTH + 1:(i + 1) * SLOT_LENGTH + 1], label_list)
        else:
            # A repeated attribute name (e.g. 'strides') accumulates a list.
            if not isinstance(param[attr], List):
                param[attr] = [param[attr]]
            param[attr].append(decode(pred[i * SLOT_LENGTH + 1:(i + 1) * SLOT_LENGTH + 1], label_list))

    param["topk"] = [x[0] for x in topk_pred]

    del net
    torch.cuda.empty_cache()
    return functype, param


def infer_func_type_and_param_arm(trace_data: List, has_shape=True, has_value=True) -> Tuple[str, Dict]:
    """Using the neural network to infer the type and parameter of operators

    :param trace_data: A list of basic blocks, each element is a list of tokens (e.g. mov, rax, r12, 0x12)
    :type trace_data: List
    :param has_shape: A flag to control the shape info, defaults to True
    :type has_shape: bool, optional
    :param has_value: A flag to control the value sequence info, defaults to True
    :type has_value: bool, optional
    :return: operator type and operator param
    :rtype: Tuple[str, Dict]

    NOTE(review): unlike the x86 variant above, ``has_shape``/``has_value``
    are accepted but never used — the net is built with both disabled and the
    returned param dict contains only the 'topk' candidate list.
    """
    device = torch.device("cuda:{}".format(os.environ["CUDA_VISIBLE_DEVICES"]) if torch.cuda.is_available() else "cpu")
    # Build and cache the classifier on first use (see NetStorage).
    if NetStorage.func_type_net is None:
        # 11 for the number of slots, each slot is a binary vector
        # net = CLSNetV7(
        #     in_d=200,
        #     out_classes=len(atom_list),
        #     hiden=200,
        #     num_layers=2,
        # )
        net = CLSNetV3(in_d=200, out_classes=len(atom_list), hiden=200, num_layers=2, has_shape=False, has_value=False)

        net.to(device)
        # Strip DataParallel's 'module.' prefix; map_location permits loading
        # a GPU-trained checkpoint on a CPU-only machine.
        net.load_state_dict(
            {k.replace('module.', ''): v
             for k, v in torch.load(TYPE_CLASSIFIER_PATH, map_location=device).items()})
        net.eval()
        NetStorage.func_type_net = net

    net = NetStorage.func_type_net
    pred = None
    pred = net([trace_data[0]], device)
    pred = pred[0]
    # Operator type = argmax over the atom-type logits.
    functype = atom_list[pred[:len(atom_list)].topk(1)[1][0]]

    pred_min, pred_max = torch.min(pred[:len(atom_list)]), torch.max(pred[:len(atom_list)])

    # Top-30 candidate types with min-max-normalized scores.
    topk = 30
    topk_pred = [(atom_list[x[1]], (x[0] - pred_min) / (pred_max - pred_min))
                 for x in zip(*pred[:len(atom_list)].topk(topk))]

    logger.debug("Topk: {}".format(["{}[{}]".format(x[0], x[1]) for x in topk_pred]))

    pred = pred.round()
    pred = pred.clip(0, 1)
    # No per-parameter slots for ARM; only the candidate list is returned.
    param = {}
    param["topk"] = [x[0] for x in topk_pred]

    del net
    torch.cuda.empty_cache()
    return functype, param


def infer_func_type(trace_data: List) -> Tuple[str, float]:
    """Return a function type and its confidence score by analyzing the input data

    :param trace_data: A data list, [instruction sequence, value sequence, shape sequence]
    :type trace_data: List
    :return: A function type string and the top softmax confidence
        (a numpy scalar extracted from the prediction tensor)
    :rtype: Tuple[str, float]

    NOTE(review): the original annotation said ``-> str`` but the function
    returns ``(func_type, conf)``; the annotation above reflects the code.
    """
    # Fresh (non-cached) coarse classifier over Keras-level operator types.
    net = CLSNet(in_d=200, out_classes=len(func_type_list), hiden=200, num_layers=2)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)
    # NOTE(review): TYPE_CLASSIFIER_PATH currently points at a CLSNetV3 [ARM]
    # checkpoint (see constant above) — confirm it matches this CLSNet layout.
    net.load_state_dict(torch.load(TYPE_CLASSIFIER_PATH))
    net.eval()
    pred = None
    pred = net(trace_data, device)

    # Confidence is the highest softmax probability over all classes.
    conf = (torch.nn.Softmax()(pred[0])).topk(1)[0][0].cpu().detach().numpy()
    pred = pred[0].topk(1)[1][0]
    pred = pred.cpu().detach().numpy()
    func_type = func_type_list[int(pred)]
    logger.debug("Pred result: {}, Confidence Score: {}".format(func_type, conf))
    return func_type, conf


def infer_param(trace_data, model_path, label_list, has_shape=False, has_value=False):
    """Classify a single operator parameter with a dedicated CLSNet checkpoint.

    :param trace_data: [instruction sequence, value sequence, shape sequence]
    :param model_path: path to the checkpoint trained for this parameter
    :param label_list: candidate values; the argmax class indexes into it
    :param has_shape: feed the shape sequence to the network when True
    :param has_value: feed the value sequence to the network when True
    :return: the label_list entry selected by the classifier
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    classifier = CLSNet(
        in_d=200,
        out_classes=len(label_list),
        hiden=200,
        num_layers=2,
        has_shape=has_shape,
        has_value=has_value,
    )
    classifier.to(device)
    classifier.load_state_dict(torch.load(model_path))
    classifier.eval()
    # Optional side inputs are passed only when the matching flag is set.
    shape_info = trace_data[2] if has_shape else None
    value_data = [trace_data[1]] if has_value else None
    scores = classifier([trace_data[0]], device, shape_info=shape_info, value_data=value_data)
    best = scores[0].topk(1)[1][0].cpu().detach().numpy()
    return label_list[int(best)]


def dfs_check(node: Node, cur_node_list: List[Node], type_list: List[str]) -> List[Node]:
    """Depth-first search for a node chain whose atomic types match *type_list*.

    :param node: Current node, tested against type_list[len(cur_node_list)]
    :param cur_node_list: Nodes matched so far along this path
    :param type_list: Sequence of atomic type names to match in order
    :return: The full matched chain (length == len(type_list)) on success;
        a shorter list otherwise — callers must check the length to
        distinguish success from failure.
    """
    if node.dfs_skip:
        return []
    # NOTE(review): `ans` aliases the partial path and is returned when no
    # full match is found below; it is never a complete chain by itself.
    ans = cur_node_list
    pos = len(cur_node_list)
    if pos == (len(type_list) - 1):
        # Last pattern element: either complete the chain here or fail.
        if node.atomic_type == type_list[pos]:
            return cur_node_list + [node]
        else:
            return []

    if node.atomic_type == type_list[pos]:
        # Consume this node and try to extend the match through each successor.
        for tmp_node in node.next_nodes:
            tmp_ans = dfs_check(tmp_node, cur_node_list + [node], type_list)
            if len(tmp_ans) == len(type_list):
                return tmp_ans
    # Skip some nop nodes
    if "nop" in node.name:
        # Pass through the nop without consuming a pattern element.
        for tmp_node in node.next_nodes:
            tmp_ans = dfs_check(tmp_node, cur_node_list, type_list)
            if len(tmp_ans) == len(type_list):
                return tmp_ans
    return ans


def infer_func_param(data: List, func_type: str) -> Dict:
    """Infer the parameter of a function and return a parameter dict

    Dispatches on *func_type* and runs one `infer_param` classifier per
    parameter; each classifier has its own checkpoint, keyed by training date
    via `get_checkpoint_path`.

    :param data: An array of input data, [instruction sequence, value sequence, shape sequence]
    :type data: List
    :param func_type: A string of the function type which can be inferred by `infer_func_type`
    :type func_type: str
    :return: Parameter Dict
    :rtype: Dict
    """
    func_param = {}
    # Some newer checkpoints (pooling, Dense) were trained on the re-extracted
    # trace format; the others use the legacy format.
    trace_data_new, trace_data = data

    if func_type == "Conv2D":
        func_param["padding"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-09-16-14-34-32", 399), ["same", "valid"],
                                            has_shape=True)
        func_param["activation"] = infer_param(trace_data,
                                               get_checkpoint_path("2021-09-20-14-33-46", 399), [
                                                   'softmax', 'softplus', 'sigmoid', 'elu', 'softsign', 'linear',
                                                   'tanh', 'relu', 'selu', None, 'hard_sigmoid'
                                               ],
                                               has_shape=True,
                                               has_value=True)
        func_param["filters"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-10-15-19-06-33", 40), [32, 128, 512],
                                            has_shape=True)
        func_param["strides"] = [1, 1]
        func_param["strides"][0] = infer_param(trace_data,
                                               get_checkpoint_path("2021-10-16-13-44-22", 100), [1, 3, 5, 7],
                                               has_shape=True)
        func_param["strides"][1] = infer_param(trace_data,
                                               get_checkpoint_path("2021-10-16-13-45-41", 100), [1, 3, 5, 7],
                                               has_shape=True)
        func_param["kernel_size"] = infer_param(trace_data,
                                                get_checkpoint_path("2021-09-23-17-38-52", 399), [1, 3, 5],
                                                has_shape=True,
                                                has_value=True)
        func_param["dilation_rate"] = [1, 1]
        # Dilation is only predicted when strides are (1, 1).
        if func_param["strides"][0] == 1 and func_param["strides"][1] == 1:
            func_param["dilation_rate"][0] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-24-21-50-42", 799), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
            func_param["dilation_rate"][1] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-21-14-31-28", 399), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
    elif func_type == "SeparableConv2D":
        func_param["padding"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-09-22-21-30-06", 399), ["same", "valid"],
                                            has_shape=True,
                                            has_value=True)
        func_param["activation"] = infer_param(trace_data,
                                               get_checkpoint_path("2021-09-20-14-34-23", 399), [
                                                   'softmax', 'softplus', 'sigmoid', 'elu', 'softsign', 'linear',
                                                   'tanh', 'relu', 'selu', None, 'hard_sigmoid'
                                               ],
                                               has_shape=True,
                                               has_value=True)
        func_param["filters"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-10-15-21-40-39", 100), [32, 128, 512],
                                            has_shape=True,
                                            has_value=True)
        func_param["strides"] = [1, 1]
        func_param["strides"][0] = infer_param(trace_data,
                                               get_checkpoint_path("2021-10-15-21-41-40", 120), [1, 5, 9],
                                               has_shape=True,
                                               has_value=True)
        func_param["strides"][1] = infer_param(trace_data,
                                               get_checkpoint_path("2021-10-15-21-42-57", 120), [1, 5, 9],
                                               has_shape=True,
                                               has_value=True)
        func_param["kernel_size"] = infer_param(trace_data,
                                                get_checkpoint_path("2021-10-15-21-48-27", 150), [1, 3, 5],
                                                has_shape=True)
        func_param["depth_multiplier"] = infer_param(trace_data,
                                                     get_checkpoint_path("2021-10-15-21-49-48", 200), [1, 5, 6, 9],
                                                     has_shape=True)

        func_param["dilation_rate"] = [1, 1]
        # Dilation is only predicted when strides are (1, 1).
        if func_param["strides"][0] == 1 and func_param["strides"][1] == 1:
            func_param["dilation_rate"][0] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-22-18-56-54", 399), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
            func_param["dilation_rate"][1] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-22-18-57-23", 399), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
    elif func_type == "DepthwiseConv2D":
        func_param["padding"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-10-15-19-32-04", 150), ["same", "valid"],
                                            has_shape=True)
        func_param["activation"] = infer_param(trace_data,
                                               get_checkpoint_path("2021-09-20-14-35-01", 399), [
                                                   'softmax', 'softplus', 'sigmoid', 'elu', 'softsign', 'linear',
                                                   'tanh', 'relu', 'selu', None, 'hard_sigmoid'
                                               ],
                                               has_shape=True,
                                               has_value=True)
        func_param["strides"] = [1, 1]
        # NOTE(review): only strides[0] is predicted here; strides[1] stays 1
        # (the classifier call for it is commented out below).
        func_param["strides"][0] = infer_param(trace_data,
                                               get_checkpoint_path("2021-10-15-19-34-42", 150), [1, 5, 9],
                                               has_shape=True)
        # func_param["strides"][1] = infer_param(trace_data,
        #                                        get_checkpoint_path("2021-09-14-14-14-36", 399), [1, 5, 9],
        #                                        has_shape=True)
        func_param["kernel_size"] = infer_param(trace_data,
                                                get_checkpoint_path("2021-10-15-21-51-03", 399), [1, 3, 5],
                                                has_shape=True)
        func_param["dilation_rate"] = [1, 1]
        # Dilation is only predicted when strides are (1, 1).
        if func_param["strides"][0] == 1 and func_param["strides"][1] == 1:
            func_param["dilation_rate"][0] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-22-21-35-53", 799), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
            func_param["dilation_rate"][1] = infer_param(trace_data,
                                                         get_checkpoint_path("2021-09-22-21-38-40", 799), [1, 3, 5, 7],
                                                         has_shape=True,
                                                         has_value=True)
        func_param["depth_multiplier"] = infer_param(trace_data,
                                                     get_checkpoint_path("2021-10-16-13-21-07", 100), [1, 5, 9],
                                                     has_shape=True,
                                                     has_value=True)
    elif func_type == "Cropping2D":
        # Four independent classifiers, one per cropping edge.
        func_param["cropping"] = [1, 1, 1, 1]
        func_param["cropping"][0] = infer_param(trace_data,
                                                get_checkpoint_path("2021-09-18-18-53-33", 799), [x for x in range(7)],
                                                has_shape=True,
                                                has_value=True)
        func_param["cropping"][1] = infer_param(trace_data,
                                                get_checkpoint_path("2021-09-22-18-53-03", 799),
                                                [x + 1 for x in range(6)],
                                                has_shape=True,
                                                has_value=True)
        func_param["cropping"][2] = infer_param(trace_data,
                                                get_checkpoint_path("2021-10-15-19-22-55", 250),
                                                [x + 1 for x in range(6)],
                                                has_shape=True)
        func_param["cropping"][3] = infer_param(trace_data,
                                                get_checkpoint_path("2021-09-09-22-39-29", 399), [x for x in range(7)],
                                                has_shape=True)
    elif func_type == "UpSampling2D":
        size_list = [i for i in range(1, 11)]
        func_param["size"] = infer_param(trace_data,
                                         get_checkpoint_path("2021-10-15-20-30-34", 399),
                                         size_list,
                                         has_shape=True,
                                         has_value=True)
        func_param["interpolation"] = infer_param(trace_data,
                                                  get_checkpoint_path("2021-10-15-21-00-08", 20),
                                                  ["bilinear", "nearest"],
                                                  has_shape=True,
                                                  has_value=True)
    elif func_type == "UpSampling3D":
        size_list = [i for i in range(1, 11)]
        func_param["size"] = infer_param(trace_data,
                                         get_checkpoint_path("2021-10-15-21-04-12", 150),
                                         size_list,
                                         has_shape=True,
                                         has_value=True)
        pass
    elif func_type == "ZeroPadding3D":
        padding_list = [i for i in range(1, 11)]
        func_param["padding"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-10-14-22-31-23", 399),
                                            padding_list,
                                            has_shape=True,
                                            has_value=True)
    elif func_type == "ZeroPadding2D":
        padding_list = [i for i in range(1, 11)]
        func_param["padding"] = infer_param(trace_data,
                                            get_checkpoint_path("2021-09-18-16-50-09", 799),
                                            padding_list,
                                            has_shape=True,
                                            has_value=True)
    elif func_type == "MaxPooling2D" or func_type == "MaxPooling3D":
        # Pooling checkpoints were trained on the new trace format.
        pool_size_list = [i for i in range(1, 11)]
        func_param["pool_size"] = infer_param(trace_data_new,
                                              get_checkpoint_path("2021-10-22-16-38-50", 399),
                                              pool_size_list,
                                              has_shape=True,
                                              has_value=True)
        func_param["padding"] = infer_param(trace_data_new,
                                            get_checkpoint_path("2021-10-22-16-40-57", 399), ["same", "valid"],
                                            has_shape=True,
                                            has_value=True)
    elif func_type == "Dense":
        # Dense checkpoint was trained on the new trace format.
        func_param["activation"] = infer_param(trace_data_new,
                                               get_checkpoint_path("2021-10-23-18-39-29", 200), [
                                                   'softmax', 'softplus', 'sigmoid', 'elu', 'softsign', 'linear',
                                                   'tanh', 'relu', 'selu', None, 'hard_sigmoid'
                                               ],
                                               has_shape=True,
                                               has_value=True)
    else:
        logger.error("Unseen Function: {}".format(func_type))

    return func_param


def check_in_shape(node: "Node") -> None:
    """Sanity-check a node's predicted atomic type against its observed shapes.

    Sets ``node.has_wrong_pred`` and ``node.wrong_pred_category`` in place when
    the predicted operator is inconsistent with the input rank, the number of
    inputs, or the input/output shape relation. Returns nothing.

    :param node: The graph node to validate; mutated in place.
    """
    # Operators whose (first) input must be rank-4 / rank-5 respectively.
    ops_shape_2d = ["conv2d", "max_pool2d", "avg_pool2d", "global_max_pool2d", "global_avg_pool2d", "upsampling"]
    ops_shape_3d = ["conv3d", "max_pool3d", "avg_pool3d", "adaptive_max_pool3d", "adaptive_avg_pool3d", "upsampling3d"]

    if node.atomic_type == "avg_pool3d":
        if len(node.in_shape[0]) != 5:
            node.has_wrong_pred = True
            node.wrong_pred_category = node.INPUT_DIM_WRONG
            # node.atomic_type = 'avg_pool2d'
    elif node.atomic_type == "max_pool3d":
        logger.debug("Checking {} ...".format(node.atomic_type))
        if len(node.in_shape[0]) != 5:
            node.has_wrong_pred = True
            # logger.debug("Fixed max_pool3d!")
            node.wrong_pred_category = node.INPUT_DIM_WRONG
            # node.atomic_type = 'max_pool2d'
    if node.atomic_type in ops_shape_2d and len(node.in_shape[0]) != 4:
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_DIM_WRONG
    elif node.atomic_type in ops_shape_3d and len(node.in_shape[0]) != 5:
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_DIM_WRONG

    # Check the number of input node
    ops_multi_input = ["divide", "concatenate", "batch_matmul", "subtract", "add", "bias_add", "multiply", "greater"]
    if node.atomic_type in ops_multi_input and len(node.father_nodes) < 2:
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_NUM_WRONG

    # Single-input, weight-free operators must not have >= 2 non-extra inputs.
    ops_single_input_no_weight = [
        "abs", "expand_dims", "negative", "upsampling", "global_max_pool2d", "minimum", "relu", "global_avg_pool2d",
        "exp", "cast", "clip", "upsampling3d", "tanh", "max_pool3d", "log", "strided_slice", "pad", "avg_pool3d",
        "max_pool2d", "maximum", "transpose", "sqrt", "sigmoid", "adaptive_avg_pool3d", "batch_flatten", "avg_pool2d",
        "softmax", "leaky_relu", "adaptive_max_pool3d"  # BUGFIX: was misspelled 'adpative_max_pool3d' (never matched)
    ]
    if node.atomic_type in ops_single_input_no_weight and len(node.father_nodes) - len(node.extra_input) >= 2:
        logger.debug("Single Input No Weight Error!")
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_NUM_WRONG

    ops_multi_input_single_weight = ["bias_add"]
    if node.atomic_type in ops_multi_input_single_weight and len(node.father_nodes) - len(node.extra_input) != 1:
        logger.debug("Multi Input Single Weight Error!")
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_NUM_WRONG

    # A bias_add must be fed by at least one conv/dense producer.
    if node.atomic_type == "bias_add" and sum(
        [fn.atomic_type in ["conv2d", "conv3d", "dense"] for fn in node.father_nodes]) == 0:
        logger.debug("BiasAdd Wrong!")
        node.has_wrong_pred = True
        node.wrong_pred_category = node.INPUT_NUM_WRONG

    # Elementwise unary operators must preserve the input shape exactly.
    ops_single_in_no_change_shape = [
        "abs", "negative", "minimum", "relu", "exp", "cast", "clip", "tanh", "log", "maximum", "sqrt", "sigmoid",
        "softmax", "leaky_relu"
    ]
    if node.atomic_type in ops_single_in_no_change_shape:
        if len(node.in_shape) != 0 and (len(node.in_shape[0]) != len(node.out_shape) or sum(
            [node.in_shape[0][i] != node.out_shape[i] for i in range(len(node.out_shape))]) != 0):
            logger.debug("Shape Change Error!")
            node.has_wrong_pred = True
            node.wrong_pred_category = node.SHAPE_CHANGE_WRONG

    # Elementwise binary ops: at least one input shape must equal the output.
    ops_multi_in_no_change_shape = ["add", "divide", "subtract"]
    if node.atomic_type in ops_multi_in_no_change_shape and len(node.in_shape) > 1:
        tmp_in_shape = node.in_shape[0]
        if len(tmp_in_shape) != len(node.out_shape):
            # BUGFIX: was `node.out_shape[1]` — an int, which would raise
            # TypeError in the zip below; fall back to the second *input* shape.
            tmp_in_shape = node.in_shape[1]
        if sum([x != y for x, y in zip(tmp_in_shape, node.out_shape)]) != 0:
            node.has_wrong_pred = True
            node.wrong_pred_category = node.SHAPE_CHANGE_WRONG

    if node.atomic_type == "multiply" and len(node.in_shape) > 1:
        # Equal input shapes must propagate unchanged to the output ...
        if node.in_shape[0] == node.in_shape[1] and sum([x != y
                                                         for x, y in zip(node.in_shape[0], node.out_shape)]) != 0:
            node.has_wrong_pred = True
            node.wrong_pred_category = node.SHAPE_CHANGE_WRONG
        # ... and rank-4 inputs differing in exactly one axis are flagged.
        if len(node.in_shape[0]) == 4 and len(node.in_shape[0]) == len(node.in_shape[1]) and sum(
            [x != y for x, y in zip(node.in_shape[0], node.in_shape[1])]) == 1:
            node.has_wrong_pred = True
            node.wrong_pred_category = node.SHAPE_CHANGE_WRONG


def check_weight_dim(node: Node) -> None:
    """Validate the rank of the weight tensors attached to a node.

    A ``conv2d`` kernel must be 4-D and a ``dense`` kernel 2-D; for both, an
    optional bias must be 1-D.  On any mismatch the node is flagged in place
    via ``has_wrong_pred`` / ``wrong_pred_category = WEIGHT_DIM_WRONG``.
    Nodes of other atomic types are ignored.

    :param node: Node whose ``extra_input`` holds the candidate weight nodes.
    """
    if node.atomic_type not in ("conv2d", "dense") or len(node.extra_input) == 0:
        return

    # Weight tensors are the extra inputs that are not graph inputs.
    weight_nodes = [x for x in node.extra_input if not x.is_input]
    if not weight_nodes:
        # Every extra input is a graph input, so there is nothing to check.
        # (Previously this case crashed with IndexError on weight_nodes[0].)
        return

    kernel_weight_shape, bias_weight_shape = None, None
    if len(weight_nodes) == 1:
        kernel_weight_shape = weight_nodes[0].out_shape
    else:
        # With two weights, the 1-D tensor is the bias and the other is the
        # kernel/matrix; their order in extra_input is not guaranteed.
        if len(weight_nodes[0].out_shape) == 1:
            kernel_weight_shape = weight_nodes[1].out_shape
            bias_weight_shape = weight_nodes[0].out_shape
        else:
            kernel_weight_shape = weight_nodes[0].out_shape
            bias_weight_shape = weight_nodes[1].out_shape

    if node.atomic_type == "dense":
        logger.debug("Mat Weight Shape: {}".format(kernel_weight_shape))
        logger.debug("Mat Bias Shape: {}".format(bias_weight_shape))

    # conv2d kernels are 4-D (e.g. OIHW); dense matrices are 2-D.
    expected_kernel_rank = 4 if node.atomic_type == "conv2d" else 2
    if kernel_weight_shape is not None and len(kernel_weight_shape) != expected_kernel_rank:
        node.has_wrong_pred = True
        node.wrong_pred_category = node.WEIGHT_DIM_WRONG
    if bias_weight_shape is not None and len(bias_weight_shape) != 1:
        node.has_wrong_pred = True
        node.wrong_pred_category = node.WEIGHT_DIM_WRONG


class Parser:
    """Static utilities for decompiling a TVM graph JSON dump.

    The methods here (all staticmethods) load the JSON node/shape arrays,
    build an internal :class:`Graph`, merge/rename nodes toward a target
    framework's layer vocabulary (Keras), and sanity-check or GCN-correct
    the per-node operator predictions.
    """
    def __init__(self) -> None:
        # All functionality is exposed as staticmethods; instances hold no state.
        pass

    @staticmethod
    def parse_one_line(line):
        """Parse one x86 assembly operand string into a token list.

        Args:
            line (str): The operands string (e.g. ``"ymm0, ymmword ptr [rax+rbx*4]"``)

        Returns:
            list: A token list
        """
        # Drop memory-size specifiers before tokenizing.  The replacement
        # order matters: "word ptr" is a substring of the longer specifiers,
        # so it must be removed last.
        if "ymmword ptr" in line:
            line = line.replace("ymmword ptr", "")
        if "zmmword ptr" in line:
            line = line.replace("zmmword ptr", "")
        if "qword ptr" in line:
            line = line.replace("qword ptr", "")
        if "dword ptr" in line:
            line = line.replace("dword ptr", "")
        if "word ptr" in line:
            line = line.replace("word ptr", "")

        line = line.strip()
        tokens = []
        # pyparsing grammar; `elem * (0, n)` means "0 to n repetitions".
        nums = pp.Word(pp.nums)
        # Hex literals in either "0x1f" or "1fh" spelling.
        hexnums = pp.Combine("0x" + pp.Word(pp.hexnums)) * \
            (0, 1) + pp.Combine(pp.Word(pp.hexnums) + "h")*(0, 1)
        # "k<digit>" — presumably an AVX-512 opmask register (used below
        # inside "{...}"); TODO confirm against the disassembler output.
        s_nums = pp.Combine("k" + nums)
        numbers = hexnums * (0, 1) + nums * (0, 1)
        arithOp = pp.oneOf("+ - * /")
        single_reg = pp.Combine(pp.Word(pp.alphas) + nums * (0, 2))
        # A register, optionally followed by a bracketed address expression
        # ("[base+index*scale+disp]") and/or an opmask suffix ("{k1}").
        reg = single_reg*(0, 1) \
            + ("[" + single_reg + ((arithOp + single_reg)*(0, 2)
               + (arithOp + numbers)*(0, 1))*(0, 2) + "]")*(0, 1) + ("{" + s_nums + "}")*(0, 1)
        reg_or_numbers = reg * (0, 1) + numbers * (0, 1)
        # Up to three comma-separated operands.
        expr = reg_or_numbers + ("," + reg_or_numbers) * (0, 2)
        tokens = [x for x in expr.parseString(line)]
        return tokens

    @staticmethod
    def render_graph(node_arr):
        """Render the graph into a pdf file through the node_arr

        Args:
            node_arr (array): The 'nodes' array from the json file
        """
        dot = Digraph(comment='The structure after decompiling data from tvm')
        count = 0
        c_node_dict = {}  # maps running node index -> rendered node name
        for node in node_arr:
            # Skip some nodes
            if node['op'] == 'null':
                continue
            name = node['name']
            inputs = node['inputs']
            dot.node(name, name)
            count += 1
            # NOTE(review): inputs referencing indices not seen yet appear to
            # consume an index slot here — presumably to stay aligned with
            # TVM's node numbering that includes 'null' nodes; confirm.
            for inp in inputs:
                idx = inp[0]
                if idx not in c_node_dict and idx > 0:
                    count += 1

            c_node_dict[count] = name

            # Add Edges
            for inp in inputs:
                idx = inp[0]
                if idx in c_node_dict:
                    dot.edge(c_node_dict[idx], name)

        dot.render('test-output')

    @staticmethod
    def parse_graph(path: str):
        """Get the node array from the json file

        Args:
            path (str): The path of the json file

        Returns:
            array: The json array of nodes
            array: The param shape array for each nodes
        """
        node_arr = []
        shape_arr = []
        with open(path, "r") as json_file:
            data = json.load(json_file)
            logger.info('The json file has {} elements: '.format(len(data)))
            for item in data:
                logger.info('[{}]'.format(item))
            node_arr = data['nodes']
            # Shapes live under attrs/shape; index [1] selects the shape
            # list itself (index [0] is presumably the storage-id header —
            # TODO confirm against the TVM graph-JSON layout).
            shape_arr = data['attrs']['shape'][1]
            logger.info('{} nodes have been loaded!'.format(len(node_arr)))
        return node_arr, shape_arr

    @staticmethod
    def convert(node_arr: List, shape_arr: List) -> Graph:
        """Convert these node array and shape array into a graph

        'null' nodes become extra/input leaf nodes; all other nodes get
        father/next edges resolved through their input indices.

        :param node_arr: An array of nodes
        :type node_arr: List
        :param shape_arr: An array of shapes
        :type shape_arr: List
        :return: A computation graph
        :rtype: Graph
        """
        graph = Graph()
        count = 0
        c_node_dict = {}  # maps node index -> node name, for edge resolution

        for node, shape in zip(node_arr, shape_arr):
            # Skip some nodes
            name = node["name"]
            if "attrs" in node:
                # Disambiguate fused-function names with the running index.
                name = "{}-{}".format(node['attrs']['func_name'], count)

            # 'null' nodes whose name starts with 'p' are parameters (weights);
            # other 'null' nodes are the real graph inputs.
            if node['op'] == 'null' and name.startswith('p'):
                tmp_node = Node(name, 0, shape)
                tmp_node.is_extra = True
                graph = graph.add_node(tmp_node)
                c_node_dict[count] = name
                count += 1
                continue
            elif node['op'] == 'null':
                tmp_node = Node(name, 0, shape)
                tmp_node.is_input = True
                tmp_node.is_extra = True
                graph = graph.add_node(tmp_node)
                c_node_dict[count] = name
                count += 1
                continue

            inputs = node['inputs']
            tmp_node = Node(name, int(node['attrs']['num_inputs']), shape)
            tmp_node.attrs = node["attrs"]

            c_node_dict[count] = name
            count += 1

            # Add Edges
            for inp in inputs:
                idx = inp[0]
                if idx in c_node_dict:
                    father_node = graph.find_node_by_name(c_node_dict[idx])
                    # logger.debug(tmp_node.name + ":" + c_node_dict[idx])

                    tmp_node.add_father_node(father_node)
                    father_node.add_next_node(tmp_node)

            graph = graph.add_node(tmp_node)

        return graph

    @staticmethod
    def analyzing_node(node: Node, trace_data: List, next_trace_data: List) -> None:
        """Infer a node's function type/params from its (and optionally its
        successor's) trace data, mutating the node in place.

        If classifying this node together with the next node's trace yields
        at least the same confidence, the two are treated as one fused layer
        and the successor is marked ``unknown`` so it is skipped later.
        """
        if node.unknown:
            return
        # trace_data / next_trace_data are (data, _) pairs; only the first
        # element feeds the type classifier.
        trace_data_new, _ = trace_data
        next_trace_data_new, _ = next_trace_data
        node.function_type, confidence_1 = infer_func_type([trace_data_new[0]])
        if next_trace_data_new is not None:
            func_type, confidence_2 = infer_func_type([trace_data_new[0], next_trace_data_new[0]])
            if confidence_2 / confidence_1 >= 1:
                node.function_type = func_type
                nextnode = node.next_nodes[0]
                nextnode.unknown = True
                # node.next_nodes = nextnode.next_nodes

        node.function_param = infer_func_param(trace_data, node.function_type)
        return

    @staticmethod
    def analyzing_node_super(node: Node, trace_data: List) -> None:
        """Infer a node's atomic type and params in one shot (ARM variant),
        mutating the node in place; skips nodes already marked ``unknown``."""
        if node.unknown:
            return
        # trace_data_new, _ = trace_data
        # node.atomic_type, node.function_param = infer_func_type_and_param(trace_data)
        node.atomic_type, node.function_param = infer_func_type_and_param_arm(trace_data)
        # logger.debug(node.atomic_type)
        return

    @staticmethod
    def convert_to_pytorch(graph: Graph) -> Graph:
        # PyTorch needs no node merging: the atomic ops map 1:1.
        return graph

    @staticmethod
    def convert_to_keras(graph: Graph) -> Graph:
        """Convert the tvm graph to the keras graph. We try to merge nodes in this function

        :param graph: tvm graph after the inference of NN
        :type graph: Graph
        :return: Keras Graph
        :rtype: Graph
        """

        # Merge Global Pooling Operators
        # pool + batch_flatten collapses into a keras Global*Pooling layer;
        # a plain pool only qualifies when its spatial output is 1x1.
        merge_tasks = []
        for node in graph.node_set:
            if (node.atomic_type == "global_max_pool2d" or node.atomic_type == "max_pool2d") and len(
                    node.next_nodes) == 1 and node.next_nodes[0].atomic_type == "batch_flatten":
                if node.atomic_type == "max_pool2d" and (node.out_shape[1] != 1 or node.out_shape[2] != 1):
                    continue
                merge_tasks.append([node, node.next_nodes[0], 'GlobalMaxPooling2D'])
            if node.atomic_type == "adaptive_max_pool3d" and len(
                    node.next_nodes) == 1 and node.next_nodes[0].atomic_type == "batch_flatten":
                merge_tasks.append([node, node.next_nodes[0], 'GlobalMaxPooling3D'])
            if (node.atomic_type == "global_avg_pool2d" or node.atomic_type == "avg_pool2d") and len(
                    node.next_nodes) == 1 and node.next_nodes[0].atomic_type == "batch_flatten":
                if node.atomic_type == "avg_pool2d" and (node.out_shape[1] != 1 or node.out_shape[2] != 1):
                    continue
                merge_tasks.append([node, node.next_nodes[0], 'GlobalAveragePooling2D'])
            if node.atomic_type == "adaptive_avg_pool3d" and len(
                    node.next_nodes) == 1 and node.next_nodes[0].atomic_type == "batch_flatten":
                merge_tasks.append([node, node.next_nodes[0], 'GlobalAveragePooling3D'])

        for task in merge_tasks:
            node_1, node_2, type_name = task
            merged_name = "{}-{}".format(node_1.name, node_2.name)
            graph.merge_node(node_1, node_2, merged_name, type_name)

        # Merge BatchNormalization Layer
        # Four pattern variants, one per center/scale combination; each
        # subgraph match is collected as a merge task.
        bn_merge_task = []
        graph.build_networkx_graph()
        bn_graph = GraphPattern.get_bn_graph()
        bn_graph.build_networkx_graph()
        sub_graph_dict_list = graph.find_sub_graph(bn_graph.nx_graph)
        tmp_attrs = {'center': True, 'scale': True}
        for sub_graph_dict in sub_graph_dict_list:
            # Sort by pattern-node index so merged nodes keep pattern order.
            sub_graph_dict = dict(sorted(sub_graph_dict.items(), key=lambda x: x[1]))
            tmp_nodes = [graph.node_set[t_node] for t_node in sub_graph_dict]
            bn_merge_task.append([tmp_nodes, 'BatchNormalization', tmp_attrs])

        tmp_attrs = {'center': False, 'scale': True}
        bn_graph = GraphPattern.get_bn_graph_no_center()
        bn_graph.build_networkx_graph()
        sub_graph_dict_list = graph.find_sub_graph(bn_graph.nx_graph)
        for sub_graph_dict in sub_graph_dict_list:
            sub_graph_dict = dict(sorted(sub_graph_dict.items(), key=lambda x: x[1]))
            tmp_nodes = [graph.node_set[t_node] for t_node in sub_graph_dict]
            bn_merge_task.append([tmp_nodes, 'BatchNormalization', tmp_attrs])

        tmp_attrs = {'center': True, 'scale': False}
        bn_graph = GraphPattern.get_bn_graph_no_scale()
        bn_graph.build_networkx_graph()
        sub_graph_dict_list = graph.find_sub_graph(bn_graph.nx_graph)
        for sub_graph_dict in sub_graph_dict_list:
            sub_graph_dict = dict(sorted(sub_graph_dict.items(), key=lambda x: x[1]))
            tmp_nodes = [graph.node_set[t_node] for t_node in sub_graph_dict]
            bn_merge_task.append([tmp_nodes, 'BatchNormalization', tmp_attrs])

        tmp_attrs = {'center': False, 'scale': False}
        bn_graph = GraphPattern.get_bn_graph_no_center_no_scale()
        bn_graph.build_networkx_graph()
        sub_graph_dict_list = graph.find_sub_graph(bn_graph.nx_graph)
        for sub_graph_dict in sub_graph_dict_list:
            sub_graph_dict = dict(sorted(sub_graph_dict.items(), key=lambda x: x[1]))
            tmp_nodes = [graph.node_set[t_node] for t_node in sub_graph_dict]
            bn_merge_task.append([tmp_nodes, 'BatchNormalization', tmp_attrs])

        for task in bn_merge_task:
            name_list = []
            for node in task[0]:
                if node is None:
                    name_list.append("None")
                else:
                    name_list.append(node.name)
            logger.debug("Merging {} ...".format(name_list))
        for task in bn_merge_task:
            # A None member means the pattern match was incomplete; skip it.
            if None not in task[0]:
                merged_name = 'fused' + ''.join(["_" + n.name for n in task[0]])
                graph.merge_multi_nodes(task[0], merged_name, task[1], task[2])

        # Merge activation operators
        # dfs_check walks successors looking for the given atomic-type chain;
        # matched chains become a single activation layer.
        act_merge_task = []
        for node in graph.node_set:
            if node.function_type != 'Unknown':
                continue
            if node.atomic_type == 'multiply':
                nodes = dfs_check(node, [], ['multiply', 'add', 'clip'])
                if len(nodes) == 3:
                    act_merge_task.append([nodes, 'HardSigmoid'])
                    continue
            elif node.atomic_type == "exp":
                nodes = dfs_check(node, [], ['exp', 'add', 'log'])
                if len(nodes) == 3:
                    act_merge_task.append([nodes, 'Softplus'])
                    continue
                nodes = dfs_check(node, [], ['exp', 'subtract', 'relu', 'multiply', 'add', 'multiply'])
                if len(nodes) == 5:
                    # SELU also involves side branches (negative / relu) that
                    # feed the chain; splice them in at the right positions.
                    mul_father = None
                    for tmp_node in nodes[3].father_nodes:
                        if tmp_node.atomic_type == 'negative':
                            mul_father = tmp_node
                            break
                    nodes.insert(3, mul_father)
                    relu_father = None
                    for tmp_node in nodes[-2].father_nodes:
                        if tmp_node.atomic_type == 'relu':
                            relu_father = tmp_node
                            break
                    nodes.insert(4, relu_father)
                    act_merge_task.append([nodes, 'SELU'])
                    continue
                nodes = dfs_check(node, [], ['exp', 'subtract', 'relu', 'multiply', 'add'])
                if len(nodes) == 5:
                    # Same splice for ELU (one fewer trailing multiply).
                    mul_father = None
                    for tmp_node in nodes[3].father_nodes:
                        if tmp_node.atomic_type == 'negative':
                            mul_father = tmp_node
                            break
                    nodes.insert(3, mul_father)
                    relu_father = None
                    for tmp_node in nodes[-1].father_nodes:
                        if tmp_node.atomic_type == 'relu':
                            relu_father = tmp_node
                            break
                    nodes.insert(4, relu_father)
                    act_merge_task.append([nodes, 'ELU'])
                    continue
            elif node.atomic_type == "abs":
                nodes = dfs_check(node, [], ['abs', 'add', 'divide'])
                if len(nodes) == 3:
                    act_merge_task.append([nodes, 'Softsign'])
                    continue

        for task in act_merge_task:
            if None not in task[0]:
                merged_name = 'fused' + ''.join(["_" + n.name for n in task[0]])
                graph.merge_multi_nodes(task[0], merged_name, task[1])

        # TODO: Convert Depthwise/Separable Conv2D

        # atom_list = [
        #     'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
        #     'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
        #     'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
        #     'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
        #     'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d'
        # ]

        # Map single atomic operator to single keras operator
        # (only for nodes whose function_type was not already resolved above)
        for node in graph.node_set:
            if node.function_type != 'Unknown':
                continue
            if node.atomic_type == 'pad':
                # 4-D output => 2D padding, otherwise assume 3D padding.
                node.function_type = "ZeroPadding2D" if len(node.out_shape) == 4 else "ZeroPadding3D"
            elif node.atomic_type == "batch_flatten":
                node.function_type = "Flatten"
            elif node.atomic_type == "strided_slice":
                node.function_type = "Cropping2D"
            elif node.atomic_type == "multiply":
                node.function_type = "Multiply"
            elif node.atomic_type == "maximum":
                node.function_type = "Maximum"
            elif node.atomic_type == "minimum":
                node.function_type = "Minimum"
            elif node.atomic_type == "avg_pool2d":
                node.function_type = "AveragePooling2D"
            elif node.atomic_type == "max_pool2d":
                node.function_type = "MaxPooling2D"
            elif node.atomic_type == "upsampling":
                node.function_type = "UpSampling2D"
            elif node.atomic_type == "upsampling3d":
                node.function_type = "UpSampling3D"
            elif node.atomic_type == "add":
                node.function_type = "Add"
            elif node.atomic_type == "softmax":
                node.function_type = "Softmax"
            elif node.atomic_type == "concatenate":
                node.function_type = "Concatenate"
            elif node.atomic_type == "leaky_relu":
                node.function_type = "LeakyReLU"
            elif node.atomic_type == "subtract":
                node.function_type = "Subtract"
            elif node.atomic_type == "relu":
                node.function_type = "ReLU"
            elif node.atomic_type == "tanh":
                node.function_type = "Tanh"
            elif node.atomic_type == "conv2d":
                node.function_type = "Conv2D"
            elif node.atomic_type == "bias_add":
                node.function_type = "BiasAdd"
            elif node.atomic_type == "dense":
                node.function_type = "Dense"
            elif node.atomic_type == "clip":
                # Need Fix?
                # clip is mapped to a (capped) ReLU; keep the upper bound.
                node.function_type = "ReLU"
                if "a_max" in node.attrs:
                    node.function_param["a_max"] = node.attrs["a_max"]

            # Global pools not merged with a batch_flatten above fall back
            # to their plain pooling equivalents.
            if node.atomic_type == "global_max_pool2d" and node.function_type == "Unknown":
                node.function_type = "MaxPooling2D"
            elif node.atomic_type == "global_avg_pool2d" and node.function_type == "Unknown":
                node.function_type = "AveragePooling2D"

        return graph

    @staticmethod
    def convert_to_framework(graph: Graph, framework="pytorch") -> Graph:
        """Dispatch to the framework-specific conversion; unknown framework
        names return the graph unchanged."""
        if framework == "pytorch":
            graph = Parser.convert_to_pytorch(graph)
        elif framework == "keras":
            graph = Parser.convert_to_keras(graph)
        return graph

    @staticmethod
    def check_wrong_pred(node: Node) -> None:
        """Run rule-based sanity checks on one node's predicted operator,
        flagging ``has_wrong_pred`` / ``wrong_pred_category`` in place."""
        # If the prediction of node is wrong, you have to set the flag:
        # node.has_wrong_pred = True
        check_in_shape(node)
        if node.has_wrong_pred:
            logger.debug("Wrong Pred: {}, Wrong Type: {}".format(node.name, node.wrong_pred_category))
            return
        # Check No Weight
        # Ops in no_weights_list must not carry any non-input extra tensor.
        if node.atomic_type in no_weights_list and sum([(tmp_node.is_extra and not tmp_node.is_input
                                                         and sum(tmp_node.out_shape) > 0)
                                                        for tmp_node in node.extra_input]) > 0:
            # for tmp_node in node.extra_input:
            #     logger.debug(tmp_node.name)
            #     logger.debug(tmp_node.out_shape)
            node.has_wrong_pred = True
            node.wrong_pred_category = node.WEIGHT_NUM_WRONG

        # Check necessary weight
        # Ops in ness_weight_list must carry at least one weight tensor.
        if node.atomic_type in ness_weight_list and sum([(tmp_node.is_extra and not tmp_node.is_input
                                                          and sum(tmp_node.out_shape) > 0)
                                                         for tmp_node in node.extra_input]) == 0:
            node.has_wrong_pred = True
            node.wrong_pred_category = node.WEIGHT_NUM_WRONG

        # check weight dim
        check_weight_dim(node)

        # A global pool should be followed by a batch_flatten somewhere in
        # its successors; otherwise the prediction is suspect.
        if not node.has_wrong_pred and (node.atomic_type == "global_avg_pool2d"
                                        or node.atomic_type == "global_max_pool2d"):
            hasFlatten = False
            for tmp_node in node.next_nodes:
                if tmp_node.atomic_type == "batch_flatten":
                    hasFlatten = True
            if not hasFlatten:
                node.has_wrong_pred = True
                node.wrong_pred_category = node.INPUT_DIM_WRONG

        # if node.atomic_type == "max_pool2d" or node.atomic_type == "avg_pool2d" and not node.has_wrong_pred:
        #     hasFlatten = False
        #     for tmp_node in node.next_nodes:
        #         if tmp_node.atomic_type == "batch_flatten":
        #             hasFlatten = True
        #     if hasFlatten:
        #         node.atomic_type = "global_max_pool2d" if node.atomic_type == "max_pool2d" else "global_avg_pool2d"

        if node.has_wrong_pred:
            logger.debug("Wrong Pred: {}, Wrong Type: {}".format(node.name, node.wrong_pred_category))

    @staticmethod
    def check_pred_by_gcn(graph: Graph, device) -> List[str]:
        """Use the pretrained correction GCN to find mispredicted nodes.

        Returns the names of nodes the GCN flags as wrong; also logs
        recall/precision against the ground truth stored in the graph.
        """
        # CORRECT_GCN_CKPT is presumably a module-level checkpoint path
        # defined elsewhere in this file.
        net = CorrectGNN()
        # net = CorrectGNNWithShapeDim()
        net.load_state_dict(torch.load(CORRECT_GCN_CKPT))
        net.to(device)
        net.eval()
        graph.build_networkx_graph()
        labels = []
        gts = []
        node_names = []
        # 'p' (parameter) and 'input' act as extra label classes.
        new_atom_list = atom_list + ['p', 'input']
        # weight_dim = []
        for node in graph.nx_graph.nodes:
            label = new_atom_list.index(graph.nx_graph.nodes[node]['type'])
            labels.append(label)
            target = new_atom_list.index(graph.nx_graph.nodes[node]['gt'])
            gts.append(target)
            node_names.append(graph.nx_graph.nodes[node]['name'])
            # Use the node name to compute the groundtruth
            # weight_dim.append(graph.nx_graph.nodes[node]['max_weight_dim'])

        edges = list(graph.nx_graph.edges)

        label_input = torch.LongTensor(labels)
        label_input = label_input.to(device)
        adj_data = torch.LongTensor(edges).to(device)
        # weight_dim_input = torch.LongTensor(weight_dim).to(device)
        pred = net(label_input, adj_data)
        # pred = net(label_input, adj_data, weight_dim_input)
        pred_arr = pred.topk(1)[1].reshape(-1).cpu().numpy()
        # Binary target: 1 = the predicted type disagrees with ground truth.
        gt = [0 if label == gt else 1 for label, gt in zip(labels, gts)]
        logger.info(gt)
        logger.info(pred_arr)
        logger.info("Recall: {}".format(metrics.recall_score(gt, pred_arr)))
        logger.info("Precision {}".format(metrics.precision_score(gt, pred_arr)))
        for x, y, pred_label, gt_label in zip(gt, pred_arr, labels, gts):
            if x == 1 and y == 0:
                logger.info("Fail to find that wrong case: [Pred: {}] [GT: {}]".format(pred_label, gt_label))
            elif x == 0 and y == 1:
                logger.info("Wrong Alert: [Pred: {}] [GT: {}]".format(pred_label, gt_label))

        wrong_node_name_list = []
        for i, p in enumerate(pred_arr):
            if p == 1:
                wrong_node_name_list.append(node_names[i])
            if p == 0 and gt[i] == 1:
                logger.debug("Fail to find the wrong node [{}]".format(node_names[i]))
                logger.debug([fn.name for fn in graph.find_node_by_name(node_names[i]).father_nodes])
        return wrong_node_name_list

    @staticmethod
    def check_pred_by_gcn_with_bb(graph: Graph, device) -> List[str]:
        """Same as :meth:`check_pred_by_gcn` but without the extra logging
        of missed wrong nodes; returns the GCN-flagged node names."""
        net = CorrectGNN()
        # net = CorrectGNNWithShapeDim()
        net.load_state_dict(torch.load(CORRECT_GCN_CKPT))
        net.to(device)
        net.eval()
        graph.build_networkx_graph()
        labels = []
        gts = []
        node_names = []
        # 'p' (parameter) and 'input' act as extra label classes.
        new_atom_list = atom_list + ['p', 'input']
        # weight_dim = []
        for node in graph.nx_graph.nodes:
            label = new_atom_list.index(graph.nx_graph.nodes[node]['type'])
            labels.append(label)
            target = new_atom_list.index(graph.nx_graph.nodes[node]['gt'])
            gts.append(target)
            node_names.append(graph.nx_graph.nodes[node]['name'])
            # Use the node name to compute the groundtruth
            # weight_dim.append(graph.nx_graph.nodes[node]['max_weight_dim'])

        edges = list(graph.nx_graph.edges)

        label_input = torch.LongTensor(labels)
        label_input = label_input.to(device)
        adj_data = torch.LongTensor(edges).to(device)
        # weight_dim_input = torch.LongTensor(weight_dim).to(device)
        pred = net(label_input, adj_data)
        # pred = net(label_input, adj_data, weight_dim_input)
        pred_arr = pred.topk(1)[1].reshape(-1).cpu().numpy()
        # Binary target: 1 = the predicted type disagrees with ground truth.
        gt = [0 if label == gt else 1 for label, gt in zip(labels, gts)]
        logger.info(gt)
        logger.info(pred_arr)
        logger.info("Recall: {}".format(metrics.recall_score(gt, pred_arr)))
        logger.info("Precision {}".format(metrics.precision_score(gt, pred_arr)))
        for x, y, pred_label, gt_label in zip(gt, pred_arr, labels, gts):
            if x == 1 and y == 0:
                logger.info("Fail to find that wrong case: [Pred: {}] [GT: {}]".format(pred_label, gt_label))
            elif x == 0 and y == 1:
                logger.info("Wrong Alert: [Pred: {}] [GT: {}]".format(pred_label, gt_label))

        wrong_node_name_list = []
        for i, p in enumerate(pred_arr):
            if p == 1:
                wrong_node_name_list.append(node_names[i])
        return wrong_node_name_list


if __name__ == "__main__":
    # This module is used as a library (see Parser); no CLI entry point.
    pass
