from collections import defaultdict
from loguru import logger
import torch
from torch.utils.data import Dataset
import os
from my_utils.parsing_util import Parser
import pickle as pk
import networkx as nx
import numpy as np
import gensim
from torch.utils.data import DataLoader
from model.correct_model import CorrectGCNWithBB

# Operator types recognized in parsed model graphs; a node's label is its
# index in this list.  The final two entries ('input' and the parameter
# placeholder 'p') are non-compute nodes and are excluded from corruption.
atom_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
    'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
    'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
    'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d', 'input', 'p'
]

# The label vocabulary is identical to the operator vocabulary; derive it
# instead of duplicating the literal so the two lists can never drift apart.
label_list = list(atom_list)


def change_nodes(labels, ratio=0.1):
    """Randomly corrupt some compute-node labels and build the target vector.

    Only "normal" nodes are eligible: those whose label index is at most the
    index of 'conv2d', i.e. everything except 'input' and 'p'.  Exactly
    ``int(num_eligible * ratio)`` distinct eligible nodes are corrupted
    (sampled without replacement), each to a uniformly random *different*
    compute label (the draw's upper bound excludes 'input' and 'p').

    :param labels: 1-D tensor of label indices into ``label_list``
    :type labels: torch.Tensor
    :param ratio: fraction of eligible nodes to corrupt, defaults to 0.1
    :type ratio: float, optional
    :return: the corrupted label tensor (the input tensor is left untouched)
        and a float target vector with 1 at every corrupted position
    :rtype: Tuple(torch.Tensor, torch.Tensor)
    """
    # Work on a copy so the caller's tensor is not mutated in place.
    new_labels = labels.clone()
    tail_index = label_list.index('conv2d')
    # Positions of nodes eligible for corruption.
    changable_index_list = [i for i, x in enumerate(labels) if x <= tail_index]
    # Never try to corrupt more nodes than are eligible (guards ratio > 1).
    changable_node_num = min(int(len(changable_index_list) * ratio),
                             len(changable_index_list))
    target = torch.zeros(len(labels))
    if changable_node_num > 0:
        # Sample distinct positions so exactly changable_node_num nodes are
        # corrupted (a with-replacement draw could hit the same node twice
        # and change fewer nodes than requested).
        picks = torch.randperm(len(changable_index_list))[:changable_node_num]
        for pos in picks:
            changable_index = changable_index_list[pos]
            original_label = new_labels[changable_index]
            # Redraw until the new label differs from the original one.
            changed_label = torch.randint(low=0, high=len(label_list) - 2, size=[])
            while changed_label == original_label:
                changed_label = torch.randint(low=0, high=len(label_list) - 2, size=[])
            new_labels[changable_index] = changed_label
            target[changable_index] = 1

    return new_labels, target


class TypeGraphDataset(Dataset):
    """Operator-type graph dataset.

    Parses every ``*.json`` model graph under ``path`` into a
    ``(labels, edges)`` pair: per-node indices into ``atom_list`` and the
    networkx edge list of the operator graph.
    """

    def __init__(self, path) -> None:
        super().__init__()
        self.graph_data = []
        for filename in os.listdir(path):
            # Only the json graph dumps are of interest.
            if ".json" not in filename:
                continue
            logger.debug("Processing {} ...".format(filename))
            json_path = "{}/{}".format(path, filename)
            # Parse the json into an operator graph and normalize it.
            nodes, params = Parser.parse_graph(json_path)
            model_graph = Parser.convert(nodes, params)
            model_graph.remove_nop()
            model_graph.gen_label_by_name()
            model_graph.build_networkx_graph()
            nx_nodes = model_graph.nx_graph.nodes
            labels = [atom_list.index(nx_nodes[n]['type']) for n in nx_nodes]
            edges = list(model_graph.nx_graph.edges)
            self.graph_data.append((labels, edges))

    def __getitem__(self, index):
        """Return the ``(labels, edges)`` pair at ``index``."""
        return self.graph_data[index]

    def __len__(self) -> int:
        """Number of parsed model graphs."""
        return len(self.graph_data)


# Expand the Basic Block
class TypeGraphDatasetWithEdge(Dataset):
    """Dataset pairing each operator graph with its binary basic-block graph.

    For every ``*.json`` model under ``path`` it stores:

    * ``graph_data[i] = (labels, edges)`` — per-node operator-type indices
      (positions in ``atom_list``) and the operator-graph edge list;
    * ``extend_graph_data[i] = (bblock_vector_data, edges_data, intervals)``
      — one word2vec-embedded tensor per basic block, the merged block-level
      edge list as a LongTensor, and per operator node the ``(start, end)``
      interval of its blocks inside the concatenated block list
      (``(-1, -1)`` for 'input'/'p' nodes, which have no compiled code).

    NOTE(review): a ``{model}.so.pkl.new`` pickle is expected beside each
    json — presumably produced by an external disassembly/preprocessing
    step; confirm against the data pipeline.
    """

    def __init__(self, path, w2v_model="word2vec-train-ida.model") -> None:
        """Build the dataset from a directory of json graphs + block pickles.

        :param path: directory holding ``*.json`` graphs and the matching
            ``*.so.pkl.new`` basic-block pickles
        :param w2v_model: path of the gensim Word2Vec model used to embed
            disassembly tokens
        """
        super().__init__()
        self.w2v_model = gensim.models.Word2Vec.load(w2v_model)
        # Tokens canonicalized to a single word before the embedding lookup
        # (presumably IDA data-segment label prefixes — TODO confirm).
        cs_words = [
            "csqword", "csdword", "cspword", "csxmmword", "csymmword", "csunk", "csoff", "csbyte", "aAssertFail",
            "csasc"
        ]
        self.graph_data = []
        self.extend_graph_data = []
        # Largest basic-block-graph diameter seen across all operators;
        # only logged at the end (presumably used for model sizing).
        max_diameter = 0
        for filename in os.listdir(path):
            if ".json" in filename:
                logger.debug("Processing {} ...".format(filename))
                json_path = "{}/{}".format(path, filename)
                # Load json to a graph and normalize it.
                nodes, params = Parser.parse_graph(json_path)
                graph = Parser.convert(nodes, params)
                graph.remove_nop()
                graph.gen_label_by_name()
                graph.build_networkx_graph()
                labels = []
                node_name_list = []
                for node in graph.nx_graph.nodes:
                    label = atom_list.index(graph.nx_graph.nodes[node]['type'])
                    labels.append(label)
                    node_name_list.append(graph.nx_graph.nodes[node]['name'])
                edges = list(graph.nx_graph.edges)
                self.graph_data.append((labels, edges))

                # TODO: Extend Basic Block

                # Load the basic block data for this model.
                model_name = filename.split(".")[0]
                # NOTE(review): the file handle is never closed explicitly.
                model_data = pk.load(open("{}/{}.so.pkl.new".format(path, model_name), "rb"))
                # Map operator-node name -> its basic blocks and block edges.
                node_data_map = {}
                for node in graph.nx_graph.nodes:
                    # For each operator node, we load the basic block data and edge
                    tmp_node = graph.nx_graph.nodes[node]
                    # Parameter ('p') and 'input' nodes have no compiled code.
                    if tmp_node['type'] == 'p' or tmp_node['type'] == 'input':
                        continue
                    # Compiled-function naming convention for the operator.
                    funcname = tmp_node['name'].split("-")[0] + "_compute_"
                    node_data_map[tmp_node['name']] = {}
                    node_data_map[tmp_node['name']]['blocks'] = model_data[funcname]['blocks']
                    node_data_map[tmp_node['name']]['edges'] = model_data[funcname]['edges']
                    # Track the diameter of this operator's block graph
                    # (undirected, self-loops removed); skipped when it has
                    # no edges, where diameter would be undefined.
                    G = nx.Graph()
                    G.add_edges_from(model_data[funcname]['edges'])
                    G.remove_edges_from(nx.selfloop_edges(G))
                    if len(model_data[funcname]['edges']) == 0:
                        continue
                    tmp_diameter = nx.diameter(G)
                    max_diameter = max(tmp_diameter, max_diameter)

                # NOTE(review): this loop has no effect — leftover debug
                # scaffolding (its body only skips).
                for node in graph.nx_graph.nodes:
                    tmp_name = graph.nx_graph.nodes[node]['name']
                    if tmp_name not in node_data_map:
                        continue

                # Connect all operator nodes at the basic block level:
                # operators are visited in topological order and their blocks
                # concatenated, re-indexing block-local edges by tmp_pos.
                sorted_node_list = nx.topological_sort(graph.nx_graph)
                tmp_pos = 0  # running offset of the current operator's blocks in bblock_data
                bblock_data = []  # all basic blocks of all operators, concatenated
                edges_data = []  # block-level edges, re-indexed into bblock_data
                in_out_node_map = {}  # A dict <node name, (in node positions (indegree == 0), out node positions (outdegree == 0))>
                start_end_node_map = {
                }  # A dict <node name, (start position, end position)> (the interval in bblock_data)

                for node in sorted_node_list:
                    tmp_node = graph.nx_graph.nodes[node]
                    if tmp_node['type'] == 'p' or tmp_node['type'] == 'input':
                        continue
                    # Append this operator's blocks and offset-shifted edges.
                    bblock_length = len(node_data_map[tmp_node['name']]['blocks'])
                    bblock_data.extend(node_data_map[tmp_node['name']]['blocks'])
                    edges_data.extend([[edge[0] + tmp_pos, edge[1] + tmp_pos]
                                       for edge in node_data_map[tmp_node['name']]['edges']])
                    start_end_node_map[tmp_node['name']] = (len(bblock_data) - bblock_length, len(bblock_data))
                    # Per-block in/out degree inside this operator's own graph.
                    tmp_indegree = [0 for _ in range(bblock_length)]
                    tmp_outdegree = [0 for _ in range(bblock_length)]
                    for edge in node_data_map[tmp_node['name']]['edges']:
                        fromb, tob = edge
                        tmp_indegree[tob] += 1
                        tmp_outdegree[fromb] += 1

                    # Entry blocks (indegree 0) and exit blocks (outdegree 0),
                    # stored as global positions in bblock_data.
                    in_out_node_map[tmp_node['name']] = defaultdict(lambda: [])
                    for i in range(bblock_length):
                        if tmp_indegree[i] == 0:
                            in_out_node_map[tmp_node['name']]['in'].append(i + tmp_pos)
                        if tmp_outdegree[i] == 0:
                            in_out_node_map[tmp_node['name']]['out'].append(i + tmp_pos)
                    tmp_pos += bblock_length

                # Add edges between operator nodes: connect every exit block
                # of the source operator to every entry block of the target.
                for edge in edges:
                    from_node, to_node = graph.nx_graph.nodes[edge[0]], graph.nx_graph.nodes[edge[1]]
                    from_node_type, to_node_type = from_node['type'], to_node['type']
                    if from_node_type == "p" or from_node_type == "input" or to_node_type == "p" or to_node_type == "input":
                        continue
                    for end_node in in_out_node_map[from_node['name']]['out']:
                        for start_node in in_out_node_map[to_node['name']]['in']:
                            edges_data.append([end_node, start_node])
                # Block interval per operator node, aligned with node_name_list
                # (and hence with `labels`); (-1, -1) marks 'input'/'p' nodes.
                intervals = []
                for node_name in node_name_list:
                    if node_name in start_end_node_map:
                        intervals.append(start_end_node_map[node_name])
                    else:
                        intervals.append((-1, -1))

                # Embed each block: one (1, num_tokens, 200) tensor per block.
                bblock_vector_data = []
                embedding_dim = 200
                for bblock in bblock_data:
                    # NOTE(review): skipping empty blocks can desync
                    # bblock_vector_data indices from edges_data/intervals —
                    # verify blocks are never empty in practice.
                    if len(bblock) == 0:
                        continue
                    bblock_vector_data.append(torch.zeros(1, len(bblock), embedding_dim))
                    for pos, t in enumerate(bblock):
                        # Canonicalize data-segment labels and numeric
                        # literals before the vocabulary lookup.
                        for cs_word in cs_words:
                            if cs_word in t:
                                t = cs_word
                        if t.isdigit() or "0x" in t:
                            t = "num"
                        # Out-of-vocabulary tokens keep their zero vector.
                        if t not in self.w2v_model.wv:
                            continue
                        vector = torch.from_numpy(np.copy(self.w2v_model.wv[t]))
                        bblock_vector_data[-1][:, pos, :] = vector

                edges_data = torch.from_numpy(np.array(edges_data)).long()

                self.extend_graph_data.append((bblock_vector_data, edges_data, intervals))

        logger.debug("Training Graph Data Number: {}".format(len(self.graph_data)))
        logger.debug("Training Graph Data Number [Ext] : {}".format(len(self.extend_graph_data)))
        logger.debug("Max Diameter: {}".format(max_diameter))

    def __getitem__(self, index):
        """Return ``(graph_data[index], extend_graph_data[index])``."""
        return self.graph_data[index], self.extend_graph_data[index]

    def __len__(self) -> int:
        """Number of parsed model graphs."""
        return len(self.graph_data)


class TypeGraphDatasetWithWeightDim(Dataset):
    """Operator-graph dataset that also records each node's weight dimension.

    Yields one ``(labels, edges, weight_dim)`` triple per ``*.json`` model
    under ``path``: operator-type indices into ``atom_list``, the networkx
    edge list, and the per-node ``max_weight_dim`` attribute.
    """

    def __init__(self, path) -> None:
        super().__init__()
        self.graph_data = []
        for filename in os.listdir(path):
            # Only the json graph dumps are of interest.
            if ".json" not in filename:
                continue
            logger.debug("Processing {} ...".format(filename))
            json_path = "{}/{}".format(path, filename)
            # Parse the json into an operator graph and normalize it.
            nodes, params = Parser.parse_graph(json_path)
            model_graph = Parser.convert(nodes, params)
            model_graph.remove_nop()
            model_graph.gen_label_by_name()
            model_graph.build_networkx_graph()
            nx_nodes = model_graph.nx_graph.nodes
            labels = [atom_list.index(nx_nodes[n]['type']) for n in nx_nodes]
            weight_dim = [nx_nodes[n]['max_weight_dim'] for n in nx_nodes]
            edges = list(model_graph.nx_graph.edges)
            self.graph_data.append((labels, edges, weight_dim))

    def __getitem__(self, index):
        """Return the ``(labels, edges, weight_dim)`` triple at ``index``."""
        return self.graph_data[index]

    def __len__(self) -> int:
        """Number of parsed model graphs."""
        return len(self.graph_data)


if __name__ == "__main__":
    # Smoke test: build the edge-extended dataset and push every graph
    # through one forward pass of the correction GCN.
    data_path = "./tvm_data/model_graph_data_with_edge/playground/"
    dataset = TypeGraphDatasetWithEdge(path=data_path)
    train_dataloader = DataLoader(dataset, batch_size=1, shuffle=True, pin_memory=False, num_workers=0)
    net = CorrectGCNWithBB(embed_dim=200)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)
    net.train()

    for (labels, edges), (x_bb, adj_bb, intervals) in train_dataloader:
        node_labels = torch.LongTensor(labels)
        # Randomly corrupt a fraction of the node labels; `target` marks
        # which positions were changed.
        node_labels, target = change_nodes(node_labels, 0.1)
        node_labels = node_labels.to(device)
        target = target.long().to(device)
        adj_data = torch.LongTensor(edges).to(device)
        pred = net(node_labels, adj_data, x_bb, adj_bb.to(device), intervals)