import torch_geometric.utils
from torch_geometric.data import HeteroData
import torch
import matplotlib.pyplot as plt
import networkx as nx
import torch_geometric.transforms as T
# from tokenizer_tools import get_sentence_vector
from memory import memory_usage
from load_data_001 import alarms_df
from word2vec import save_word2vec_status,get_vector_for_word

class AlarmsData(object):
    """Build a heterogeneous alarm graph from an alarms DataFrame.

    Node vocabularies are derived from the frame:
      * "btree" -- individual product labels (comma-split),
      * "host"  -- ip addresses,
      * "alarm" -- the combined "ip|content|idc_name|product_labels" string.
    Each vocabulary is sorted so node ids are stable across runs, and the
    [start_id, end_id) slice of alarms is the "local" window this instance
    builds edges for.
    """

    def __init__(self, alarms_df, start_id=0, end_id=400):
        """Index the frame and assemble the HeteroData graph.

        :param alarms_df: pandas DataFrame with columns
            ip / content / idc_name / product_labels.
        :param start_id: first alarm id of the local window (clamped to 0).
        :param end_id: one past the last alarm id; falsy means "whole frame".
        """
        self.alarms_df = alarms_df
        self.start_id = start_id if start_id > 0 else 0
        self.end_id = end_id if end_id else len(self.alarms_df)
        self.get_link_items()
        self.save_explain_data_db()
        self.create_hetero_data()

    @staticmethod
    def _positions(items):
        # value -> index lookup table; O(1) per lookup instead of the O(n)
        # list.index() the edge builders previously called inside loops.
        return {item: idx for idx, item in enumerate(items)}

    def get_link_items(self):
        """Derive the sorted node vocabularies and their id ranges."""
        df = self.alarms_df
        # One unique key per alarm row; "|" is also the split character the
        # edge builders rely on (field 0 = ip, last field = product labels).
        df["combined"] = df.apply(
            lambda row: "|".join([
                row["ip"],
                row["content"],
                row["idc_name"],
                row["product_labels"],
            ]),
            axis=1)

        # product_labels is a comma separated list; flatten across all rows.
        product_labels = []
        for raw_labels in df["product_labels"].tolist():
            product_labels.extend(raw_labels.split(","))

        # Sorted + de-duplicated vocabularies give deterministic node ids.
        self.btrees = sorted(set(product_labels))
        self.ips = sorted(set(df["ip"].tolist()))
        self.contents = sorted(set(df["combined"].tolist()))

        self.btree_id = list(range(len(self.btrees)))
        self.ips_node_id = list(range(len(self.ips)))
        self.contents_node_id = list(range(len(self.contents)))

        # The alarm window [start_id, end_id) this instance builds edges for.
        self.local_contents = self.contents[self.start_id: self.end_id]
        self.local_contents_node_id = self.contents_node_id[self.start_id: self.end_id]

    def save_explain_data_db(self):
        """Build id -> human readable mappings for explanation output.

        Per-type mappings use each type's own 0-based ids.  The combined
        mapping (all_origin_infos) uses the flat id layout
        [btrees][ips][alarms], i.e. ip ids are shifted by len(btrees) and
        alarm ids by len(btrees) + len(ips).
        """
        alarm_dict = {i: self.contents[i] for i in self.contents_node_id}
        ip_dict = {i: self.ips[i] for i in self.ips_node_id}
        btree_dict = {i: self.btrees[i] for i in self.btree_id}

        ip_length = len(self.ips_node_id)
        btree_length = len(self.btree_id)

        all_origin_infos = dict(btree_dict)
        ip_dict_for_all = {i + btree_length: self.ips[i] for i in self.ips_node_id}
        # Was misnamed "btree_dict_for_all"; it maps alarm contents.
        alarm_dict_for_all = {
            i + btree_length + ip_length: self.contents[i]
            for i in self.contents_node_id
        }
        all_origin_infos.update(ip_dict_for_all)
        all_origin_infos.update(alarm_dict_for_all)

        self.btrees_origin_infos = btree_dict
        self.ips_origin_infos = ip_dict
        self.alarms_origin_infos = alarm_dict
        self.all_origin_infos = all_origin_infos

    def data_expalin(self, edge_index, type="homo", edge_type="a2h"):
        """Print one human readable "src!!!!dst" line per edge.

        The misspelled name and the builtin-shadowing "type" parameter are
        kept as-is so existing callers keep working.

        :param edge_index: (2, E) integer edge-index tensor.
        :param type: "hetero" -> use the per-type id mappings selected by
            edge_type; anything else -> use the flat all_origin_infos ids.
        :param edge_type: "a2h" (alarm->host), "h2b" (host->btree),
            anything else is treated as alarm->btree.
        """
        if type == "hetero":
            for src, dst in edge_index.t().tolist():
                if edge_type == "a2h":
                    print(self.alarms_origin_infos[src] + "!!!!" + self.ips_origin_infos[dst])
                elif edge_type == "h2b":
                    print(self.ips_origin_infos[src] + "!!!!" + self.btrees_origin_infos[dst])
                else:
                    print(self.alarms_origin_infos[src] + "!!!!" + self.btrees_origin_infos[dst])
        else:
            for src, dst in edge_index.t().tolist():
                print(self.all_origin_infos[src] + "!!!!" + self.all_origin_infos[dst])

    def create_hetero_data(self):
        """Assemble the torch_geometric HeteroData object into self._data.

        Only btree and host nodes plus the host->btree edges are currently
        materialised; the alarm node/edge wiring is intentionally disabled.
        """
        data = HeteroData()

        data["btree"].node_id = torch.IntTensor(self.btree_id)
        data['btree'].x = torch.IntTensor(self.btree_id)

        data["host"].node_id = torch.IntTensor(self.ips_node_id)
        data['host'].x = torch.IntTensor(self.ips_node_id)

        # data["alarm"].node_id = torch.IntTensor(self.local_contents_node_id)
        # data['alarm'].x = torch.IntTensor(self.local_contents_node_id)

        self._data = data
        # data[('alarm', 'on', 'host')].edge_index = self.a2h_edge_index_data().to(torch.int64)
        # data[('alarm', 'to', 'btree')].edge_index = self.a2b_edge_index_data().to(torch.int64)
        data[('host', 'belongsto', 'btree')].edge_index = self.h2b_edge_index_data().to(torch.int64)

    def all_a2h_edge_index_data(self):
        """Return the (2, E) alarm->host edge index over ALL contents.

        NOTE(review): alarm ids are still offset by start_id here even
        though every alarm is included -- for start_id > 0 that looks
        inconsistent with the "all" intent; confirm before relying on it.
        """
        ip_pos = self._positions(self.ips)
        pairs = [
            [self.start_id + i, ip_pos[content.split("|")[0]]]
            for i, content in enumerate(self.contents)
        ]
        return torch.IntTensor(pairs).T

    def all_a2b_edge_index_data(self):
        """Return the (2, E) alarm->btree edge index over ALL contents.

        One edge per comma-separated product label.  See the start_id note
        on all_a2h_edge_index_data.
        """
        btree_pos = self._positions(self.btrees)
        pairs = []
        for i, content in enumerate(self.contents):
            for label in content.split("|")[-1].split(","):
                pairs.append([self.start_id + i, btree_pos[label]])
        return torch.IntTensor(pairs).T

    def all_h2b_edge_index_data(self):
        """Return the (2, E) de-duplicated host->btree edges over ALL contents."""
        ip_pos = self._positions(self.ips)
        btree_pos = self._positions(self.btrees)
        pairs = []
        seen = set()  # O(1) duplicate test instead of list membership
        for content in self.contents:
            fields = content.split("|")
            host_idx = ip_pos[fields[0]]
            for label in fields[-1].split(","):
                edge = (host_idx, btree_pos[label])
                if edge not in seen:
                    seen.add(edge)
                    pairs.append(list(edge))
        return torch.IntTensor(pairs).T

    def a2h_edge_index_data(self):
        """Return (2, E) alarm->host edges for the local window.

        Alarm ids are global (window offset start_id applied).
        """
        ip_pos = self._positions(self.ips)
        pairs = [
            [self.start_id + i, ip_pos[content.split("|")[0]]]
            for i, content in enumerate(self.local_contents)
        ]
        return torch.IntTensor(pairs).T

    def a2b_edge_index_data(self):
        """Return (2, E) alarm->btree edges for the local window."""
        btree_pos = self._positions(self.btrees)
        pairs = []
        for i, content in enumerate(self.local_contents):
            for label in content.split("|")[-1].split(","):
                pairs.append([self.start_id + i, btree_pos[label]])
        return torch.IntTensor(pairs).T

    def h2b_edge_index_data(self):
        """Return (2, E) de-duplicated host->btree edges for the local window."""
        ip_pos = self._positions(self.ips)
        btree_pos = self._positions(self.btrees)
        pairs = []
        seen = set()  # O(1) duplicate test instead of list membership
        for content in self.local_contents:
            fields = content.split("|")
            host_idx = ip_pos[fields[0]]
            for label in fields[-1].split(","):
                edge = (host_idx, btree_pos[label])
                if edge not in seen:
                    seen.add(edge)
                    pairs.append(list(edge))
        return torch.IntTensor(pairs).T

    @property
    def data(self):
        """The assembled HeteroData graph."""
        return self._data

    def get_item_indexes(self, df):
        """Return node-key vocabularies for an arbitrary alarms DataFrame.

        Unlike get_link_items the results are neither sorted nor stored on
        self; "contents_keys" keeps the row order of df.
        """
        df["combined"] = df.apply(
            lambda row: "|".join([row["ip"], row["content"], row["idc_name"], row["product_labels"]]),
            axis=1)

        product_labels = []
        for raw_labels in df["product_labels"].tolist():
            product_labels.extend(raw_labels.split(","))

        return {
            "contents_keys": df["combined"].tolist(),
            "product_labels_keys": list(set(product_labels)),
            "ips_keys": list(set(df["ip"].tolist())),
        }


# data = AlarmsData(alarms_df, 0, 1)
# print(data.data)
# # print(data.alarms_origin_infos)
# # print(data.ips_origin_infos)
# # print(data.btrees_origin_infos)
# print(data.all_origin_infos)
# NOTE(review): module-level side effect -- the graph for alarms [0, 1000)
# is built at import time, so importing this module is expensive.
data = AlarmsData(alarms_df, 0, 1000)
# Flat node-id -> human readable text mapping, exported for explain/debug code.
ALL_ORIGIN_INFOS = data.all_origin_infos
# print(data.data)
# print(ALL_ORIGIN_INFOS)
#
#
# nodes = data.btrees +data.ips+data.contents
# # print(data.btrees)
# # print(data.ips)
# # print(data.contents)
#
# a2h_list = data.all_a2h_edge_index_data().t().tolist()
# a2b_list = data.all_a2b_edge_index_data().t().tolist()
# h2b_list = data.all_h2b_edge_index_data().t().tolist()
#
# edge_list = []
# for i in a2h_list:
#     edge_list.append((data.alarms_origin_infos[i[0]],data.ips_origin_infos[i[1]]))
#     edge_list.append((data.ips_origin_infos[i[1]], data.alarms_origin_infos[i[0]]))
#
# for i in a2b_list:
#     edge_list.append((data.alarms_origin_infos[i[0]],data.btrees_origin_infos[i[1]]))
#     edge_list.append((data.btrees_origin_infos[i[1]], data.alarms_origin_infos[i[0]]))
#
# for i in h2b_list:
#     edge_list.append((data.ips_origin_infos[i[0]],data.btrees_origin_infos[i[1]]))
#     edge_list.append((data.btrees_origin_infos[i[1]], data.ips_origin_infos[i[0]]))


# save_word2vec_status(nodes,edge_list)
# get_vector_for_word("aaa")
#
# for key in data.btrees_origin_infos:
#     print(data.btrees_origin_infos[key])
#
# for key in data.ips_origin_infos:
#     print(data.ips_origin_infos[key])
#
# for key in data.alarms_origin_infos:
#     print(data.alarms_origin_infos[key])

# print(data.data)
# print(data.data.to_homogeneous())
# print(data.data.node_types)
# print(data.data.to_homogeneous().node_type)

