import random
import re
import torch
import os
import numpy as np
import torch.nn.functional as F
import networkx as nx
from torch_geometric.nn.conv import MessagePassing
from torch_scatter import scatter_add
from torch_geometric.utils import degree
from torch_geometric.utils import remove_self_loops, add_self_loops
from torch_geometric.data import InMemoryDataset
from torch_geometric.io import read_tu_data
from torch.utils.data import Dataset


class TFFDatasetExt(InMemoryDataset):
    """TU-format graph dataset with an optional pruned variant on disk.

    Loads the pre-processed tensor file named by ``processed_filename``.
    When ``pruning_percent > 0`` a separately named file (base name suffixed
    with ``_<percent*100>``) is loaded instead, (re)building data if missing.

    Args:
        root (str): dataset root directory.
        name (str): dataset name (prefix of the raw TU ``.txt`` files).
        transform / pre_transform / pre_filter: standard
            :class:`InMemoryDataset` hooks.
        use_node_attr (bool): keep continuous node attributes; when ``False``
            only the trailing one-hot node-label columns of ``x`` are kept.
        processed_filename (str): file name of the processed ``.pt`` file.
        pruning_percent (float): fraction of the graph to prune; 0 disables.
    """

    def __init__(self,
                 root,
                 name,
                 transform=None,
                 pre_transform=None,
                 pre_filter=None,
                 use_node_attr=False,
                 processed_filename=None,
                 pruning_percent=0):
        self.name = name
        # (The original assigned processed_filename twice; once is enough.)
        self.processed_filename = processed_filename
        self.pruning_percent = pruning_percent
        super(TFFDatasetExt, self).__init__(root, transform, pre_transform, pre_filter)
        if self.pruning_percent > 0:
            # e.g. ".../TFF_data.pt" -> ".../TFF_data_10.0.pt"
            self.pruned_data_path = self.processed_paths[0][:-3] + "_" + str(pruning_percent * 100) + \
                                    self.processed_paths[0][-3:]
            # NOTE(review): process() saves to processed_paths[0], not to
            # pruned_data_path, so the pruned file must be produced elsewhere;
            # confirm before enabling pruning_percent > 0.
            if not os.path.exists(self.pruned_data_path):
                self.process()
            self.data, self.slices = torch.load(self.pruned_data_path)
        else:
            self.data, self.slices = torch.load(self.processed_paths[0])
        if self.data.x is not None and not use_node_attr:
            # Drop the leading continuous attribute columns, keeping only the
            # one-hot node-label block at the end of x.
            self.data.x = self.data.x[:, self.num_node_attributes:]

    @property
    def num_node_labels(self):
        """Number of trailing one-hot node-label columns in ``data.x``."""
        if self.data.x is None:
            return 0

        # The label block is the largest column suffix whose total sum equals
        # the node count (i.e. exactly one 1 per row).
        for i in range(self.data.x.size(1)):
            if self.data.x[:, i:].sum().item() == self.data.x.size(0):
                return self.data.x.size(1) - i

        return 0

    @property
    def num_node_attributes(self):
        """Number of leading continuous attribute columns in ``data.x``."""
        if self.data.x is None:
            return 0

        return self.data.x.size(1) - self.num_node_labels

    @property
    def raw_file_names(self):
        names = ['A', 'graph_indicator']
        return ['{}_{}.txt'.format(self.name, name) for name in names]

    @property
    def processed_file_names(self):
        return self.processed_filename

    def download(self):
        # Raw files are expected to be present locally; nothing to download.
        pass

    def process(self):
        """Read the raw TU files, apply the filter/transform hooks, and save.

        Re-collates after each hook so the next step sees its result.  (The
        original version started from a ``[...]`` placeholder list — crashing
        when both hooks were ``None`` — and discarded the ``pre_filter``
        result whenever ``pre_transform`` was also given.)
        """
        self.data, self.slices, self.size = read_tu_data(self.raw_dir, self.name)
        if self.pre_filter is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [data for data in data_list if self.pre_filter(data)]
            self.data, self.slices = self.collate(data_list)
        if self.pre_transform is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [self.pre_transform(data) for data in data_list]
            self.data, self.slices = self.collate(data_list)
        torch.save((self.data, self.slices), self.processed_paths[0])

    def __repr__(self):
        return '{}({})'.format(self.name, len(self))


class FeatureExpander(MessagePassing):
    r"""Expand node features with structural statistics.

    Implemented as a :class:`MessagePassing` module so the ``A^k x`` feature
    can reuse ``propagate``/``message``.

    Args:
        degree (bool): whether to use degree feature.
        onehot_maxdeg (int): whether to use one_hot degree feature with
            with max degree capped. disabled with 0.
        AK (int): whether to use a^kx feature. disabled with 0.
        centrality (bool): whether to use centrality feature.
        remove_edges (strings): whether to remove edges, partially or totally.
        edge_noises_add (float): adding random edges (in ratio of current edges).
        edge_noises_delete (float): remove random ratio of edges.
        group_degree (int): group nodes to create super nodes, set 0 to disable.
    """

    def __init__(self, degree=True, onehot_maxdeg=0, AK=1,
                 centrality=False, remove_edges="none",
                 edge_noises_add=0, edge_noises_delete=0, group_degree=0):
        super(FeatureExpander, self).__init__()

        # super(FeatureExpander, self).__init__('add', 'source_to_target')
        self.degree = degree
        self.onehot_maxdeg = onehot_maxdeg
        self.AK = AK
        self.centrality = centrality
        self.remove_edges = remove_edges
        self.edge_noises_add = edge_noises_add
        self.edge_noises_delete = edge_noises_delete
        self.group_degree = group_degree
        assert remove_edges in ["none", "nonself", "all"], remove_edges

        self.edge_norm_diag = 1e-8  # edge norm is used, and set A diag to it

    def transform(self, data):
        """Append the configured features to ``data.x`` (mutates ``data``).

        Order of operations: edge noise -> feature computation ->
        edge removal -> degree-based node grouping.  Returns ``data``.
        """
        if data.x is None:
            # Featureless graphs get a constant all-ones feature.
            data.x = torch.ones([data.num_nodes, 1], dtype=torch.float)

        # Adding noises to edges before computing anything else.
        if self.edge_noises_delete > 0:
            # Keep a random subset of (1 - ratio) of the edges.
            num_edges_new = data.num_edges - int(
                data.num_edges * self.edge_noises_delete)
            idxs = torch.randperm(data.num_edges)[:num_edges_new]
            data.edge_index = data.edge_index[:, idxs]
        if self.edge_noises_add > 0:
            # Random endpoint pairs; may create duplicates or self-loops.
            num_new_edges = int(data.num_edges * self.edge_noises_add)
            idx = torch.LongTensor(num_new_edges * 2).random_(0, data.num_nodes)
            new_edges = idx.reshape(2, -1)
            data.edge_index = torch.cat([data.edge_index, new_edges], 1)

        # Each helper returns a zero-width tensor when its feature is disabled,
        # so the concatenation below is unconditional.
        deg, deg_onehot = self.compute_degree(data.edge_index, data.num_nodes)
        akx = self.compute_akx(data.num_nodes, data.x, data.edge_index)
        cent = self.compute_centrality(data)
        data.x = torch.cat([data.x, deg, deg_onehot, akx, cent], -1)

        if self.remove_edges != "none":
            if self.remove_edges == "all":
                self_edge = None
            else:  # only keep self edge
                self_edge = torch.tensor(range(data.num_nodes)).view((1, -1))
                self_edge = torch.cat([self_edge, self_edge], 0)
            data.edge_index = self_edge

        # Reduce nodes by degree-based grouping
        if self.group_degree > 0:
            assert self.remove_edges == "all", "remove all edges"
            x_base = data.x
            deg_base = deg.view(-1)
            super_nodes = []
            # For each degree value k = 1..group_degree, collapse all nodes of
            # degree k into one super node: [group size, mean feature vector].
            for k in range(1, self.group_degree + 1):
                eq_idx = deg_base == k
                gt_idx = deg_base > k
                x_to_group = x_base[eq_idx]
                x_base = x_base[gt_idx]
                deg_base = deg_base[gt_idx]
                group_size = torch.zeros([1, 1]) + x_to_group.size(0)
                if x_to_group.size(0) == 0:
                    # Empty group: zero feature vector with the right width.
                    super_nodes.append(
                        torch.cat([group_size, data.x[:1] * 0], -1))
                else:
                    super_nodes.append(
                        torch.cat([group_size,
                                   x_to_group.mean(0, keepdim=True)], -1))
            if x_base.size(0) == 0:
                # No node survived grouping; keep one zero node as placeholder.
                x_base = data.x[:1] * 0
            data.x = x_base
            # Flattened super-node features stored separately on the data object.
            data.xg = torch.cat(super_nodes, 0).view((1, -1))

        return data

    def compute_degree(self, edge_index, num_nodes):
        """Return (degree column, one-hot capped degree) node features.

        Either tensor is zero-width when the corresponding option is off.
        """
        row, col = edge_index
        deg = degree(row, num_nodes)
        deg = deg.view((-1, 1))

        if self.onehot_maxdeg is not None and self.onehot_maxdeg > 0:
            # Cap degrees at onehot_maxdeg so the one-hot width is bounded.
            max_deg = torch.tensor(self.onehot_maxdeg, dtype=deg.dtype)
            deg_capped = torch.min(deg, max_deg).type(torch.int64)
            deg_onehot = F.one_hot(
                deg_capped.view(-1), num_classes=self.onehot_maxdeg + 1)
            deg_onehot = deg_onehot.type(deg.dtype)
        else:
            deg_onehot = self.empty_feature(num_nodes)

        if not self.degree:
            deg = self.empty_feature(num_nodes)

        return deg, deg_onehot

    def compute_centrality(self, data):
        """Per-node [closeness, betweenness, PageRank] features (or zero-width)."""
        if not self.centrality:
            return self.empty_feature(data.num_nodes)

        G = nx.Graph(data.edge_index.numpy().T.tolist())
        G.add_nodes_from(range(data.num_nodes))  # in case missing node ids
        closeness = nx.algorithms.closeness_centrality(G)
        betweenness = nx.algorithms.betweenness_centrality(G)
        # NOTE(review): nx.pagerank_numpy was deprecated in networkx 2.6 and
        # removed in 3.0 — on newer versions this must become nx.pagerank.
        pagerank = nx.pagerank_numpy(G)
        centrality_features = torch.tensor(
            [[closeness[i], betweenness[i], pagerank[i]] for i in range(
                data.num_nodes)])
        return centrality_features

    def compute_akx(self, num_nodes, x, edge_index, edge_weight=None):
        """Concatenate propagated features A^1 x, ..., A^K x (normalized A)."""
        if self.AK is None or self.AK <= 0:
            return self.empty_feature(num_nodes)

        edge_index, norm = self.norm(
            edge_index, num_nodes, edge_weight, diag_val=self.edge_norm_diag)

        xs = []
        for k in range(1, self.AK + 1):
            # Each step multiplies by the normalized adjacency once more.
            x = self.propagate(edge_index, x=x, norm=norm)
            xs.append(x)
        return torch.cat(xs, -1)

    def message(self, x_j, norm):
        # Scale each neighbor feature by its edge-normalization coefficient.
        return norm.view(-1, 1) * x_j

    @staticmethod
    def norm(edge_index, num_nodes, edge_weight, diag_val=1e-8, dtype=None):
        """GCN-style symmetric normalization D^{-1/2} W D^{-1/2}.

        Self-loops are added with weight ``diag_val`` (near-zero diagonal)
        instead of the usual 1.  Returns ``(edge_index, per-edge coeffs)``.
        """
        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1),),
                                     dtype=dtype,
                                     device=edge_index.device)
        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)
        # NOTE(review): this relies on the old torch_geometric API in which
        # add_self_loops returned just the edge_index; newer versions return
        # an (edge_index, edge_attr) tuple — verify against the installed
        # version before upgrading.
        edge_index = add_self_loops(edge_index, num_nodes=num_nodes)
        # Add edge_weight for loop edges.
        loop_weight = torch.full((num_nodes,),
                                 diag_val,
                                 dtype=edge_weight.dtype,
                                 device=edge_weight.device)
        edge_weight = torch.cat([edge_weight, loop_weight], dim=0)

        row, col = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow(-0.5)
        # Isolated nodes (degree 0) would give inf; zero them out instead.
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0

        return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

    def empty_feature(self, num_nodes):
        # Zero-width placeholder so disabled features concatenate as no-ops.
        return torch.zeros([num_nodes, 0])


def get_dataset(name, feat_str='deg+odeg100', pruning_percent=0):
    """Build the TFF dataset with features described by ``feat_str``.

    Args:
        name (str): dataset name forwarded to :class:`TFFDatasetExt`.
        feat_str (str): feature spec; containing "deg" enables the degree
            feature, "cent" the centrality features, and "odeg<N>" a
            one-hot degree capped at N.
        pruning_percent (float): forwarded to the dataset (0 disables).

    Returns:
        The dataset, with ``edge_attr`` cleared.
    """
    path = r"F:\论文\TII\Causal_Open_set\data\TFF"
    # path = r"..\data\TFF"  # use when running TFF_utils.py directly
    degree = feat_str.find("deg") >= 0
    centrality = feat_str.find("cent") >= 0
    onehot_max_deg_str = re.findall(r"odeg(\d+)", feat_str)
    onehot_max_deg = int(onehot_max_deg_str[0]) if onehot_max_deg_str else None

    pre_transform = FeatureExpander(
        degree=degree, onehot_maxdeg=onehot_max_deg, AK=0,
        centrality=centrality, remove_edges="none",
        edge_noises_add=0, edge_noises_delete=0,
        group_degree=0).transform

    dataset = TFFDatasetExt(
        path,
        name,
        pre_transform=pre_transform,
        use_node_attr=True,
        processed_filename="TFF_data.pt",
        # BUGFIX: was hard-coded to 0, silently ignoring the argument.
        pruning_percent=pruning_percent
    )

    dataset.data.edge_attr = None
    return dataset


class TFF_Dataset(Dataset):
    """Open-set-recognition splitter for the TFF graph dataset.

    Loads the full dataset once, then ``sampler`` partitions it into
    seen-class train/validation sets and an unseen-class test set.
    """

    def __init__(self):
        self.dataset = get_dataset("TFF")
        # Class-name -> label-index mapping (8 entries declared; only the
        # first 7 labels are actually sampled below).
        self.classDict = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7}

    def sampler(self, seed, args):
        """Split the dataset for open-set recognition.

        Args:
            seed: optional RNG seed for the reproducible shuffle.
            args: unused; kept for interface compatibility with callers.

        Returns:
            (osr_trainset, osr_valset, osr_testset) as :class:`DataBuild`
            objects — seen-class train / seen-class test / unseen-class
            (train + test) items respectively.
        """
        # Per-class counts: 0: 194  1: 172  2: 433  3: 226  4: 106  5: 96  6: 667
        if seed is not None:
            random.seed(seed)
        seen_classes = [0, 1, 3, 5, 6]
        # seen_classes = random.sample(range(0, 7), 5)
        unseen_classes = [idx for idx in range(7) if idx not in seen_classes]
        print('seen_classes:{}'.format(seen_classes))
        print('unseen_classes:{}'.format(unseen_classes))

        # 1894 = total sample count (sum of the per-class counts above);
        # 1515 is an ~80/20 train/test split point.
        idx = list(np.arange(0, 1894))
        random.shuffle(idx)
        train_idx = idx[0:1515]
        test_idx = idx[1515:]

        train_dataset = self.dataset[train_idx]
        test_dataset = self.dataset[test_idx]

        # Seen classes: train portion feeds training, test portion validation.
        osr_trainset = [[g for g in train_dataset if g.y == cls]
                        for cls in seen_classes]
        osr_valset = [[g for g in test_dataset if g.y == cls]
                      for cls in seen_classes]
        # Unseen classes: every sample (train + test portions) goes to the
        # open-set test split.
        osr_testset = [[g for g in train_dataset if g.y == cls]
                       + [g for g in test_dataset if g.y == cls]
                       for cls in unseen_classes]
        # (The original also built an all-classes DataBuild that was never
        # returned or used — 7 extra full passes over the dataset; removed.)

        return DataBuild(osr_trainset), DataBuild(osr_valset), DataBuild(osr_testset)


class DataBuild(Dataset):
    """Flatten a list of per-class item lists into one indexable dataset.

    Each inner list of ``datasets`` holds the items belonging to a single
    class; indexing the combined dataset yields ``(item, class_index)``
    pairs, with classes laid out consecutively in the flat index space.
    """

    def __init__(self, datasets):
        """
        datasets: a list of get_class_i outputs, i.e. a list of list of images for selected classes
        """
        self.datasets = datasets
        self.lengths = [len(bin_items) for bin_items in datasets]

    def __getitem__(self, i):
        bin_idx, inner_idx = self.index_of_which_bin(self.lengths, i)
        return self.datasets[bin_idx][inner_idx], bin_idx

    def __len__(self):
        return sum(self.lengths)

    def index_of_which_bin(self, bin_sizes, absolute_index, verbose=False):
        """Map a flat index to (bin index, index within that bin)."""
        accum = np.add.accumulate(bin_sizes)
        if verbose:
            print("accum =", accum)
        # The target bin is the first one whose cumulative size exceeds i,
        # i.e. the count of cumulative sums that are still <= i.
        bin_index = len(np.argwhere(accum <= absolute_index))
        if verbose:
            print("class_label =", bin_index)
        # Offset of each bin's first element in the flat ordering.
        bin_starts = np.insert(accum, 0, 0)
        index_wrt_class = absolute_index - bin_starts[bin_index]
        if verbose:
            print("index_wrt_class =", index_wrt_class)

        return bin_index, index_wrt_class
