"""
数据集说明:
ind.*.x : 训练集节点特征向量，保存对象为：scipy.sparse.csr.csr_matrix
ind.*.tx : 测试集节点特征向量，保存对象为：scipy.sparse.csr.csr_matrix
ind.*.allx : 包含有标签和无标签的训练节点特征向量，保存对象为：scipy.sparse.csr.csr_matrix，可以理解为除测试集以外的其他节点特征集合，训练集是它的子集
ind.*.y : one-hot表示的训练节点的标签，保存对象为：numpy.ndarray
ind.*.ty : one-hot表示的测试节点的标签，保存对象为：numpy.ndarray
ind.*.ally : one-hot表示的ind.cora.allx对应的标签，保存对象为：numpy.ndarray
ind.*.graph : 保存节点之间边的信息，保存格式为：{ index : [ index_of_neighbor_nodes ] }
ind.*.test.index : 保存测试集节点的索引，保存对象为：List，用于后面的归纳学习设置
"""
import pickle as pkl
import sys

import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch


def prepare_data(dataset, sparse):
    """Load and preprocess a citation dataset into torch tensors.

    :param dataset: dataset name (e.g. 'cora', 'citeseer', 'pubmed')
    :param sparse: if True, return the adjacency as a torch sparse tensor;
                   otherwise as a dense FloatTensor with a leading batch axis
    :return: (features, adj, labels, idx_train, idx_test,
              nb_nodes, ft_size, nb_classes)
    """
    adj, features, labels, idx_train, idx_test = load_data(dataset)
    features, _ = preprocess_features(features)

    # Number of nodes
    nb_nodes = features.shape[0]
    # Feature dimension
    ft_size = features.shape[1]
    # Number of classes (labels are one-hot)
    nb_classes = labels.shape[1]

    # GCN renormalization trick: D^-1/2 (A + I) D^-1/2
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))
    if sparse:
        adj = sparse_mx_to_torch_sparse_tensor(adj)
    else:
        # BUG FIX: the dense branch previously added the identity a second
        # time after normalization, making dense mode inconsistent with
        # sparse mode. The `sparse` flag should only change representation.
        # Convert via np.array (not np.matrix) before adding the batch axis.
        adj = torch.FloatTensor(np.array(adj.todense())[np.newaxis])

    features = torch.FloatTensor(features)
    labels = torch.FloatTensor(labels)
    idx_train = torch.LongTensor(idx_train)
    idx_test = torch.LongTensor(idx_test)

    return features, adj, labels, idx_train, idx_test, nb_nodes, ft_size, nb_classes


def load_data(dataset):
    """
    Load a Planetoid-format citation dataset from ``data/<dataset>/``.

    :param dataset: dataset name (e.g. 'cora', 'citeseer', 'pubmed')
    :return: (adj, features, labels, idx_train, idx_test) — adjacency
        matrix, node feature matrix, one-hot label matrix, training node
        indices, and test node indices. Note: no validation split is
        produced here.
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for name in names:
        with open('data/{}/ind.{}.{}'.format(dataset, dataset, name), 'rb') as f:
            # The pickles were written under Python 2; latin1 decoding is
            # required to load them under Python 3.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    # Test indices on disk are shuffled; keep both the shuffled order and
    # a sorted copy for the re-ordering steps below.
    test_idx_reorder = parse_index_file('data/{}/ind.{}.test.index'.format(dataset, dataset))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph):
        # find isolated nodes and add them as zero-vectors into the right
        # positions of the extended test feature/label matrices.
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended

    # Stack (labelled + unlabelled) training features with test features,
    # then restore the on-disk (shuffled) ordering of the test rows.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    # Same row re-ordering for the one-hot labels.
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    idx_test = test_idx_range.tolist()
    # The labelled training nodes occupy the first len(y) rows.
    idx_train = range(len(y))

    return adj, features, labels, idx_train, idx_test


def parse_index_file(filename):
    """Read one integer index per line from a text file.

    :param filename: path to a text file containing one integer per line
    :return: list of ints in file order
    """
    # BUG FIX: use a context manager — the original opened the file
    # without ever closing it (resource leak until GC).
    with open(filename) as f:
        return [int(line.strip()) for line in f]


def preprocess_features(features):
    """Row-normalize a feature matrix so every non-empty row sums to 1.

    Rows that sum to zero keep all-zero values: their inverse row sum
    (which would be infinite) is replaced by 0 before scaling.

    :param features: scipy sparse feature matrix
    :return: (dense normalized matrix, tuple form of the sparse
             normalized matrix via sparse_to_tuple)
    """
    inv_sums = np.power(np.asarray(features.sum(axis=1)).flatten(), -1)
    inv_sums[np.isinf(inv_sums)] = 0.
    normalized = sp.diags(inv_sums).dot(features)
    return normalized.todense(), sparse_to_tuple(normalized)


def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert scipy sparse matrices to (coords, values, shape) tuples.

    :param sparse_mx: a scipy sparse matrix, or a list of them
        (a list is converted element-by-element, in place)
    :param insert_batch: if True, prepend a batch dimension of size 1
        (an all-zero batch index is added as the first coordinate column)
    :return: one tuple, or the same list with each element converted
    """
    def _convert(mat):
        coo = mat if sp.isspmatrix_coo(mat) else mat.tocoo()
        if insert_batch:
            batch_idx = np.zeros(coo.row.shape[0])
            coords = np.vstack((batch_idx, coo.row, coo.col)).transpose()
            shape = (1,) + coo.shape
        else:
            coords = np.vstack((coo.row, coo.col)).transpose()
            shape = coo.shape
        return coords, coo.data, shape

    if isinstance(sparse_mx, list):
        # Preserve the original in-place semantics for list inputs.
        for idx, mat in enumerate(sparse_mx):
            sparse_mx[idx] = _convert(mat)
        return sparse_mx
    return _convert(sparse_mx)


def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 A D^-1/2.

    Nodes with zero degree get an inverse-sqrt degree of 0, so isolated
    rows/columns stay all-zero instead of producing infinities.

    :param adj: adjacency matrix (anything coo_matrix accepts)
    :return: normalized adjacency as a scipy COO matrix
    """
    adj = sp.coo_matrix(adj)
    degrees = np.asarray(adj.sum(axis=1)).flatten()
    inv_sqrt_deg = np.power(degrees, -0.5)
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt_deg)
    # (A D^-1/2)^T D^-1/2 == D^-1/2 A^T D^-1/2 (== D^-1/2 A D^-1/2 for
    # symmetric A), matching the original computation order.
    return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()


def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    :param sparse_mx: any scipy sparse matrix
    :return: float32 torch sparse COO tensor with the same indices,
             values and shape
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    # torch.sparse.FloatTensor is a deprecated legacy constructor;
    # torch.sparse_coo_tensor is the supported API and yields an
    # equivalent tensor.
    return torch.sparse_coo_tensor(indices, values, torch.Size(sparse_mx.shape))
