"""
加载数据
"""
from collections import defaultdict

import numpy as np


def load_cora(content_path="../../data/cora/cora.content",
              cites_path="../../data/cora/cora.cites",
              num_nodes=2708,
              num_feats=1433):
    """Load the Cora citation dataset.

    Each line of ``content_path`` is whitespace-separated as
    ``<node_id> <feat_1> ... <feat_k> <label>``; each line of
    ``cites_path`` is one edge ``<node_id> <node_id>``.

    Args:
        content_path: path to the node feature/label file (cora.content).
        cites_path: path to the citation edge-list file (cora.cites).
        num_nodes: number of nodes expected in the content file.
        num_feats: number of feature columns per node.

    Returns:
        feat_data: (num_nodes, num_feats) float array of node features.
        labels: (num_nodes, 1) int64 array of class indices; label strings
            (e.g. 'Neural_Networks') are mapped to ints in order of first
            appearance.
        adj_lists: dict mapping node index -> set of neighbor indices.
            Edges are stored undirected: (a, b) adds b to a's set and a to b's.
    """
    feat_data = np.zeros((num_nodes, num_feats))
    labels = np.empty((num_nodes, 1), dtype=np.int64)
    node_map = {}   # original node id (str) -> contiguous index from 0
    label_map = {}  # label string -> class index

    with open(content_path) as fp:
        for i, line in enumerate(fp):
            info = line.strip().split()
            # Columns 1..-2 are the features; convert them all to float.
            feat_data[i, :] = [float(x) for x in info[1:-1]]
            # Remap the original node id (info[0]) to a 0-based index.
            node_map[info[0]] = i
            # Assign each new label string the next free class index.
            if info[-1] not in label_map:
                label_map[info[-1]] = len(label_map)
            labels[i] = label_map[info[-1]]

    # Read the edge list into an undirected adjacency-set representation,
    # keyed by the remapped node indices.
    adj_lists = defaultdict(set)
    with open(cites_path) as fp:
        for line in fp:
            info = line.strip().split()
            paper1 = node_map[info[0]]
            paper2 = node_map[info[1]]
            adj_lists[paper1].add(paper2)
            adj_lists[paper2].add(paper1)
    # Example: edges (a,b) (a,c) (a,d) (b,c) (b,d) are stored as
    # {a: {b,c,d}, b: {a,c,d}, c: {a,b}, d: {a,b}}
    return feat_data, labels, adj_lists


def load_pubmed(node_path="../../data/pubmed-data/Pubmed-Diabetes.NODE.paper.tab",
                cites_path="../../data/pubmed-data/Pubmed-Diabetes.DIRECTED.cites.tab",
                num_nodes=19717,
                num_feats=500):
    """Load the Pubmed-Diabetes citation dataset.

    Args:
        node_path: path to the tab-separated node file. The first line is
            skipped; the second line's colon-separated column headers define
            the feature vocabulary; each remaining line is
            ``<paper_id>\\tlabel=<k>\\t<word>=<tf-idf>...\\t<summary>``.
        cites_path: path to the tab-separated edge file; the first two header
            lines are skipped.
        num_nodes: number of data rows expected in the node file.
        num_feats: number of feature columns per node.

    Returns:
        feat_data: (num_nodes, num_feats) float array of tf-idf features.
        labels: (num_nodes, 1) int64 array of class indices (the file's
            1-based labels shifted to 0-based).
        adj_lists: dict mapping node index -> set of neighbor indices;
            directed edges are stored in both directions.
    """
    feat_data = np.zeros((num_nodes, num_feats))
    labels = np.empty((num_nodes, 1), dtype=np.int64)
    node_map = {}  # original paper id (str) -> contiguous index from 0
    with open(node_path) as fp:
        fp.readline()  # skip the first header line
        # Second header line: entries look like "<type>:<word>:<...>"; map each
        # word token to a 0-based feature column (the leading non-feature
        # column lands at index -1 and is never referenced by data rows).
        feat_map = {entry.split(":")[1]: i - 1
                    for i, entry in enumerate(fp.readline().split("\t"))}
        for i, line in enumerate(fp):
            info = line.split("\t")
            node_map[info[0]] = i
            # info[1] is "label=<k>" with k starting at 1; store 0-based.
            labels[i] = int(info[1].split("=")[1]) - 1
            # Middle columns (excluding the trailing summary field) are
            # "<word>=<tf-idf>" pairs; scatter them into the feature row.
            for word_info in info[2:-1]:
                word_info = word_info.split("=")
                feat_data[i][feat_map[word_info[0]]] = float(word_info[1])
    adj_lists = defaultdict(set)
    with open(cites_path) as fp:
        fp.readline()  # two header lines to skip
        fp.readline()
        for line in fp:
            info = line.strip().split("\t")
            # Endpoint columns look like "paper:<id>"; keep only the id and
            # remap it through node_map.
            paper1 = node_map[info[1].split(":")[1]]
            paper2 = node_map[info[-1].split(":")[1]]
            adj_lists[paper1].add(paper2)
            adj_lists[paper2].add(paper1)
    return feat_data, labels, adj_lists
