"""
Data loading utilities for bAbI tasks used to train/evaluate a GGNN.
"""
import numpy as np
from torch.utils.data import DataLoader


class bAbIDataset:
    """
    bAbI task dataset for a GGNN.

    Loads graph/question groups from ``path``, records the maximum
    edge-type / task / node ids, splits the data into train and
    validation halves, and keeps only the examples for ``task_id``.
    """

    def __init__(self, path, task_id, is_train):
        # Parse all (edge_list, target_list) groups from the file.
        all_data = load_graphs_from_file(path)
        # Largest edge-type id seen in any graph.
        self.n_edge_types = find_max_edge_id(all_data)
        # Largest task (question-type) id seen in any evaluation line.
        self.n_tasks = find_max_task_id(all_data)
        # Largest node id seen in any graph.
        self.n_node = find_max_node_id(all_data)

        # First 50 groups are train, last 50 are validation.
        train_split, val_split = split_set(all_data)
        chosen_split = train_split if is_train else val_split
        # Group examples by question type, then keep only this task's data.
        self.data = data_convert(chosen_split, 1)[task_id]

    def __getitem__(self, index):
        # Each stored example is (edge_list, annotation, raw_target).
        edges, annotation, raw_target = self.data[index]
        # Expand the edge list into the [A_in, A_out] adjacency matrix.
        am = create_adjacency_matrix(edges, self.n_node, self.n_edge_types)
        # Targets are 1-based node ids; shift to 0-based class labels.
        return am, annotation, raw_target - 1

    def __len__(self):
        return len(self.data)


class bAbIDataloader(DataLoader):
    """Thin DataLoader subclass kept for API symmetry; adds no behavior."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


def load_graphs_from_file(file_name):
    """
    Parse a bAbI graph file into a list of (edge_list, target_list) groups.

    Groups are separated by blank lines. Within a group:
      - a graph line is ``node1 edge node2`` (three ints);
      - an evaluation line is ``? task node1 node2`` (leading ``?``).

    :param file_name: path to the bAbI graph file
    :return: list of ``[edge_list, target_list]`` pairs, one per group
    """
    data_list = []
    edge_list = []
    target_list = []

    def flush():
        # Close the current group; skip empty groups caused by
        # consecutive blank lines or a leading blank line.
        nonlocal edge_list, target_list
        if edge_list or target_list:
            data_list.append([edge_list, target_list])
            edge_list = []
            target_list = []

    with open(file_name, 'r') as f:
        for line in f:
            tokens = line.split()
            if not tokens:
                # Blank line: the current group is complete.
                flush()
            elif tokens[0] == "?":
                # Evaluation line: store [task, node1, node2] (drop the "?").
                target_list.append([int(tok) for tok in tokens[1:]])
            else:
                # Graph line: store [node1, edge, node2].
                edge_list.append([int(tok) for tok in tokens])
    # BUG FIX: the original only flushed on blank lines, so a file that
    # does not end with a trailing blank line lost its final group.
    flush()
    return data_list


def find_max_edge_id(data_list):
    """
    Return the largest edge-type id appearing in any graph.

    :param data_list: list of [edge_list, target_list] groups; each edge
        is [node1, edge_type, node2]
    :return: max edge-type id, or 0 if there are no edges
    """
    edge_type_ids = (
        edge[1]
        for group in data_list
        for edge in group[0]
    )
    return max(edge_type_ids, default=0)


def find_max_node_id(data_list):
    """
    Return the largest node id appearing at either end of any edge.

    :param data_list: list of [edge_list, target_list] groups; each edge
        is [node1, edge_type, node2]
    :return: max node id, or 0 if there are no edges
    """
    highest = 0
    for group in data_list:
        for src, _edge_type, dst in group[0]:
            highest = max(highest, src, dst)
    return highest


def find_max_task_id(data_list):
    """
    Return the largest task id among all evaluation questions.

    :param data_list: list of [edge_list, target_list] groups; each
        question is [task_id, node1, node2]
    :return: max task id, or 0 if there are no questions
    """
    task_ids = (
        question[0]
        for group in data_list
        for question in group[1]
    )
    return max(task_ids, default=0)


def split_set(data_list):
    """
    Split the examples: first 50 are train, last 50 are validation.

    :param data_list: list of [edge_list, target_list] groups
    :return: (train, validation) — the first 50 and last 50 groups

    NOTE(review): if there are fewer than 100 groups the two halves
    overlap, matching the original behavior.
    """
    # BUG FIX: the original built np.array(data_list) over ragged nested
    # lists, which raises "inhomogeneous shape" ValueError on numpy >= 1.24.
    # Plain list slicing performs the identical selection and supports
    # everything downstream code does (iteration, indexing, len).
    return data_list[:50], data_list[-50:]


def data_convert(data_list, n_annotation_dim):
    """
    Regroup examples by question (task) type.

    One model is trained per task type, so each group's questions are
    expanded into per-question examples and bucketed by their task id.

    :param data_list: list of [edge_list, target_list] groups
    :param n_annotation_dim: width of the per-node annotation vector
    :return: list indexed by (task_id - 1); each entry is a list of
        [edge_list, annotation, task_output] examples
    """
    n_nodes = find_max_node_id(data_list)
    n_tasks = find_max_task_id(data_list)
    # One bucket per task type.
    buckets = [[] for _ in range(n_tasks)]
    for edge_list, target_list in data_list:
        for question in target_list:
            task_type = question[0]
            task_output = question[-1]
            # Annotation: one-hot mark on the question's source node
            # (1-based id), all other nodes zero.
            annotation = np.zeros([n_nodes, n_annotation_dim])
            annotation[question[1] - 1][0] = 1
            buckets[task_type - 1].append([edge_list, annotation, task_output])
    return buckets


def create_adjacency_matrix(edges, n_nodes, n_edge_types):
    """
    Build the GGNN adjacency matrix A = [A_in, A_out].

    Shape is (n_nodes, n_nodes * n_edge_types * 2): the first
    n_nodes * n_edge_types columns hold the incoming blocks (one
    n_nodes-wide block per edge type), the rest hold the outgoing blocks.

    :param edges: list of [src, edge_type, dst] with 1-based ids
    :param n_nodes: total number of nodes
    :param n_edge_types: total number of edge types
    :return: numpy array of shape (n_nodes, n_nodes * n_edge_types * 2)
    """
    a = np.zeros([n_nodes, n_nodes * n_edge_types * 2])
    for src, edge_type, dst in edges:
        # Column of src within the incoming block for this edge type.
        in_col = (edge_type - 1) * n_nodes + (src - 1)
        # Column of dst within the outgoing block (offset past all A_in).
        out_col = (edge_type - 1 + n_edge_types) * n_nodes + (dst - 1)
        a[dst - 1, in_col] = 1
        a[src - 1, out_col] = 1
    return a
