"""
@Filename       : dataloader.py
@Create Time    : 2020/11/2 16:38
@Author         : Rylynn
@Description    : Loading and batching utilities for diffusion-cascade datasets.

"""
import ast
import os
import pickle as pkl
import random

import networkx as nx
import torch
import torch.nn.utils.rnn as rnn_utils
from torch.utils.data import Dataset, DataLoader


class DiffuseSequenceDataset(Dataset):
    """Dataset of diffusion cascades read from ``<root_path>/<dataset>/{train,test}.txt``.

    Each line is expected to be a space-separated cascade: a root-user token
    followed by alternating ``user time`` pairs.  Users are remapped to
    contiguous integer ids starting at 1 (0 is left free for padding).
    Each dataset item is a ``(sequence, time)`` tuple of parallel lists:
    remapped user ids and their timestamps.

    Args:
        root_path: directory containing the per-dataset sub-directories.
        dataset:   name of the dataset sub-directory.
        train:     if True load ``train.txt``, otherwise ``test.txt``.
        max_len:   optional cap on cascade length (applied after filtering).
    """

    def __init__(self, root_path, dataset, train, max_len=None):
        super(DiffuseSequenceDataset, self).__init__()
        self.root_path = root_path
        self.dataset = dataset
        self.train = train
        self.max_len = max_len
        print('Loading dataset: {}'.format(dataset))
        # Vocabulary is built over BOTH splits so train/test share one id space.
        self.node2id = self.__load_vocab_table()
        self.instances = self.__load_data()
        print('Finish dataset loading with {} nodes and {} cascades'.format(len(self.node2id), len(self.instances)))

    def __load_data(self):
        """Parse the selected split into a list of (sequence, time) tuples."""
        filepath = os.path.join(self.root_path, self.dataset, 'train.txt' if self.train else 'test.txt')

        instances_list = []
        with open(filepath, 'r') as f:
            for line in f:
                cascade = line.strip().split(' ')
                # Users sit at odd indices, timestamps at even indices; the
                # slicing deliberately drops the final (user, time) pair.
                # ast.literal_eval (not eval) parses numeric literals safely.
                sequence = [self.node2id[ast.literal_eval(c)] for c in cascade[1:-2:2]]
                time = [ast.literal_eval(t) for t in cascade[2:-1:2]]
                if len(sequence) < 2:
                    # Too short to form an input/target pair.
                    continue
                if self.max_len is not None:
                    sequence = sequence[:self.max_len]
                    time = time[:self.max_len]
                instances_list.append((sequence, time))

        return instances_list

    def __load_vocab_table(self):
        """Build the user -> contiguous-id table over both splits (ids start at 1)."""
        root_path = os.path.join(self.root_path, self.dataset)
        node2id = dict()
        for filename in ('train.txt', 'test.txt'):
            self.__scan_cascade_file(os.path.join(root_path, filename), node2id)
        return node2id

    @staticmethod
    def __scan_cascade_file(filepath, node2id):
        """Register every user seen in `filepath` into `node2id` (in place)."""
        with open(filepath) as f:
            for line in f:
                cascade = line.strip().split(' ')
                # Root user first, then the users selected by the same
                # slicing used in __load_data.
                users = [ast.literal_eval(cascade[0])]
                users.extend(ast.literal_eval(c) for c in cascade[1:-2:2])
                for user in users:
                    if user not in node2id:
                        # len(node2id) + 1 keeps ids contiguous from 1.
                        node2id[user] = len(node2id) + 1

    def __len__(self):
        return len(self.instances)

    def __getitem__(self, idx):
        return self.instances[idx]

    def __add__(self, other):
        # NOTE(review): unlike torch's Dataset.__add__ (which concatenates
        # datasets), this appends `other` as a single instance.  Behavior is
        # kept, but we now return self so `a + b` no longer evaluates to None.
        self.instances.append(other)
        return self


def sequence_collate_fn(data):
    """Collate a batch of (sequence, time) tuples into padded tensors.

    Sorts the batch longest-first (note: sorts `data` in place), converts
    each cascade to tensors, and zero-pads both sequences and timestamps
    to the length of the longest cascade.

    Returns:
        (LongTensor [B, T], FloatTensor [B, T], list[int]): padded user-id
        sequences, padded timestamps, and the original lengths.
    """
    # Longest-first ordering; intentionally mutates the caller's list.
    data.sort(key=lambda item: len(item[0]), reverse=True)
    seq_tensors = [torch.LongTensor(s) for s, _ in data]
    time_tensors = [torch.FloatTensor(t) for _, t in data]
    lengths = [s.size(0) for s in seq_tensors]
    padded_seq = rnn_utils.pad_sequence(seq_tensors, batch_first=True, padding_value=0)
    padded_time = rnn_utils.pad_sequence(time_tensors, batch_first=True, padding_value=0)

    return padded_seq, padded_time, lengths


def load_graph(rootpath):
    """Load a directed graph from ``<rootpath>/graph.txt``.

    Each non-empty line must be ``source,target`` with integer node ids.
    Ids are remapped to contiguous integers starting at 1 (0 left free
    for padding, matching the dataset's id convention).

    Returns:
        (nx.DiGraph, dict): the remapped graph and the original-id ->
        remapped-id table.
    """
    g = nx.DiGraph()
    node2id = dict()
    filepath = os.path.join(rootpath, 'graph.txt')
    with open(filepath) as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing lines instead of crashing in int().
                continue
            source_str, target_str = line.split(',')
            source = int(source_str)
            target = int(target_str)
            for node in (source, target):
                if node not in node2id:
                    # len(node2id) + 1 keeps ids contiguous from 1.
                    node2id[node] = len(node2id) + 1
                    g.add_node(node2id[node])
            g.add_edge(node2id[source], node2id[target])
    print('Graph Node: {}, Edge: {}'.format(g.number_of_nodes(), g.size()))
    return g, node2id


def run():
    """Smoke test: load the memes training split and print batch shapes."""
    dataset = DiffuseSequenceDataset('../../data', 'memes', train=True)
    loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=sequence_collate_fn)

    for batch_idx, batch in enumerate(loader):
        sequence, time, data_length = batch
        print(sequence.shape)
        print(time.shape)
        print(data_length)


if __name__ == '__main__':
    run()