"""
@Filename       : dataloader.py
@Create Time    : 2020/11/2 16:38
@Author         : Rylynn
@Description    : 

"""

import networkx as nx
import torch
import torch.nn.utils.rnn as rnn_utils
import pickle as pkl
from torch.utils.data import Dataset, DataLoader


# class DiffuseSequenceDataSet(Dataset):
#     def __init__(self, filepath):
#         super(DiffuseSequenceDataSet, self).__init__()
#         self.filepath = filepath
#         self.instances = self.__load_data()
# 
#     def __load_data(self):
#         instances = pkl.load(open(self.filepath, 'rb'))
# 
#         return instances
# 
#     def __len__(self):
#         return len(self.instances)
# 
#     def __getitem__(self, idx):
#         return self.instances[idx]
# 
#     def __add__(self, other):
#         self.instances.append(other)


class DiffuseSequenceDataSet(Dataset):
    """Dataset of diffusion cascades read from a whitespace-separated text file.

    Each non-empty line is a cascade: "user,timestamp user,timestamp ...".
    Users are remapped through ``node2id``; timestamps are discarded. Only
    cascades that keep more than 1 and at most 500 known users are retained,
    and each retained cascade is terminated with the token ``1`` (EOS).
    """

    def __init__(self, filepath, node2id):
        """
        :param filepath: path to the cascade file (one cascade per line).
        :param node2id: mapping from raw user token to integer node id.
            NOTE(review): tokens parsed here are *strings*; make sure the
            supplied mapping is keyed by strings as well (``load_graph`` in
            this file historically produced int keys — confirm).
        """
        super(DiffuseSequenceDataSet, self).__init__()
        self.filepath = filepath
        self.node2id = node2id
        self.instances = self.__load_data()

    def __load_data(self):
        """Read all cascades from the training or testing file."""
        total_len = 0
        t_cascades = []
        # Use a context manager so the file handle is always closed.
        with open(self.filepath) as f:
            for line in f:
                if len(line.strip()) == 0:
                    continue
                userlist = []
                chunks = line.strip().split()
                for chunk in chunks:
                    user, timestamp = chunk.split(',')
                    # Membership test instead of truthy .get(): a node mapped
                    # to id 0 would otherwise be silently dropped.
                    if user in self.node2id:
                        userlist.append(self.node2id[user])

                # Keep cascades with at least 2 and at most 500 known users.
                if 1 < len(userlist) <= 500:
                    total_len += len(userlist)
                    t_cascades.append(userlist + [1])  # 1 = end-of-sequence token
        return t_cascades

    def __len__(self):
        return len(self.instances)

    def __getitem__(self, idx):
        return self.instances[idx]

    def __add__(self, other):
        # Non-standard: appends in place and returns None (kept for
        # backward compatibility with existing callers).
        self.instances.append(other)


def load_graph(filepath):
    """Load an undirected edge-list graph and build a node-id mapping.

    Each line of the file is "source target". Node ids are assigned in
    first-seen order starting from 1 (0 is reserved for padding).

    :param filepath: path to the whitespace-separated edge-list file.
    :return: ``(g, node2id)`` where ``g`` is an ``nx.Graph`` over the
        remapped integer ids and ``node2id`` maps the raw *string* token
        to its id — string keys so that ``DiffuseSequenceDataSet``'s
        lookups (which use the raw tokens) actually hit.
    """
    g = nx.Graph()
    node2id = dict()
    max_id = 1
    with open(filepath) as f:
        for line in f:
            # split() (not split(' ')) tolerates trailing newlines and
            # repeated whitespace; keep tokens as strings — converting to
            # int made the keys unmatchable by the string lookups in
            # DiffuseSequenceDataSet.__load_data.
            source, target = line.split()
            if source not in node2id:
                node2id[source] = max_id
                max_id = max_id + 1
            if target not in node2id:
                node2id[target] = max_id
                max_id = max_id + 1
            if not g.has_edge(node2id[source], node2id[target]):
                g.add_edge(node2id[source], node2id[target])
    print('Graph Node: {}, Edge: {}'.format(g.number_of_nodes(), g.size()))
    return g, node2id


# def sequence_collate_fn(data):
#     print(data)
#     data.sort(key=lambda x: len(x[0]), reverse=True)
#     seq = [instance[0] for instance in data]
#     next = [instance[1] for instance in data]
#     seq = [torch.LongTensor(s) for s in seq]
#     next = torch.LongTensor(next)
#     seq_length = [len(s) for s in seq]
#     seq = rnn_utils.pad_sequence(seq, batch_first=True, padding_value=0)
#     return seq, next, seq_length


def sequence_collate_fn(data):
    """Collate a batch of cascades into a padded LongTensor.

    :param data: iterable of cascades (lists of int node ids).
    :return: ``(padded, lengths)`` — cascades sorted longest-first,
        zero-padded to equal length with ``batch_first=True``, plus the
        list of original lengths in the same order.
    """
    ordered = sorted(data, key=len, reverse=True)
    tensors = [torch.LongTensor(cascade) for cascade in ordered]
    lengths = [tensor.size(0) for tensor in tensors]
    padded = rnn_utils.pad_sequence(tensors, batch_first=True, padding_value=0)
    return padded, lengths

# def sequence_collate_fn(data):
#     # cascade_length = [len(cascade) - 2 for _, cascade in data]
#     seq = []
#     next = []
#     for cascade in data:
#         for i in range(1, len(cascade)):
#             seq.append((cascade[:i], next))
#
#     seq.sort(key=lambda x: len(x[0]), reverse=True)
#     cascade = [torch.LongTensor(s[0]) for s in seq]
#     next = torch.LongTensor([s[1] for s in seq])
#
#     seq_length = [len(s) for s in seq]
#     cascade = rnn_utils.pad_sequence(cascade, padding_value=0, batch_first=)
#     return cascade, next, seq_length