"""
@Filename       : preprocess.py
@Create Time    : 2020/11/2 21:10
@Author         : Rylynn
@Description    : 

"""

import os
import pickle as pkl
import random

import networkx as nx

from model.het_diffuse.het_graph_builder import load_vocab_dict


def convert_cascade_to_instances(sequence,
                                 node2id,
                                 inference):
    """Turn one diffusion cascade into (prefix, label) examples.

    Args:
        sequence: iterable of raw node identifiers (str or int).
        node2id: mapping from int node id to vocabulary index.
        inference: if True, emit a single (full_prefix, None) example;
            otherwise emit one (prefix, next_node) pair per position.

    Returns:
        When ``inference`` is False, a list of (prefix, label) tuples
        (empty for sequences shorter than 2 — the original fell through
        and returned None, crashing the caller's ``extend``).
        When ``inference`` is True, a single (prefix, None) tuple whose
        prefix is the whole mapped sequence.
    """
    sequence = [node2id[int(node)] for node in sequence]
    if inference:
        # One example: the entire observed prefix, label unknown.
        return (sequence, None)
    # Every position except the last predicts its successor.
    return [(sequence[: idx + 1], sequence[idx + 1])
            for idx in range(len(sequence) - 1)]


def process_dataset(root_dir, file_name, node2id, maxlen=None, inference=True):
    """Convert a cascade file into instances, pickle them, return them.

    Input format is one cascade per line:
    ``root node_1 t_1 node_2 t_2 ... node_k t_k`` (space-separated).
    The pickled output is written next to the input as ``<stem>.pkl``.

    Args:
        root_dir: directory containing the cascade file.
        file_name: cascade file name, e.g. 'train.txt'.
        node2id: mapping from int node id to vocabulary index.
        maxlen: if given, truncate each cascade to this many nodes.
        inference: forwarded to convert_cascade_to_instances.

    Returns:
        The list of generated instances.
    """
    instances_list = []
    filename = os.path.join(root_dir, file_name)
    with open(filename, 'r') as f:
        for line in f:
            cascade = line.strip().split(' ')
            # Nodes sit at odd positions, timestamps at even positions.
            # int()/float() instead of eval(): evaluating file content
            # is unsafe and slower.
            sequence = [int(c) for c in cascade[1:-2:2]]
            time = [float(t) for t in cascade[2:-1:2]]
            if len(sequence) < 2:
                continue
            if maxlen is not None:
                sequence = sequence[:maxlen]
                time = time[:maxlen]
            instances = convert_cascade_to_instances(sequence,
                                                    node2id=node2id,
                                                    inference=inference)
            if inference:
                # Inference mode yields one (prefix, None) tuple;
                # extend() would wrongly flatten it into two items.
                instances_list.append(instances)
            else:
                instances_list.extend(instances)

    # Context manager closes the dump file deterministically
    # (the original leaked the handle).
    dump_path = os.path.join(root_dir, file_name.split('.')[0] + '.pkl')
    with open(dump_path, mode='wb') as dump_file:
        pkl.dump(instances_list, dump_file)
    return instances_list


def load_vocab_table(rootpath):
    """Build a node -> contiguous-id vocabulary from the social graph
    and the train/test cascade files under *rootpath*.

    Ids are assigned in first-seen order starting at 1 (0 is left free,
    presumably for padding — TODO confirm against the model code).
    Graph edges are scanned first, then 'train.txt', then 'test.txt'.

    Args:
        rootpath: directory containing 'graph.txt', 'train.txt'
            and 'test.txt'.

    Returns:
        dict mapping int raw node identifier to int vocabulary id.
    """
    node2id = dict()
    next_id = 1

    graph_path = os.path.join(rootpath, 'graph.txt')
    with open(graph_path) as gf:
        for line in gf:
            source, target = line.strip().split(' ')
            # int() instead of eval(): evaluating file content is unsafe.
            next_id = _register_node(int(source), node2id, next_id)
            next_id = _register_node(int(target), node2id, next_id)

    # The original duplicated this loop verbatim for train and test.
    for cascade_file in ('train.txt', 'test.txt'):
        next_id = _register_cascade_nodes(
            os.path.join(rootpath, cascade_file), node2id, next_id)

    return node2id


def _register_node(node, node2id, next_id):
    """Assign *next_id* to *node* if unseen; return the next free id."""
    if node not in node2id:
        node2id[node] = next_id
        next_id += 1
    return next_id


def _register_cascade_nodes(filepath, node2id, next_id):
    """Register the root user and cascade nodes of every line in *filepath*."""
    with open(filepath) as f:
        for line in f:
            cascade = line.strip().split(' ')
            # Root user comes first; nodes sit at odd positions,
            # timestamps (skipped here) at even positions.
            next_id = _register_node(int(cascade[0]), node2id, next_id)
            for raw_node in cascade[1:-2:2]:
                next_id = _register_node(int(raw_node), node2id, next_id)
    return next_id


def load_graph(rootpath):
    """Load the directed social graph from '<rootpath>/graph.txt',
    remapping raw node ids to contiguous ids starting at 1.

    Side effects: pickles the raw-id -> remapped-id mapping to
    '<rootpath>/node2id.pkl' and prints the graph size.

    Args:
        rootpath: directory containing 'graph.txt', one
            'source target' edge per line.

    Returns:
        (g, node2id): the networkx DiGraph over remapped ids, and the
        raw-id -> remapped-id dict.
    """
    g = nx.DiGraph()
    node2id = dict()
    next_id = 1
    filepath = os.path.join(rootpath, 'graph.txt')
    with open(filepath) as f:
        for line in f:
            source, target = line.split(' ')
            source = int(source)
            target = int(target)
            for node in (source, target):
                if node not in node2id:
                    node2id[node] = next_id
                    next_id += 1
                    g.add_node(node2id[node])
            g.add_edge(node2id[source], node2id[target])
    # NOTE: the original also built and sorted a degree list ('dl')
    # that was never used — dead code, removed.

    print('Graph Node: {}, Edge: {}'.format(g.number_of_nodes(), g.size()))
    # Context manager closes the pickle file deterministically
    # (the original leaked the handle).
    with open(os.path.join(rootpath, 'node2id.pkl'), 'wb') as pf:
        pkl.dump(node2id, pf)
    return g, node2id


def main():
    """Preprocess the twitter cascades into pickled training instances."""
    dataset_root = '../../data'
    dataset = 'twitter'
    node2id, _, _ = load_vocab_dict(dataset_root, dataset)
    data_dir = '{}/{}'.format(dataset_root, dataset)
    for split_file in ('train.txt', 'test.txt'):
        process_dataset(data_dir, split_file, node2id,
                        maxlen=100, inference=False)


if __name__ == '__main__':
    main()
