"""
@Filename       : het_graph_builder.py
@Create Time    : 2020/12/12 8:11
@Author         : Rylynn
@Description    : 

"""
import os

import dgl
import numpy as np
import torch as th
import matplotlib.pyplot as plt
from dgl import convert


def node2id(vocab_dict, node):
    """Return the integer id for *node*, assigning the next free id on first sight.

    Ids are assigned consecutively starting from 1, so they are dense and
    never take the falsy value 0.

    Args:
        vocab_dict: mapping node -> id; mutated in place for unseen nodes.
        node: hashable node key.

    Returns:
        The (possibly newly assigned) integer id of ``node``.
    """
    existing = vocab_dict.get(node)
    # Explicit None check: a truthiness test would wrongly treat id 0 as missing.
    if existing is not None:
        return existing
    # Bug fix: the original computed max_id = len(...) + 1 and then stored
    # max_id + 1, double-incrementing so ids started at 2 instead of 1.
    new_id = len(vocab_dict) + 1
    vocab_dict[node] = new_id
    return new_id


def load_vocab_dict(root_path, dataset):
    """Build the id vocabularies from a dataset's cascade files.

    Scans the train/valid/test cascade files under ``root_path/dataset`` and
    assigns ids to:
      * every user seen in any cascade (``vocab_dict``),
      * every diffusion source of the train and test cascades
        (``diffusion_sources_dict``, keys ``'S<user>'``),
      * every train-file line (``info_dict``, keys ``'I<idx>'``).

    NOTE(review): valid-set sources are parsed but deliberately NOT added to
    ``diffusion_sources_dict`` — this matches the original behaviour; confirm
    it is intended.

    Args:
        root_path: directory containing the dataset folder.
        dataset: dataset folder name.

    Returns:
        (vocab_dict, diffusion_sources_dict, info_dict)
    """
    vocab_dict = dict()
    diffusion_sources_dict = dict()
    info_dict = dict()
    data_root_path = os.path.join(root_path, dataset)

    cascade_train_file_path = os.path.join(data_root_path, 'cascade.txt')
    cascade_valid_file_path = os.path.join(data_root_path, 'cascadevalid.txt')
    cascade_test_file_path = os.path.join(data_root_path, 'cascadetest.txt')

    def _collect_sources(path):
        # Register every cascade user in vocab_dict and return the
        # 'S<root-user>' label of each cascade's first (source) user.
        sources = []
        # Bug fix: the original opened the file twice — `with open(...) as cf`
        # and then iterated a second, never-closed `open(path)` — leaking a
        # file handle per call. Iterate the managed handle instead.
        with open(path) as cf:
            for line in cf:
                if len(line.strip()) == 0:
                    continue
                cascade = []
                for chunk in line.strip().split():
                    user, _timestamp = chunk.split(',')
                    cascade.append(user)
                    node2id(vocab_dict, user)
                sources.append('S{}'.format(cascade[0]))
        return sources

    # load user nodes in training/valid/testing cascades
    train_diffusion_sources = _collect_sources(cascade_train_file_path)
    valid_diffusion_sources = _collect_sources(cascade_valid_file_path)  # users registered; sources unused below
    test_diffusion_sources = _collect_sources(cascade_test_file_path)

    # load diffusion sources in training and testing cascades
    for source in train_diffusion_sources + test_diffusion_sources:
        node2id(diffusion_sources_dict, source)

    # load information nodes: one per train-file line (empty lines included,
    # so indices line up with enumerate() over the same file elsewhere)
    with open(cascade_train_file_path) as cf:
        for information_idx, _ in enumerate(cf):
            information_node = 'I{}'.format(information_idx)
            node2id(info_dict, information_node)

    return vocab_dict, diffusion_sources_dict, info_dict


def load_social_context(graph_file, vocab_dict):
    """Build the user-user edge list.

    Emits one self-loop per known user, plus every edge from *graph_file*
    (one 'source,target' pair per line) whose endpoints are both in the
    vocabulary. If the file does not exist, only self-loops are returned.

    Args:
        graph_file: path to the social edge file; may be absent.
        vocab_dict: mapping user -> id.

    Returns:
        List of (src_id, dst_id) tuples; may contain duplicates.
    """
    # Self-loops keep every user connected to itself even if isolated.
    uu_edges = [(v, v) for v in vocab_dict.values()]

    if not os.path.exists(graph_file):
        return uu_edges

    with open(graph_file) as gf:
        for line in gf:
            source, target = line.strip().split(',')
            # Bug fix: membership tests instead of `.get()` truthiness — a
            # falsy id (e.g. 0) would otherwise silently drop valid edges.
            if source in vocab_dict and target in vocab_dict:
                uu_edges.append((vocab_dict[source], vocab_dict[target]))

    return uu_edges


def load_cascade_context(cascade_file, vocab_dict, ds_dict, info_dict, maxlen):
    """Read cascades and emit the heterogeneous edge lists.

    For every cascade longer than two users (truncated to *maxlen*), each
    participating user U is linked in both directions with the cascade's
    information node I and its diffusion-source node S.

    Args:
        cascade_file: path to the cascade file ('user,timestamp' chunks per line).
        vocab_dict: user -> id mapping.
        ds_dict: 'S<user>' -> id mapping for diffusion sources.
        info_dict: 'I<line-idx>' -> id mapping for information nodes.
        maxlen: maximum number of users kept per cascade.

    Returns:
        Tuple of edge lists (ui, iu, is, si, us, su), each of (src, dst) id pairs.
    """
    ui_edges, iu_edges = [], []
    is_edges, si_edges = [], []
    us_edges, su_edges = [], []

    with open(cascade_file) as cf:
        for cascade_idx, line in enumerate(cf):
            stripped = line.strip()
            if not stripped:
                continue

            chunks = stripped.split()
            cascade = []
            for chunk in chunks:
                user, _timestamp = chunk.split(',')
                cascade.append(vocab_dict[user])

            # Cascades with two or fewer users carry no useful structure.
            if len(cascade) <= 2:
                continue
            cascade = cascade[:maxlen]

            info_node = info_dict['I{}'.format(cascade_idx)]
            root_user = ds_dict['S{}'.format(chunks[0].split(',')[0])]

            is_edges.append((info_node, root_user))
            si_edges.append((root_user, info_node))
            for user_id in cascade:
                ui_edges.append((user_id, info_node))
                iu_edges.append((info_node, user_id))
                us_edges.append((user_id, root_user))
                su_edges.append((root_user, user_id))

    return ui_edges, iu_edges, is_edges, si_edges, us_edges, su_edges


def build_het_graph(root_path, dataset, maxlen=100):
    """Assemble the U/I/S heterogeneous graph for *dataset*.

    Node types: U = users, I = information (cascade) nodes, S = diffusion
    sources. The user-source edge lists are computed but not placed in the
    graph, matching the original construction.

    Args:
        root_path: directory containing the dataset folder.
        dataset: dataset folder name.
        maxlen: maximum cascade length kept per cascade.

    Returns:
        A ``dgl.DGLHeteroGraph`` with edge types UU, UI, IU, SI and IS.
    """
    print('Building heterogeneous network from dataset: {}...'.format(dataset))
    data_root_path = os.path.join(root_path, dataset)

    vocab_dict, ds_dict, info_dict = load_vocab_dict(root_path, dataset)
    graph_file_path = os.path.join(data_root_path, 'edges.txt')
    cascade_file_path = os.path.join(data_root_path, 'cascade.txt')

    uu_edges = load_social_context(graph_file_path, vocab_dict)
    (ui_edges, iu_edges, is_edges, si_edges,
     us_edges, su_edges) = load_cascade_context(cascade_file_path, vocab_dict,
                                                ds_dict, info_dict, maxlen)

    def unique(edges):
        # Drop duplicate edges; resulting order is unspecified, as before.
        return list(set(edges))

    het_graph: dgl.DGLHeteroGraph = dgl.heterograph({
        ('U', 'UU', 'U'): unique(uu_edges),
        ('U', 'UI', 'I'): unique(ui_edges),
        ('I', 'IU', 'U'): unique(iu_edges),
        ('S', 'SI', 'I'): unique(si_edges),
        ('I', 'IS', 'S'): unique(is_edges),
    })
    print('Graph building finish...')
    print(het_graph)
    return het_graph


def metapath_reachable_graph(g, metapath, threshold):
    """Build a graph connecting node pairs reachable along *metapath*.

    Variant of ``dgl.metapath_reachable_graph`` that keeps an edge (u, v)
    only when the chained adjacency product has an entry of at least
    ``threshold`` for (u, v) — i.e. at least that many metapath instances
    connect the pair (assuming the product entries count paths; see note
    below).

    Args:
        g: source heterogeneous graph.
        metapath: sequence of edge-type names forming the path.
        threshold: minimum path count for an edge to survive.

    Returns:
        A new graph with a single edge type '_E' from the metapath's source
        node type to its destination node type, with node features copied
        over from ``g``.
    """
    adj = 1
    for etype in list(metapath):
        # Chain the per-etype sparse adjacency matrices; the scalar seed 1
        # makes the first multiplication a no-op.
        # NOTE(review): assumes this dgl version's adj(transpose=True)
        # yields src-by-dst orientation so the product composes the
        # metapath left-to-right — confirm against the installed dgl.
        adj = adj * g.adj(etype=etype, scipy_fmt='csr', transpose=True)

    # Keep only pairs reached by at least `threshold` metapath instances.
    adj = (adj >= threshold).tocsr()
    srctype = g.to_canonical_etype(metapath[0])[0]
    dsttype = g.to_canonical_etype(metapath[-1])[2]
    new_g = convert.heterograph({(srctype, '_E', dsttype): adj.nonzero()},
                                {srctype: adj.shape[0], dsttype: adj.shape[1]},
                                idtype=g.idtype, device=g.device)

    # copy srcnode features
    new_g.nodes[srctype].data.update(g.nodes[srctype].data)
    # copy dstnode features
    if srctype != dsttype:
        new_g.nodes[dsttype].data.update(g.nodes[dsttype].data)

    return new_g


def similarity_test(g, metapath1, metapath2):
    """Print overlap statistics between the edge sets of two metapath graphs.

    ``metapath1`` is materialised with threshold 1 and ``metapath2`` with
    threshold 6 (the original hard-coded constants). Prints the two edge-set
    sizes, the size of their intersection, and the fraction of metapath2
    edges that also appear under metapath1.
    """
    def edge_set(graph):
        # Collect the graph's edges as a set of (src, dst) int pairs.
        sources, targets = graph.edges()
        return {(int(s), int(t)) for s, t in zip(sources, targets)}

    g1_set = edge_set(metapath_reachable_graph(g, metapath1, 1))
    g2_set = edge_set(metapath_reachable_graph(g, metapath2, 6))

    print(len(g1_set))
    print(len(g2_set))
    print(len(g1_set & g2_set))
    print(len(g1_set.intersection(g2_set)) / len(g2_set))


def main():
    """Build the heterogeneous graph for one dataset, report degree
    statistics for several metapath-induced graphs, and dump the plain
    U-U metapath edge list to ``edges_id.txt`` (one 'src dst' pair per line).
    """
    dataset = 'twitter'
    maxlen = 500
    het_graph = build_het_graph('../../../data', dataset, maxlen)
    print(het_graph)
    similarity_test(het_graph, ['UU'], ['UI', 'IS', 'SI', 'IU'])

    def _report(title, graph):
        # Shared inspection output: banner, mean degree, the graph itself,
        # and a fanout-10 neighbour sample of every node.
        print('- ' * 50)
        print(title)
        sampled_g = dgl.sampling.sample_neighbors(
            graph, th.arange(0, graph.number_of_nodes()), fanout=10)
        print(np.mean(list(map(lambda x: x[1], dgl.to_networkx(graph).degree))))
        print(graph)
        print(sampled_g)

    _report('Metapath U-I-U', metapath_reachable_graph(het_graph, ['UI', 'IU'], 5))
    _report('Metapath U-S-I-S-U', metapath_reachable_graph(het_graph, ['UI', 'IS', 'SI', 'IU'], 6))
    _report('Metapath U-U-U', metapath_reachable_graph(het_graph, ['UU', 'UU'], 40))

    new_g = dgl.metapath_reachable_graph(het_graph, ['UU'])
    _report('Metapath U-U', new_g)

    source, target = new_g.edges()
    # Bug fixes: use a context manager (the handle leaked on any exception),
    # and write plain integer ids — formatting the 0-d tensors directly
    # produced 'tensor(5) tensor(3)'-style lines.
    with open('../../../data/{}/edges_id.txt'.format(dataset), 'w+') as edges_list_file:
        for s, t in zip(source, target):
            edges_list_file.write('{} {}\n'.format(int(s), int(t)))


if __name__ == '__main__':
    main()









