import networkx as nx
import gensim
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
from gensim.models import Word2Vec
from sklearn.decomposition import PCA

'''
DeepWalk graph embedding of Wikipedia "See also" link data
    (uses gensim as the Word2Vec tooling)
'''


def get_random_walk(Graph, node, path_length):
    """Return a random-walk node sequence starting at ``node``.

    The walk contains at most ``path_length`` nodes. Each step moves to a
    uniformly-random neighbour of the current node that has not yet
    appeared in the walk; the walk terminates early when the current node
    has no unvisited neighbours (a dead end).
    """
    walk = [node]
    current = node
    while len(walk) < path_length:
        # Candidate next hops: neighbours not already visited on this walk.
        candidates = list(set(Graph.neighbors(current)) - set(walk))
        # Dead end: target length not reached but nowhere left to go.
        if not candidates:
            break
        current = random.choice(candidates)
        walk.append(current)

    return walk


if __name__ == '__main__':
    # Use the SimHei font so matplotlib can render CJK (Chinese) labels
    plt.rcParams['font.sans-serif'] = ['SimHei']
    # Render the minus sign correctly while a CJK font is active
    plt.rcParams['axes.unicode_minus'] = False

    """
    获取Wikipedia 引用关联数据
    screw_page: https://densitydesign.github.io/strumentalia-seealsology/
    1. https://en.wikipedia.org/wiki/Computer_vision
    2. https://en.wikipedia.org/wiki/Deep_learning
    3. https://en.wikipedia.org/wiki/Convolutional_neural_network
    4. https://en.wikipedia.org/wiki/Decision_tree
    5. https://en.wikipedia.org/wiki/Support-vector_machine
    第一列是初始节点, 第二列是终止节点, 第三列是边数(几跳)
    """
    # Load the "See also" link dataset (TSV): first column is the source
    # node, second the target node, third the link depth (number of hops).
    df = pd.read_csv(r'dataset/seealsology-data.tsv', sep='\t')
    print(df.shape)

    """构建无向图"""
    # Build an undirected graph from the (source, target) edge list,
    # keeping the remaining columns as edge attributes.
    G = nx.from_pandas_edgelist(df, 'source', 'target', edge_attr=True, create_using=nx.Graph())
    print(len(G))

    """生成随机游走函数get_random_walk"""
    # Sanity-check a single random walk (max length 5) from one node.
    random_walk_list = get_random_walk(G, 'random forest', 5)
    print(random_walk_list)

    """生成随机游走序列"""
    # Number of random-walk sequences started from each node
    gamma = 10
    # Maximum length of each random-walk sequence
    walk_length = 5
    random_walks = []
    for n in tqdm(list(G.nodes())):
        for i in range(gamma):
            random_walks.append(get_random_walk(G, n, walk_length))
    # Expected count: gamma * number of nodes (85600 for this dataset)
    print(len(random_walks))

    """训练word2Vec model"""
    # Treat each random walk as a "sentence" and each node as a "word".
    model = Word2Vec(vector_size=256,  # embedding dimension
                     window=4,  # context window (4 nodes on each side of the centre node)
                     sg=1,  # skip-gram: predict context from centre word (0 would select CBOW)
                     hs=0,  # no hierarchical softmax
                     negative=10,  # number of negative samples
                     alpha=0.03,  # initial learning rate
                     min_alpha=0.00007,  # minimum learning rate
                     seed=14)  # random seed
    # Build the vocabulary from the random-walk sequences
    model.build_vocab(random_walks, progress_per=2)
    # Train the node embeddings
    model.train(random_walks, total_examples=model.corpus_count, epochs=100, report_delay=1)

    """分析word2Vec结果"""
    # Inspect the learned embedding of a single node
    print(model.wv.get_vector('random forest').shape)
    print(model.wv.get_vector('random forest'))
    # Nearest neighbours of a node in embedding space
    print(model.wv.similar_by_word('decision tree'))

    """
    PCA 降维可视化
    由于之前设定的是256维, 无法直接可视化, 因此使用PCA降至两维再可视化
    """
    # The 256-d embeddings cannot be plotted directly, so project them to 2-d.
    pca = PCA(n_components=2)
    embed_2d = pca.fit_transform(model.wv.vectors)
    print(embed_2d.shape)

    """绘图"""
    # Scatter plot of every node's 2-d embedding
    plt.figure(figsize=(14, 14))
    plt.scatter(embed_2d[:, 0], embed_2d[:, 1])
    plt.show()

    """可视化某个词条的二维Embedding"""
    # Highlight one term's 2-d embedding (red dot) on top of the full scatter.
    term = 'computer vision'
    term_256d = model.wv[term].reshape(1, -1)
    # Reuse the already-fitted PCA so the point lands in the same 2-d space.
    term_2d = pca.transform(term_256d)
    plt.figure(figsize=(14, 14))
    plt.scatter(embed_2d[:, 0], embed_2d[:, 1])
    plt.scatter(term_2d[:, 0], term_2d[:, 1], c='r', s=200)
    plt.show()

    """可视化某些词条的二维Embedding"""
    # Compute PageRank importance for every node
    pagerank = nx.pagerank(G)
    # Sort nodes by importance, highest first
    node_importance = sorted(pagerank.items(), key=lambda x: x[1], reverse=True)
    # Keep the n most important nodes
    n = 30
    terms_chosen = []
    for each in node_importance[:n]:
        terms_chosen.append(each[0])
    # Manually add further terms of interest
    terms_chosen.extend(['computer vision', 'deep learning', 'convolutional neural network', 'convolution',
                         'natural-language processing', 'attention (machine learning)', 'support-vector machine',
                         'decision tree', 'random forest', 'computational imaging', 'machine vision',
                         'cognitive science', 'neuroscience', 'psychophysics', 'brain', 'visual cortex',
                         'visual neuroscience', 'cognitive model', 'finite difference', 'finite difference time domain',
                         'finite difference coefficients', 'finite difference methods for option pricing', 'iso 128',
                         'iso 10303'])
    # Map a term to its index in the Word2Vec vocabulary
    term2index = model.wv.key_to_index
    # Plot every node's embedding, highlighting and labelling the chosen terms
    plt.figure(figsize=(14, 14))
    plt.scatter(embed_2d[:, 0], embed_2d[:, 1])

    # NOTE(review): assumes every chosen term exists in the trained
    # vocabulary — a missing key would raise KeyError here.
    for item in terms_chosen:
        idx = term2index[item]
        plt.scatter(embed_2d[idx, 0], embed_2d[idx, 1], c='r', s=50)
        plt.annotate(item, xy=(embed_2d[idx, 0], embed_2d[idx, 1]), c='k', fontsize=12)
    plt.show()