from __future__ import division
import networkx as nx
# import pandas as pd
import scipy.sparse as sp
import numpy as np
from sklearn.metrics import roc_auc_score
# from sklearn.manifold import spectral_embedding
from sklearn.neural_network import MLPClassifier
# import time
import pickle
import tensorflow as tf
from gae.optimizer import OptimizerAE, OptimizerVAE
from gae.model import GCNModelAE, GCNModelVAE
from gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges, mask_test_edges_directed
from copy import deepcopy
from ge import LINE, DeepWalk, SDNE, Struc2Vec, Node2Vec
import sys
import os
import math
from karateclub import HOPE, NetMF, Diff2Vec, GraRep, GLEE, LaplacianEigenmaps,FirstOrderLINE, SecondOrderLINE
# from karateclub import DeepWalk, Walklets, HOPE, NetMF, Diff2Vec, GraRep, Node2Vec
# from karateclub import NodeSketch, LaplacianEigenmaps, NMFADMM, GLEE, RandNE, SocioDim

def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); broadcasts over NumPy array input."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)


def negative_power(x, k):
    """Return e^(-k*x), elementwise when x is a NumPy array."""
    exponent = -(k * x)
    return np.exp(exponent)

def Diss(emb1, emb2, k):
    """Weighted Euclidean distance sqrt(sum_i k * (emb1[i] - emb2[i])^2).

    Args:
        emb1, emb2: index-aligned numeric sequences (node embeddings);
            emb2 must be at least as long as emb1.
        k: non-negative scalar weight applied to every squared difference.

    Returns:
        float: the weighted L2 distance.
    """
    # Fixed: the original accumulator was named `sum`, shadowing the builtin.
    total = 0.0
    for i in range(len(emb1)):
        diff = emb1[i] - emb2[i]
        total += k * diff * diff

    return math.sqrt(total)

# Compute AUC, precision and weak-link precision from the final prediction list
def get_scores(test_edges, test_edge_labels, test_preds, num_dif_community, Embedding_Method, Claassifier_Method):
    """Score ranked link predictions: AUC, precision, and weak-link precision.

    Positives are assumed to occupy the first len(test_edges) positions of
    test_preds, and weak (cross-community) links the first num_dif_community
    positions. Precision counts how many of those indices land in the top
    3*len(test_edges) ranked predictions.
    NOTE(review): the top-3n window divided by n means precision can exceed
    1.0 — confirm this is the intended metric.

    Returns a dict with keys 'test_AUC', 'test_precision',
    'test_weak_precision'.
    """
    n_test = len(test_edges)
    auc = roc_auc_score(test_edge_labels, test_preds)

    # Rank prediction indices from highest to lowest score, once.
    ranked = test_preds.argsort()[::-1]
    top_ids = set(ranked[0:3 * n_test])

    # Positive test edges sit at indices [0, n_test).
    hits = len(top_ids & set(range(n_test)))
    precision = (hits * 1.0) / (n_test * 1.0)

    # Weak (cross-community) links sit at indices [0, num_dif_community).
    weak_hits = len(top_ids & set(range(num_dif_community)))
    weak_precision = (weak_hits * 1.0) / (num_dif_community * 1.0)

    scores = {
        'test_AUC': auc,
        'test_precision': precision,
        'test_weak_precision': weak_precision,
    }

    print('')
    print('{} {} Test AUC score: '.format(Embedding_Method, Claassifier_Method), str(auc))
    print('{} {} Test PRECISION score: '.format(Embedding_Method, Claassifier_Method), str(precision))
    print('{} {} Test WEAK PRECISION score: '.format(Embedding_Method, Claassifier_Method), str(weak_precision))
    return scores


# Input: NetworkX training graph, train_test_split (from mask_test_edges), n2v hyperparameters
# Output: dictionary with AUC,Precision, Weak Link Precision
def get_all_scores(
        g_train, train_test_split,
        edge_score_mode="edge-emb",  # Whether to use bootstrapped edge embeddings + LogReg (like in node2vec paper),
        # or simple dot-product (like in GAE paper) for edge scoring
        verbose=1,
        multip=0.1,
        k=0,
        dims=0, emb_matrix=[], Embedding_Method=''):
    """Evaluate link prediction with an MLP over Hadamard edge embeddings.

    Parameters:
        g_train: NetworkX training graph (not used in this body; kept for
            interface compatibility with callers).
        train_test_split: 9-tuple from mask_test_edges — training adjacency,
            positive/negative train/val/test edge lists, and weak-link counts.
        edge_score_mode: only "edge-emb" is implemented; any other value
            leaves MLP_Scores unbound and the final return raises
            UnboundLocalError.
        verbose, dims: currently unused in this body.
        multip: scale factor applied to the Euclidean endpoint-distance
            features.
        k: exponent for the (currently commented-out) distance re-scoring.
        emb_matrix: node-id-indexed embedding container (matrix or dict)
            produced by the caller's embedding method.
            NOTE(review): mutable default argument `[]`; it is only read
            (indexed) here, never mutated, so it is benign — but callers
            should always pass a real embedding.
        Embedding_Method: label used only for printing inside get_scores.

    Returns:
        dict with keys 'test_AUC', 'test_precision', 'test_weak_precision'
        for the plain-MLP classifier. A single dict — callers must not
        tuple-unpack the result.
    """
    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
    test_edges, test_edges_false, num_dif_community, train_num_dif_community = train_test_split  # Unpack train-test split

    ## MLP ##
    # Generate bootstrapped edge embeddings (as is done in node2vec paper)
    # Edge embedding for (v1, v2) = hadamard product of node embeddings for v1, v2
    if edge_score_mode == "edge-emb":
        def get_edge_embeddings(edge_list):
            # Hadamard (element-wise) product of the two endpoint embeddings.
            embs = []
            for edge in edge_list:
                node1 = edge[0]
                node2 = edge[1]
                emb1 = emb_matrix[node1]
                emb2 = emb_matrix[node2]
                edge_emb = np.multiply(emb1, emb2)
                embs.append(edge_emb)
            embs = np.array(embs)
            return embs

        # Side-effect accumulator: scaled Euclidean endpoint distances for
        # every edge fed through get_edge_and_norm_dis_embeddings, in call
        # order (positive test edges first, then the negative chunks).
        diss = []

        def get_edge_and_norm_dis_embeddings(edge_list):
            # Same Hadamard embedding as get_edge_embeddings, but also
            # records multip * ||emb1 - emb2||_2 into the enclosing `diss`.
            embs = []
            for edge in edge_list:
                node1 = edge[0]
                node2 = edge[1]
                emb1 = emb_matrix[node1]
                emb2 = emb_matrix[node2]
                edge_emb = np.multiply(emb1, emb2)
                diss.append((np.linalg.norm([emb1 - emb2], axis=1, keepdims=False)) * multip)
                embs.append(edge_emb)
            embs = np.array(embs)
            return embs

        # Train-set edge embeddings
        pos_train_edge_embs = get_edge_embeddings(train_edges)
        neg_train_edge_embs = get_edge_embeddings(train_edges_false)
        train_edge_embs = np.concatenate([pos_train_edge_embs, neg_train_edge_embs])
        # Create train-set edge labels: 1 = real edge, 0 = false edge
        train_edge_labels = np.concatenate([np.ones(len(train_edges)), np.zeros(len(train_edges_false))])

        ### Classifier choice ###
        # # Train logistic regression classifier on train-set edge embeddings
        edge_classifier = MLPClassifier(hidden_layer_sizes=(40,), max_iter=2000, random_state=0)
        edge_classifier.fit(train_edge_embs, train_edge_labels)

        # Test-set edge embeddings, labels
        pos_test_edge_embs = get_edge_and_norm_dis_embeddings(test_edges)
        test_preds = edge_classifier.predict_proba(pos_test_edge_embs)[:, 1]
        # Score the negative test edges in 10 chunks to bound peak memory.
        for sub_test_edges_false in np.array_split(test_edges_false, 10, axis=0):
            neg_test_edge_sub_embs = get_edge_and_norm_dis_embeddings(sub_test_edges_false)
            test_pred = edge_classifier.predict_proba(neg_test_edge_sub_embs)[:, 1]
            test_preds = np.hstack((test_preds, test_pred))

        # Create test edge labels: 1 = real edge, 0 = false edge
        test_edge_labels = np.concatenate([np.ones(len(test_edges)), np.zeros(len(test_edges_false))])
        MLP_Scores = get_scores(test_edges, test_edge_labels, test_preds, num_dif_community, Embedding_Method, 'MLP')

        # ## MLP+DISS ##
        # diss = np.array(diss).reshape(-1)
        # diss = negative_power(diss, k)
        # test_preds_add_dis = test_preds + diss
        # test_preds_add_dis = sigmoid(test_preds_add_dis)
        # MLP_Add_Diss_Scores = get_scores(test_edges, test_edge_labels, test_preds_add_dis, num_dif_community, Embedding_Method, 'MLP+Diss')
        #
        # ## MLP*DISS ##
        # test_preds_plus_dis = np.multiply(test_preds, diss)
        # test_preds_plus_dis = sigmoid(test_preds_plus_dis)
        # MLP_Multip_Diss_scores = get_scores(test_edges, test_edge_labels, test_preds_plus_dis, num_dif_community, Embedding_Method, 'MLP*Diss')

        # Diss ##
        # Diss_Scores = get_scores(test_edges, test_edge_labels, diss, num_dif_community, Embedding_Method, 'Diss')


        ##  Edge embedding concatenated with the plain Euclidean distance  ##
        def get_edge_add_dis_embeddings(edge_list):
            # Hadamard embedding with the scaled L2 endpoint distance
            # appended as one extra feature dimension.
            embs = []
            for edge in edge_list:
                node1 = edge[0]
                node2 = edge[1]
                emb1 = emb_matrix[node1]
                emb2 = emb_matrix[node2]
                edge_emb = np.multiply(emb1, emb2)
                # for i in range(1, k+1):
                #     edge_emb = np.hstack((edge_emb, np.linalg.norm([emb1 - emb2], ord=i, axis=1, keepdims=False) * multip))
                # edge_emb = np.hstack((edge_emb, np.linalg.norm([emb1 - emb2], ord=1, axis=1, keepdims=False) * multip, np.linalg.norm([emb1 - emb2], ord=2, axis=1, keepdims=False) * multip, np.linalg.norm([emb1 - emb2], ord=3, axis=1, keepdims=False) * multip, np.linalg.norm([emb1 - emb2], ord=4, axis=1, keepdims=False) * multip))
                edge_emb = np.hstack((edge_emb, np.linalg.norm([emb1 - emb2], ord=2, axis=1, keepdims=False) * multip))
                # edge_emb = np.hstack((edge_emb, Diss(emb1, emb2, k)))
                embs.append(edge_emb)
            embs = np.array(embs)
            return embs

        # NOTE(review): the second classifier below is trained and scored, but
        # its predictions feed only the commented-out WLP scoring — as written
        # this work is discarded. Confirm whether it should be re-enabled or
        # removed.
        # Train-set edge embeddings
        pos_train_edge_embs = get_edge_add_dis_embeddings(train_edges)
        neg_train_edge_embs = get_edge_add_dis_embeddings(train_edges_false)
        train_edge_embs = np.concatenate([pos_train_edge_embs, neg_train_edge_embs])

        # Train logistic regression classifier on train-set edge embeddings
        edge_classifier = MLPClassifier(hidden_layer_sizes=(40,), max_iter=2000, random_state=0)
        edge_classifier.fit(train_edge_embs, train_edge_labels)

        pos_test_edge_embs = get_edge_add_dis_embeddings(test_edges)
        test_preds = edge_classifier.predict_proba(pos_test_edge_embs)[:, 1]
        for sub_test_edges_false in np.array_split(test_edges_false, 10, axis=0):
            neg_test_edge_sub_embs = get_edge_add_dis_embeddings(sub_test_edges_false)
            test_pred = edge_classifier.predict_proba(neg_test_edge_sub_embs)[:, 1]
            test_preds = np.hstack((test_preds, test_pred))

        # WLP_Scores = get_scores(test_edges, test_edge_labels, test_preds, num_dif_community, Embedding_Method, 'WLP')


    # Only the plain-MLP scores are returned (a single dict).
    return MLP_Scores

def calculate_scores(adj_sparse, directed=False,
                         test_frac=.3, val_frac=.1, random_state=0, verbose=1,
                         train_test_split_file=None,
                         torch_dtype=0,
                         multip=0.1,
                         dims=10,
                         k=0, diff=1, method='',node2vec_dim = 32):
    """Load a pre-computed train/test split, embed the training graph with the
    chosen `method`, and return link-prediction scores from get_all_scores.

    Parameters
    ----------
    adj_sparse : scipy.sparse matrix
        Adjacency matrix of the full (unsplit) graph.
    directed : bool
        If True, build a NetworkX DiGraph from the training adjacency.
    test_frac, val_frac, torch_dtype, dims, diff :
        Unused in this body; kept for interface compatibility.
    random_state : int
        Seed for NumPy's global RNG.
    verbose : int
        0 = silent; >= 1 prints split statistics and training progress.
    train_test_split_file : str
        Path to a pickle holding the 9-tuple produced by mask_test_edges.
    multip, k : float
        Distance-weighting hyperparameters forwarded to get_all_scores.
    method : str
        One of 'Hope', 'LaplacianEigenmaps', 'GraRep', 'DeepWalk', 'Line',
        'Node2Vec', 'VGAE', 'SDNE', 'Struc2Vec'.
    node2vec_dim : int
        Embedding size used by the Node2Vec branch.

    Returns
    -------
    (AUC, pre, s_pre) : three single-element lists holding the method's test
        AUC, precision and weak-link precision.

    Raises
    ------
    NameError
        If `method` matches none of the known branches (MLP_Scores unbound).
    """
    np.random.seed(random_state)  # Guarantee consistent downstream sampling

    # Per-metric result accumulators (one entry per evaluated variant).
    AUC = []
    pre = []
    s_pre = []

    ### ---------- PREPROCESSING ---------- ###
    # The split is always read from disk; there is no fallback computation.
    with open(train_test_split_file, 'rb') as f:
        train_test_split = pickle.load(f)
        print('Found existing train-test split!')

    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
    test_edges, test_edges_false, num_dif_community, train_num_dif_community = train_test_split  # Unpack tuple

    # g_train: new graph object with only non-hidden (training) edges
    if directed == True:
        g_train = nx.DiGraph(adj_train)
    else:
        g_train = nx.Graph(adj_train)

    # Inspect train/test split
    if verbose >= 1:
        print("Total nodes:", adj_sparse.shape[0])
        print("Total edges:", int(adj_sparse.nnz / 2))  # adj is symmetric, so nnz (num non-zero) = 2*num_edges
        print("Training edges (positive):", len(train_edges))
        print("Training edges (negative):", len(train_edges_false))
        print("Validation edges (positive):", len(val_edges))
        print("Validation edges (negative):", len(val_edges_false))
        print("Test edges (positive):", len(test_edges))
        print("Test edges (negative):", len(test_edges_false))
        print("Test Weak Links:", num_dif_community)
        print("Test Strong Links::", len(test_edges) - num_dif_community)
        print('')
        print("------------------------------------------------------")

    # --- Embedding branches -------------------------------------------------
    # Each branch fits one embedding model on g_train and scores it.
    # Fixed: get_all_scores returns a single dict, but the original code
    # tuple-unpacked it in several branches (e.g. `MLP_Scores, Diss_Scores =
    # ...` or five-name unpacks), which raised ValueError at runtime. Every
    # branch now binds the dict directly to MLP_Scores.
    if method=='Hope':
        model = HOPE(dimensions=16)
        model.fit(g_train)
        embedding = model.get_embedding()
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=embedding, Embedding_Method=method)

    if method=='LaplacianEigenmaps':
        model = LaplacianEigenmaps(dimensions=8)
        model.fit(g_train)
        embedding = model.get_embedding()
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=embedding, Embedding_Method=method)

    if method=='GraRep':
        model = GraRep(dimensions=8)
        model.fit(g_train)
        embedding = model.get_embedding()
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=embedding, Embedding_Method=method)

    # Obtain the embedding vectors
    if method == 'DeepWalk':
        model = DeepWalk(g_train, walk_length=10, num_walks=80, workers=8)
        model.train(window_size=5, iter=5, embed_size=8)  # train model
        emb_matrix = model.get_embeddings()
        MLP_Scores = get_all_scores(
            g_train, train_test_split,edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method=method)

    if method == 'Line':
        model = FirstOrderLINE(dimensions=128)
        model.fit(g_train)
        emb_matrix = model.get_embedding()

        # Sanity-check the learned embedding matrix.
        # Fixed: the original asserts referenced the undefined names
        # `embedding` and `graph` (NameError); they now check `emb_matrix`
        # and `g_train`.
        assert emb_matrix.shape[0] == g_train.number_of_nodes()
        assert emb_matrix.shape[1] == model.dimensions
        assert isinstance(emb_matrix, np.ndarray)

        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method=method)

    if method == 'Node2Vec':
        model = Node2Vec(g_train, walk_length=10, num_walks=80, p=0.5, q=2, workers=8)
        model.train(window_size=1, iter=5, embed_size=node2vec_dim)
        emb_matrix = model.get_embeddings()
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method=method)

    if method == 'VGAE':
        if verbose >= 1:
            print('GAE preprocessing...')

        # Train on CPU (hide the GPU) because of memory limits.
        os.environ['CUDA_VISIBLE_DEVICES'] = ""

        # Feature conversion: dense matrix --> sparse matrix --> tuple.
        # The feature tuple is (coordinate list, value list, matrix shape).
        features_matrix = None
        if features_matrix is None:
            # No node features available: fall back to a one-hot identity.
            x = sp.lil_matrix(np.identity(adj_sparse.shape[0]))
        else:
            x = sp.lil_matrix(features_matrix)
        features_tuple = sparse_to_tuple(x)
        features_shape = features_tuple[2]

        # Graph attributes fed to the model.
        num_nodes = adj_sparse.shape[0]  # number of nodes in the adjacency matrix
        num_features = features_shape[1]  # number of features (columns of the feature matrix)
        features_nonzero = features_tuple[1].shape[0]  # non-zero entries in the feature matrix

        # Keep the original adjacency matrix (without diagonal entries).
        adj_orig = deepcopy(adj_sparse)
        adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
        adj_orig.eliminate_zeros()

        # Normalize the adjacency matrix.
        adj_norm = preprocess_graph(adj_train)

        # Add self-loops (diagonal) to the label adjacency.
        adj_label = adj_train + sp.eye(adj_train.shape[0])
        adj_label = sparse_to_tuple(adj_label)

        # Define TF1 placeholders.
        placeholders = {
            'features': tf.sparse_placeholder(tf.float32),
            'adj': tf.sparse_placeholder(tf.float32),
            'adj_orig': tf.sparse_placeholder(tf.float32),
            'dropout': tf.placeholder_with_default(0., shape=())
        }

        # How much to weigh positive examples (true edges) in the cost.
        # Less-frequent classes are weighted higher to prevent output bias.
        # pos_weight = (num. negative samples) / (num. positive samples)
        pos_weight = float(adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) / adj_sparse.sum()

        # normalize (scale) average weighted cost
        norm = adj_sparse.shape[0] * adj_sparse.shape[0] / float(
            (adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) * 2)

        if verbose >= 1:
            print('Initializing GAE model...')

        # Build the variational graph auto-encoder (hidden sizes 32 -> 16).
        model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero,
                            32, 16, dtype=tf.float32, flatten_output=False)

        opt = OptimizerVAE(preds=model.reconstructions,
                           labels=tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False),
                           model=model, num_nodes=num_nodes,
                           pos_weight=pos_weight,
                           norm=norm,
                           learning_rate=0.01,
                           dtype=tf.float32)

        prev_embs = []
        # Initialize the session.
        sess = tf.Session()

        if verbose >= 1:
            # Count all trainable parameters (computed but never printed;
            # kept for parity with the original instrumentation).
            total_parameters = 0
            for variable in tf.trainable_variables():
                shape = variable.get_shape()
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters

        sess.run(tf.global_variables_initializer())

        if verbose >= 1:
            print('Starting GAE training!')

        # Train for a fixed 250 epochs (no early stopping or validation gate).
        for epoch in range(250):
            # Build the feed dictionary.
            feed_dict = construct_feed_dict(adj_norm, adj_label, features_tuple, placeholders)
            feed_dict.update({placeholders['dropout']: 0})
            # Single weight update.
            outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)

            # Snapshot the latent means after this epoch.
            feed_dict.update({placeholders['dropout']: 0})
            gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
            prev_embs.append(gae_emb)

        if verbose == 2:
            print("Optimization Finished!")

        # Final embedding used for scoring.
        feed_dict.update({placeholders['dropout']: 0})
        gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=gae_emb, Embedding_Method=method)

    if method == 'SDNE':
        model = SDNE(g_train, hidden_size=[32, 16])  # init model
        model.train(batch_size=3000, epochs=100, verbose=2)  # train model
        emb_matrix = model.get_embeddings()  # get embedding vectors
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method=method)

    if method == 'Struc2Vec':
        # NOTE(review): verbose=40 is an unusual value for Struc2Vec — confirm intended.
        model = Struc2Vec(g_train, 10, 80, workers=8, verbose=40, )  # init model
        model.train(embed_size=8, window_size=5, iter=3, workers=8)  # train model
        emb_matrix = model.get_embeddings()  # get embedding vectors
        MLP_Scores = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method=method)

    # Collect the MLP scores (raises NameError if `method` was unrecognized).
    AUC.append(MLP_Scores['test_AUC'])
    pre.append(MLP_Scores['test_precision'])
    s_pre.append(MLP_Scores['test_weak_precision'])

    ### ---------- RETURN RESULTS ---------- ###
    return AUC, pre, s_pre