from __future__ import division
import networkx as nx
# import pandas as pd
import scipy.sparse as sp
import numpy as np
from sklearn.metrics import roc_auc_score
# from sklearn.manifold import spectral_embedding
from sklearn.neural_network import MLPClassifier
# import time
import pickle
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from gae.optimizer import OptimizerAE, OptimizerVAE
from gae.model import GCNModelAE, GCNModelVAE
from gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges, mask_test_edges_directed
from copy import deepcopy
from ge import LINE, DeepWalk, SDNE, Struc2Vec, Node2Vec
import sys
import os
import math
from karateclub import HOPE, NetMF, Diff2Vec, GraRep
# from karateclub import DeepWalk, Walklets, HOPE, NetMF, Diff2Vec, GraRep, Node2Vec
# from karateclub import NodeSketch, LaplacianEigenmaps, NMFADMM, GLEE, RandNE, SocioDim

def sigmoid(x):
    """Logistic function: maps x to (0, 1) via 1 / (1 + e^-x).

    Accepts scalars or numpy arrays (elementwise).
    """
    return 1.0 / (np.exp(-x) + 1)


def negative_power(x, k):
    """Exponential decay e^(-k*x); scalars or numpy arrays (elementwise)."""
    decay_rate = k * x
    return np.exp(-decay_rate)

def Diss(emb1, emb2):
    """Return the Euclidean (L2) distance between two embedding vectors.

    Parameters
    ----------
    emb1, emb2 : sequences of numbers of equal length.

    Returns
    -------
    float
        sqrt(sum_i (emb1[i] - emb2[i])^2)
    """
    # zip + generator avoids index bookkeeping and no longer shadows the
    # builtin `sum`; (a - b) ** 2 is bit-identical to (a - b) * (a - b).
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(emb1, emb2)))

# Input: NetworkX training graph, train_test_split (from mask_test_edges), n2v hyperparameters
# Output: dictionary with AUC,Precision, Weak Link Precision
def get_all_scores(
        g_train, train_test_split,
        edge_score_mode="edge-emb",
        # Whether to use bootstrapped edge embeddings + LogReg (like in node2vec paper),
        # or simple dot-product (like in GAE paper) for edge scoring
        verbose=1,
        multip=0.1,
        k=0,
        dims=0, emb_matrix=None, Embedding_Method=''):
    """Average embedding distance over weak vs. strong test links.

    The first ``num_dif_community`` entries of ``test_edges`` are the weak
    (cross-community) links; the remainder are strong links.

    Parameters
    ----------
    g_train : networkx graph (unused here; kept for interface compatibility).
    train_test_split : 9-tuple as produced by mask_test_edges, unpacked below.
    emb_matrix : array or mapping indexed by node id, giving each node's
        embedding vector. Defaults to an empty list (was a mutable default).
    Embedding_Method : str label used only in the printed output.

    Returns
    -------
    (dis_weak_avg, dis_strong_avg) : average L2 distances for weak and
        strong test links respectively.
    """
    if emb_matrix is None:
        emb_matrix = []

    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
    test_edges, test_edges_false, num_dif_community, train_num_dif_community = train_test_split  # Unpack train-test split

    num_test_edges = len(test_edges)

    def _dist(edge):
        # Plain vector 2-norm of the endpoint-embedding difference;
        # numerically identical to the former norm([v], ord=2, axis=1)[0].
        return np.linalg.norm(emb_matrix[edge[0]] - emb_matrix[edge[1]])

    # Weak (cross-community) links come first in test_edges.
    dis_weak_avg = sum(_dist(test_edges[i]) for i in range(num_dif_community)) / (num_dif_community * 1.0)
    print('{} Average Weak Distance: '.format(Embedding_Method), str(dis_weak_avg))

    # Remaining test edges are strong (intra-community) links.
    dis_strong_avg = sum(_dist(test_edges[i]) for i in range(num_dif_community, num_test_edges)) / ((num_test_edges - num_dif_community) * 1.0)
    print('{} Average Strong Distance: '.format(Embedding_Method), str(dis_strong_avg))

    return dis_weak_avg, dis_strong_avg

def calculate_scores(adj_sparse, directed=False,
                         test_frac=.3, val_frac=.1, random_state=0, verbose=1,
                         train_test_split_file=None,
                         torch_dtype=0,
                         multip=0.1,
                         dims=10,
                         k=0, diff=1, method=''):
    """Load a pickled train/test edge split, train embedding models on the
    training graph, and collect average weak/strong test-link distances.

    Parameters
    ----------
    adj_sparse : scipy sparse adjacency matrix of the full graph (used only
        for reporting node/edge counts here).
    directed : bool — build the training graph as nx.DiGraph when True.
    train_test_split_file : path to a pickle holding the 9-tuple produced
        by mask_test_edges (adj_train, train/val/test pos+neg edges,
        num_dif_community, train_num_dif_community).
    method : str selecting which embedding model(s) to run.
        NOTE(review): all three live branches below test ``method == '1'``,
        so DeepWalk, LINE and Node2Vec all run for method '1' and no other
        value runs anything — confirm whether '2'/'3' were intended.
    test_frac, val_frac, torch_dtype, dims, k, diff : unused in the current
        body (the split comes pre-computed from the pickle file).
    multip : forwarded to get_all_scores (unused there as well).

    Returns
    -------
    (weak_diss, strong_diss) : lists of average weak/strong distances, one
        entry per embedding method that was run.
    """
    np.random.seed(random_state)  # Guarantee consistent train/test split
    # tf.set_random_seed(random_state) # Consistent GAE training

    # Prepare LP scores dictionary
    weak_diss = []
    strong_diss = []

    ### ---------- PREPROCESSING ---------- ###
    train_test_split = None
    # Raises if train_test_split_file is None or missing — a pre-computed
    # split is required.
    with open(train_test_split_file, 'rb') as f:
        train_test_split = pickle.load(f)
        print('Found existing train-test split!')

    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
    test_edges, test_edges_false, num_dif_community, train_num_dif_community = train_test_split  # Unpack tuple

    # g_train: new graph object with only non-hidden edges
    if directed == True:
        g_train = nx.DiGraph(adj_train)
    else:
        g_train = nx.Graph(adj_train)

    # Inspect train/test split
    if verbose >= 1:
        print("Total nodes:", adj_sparse.shape[0])
        print("Total edges:", int(adj_sparse.nnz / 2))  # adj is symmetric, so nnz (num non-zero) = 2*num_edges
        print("Training edges (positive):", len(train_edges))
        print("Training edges (negative):", len(train_edges_false))
        print("Validation edges (positive):", len(val_edges))
        print("Validation edges (negative):", len(val_edges_false))
        print("Test edges (positive):", len(test_edges))
        print("Test edges (negative):", len(test_edges_false))
        print("Test Weak Links:", num_dif_community)
        print("Test Strong Links::", len(test_edges) - num_dif_community)
        print('')
        print("------------------------------------------------------")

    # --- DeepWalk embeddings (ge library) ---
    if method == '1':
        # model = HOPE(dimensions=16)
        # model.fit(g_train)
        # model = DeepWalk(dimensions=8, walk_length=10, walk_number=80, workers=8, window_size=5, epochs=5)
        # model = LaplacianEigenmaps(dimensions=8)
        # model.fit(g_train)
        model = DeepWalk(g_train, walk_length=10, num_walks=80, workers=8)
        model.train(window_size=5, iter=5, embed_size=8)  # train model
        embedding = model.get_embeddings()
        dis_weak_avg, dis_strong_avg = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=embedding, Embedding_Method='deepwalk')
        weak_diss.append(dis_weak_avg)
        strong_diss.append(dis_strong_avg)

    # --- LINE embeddings (same condition as above — see NOTE in docstring) ---
    if method == '1':
        model = LINE(g_train, embedding_size=10, order='all')
        model.train(batch_size=512, epochs=50, verbose=0)
        emb_matrix = model.get_embeddings()
        dis_weak_avg, dis_strong_avg = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method='LINE')
        weak_diss.append(dis_weak_avg)
        strong_diss.append(dis_strong_avg)

    # --- Node2Vec embeddings (same condition as above — see NOTE in docstring) ---
    if method == '1':
        model = Node2Vec(g_train, walk_length=10, num_walks=80, p=1, q=1, workers=8)
        model.train(window_size=5, iter=5, embed_size=8)
        emb_matrix = model.get_embeddings()
        dis_weak_avg, dis_strong_avg = get_all_scores(
            g_train, train_test_split, edge_score_mode="edge-emb",
            verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method='Node2Vec')
        weak_diss.append(dis_weak_avg)
        strong_diss.append(dis_strong_avg)

    # --- Disabled: (V)GAE training path, kept for reference ---
    # if method == '1':
    #     if verbose >= 1:
    #         print('GAE preprocessing...')
    #
    #     # Train on CPU (hide GPUs) due to memory limits
    #     os.environ['CUDA_VISIBLE_DEVICES'] = ""
    #
    #     # Feature conversion: dense matrix --> sparse matrix --> tuple
    #     # The feature tuple contains: (coordinate list, value list, matrix shape)
    #     features_matrix = None
    #     if features_matrix is None:
    #         x = sp.lil_matrix(np.identity(adj_sparse.shape[0]))
    #     else:
    #         x = sp.lil_matrix(features_matrix)
    #     features_tuple = sparse_to_tuple(x)
    #     features_shape = features_tuple[2]
    #
    #     # Get graph attributes (fed into the model)
    #     num_nodes = adj_sparse.shape[0]  # number of nodes in the adjacency matrix
    #     num_features = features_shape[1]  # number of features (columns of the feature matrix)
    #     features_nonzero = features_tuple[1].shape[0]  # number of non-zero entries in the feature matrix (i.e. length of the value list)
    #
    #     # Save the original adjacency matrix (without diagonal entries) for later use
    #     adj_orig = deepcopy(adj_sparse)
    #     adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
    #     adj_orig.eliminate_zeros()
    #
    #     # Normalize the adjacency matrix
    #     adj_norm = preprocess_graph(adj_train)
    #
    #     # Add the diagonal back
    #     adj_label = adj_train + sp.eye(adj_train.shape[0])
    #     adj_label = sparse_to_tuple(adj_label)
    #
    #     # Define placeholders
    #     placeholders = {
    #         'features': tf.sparse_placeholder(tf.float32),
    #         'adj': tf.sparse_placeholder(tf.float32),
    #         'adj_orig': tf.sparse_placeholder(tf.float32),
    #         'dropout': tf.placeholder_with_default(0., shape=())
    #     }
    #
    #     # How much to weigh positive examples (true edges) in cost print_function
    #     # Want to weigh less-frequent classes higher, so as to prevent model output bias
    #     # pos_weight = (num. negative samples / (num. positive samples)
    #     pos_weight = float(adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) / adj_sparse.sum()
    #
    #     # normalize (scale) average weighted cost
    #     norm = adj_sparse.shape[0] * adj_sparse.shape[0] / float(
    #         (adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) * 2)
    #
    #     if verbose >= 1:
    #         print('Initializing GAE model...')
    #
    #     # Create the VAE model
    #     model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero,
    #                         32, 16, dtype=tf.float32, flatten_output=False)
    #
    #     opt = OptimizerVAE(preds=model.reconstructions,
    #                        labels=tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False),
    #                        # labels=placeholders['adj_orig'],
    #                        model=model, num_nodes=num_nodes,
    #                        pos_weight=pos_weight,
    #                        norm=norm,
    #                        learning_rate=0.01,
    #                        dtype=tf.float32)
    #
    #     prev_embs = []
    #     # Initialize the session
    #     sess = tf.Session()
    #
    #     if verbose >= 1:
    #         # Print all trainable variables
    #         total_parameters = 0
    #         for variable in tf.trainable_variables():
    #             # shape is an array of tf.Dimension
    #             shape = variable.get_shape()
    #             # print("Variable shape: ", shape)
    #             variable_parameters = 1
    #             for dim in shape:
    #                 # print("Current dimension: ", dim)
    #                 variable_parameters *= dim.value
    #             # print("Variable params: ", variable_parameters)
    #             total_parameters += variable_parameters
    #
    #     sess.run(tf.global_variables_initializer())
    #
    #     if verbose >= 1:
    #         print('Starting GAE training!')
    #
    #     # Train the model
    #     for epoch in range(250):
    #         # Construct the feed dictionary
    #         feed_dict = construct_feed_dict(adj_norm, adj_label, features_tuple, placeholders)
    #         feed_dict.update({placeholders['dropout']: 0})
    #         # Single weight update
    #         outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)
    #
    #         # Evaluate predictions
    #         feed_dict.update({placeholders['dropout']: 0})
    #         gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
    #
    #         prev_embs.append(gae_emb)
    #
    #         # gae_score_matrix = np.dot(gae_emb, gae_emb.T)
    #
    #     if verbose == 2:
    #         print("Optimization Finished!")
    #
    #     # Print final results
    #     feed_dict.update({placeholders['dropout']: 0})
    #     gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
    #     dis_weak_avg, dis_strong_avg = get_all_scores(
    #         g_train, train_test_split, edge_score_mode="edge-emb",
    #         verbose=1, multip=multip, k=k, dims=dims, emb_matrix=gae_emb, Embedding_Method='VGAE')
    #
    #     weak_diss.append(dis_weak_avg)
    #     strong_diss.append(dis_strong_avg)

    # --- Disabled: SDNE path, kept for reference ---
    # if method == '1':
    #     model = SDNE(g_train, hidden_size=[32, 16])  # init model
    #     model.train(batch_size=3000, epochs=100, verbose=2)  # train model
    #     emb_matrix = model.get_embeddings()  # get embedding vectors
    #     dis_weak_avg, dis_strong_avg = get_all_scores(
    #         g_train, train_test_split, edge_score_mode="edge-emb",
    #         verbose=1, multip=multip, k=k, dims=dims, emb_matrix=emb_matrix, Embedding_Method='SDNE')
    #     weak_diss.append(dis_weak_avg)
    #     strong_diss.append(dis_strong_avg)

    ### ---------- RETURN RESULTS ---------- ###
    return weak_diss, strong_diss