# -*- coding: utf-8 -*-
"""
Created on Mon Nov 18 21:48:25 2019

@author: gby

link prediction model for MUSAE datasets.

The MUSAE datasets provide friendship relations from several social platforms;
more importantly, they also provide discretized features for every user.
"""

import json
import random
import pandas as pd
import numpy as np
import networkx as nx
import gensim
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import Doc2Vec
from gensim.models.word2vec import Word2Vec

# --- Data loading ---------------------------------------------------------
# MUSAE Twitch (ES) dataset: an edge list (friendship pairs) and a JSON file
# mapping each user id (as a string) to a list of discrete feature tokens.
relations_path = '../twitch/twitch/ES/musae_ES_edges.csv'
features_path = '../twitch/twitch/ES/musae_ES_features.json'
name = 'twitch_es_features'
relations = pd.read_csv(relations_path)
with open(features_path) as f:
    features = json.load(f)  # parse straight from the handle; no intermediate string

# Full network: build an undirected friendship graph from the edge list.
G_all = nx.Graph()
# Iterate the two columns directly instead of iterrows() — much faster and
# avoids per-row Series construction.
for id1, id2 in zip(relations['from'], relations['to']):
    # Cast pandas int64 to plain int, otherwise the graph cannot be saved
    # as JSON later (json can't encode numpy integer types).
    G_all.add_edge(int(id1), int(id2))

# Average friend (neighbour) count across all users; for a simple Graph,
# degree equals the neighbour count.
nums = [deg for _, deg in G_all.degree()]
avg_num_friends = sum(nums) / len(nums)

# Sample 500 candidate "new users" and filter by friend count.
# NOTE(review): `deg > avg*2 or deg > avg/2` reduces to `deg > avg/2` — the
# first test is subsumed by the second. The intent may have been a band
# (avg/2 < deg < avg*2) or outliers (deg > avg*2 or deg < avg/2); confirm
# with the author. Behavior is preserved as written.
new_users = random.sample(list(G_all.nodes), 500)
new_users = [u for u in new_users
             if G_all.degree(u) > avg_num_friends * 2
             or G_all.degree(u) > avg_num_friends / 2]

# Remaining users form the "old" network. Use a set for O(1) membership
# tests instead of scanning the new_users list once per node.
new_user_set = set(new_users)
old_users = [u for u in G_all.nodes if u not in new_user_set]
G_old = G_all.subgraph(old_users)

# Adjacency lists: new users keep their full-network friends; old users
# keep only friends within the old-user subgraph.
new_users_friends_dict = {user: list(G_all.neighbors(user)) for user in new_users}
old_users_friends_dict = {user: list(G_old.neighbors(user)) for user in old_users}

# Build one TaggedDocument per user so Doc2Vec can learn an embedding from
# each user's discrete feature list.
def create_documents(features):
    """
    Convert a node -> feature-list mapping into gensim TaggedDocuments.

    :param features: Feature hash table - keys are nodes, values are feature lists.
    :return docs: Tagged Documents list, one per node, tagged with str(node id).
    """
    documents = []
    for node, feats in features.items():
        words = [str(token) for token in feats]
        documents.append(TaggedDocument(words=words, tags=[str(node)]))
    return documents

# Train Doc2Vec embeddings over the user feature documents.
docs = create_documents(features)
model = Doc2Vec(vector_size=50, min_count=2, epochs=400)
# Reuse the already-built document list instead of regenerating it for
# build_vocab and again for train (the original rebuilt it three times).
model.build_vocab(docs)
model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)
model.save('../match_models/%s.emb'%name)


# Sanity-check the Doc2Vec model: for every user, re-infer a vector from the
# user's features and record where the user's own trained document vector
# ranks among the most-similar documents (rank 0 means the model retrieves
# the user itself — the ideal outcome).
ranks = []
second_ranks = []
for user in G_all.nodes:
    user = str(user)  # document tags and feature keys are string ids
    inferred_vector = model.infer_vector([str(x) for x in features[user]])
    # Rank every trained document vector by similarity to the inferred one.
    sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
    rank = [docid for docid, sim in sims].index(user)
    ranks.append(rank)

    # Keep the runner-up match for optional inspection.
    second_ranks.append(sims[1])
import collections
# Distribution of self-retrieval ranks; a large count at rank 0 is good.
counter = collections.Counter(ranks)
print(counter)





# Reload the trained embeddings; fm maps str(user_id) -> embedding vector.
fm = Doc2Vec.load('../match_models/%s.emb'%name).docvecs

# Positive samples: real friendships among old users.
pos_pairs = random.sample(list(G_old.edges), 20000)

# Negative samples: random user pairs. The original accepted any random
# pair, so self-pairs and genuine edges could be labelled 0 (label noise).
# Reject those so negatives are guaranteed non-edges.
neg_pairs = []
while len(neg_pairs) < 100000:
    u1 = random.choice(old_users)
    u2 = random.choice(old_users)
    if u1 != u2 and not G_old.has_edge(u1, u2):
        neg_pairs.append((u1, u2))
    
# --- Pair feature construction --------------------------------------------
def feature_fusion(u1, u2, feature_dict):
    """
    Build a pair feature vector from two users' embeddings.

    Concatenates [f1, f2, |f1 - f2|, f1 * f2], so a pair of d-dim
    embeddings yields a 4*d-dim vector.

    :param u1: first user id (anything convertible with str()).
    :param u2: second user id.
    :param feature_dict: mapping from str(user_id) to an embedding.
        Values are converted with np.asarray, so plain Python lists work
        too (the original crashed on lists because `f1 - f2` is not
        defined for them).
    :return: 1-D numpy array of length 4 * d.
    """
    f1 = np.asarray(feature_dict[str(u1)])
    f2 = np.asarray(feature_dict[str(u2)])
    ABS = np.abs(f1 - f2)
    MUL = np.multiply(f1, f2)
    return np.concatenate([f1, f2, ABS, MUL])

# Assemble the training set: positive pairs labelled 1, the first 80k
# negative pairs labelled 0.
X = [feature_fusion(u1, u2, fm) for u1, u2 in pos_pairs]
Y = [1] * len(pos_pairs)
for u1, u2 in neg_pairs[:80000]:
    X.append(feature_fusion(u1, u2, fm))
    Y.append(0)

# Shuffle samples and labels with one shared random permutation so the
# positives are not all at the front of the batch stream.
random_idxs = np.random.permutation(len(X))
X = np.array(X)[random_idxs]
Y = np.array(Y)[random_idxs]

import keras
from keras.models import Sequential
from keras.layers import Dense,Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score,precision_score, recall_score, f1_score
import matplotlib.pyplot as plt

# --- Link-prediction classifier --------------------------------------------
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

# Small MLP over the fused pair features. Derive the input width from the
# data (4 * embedding dim) instead of hard-coding 200, so changing the
# Doc2Vec vector_size no longer silently breaks the model.
nn_model = Sequential()
nn_model.add(Dense(32, activation='tanh', input_dim=X_train.shape[1]))
nn_model.add(Dense(16, activation='tanh'))
nn_model.add(Dense(1, activation='sigmoid'))

nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
nn_model.fit(X_train, y_train, epochs=100, batch_size=64)


# --- Evaluation ------------------------------------------------------------
y_true_nn = np.array(y_test)
# predict() returns an (n, 1) array of probabilities; flatten it once so
# downstream metrics see a plain 1-D vector.
y_prob_nn = nn_model.predict(np.array(X_test)).ravel()
# Vectorized 0.5 threshold instead of a per-element Python loop.
y_predict_nn = (y_prob_nn > 0.5).astype(int)

# accuracy, precision, recall, F1 (label typo 'presicion' fixed)
print("=======NN:========")
print('accuracy:', accuracy_score(y_true_nn, y_predict_nn))
print('precision:', precision_score(y_true_nn, y_predict_nn))
print('recall:', recall_score(y_true_nn, y_predict_nn))
print('F1:', f1_score(y_true_nn, y_predict_nn))

# ROC / AUC computed on the raw probabilities, not the thresholded labels.
fpr, tpr, thresholds = roc_curve(y_true_nn, y_prob_nn)
roc_auc = auc(fpr, tpr)
print('AUC:', roc_auc)

nn_model.save('../match_models/twitch_es_nn.h5')

# Persist the full graph as node-link JSON plus the sampled new-user ids
# for downstream use.
g_dict = nx.readwrite.json_graph.node_link_data(G_all)
with open('../data/subgraphs/twitch_es_graph.json','w') as f:
    json.dump(g_dict, f)  # stream to the file; avoids building one big string

new_users_df = pd.DataFrame(new_users, columns=['id'])
new_users_df.to_csv('../data/subgraphs/twitch_es_new_users.csv')

