# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 11:37:16 2019

@author: GBY

train link prediction model
"""
import random
import pandas as pd
import numpy as np
import json
import networkx
from networkx import *
from featureExtraction import get_all_features
# Load the user_base data and the friends_relation data:
user_base = pd.read_csv('../data/slct_users_10att.csv') # used to look up any user's attributes
print(user_base.columns)

# Load the sub-network/sub-graph under study and its new users, and record the
# new users' friendships (NOTE: these are friendships *within this sub-network*
# only!). Then isolate the old users and build the old-users graph — all later
# expansion happens inside this old graph, so the old users' friendships get
# their own dict.
with open('../data/subgraphs/subgraph1.json', 'r') as f:
    data = json.loads(f.read())
# Call through the explicitly imported module instead of relying on the
# `from networkx import *` wildcard, which hides where the name comes from.
G = networkx.readwrite.json_graph.node_link_graph(data)
all_users = list(G.nodes)
new_users = list(pd.read_csv('../data/subgraphs/new_users1.csv').id)
new_users_friends_dict = {user: list(G.neighbors(user)) for user in new_users}
# Set membership is O(1); the original list scan made this filter O(n*m).
new_user_set = set(new_users)
old_users = [n for n in all_users if n not in new_user_set]
G_o = G.subgraph(old_users)  # old-users sub-graph
old_users_friends_dict = {user: list(G_o.neighbors(user)) for user in old_users}


# 正负样本的生成：
"""
注意：我们是要在老用户上面训练

正样本的选择：好友关系
负样本的选择：这里不采用随机挑选，而是选择二阶好友但不是直接好友的用户
这样的负样本更加有迷惑性，从而提高模型的性能

10.30记录：这样的迷惑性是不是太强了？搞得我的模型根本无法收敛？
           还是添加一点随机样本吧！
           
10.31记录：
"""

# Sample-pair generation over the old users only (training happens on old users).
#
# Positives: existing friendship edges.
# Hard negatives (`strangers_pairs`): second-degree friends who are not direct
#   friends — more confusable than random picks.
# Random negatives (`random_pairs`): uniformly drawn non-friends, added after
#   the hard negatives alone kept the model from converging (see dev notes above).
friends_pairs = []
strangers_pairs = []
random_pairs = []
for user in old_users:
    # Hoist this user's friend list into a set once: O(1) membership tests
    # instead of a list scan for every candidate in the loops below.
    friend_set = set(old_users_friends_dict[user])
    for f in old_users_friends_dict[user]:
        # Positive pair: a direct friendship edge.
        friends_pairs.append((user, f))
        # Up to 5 random-negative draws per edge; keep only non-friends, and
        # never pair a user with themselves (the original could emit
        # (user, user) when random.choice hit the user).
        for _ in range(5):
            random_user = random.choice(old_users)
            if random_user != user and random_user not in friend_set:
                random_pairs.append((user, random_user))
        # Hard negatives: friends-of-friends that are not direct friends.
        # Exclude `user` itself — a friend's neighbor list always contains
        # `user`, which previously produced bogus (user, user) negatives.
        for ff in old_users_friends_dict[f]:
            if ff != user and ff not in friend_set:
                strangers_pairs.append((user, ff))
# Deduplicate the random negatives (the 5 draws can repeat across edges).
random_pairs = list(set(random_pairs))

# Down-sample each pair pool into (id1, id2) DataFrames: pos_num positives
# and five times as many of each negative flavour.
pos_num = 20000
neg_num = 5 * pos_num
_pair_cols = ['id1', 'id2']
pos_user_pairs = pd.DataFrame(random.sample(friends_pairs, pos_num), columns=_pair_cols)
neg_user_pairs = pd.DataFrame(random.sample(strangers_pairs, neg_num), columns=_pair_cols)
rand_user_pairs = pd.DataFrame(random.sample(random_pairs, neg_num), columns=_pair_cols)

# Feature extraction is expensive, so precomputed CSVs are loaded below.
# Regenerate them with get_all_features when the pair samples change:
#pos_features = get_all_features(user_base,old_users_friends_dict,pos_user_pairs)
#neg_features = get_all_features(user_base,old_users_friends_dict,neg_user_pairs)
#rand_features = get_all_features(user_base,old_users_friends_dict,rand_user_pairs)

pos_features = pd.read_csv('../data/pos_features.csv')
neg_features = pd.read_csv('../data/neg_features.csv')
rand_features = pd.read_csv('../data/rand_features.csv')

# Assemble the design matrix. NOTE: the hard negatives are sliced to zero rows
# ([:0]) — they are deliberately excluded from this run (the model failed to
# converge on them; see the 10.30 dev note) but kept in the concat so they are
# easy to re-enable.
pos_block = pos_features.drop(['id1', 'id2'], axis=1)[:20000]
rand_block = rand_features.drop(['id1', 'id2'], axis=1)[:30000]
neg_block = neg_features.drop(['id1', 'id2'], axis=1)[:0]
X = pd.concat([pos_block, rand_block, neg_block])
# Derive the labels from the actual block lengths so X and Y stay aligned even
# if a CSV has fewer rows than the requested slice (the original hard-coded
# [1]*20000+[0]*30000 would silently mislabel in that case).
Y = np.array([1] * len(pos_block) + [0] * (len(rand_block) + len(neg_block)))

# Shuffle features and labels with one shared permutation.
random_index = np.random.permutation(len(Y))
X = np.array(X)[random_index]
Y = Y[random_index]


import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import matplotlib.pyplot as plt

# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

# Simple fully-connected binary classifier: feature vector -> sigmoid score.
# input_dim is taken from the data (13 features here) instead of being
# hard-coded; the original also passed input_dim=13 to the *second* layer,
# which Keras silently ignores — dropped as misleading.
nn_model = Sequential()
nn_model.add(Dense(32, activation='tanh', input_dim=X_train.shape[1]))
nn_model.add(Dense(16, activation='tanh'))
nn_model.add(Dense(8, activation='tanh'))
nn_model.add(Dense(1, activation='sigmoid'))

nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
nn_model.fit(X_train, y_train, epochs=100, batch_size=64)

#nn_model.save('../match_models/new_LM_1.h5')

#nn_model.save('../match_models/new_LM_1.h5')



#=====================
# Load the previously saved model so the freshly trained one can be compared
# against it on the same held-out split.
from keras.models import load_model
LM = load_model('../match_models/new_LM_1.h5')

y_true_nn = np.array(y_test)
y_prob_nn = nn_model.predict(np.array(X_test))
# NOTE(review): the new model is thresholded at 0.7 while the old model below
# uses 0.5 — confirm this asymmetry is intentional.
y_predict_nn = [1 if y > 0.7 else 0 for y in y_prob_nn]

# accuracy, precision, recall, f1
print("=======New:========")
print('accuracy:', accuracy_score(y_true_nn, y_predict_nn))
# Fixed misspelled output label ('presicion' -> 'precision').
print('precision:', precision_score(y_true_nn, y_predict_nn))
print('recall:', recall_score(y_true_nn, y_predict_nn))
print('F1:', f1_score(y_true_nn, y_predict_nn))

# ROC / AUC computed on the raw probabilities, not the thresholded labels:
fpr, tpr, thresholds = roc_curve(y_true_nn, y_prob_nn)
roc_auc = auc(fpr, tpr)
print('AUC:', roc_auc)

# =======
# Same evaluation for the previously saved ("old") model, at threshold 0.5.
y_true_nn = np.array(y_test)
y_prob_nn = LM.predict(np.array(X_test))
y_predict_nn = [1 if y > 0.5 else 0 for y in y_prob_nn]

# accuracy, precision, recall, f1
print("=======Old:========")
print('accuracy:', accuracy_score(y_true_nn, y_predict_nn))
# Fixed misspelled output label ('presicion' -> 'precision').
print('precision:', precision_score(y_true_nn, y_predict_nn))
print('recall:', recall_score(y_true_nn, y_predict_nn))
print('F1:', f1_score(y_true_nn, y_predict_nn))

# ROC / AUC computed on the raw probabilities:
fpr, tpr, thresholds = roc_curve(y_true_nn, y_prob_nn)
roc_auc = auc(fpr, tpr)
print('AUC:', roc_auc)
# =======
# =======



# Plot the ROC curve of the most recently evaluated model (the old model,
# since fpr/tpr/roc_auc were last overwritten in the section above).
plt.figure()
line_width = 2
plt.plot(fpr, tpr, color='darkorange', lw=line_width,
         label='ROC curve (area = %0.2f)' % roc_auc)
# Diagonal reference line = performance of a random classifier.
plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve--(NN model)')
plt.legend(loc="lower right")
plt.show()












