# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 10:39:53 2019

@author: gby

brec for Twitch datasets.
"""
import random
random.seed(1)
import pandas as pd
import numpy as np
from keras.models import load_model
import json
import os
from scipy import sparse as sp
import networkx
from networkx import *
import matplotlib.pyplot as plt
import getopt
import sys
from gensim.models.doc2vec import Doc2Vec
from utils import musae_feature_fusion

# ========================= Loading Preparation Data ============================

# Load the sub-network/sub-graph under study and its new users, and store the
# new users' friend lists (note: these are friendships *within this
# sub-network* only!).  Then find the old users and build the old-user graph;
# all later expansion happens inside this old graph, which is why the old
# users' friend lists are kept separately.
name= 'twitch_es'
with open('../data/subgraphs/twitch_es_graph.json','r') as f:
    data = json.loads(f.read())
G = readwrite.json_graph.node_link_graph(data)
all_users = list(G.nodes)
new_users = list(pd.read_csv('../data/subgraphs/twitch_es_new_users.csv').id)
new_users_friends_dict = {user:list(G.neighbors(user)) for user in new_users}
old_users = [n for n in all_users if n not in new_users]
G_o = G.subgraph(old_users) # old sub-graph
old_users_friends_dict = {user:list(G_o.neighbors(user)) for user in old_users}


# Load the link-prediction model LM, plus the Doc2Vec node-feature vectors fm
# used to build its pairwise input features:
fm = Doc2Vec.load('../match_models/%s_features.emb'%name).docvecs
LM = load_model('../match_models/twitch_es_nn.h5')
# Load the node embedding models:
import gensim
from gensim.models import KeyedVectors
print("Loading node embeddings, may take a few minutes......")
n2v_emb = KeyedVectors.load_word2vec_format("../node_embeddings/twitch_es_n2v.emb")
gcn_emb = KeyedVectors.load_word2vec_format("../node_embeddings/twitch_es_gcn.emb")

# Load the data needed for random walks: the walk base matrix M and the
# node-id <-> matrix-index maps.
M = sp.load_npz('../rw_data/%s_rw_base_matrix.npz'%name)
id_map = json.load(open('../rw_data/%s_rw_id_map.json'%name))
rev_id_map = json.load(open('../rw_data/%s_rw_rev_id_map.json'%name))


# ================================ Defining util functions： ====================


# given a seed:
def random_walks(nodes, num, iterations=3, alpha=0.9):
    """Personalized random walk with restart from `nodes`.

    Builds a restart vector over all matrix rows (via `id_map`), iterates
    r <- alpha*M*r + (1-alpha)*e for `iterations` steps, and returns the
    `num` node ids with the highest visit probability.

    Experimentally, as alpha approaches 0 (it must not equal 0) the
    expansion degenerates into simply picking direct friends.  The best
    value for `iterations` is still an open question.

    :param nodes: iterable of seed node ids (str-convertible).
    :param num: number of top-ranked node ids to return.
    :param iterations: number of walk steps.
    :param alpha: walk-continuation probability (restart prob is 1-alpha).
    :return: list of node ids sorted by descending visit probability.
    """
    # Restart vector: 1.0 at each seed node's matrix row.
    e = np.zeros(len(id_map))
    for node in nodes:
        e[id_map[str(node)]] = 1
    # Column vectors for the sparse iteration.  The original `r = e[:]` was a
    # NumPy *view* of e, not a copy; make the copy explicit after conversion.
    e = sp.csc_matrix(e).T
    r = e.copy()
    for _ in range(iterations):
        # Use sparse matmul (@) instead of np.dot: scipy's docs warn that
        # applying NumPy functions directly to sparse matrices can produce
        # incorrect results.
        r = alpha * (M @ r) + (1 - alpha) * e
    # r.shape : (num_nodes, 1)
    nonzero_idx = r.nonzero()[0]                      # rows of visited nodes
    nonzero_values = r[nonzero_idx].toarray()         # their visit probabilities
    ids = [rev_id_map[str(x)] for x in nonzero_idx]   # back to node names
    value_df = pd.DataFrame(nonzero_values, columns=['prob'])
    id_df = pd.DataFrame(ids, columns=['id'])
    df = pd.concat([id_df, value_df], axis=1).sort_values(by='prob', ascending=False)
    return list(df.id)[:num]


def create_allocation(num_seeds, mode, cake, cvx_degree, ccv_degree):
    """Split a budget of `cake` expansion slots across `num_seeds` ranked seeds.

    Seed rank 1 is the best seed; weights decrease with rank according to
    `mode`:
      'uniform' - equal share per seed
      'linear'  - weight num_seeds+1-rank
      'convex'  - weight (num_seeds+1-rank)**cvx_degree (front-loaded)
      'concave' - weight (num_seeds+1-rank)**ccv_degree (flatter)

    :param num_seeds: number of seeds to allocate to.
    :param mode: one of 'uniform', 'linear', 'convex', 'concave'.
    :param cake: total number of slots to distribute.
    :param cvx_degree: exponent used by the 'convex' mode.
    :param ccv_degree: exponent used by the 'concave' mode.
    :return: list of per-seed slot counts (rounded; may not sum exactly to cake).
    :raises ValueError: if `mode` is not one of the four regimes (the
        original code raised an obscure NameError here).
    """
    rank_list = range(1, num_seeds + 1)
    if mode == 'uniform':
        ps = [1 / num_seeds for x in rank_list]
    elif mode == 'linear':
        ps = [num_seeds + 1 - x for x in rank_list]
    elif mode == 'convex':
        ps = [pow(num_seeds + 1 - x, cvx_degree) for x in rank_list]
    elif mode == 'concave':
        ps = [pow(num_seeds + 1 - x, ccv_degree) for x in rank_list]
    else:
        raise ValueError("unknown allocation mode: %r" % mode)
    # Hoist sum(ps) out of the comprehension (it was recomputed per element).
    total = sum(ps)
    return [round(cake * p / total) for p in ps]


def link_prediction(new_user,existing_users):
    """Rank `existing_users` as link candidates for `new_user`.

    Each (new_user, candidate) pair is turned into a fused feature vector and
    scored by the link-prediction model LM.

    :return: DataFrame with columns 'id' and 'score', sorted by descending score.
    """
    # Build the pairwise feature matrix for all candidates at once.
    pair_features = [musae_feature_fusion(new_user, candidate, fm)
                     for candidate in existing_users]
    scores = LM.predict(np.array(pair_features))
    score_df = pd.DataFrame(scores, columns=['score']).reset_index(drop=True)
    id_df = pd.DataFrame(existing_users, columns=['id']).reset_index(drop=True)
    ranking = pd.concat([id_df, score_df], axis=1)
    return ranking.sort_values(by='score', ascending=False)




# ===================================================================================
random.seed(0)
random.shuffle(old_users)# Shuffle old_users once up front; taking the first n entries then acts as a reproducible random sample.

def brec(params,given_seeds=None):
    """Candidate generation + link-prediction ranking for one new user.

    Compares five candidate-expansion methods:
      'b'   - random-sample baseline
      'f'   - friends-of-seeds expansion
      'rw'  - random-walk expansion
      'n2v' - node2vec embedding nearest neighbours
      'gcn' - GCN embedding nearest neighbours

    params keys: 'new_user', 'pool_size', 'seed_size', 'iterations' and,
    when `given_seeds` is falsy, 'init_extend_base_size'.
    Returns [candidates_ranking, results, original_seeds].
    """
    new_user = params['new_user']
    pool_size = params['pool_size']
    seed_size = params['seed_size']
    # allocation_regime = params['allocation_regime']
    iterations = params['iterations']
    if given_seeds:

        seeds = [user for user in given_seeds if user in old_users][:seed_size]
        original_seeds = seeds[:]
    else:
        init_extend_base_size = params['init_extend_base_size']

        # extend_base = random.sample(old_users,init_extend_base_size)
        # old_users has already been shuffled, so the first n entries form a
        # random sample; this keeps the initial seeds identical across
        # repeated brec calls.
        extend_base = old_users[:init_extend_base_size]
        extend_base_ranking = link_prediction(new_user,extend_base)
        seeds = extend_base_ranking.id[:seed_size]
        original_seeds = list(seeds)[:]

    # Once iteration starts, each method's seeds diverge, so the expansion
    # logic cannot be shared inline; it is wrapped in a function that is
    # called once per method.
    # NOTE(review): `seeds=seeds` binds the current seeds as the default at
    # definition time; the caller below also passes seeds explicitly.
    def candidates_generation(method,iterations,seeds=seeds):
        # ---random sample (baseline):
        if method == 'b':
            all_candidates = random.sample(old_users,pool_size)
            all_candidates_ranking = link_prediction(new_user,all_candidates)
            ranked_candidates = list(all_candidates_ranking.id)
            return ranked_candidates,all_candidates_ranking

        each_iteration_rankings = [] # per-iteration candidates with their LM match-score ranking
        extend_size = pool_size//iterations # number of users to expand per iteration
        all_candidates = [] # all candidates so far; used for de-duplication while expanding
        for iteration in range(iterations):
            # Compute how many users each seed should expand, per the allocation regime:
            allocations = create_allocation(len(seeds),'uniform',extend_size,cvx_degree=3,ccv_degree=1/5)
            current_iteration_candidates = []
            for seed,allocation in zip(seeds,allocations):
                rd = 20 #  redundancy multiplier: over-fetch, then filter down to `allocation`
                # ---f:
                if method == 'f':
                    extend_users_f = old_users_friends_dict[seed][:allocation*rd]
                    # Filter so candidates chosen in earlier iterations are not picked again:
                    extend_users_f = [x for x in extend_users_f if x not in all_candidates][:allocation] # also prevents duplicates across seeds
                    all_candidates += extend_users_f
                    current_iteration_candidates += extend_users_f
                # ---rw
                if method == 'rw':
                    extend_users_rw = random_walks([seed],allocation*rd) # queried with str, returns a list of ints
                    extend_users_rw = [x for x in extend_users_rw if x not in all_candidates][:allocation]
                    all_candidates += extend_users_rw
                    current_iteration_candidates += extend_users_rw
                # ---n2v:
                if method == 'n2v':
                    similars = n2v_emb.most_similar(str(seed),topn=allocation*rd)
                    extend_users_n2v = [int(x[0]) for x in similars]
                    extend_users_n2v = [x for x in extend_users_n2v if x not in all_candidates][:allocation]
                    all_candidates += extend_users_n2v
                    current_iteration_candidates += extend_users_n2v
                # ---gcn:
                if method == 'gcn':
                    similars = gcn_emb.most_similar(str(seed),topn=allocation*rd)
                    extend_users_gcn = [int(x[0]) for x in similars]
                    extend_users_gcn = [x for x in extend_users_gcn if x not in all_candidates][:allocation]
                    all_candidates += extend_users_gcn
                    current_iteration_candidates += extend_users_gcn

            ranking = link_prediction(new_user,current_iteration_candidates)
            each_iteration_rankings.append(ranking)
            # Produce the seeds for the next iteration:
            seeds = list(ranking.id)[:seed_size]


        # After all iterations, merge every iteration's results into an
        # overall ranking:
        all_candidates_ranking = pd.concat(each_iteration_rankings).sort_values(by='score',ascending=False)
        all_candidates_ranking = all_candidates_ranking.drop_duplicates() # remove duplicate rows
        ranked_candidates = list(all_candidates_ranking.id)

        return ranked_candidates,all_candidates_ranking


    # Metric computation, also wrapped as a function:
    def cal_metrics(expected_candidates,ranked_candidates):
        print('  Number of candidates provided: ',len(ranked_candidates),'/',pool_size)
        cross = set(expected_candidates).intersection(set(ranked_candidates)) # intersection
        # Hit-rate is redefined here as the coverage of this new user's
        # friends, hence the friend count is the denominator.
        hit_rate = len(cross)/len(expected_candidates)
        print('  Number of friends candidates Hit: %d, Hit-rate=%f'%(len(cross),hit_rate))

        # Sum of reciprocal-rank values over all correct recommendations; the
        # division by pool_size is omitted since that denominator is constant.
        mrr = 0
        if len(cross)>0:
            for user in cross:
                # Rank of this friend in the returned list:
                this_rank = ranked_candidates.index(user)+1
                mrr += 1/this_rank
        print('  mrr:',mrr)

        # topn
        topn_scores = []
        for n in [10,20,30]:
            topn_users = ranked_candidates[:n]
            s = len(set(topn_users).intersection(expected_candidates))
            print('  top%d score:'%n,s/n)
            topn_scores.append(round(s/n,3))

        return {'hit_rate':round(hit_rate,3),'mrr':round(mrr,3),'topn':topn_scores}

    # ====== Produce the evaluation metrics:
    candidates_ranking = {}
    results = {}
    print('Evaluating for user_id: ',new_user)
    expected_candidates = new_users_friends_dict[new_user] # the expected candidates are simply the new user's friends
    print('Number of friends:',len(expected_candidates))
    print('------')
    for method in ['b','f','rw','n2v','gcn']:
        print('-Method:',method)
        ranked_candidates,all_candidates_ranking = candidates_generation(method,iterations,seeds=seeds)
        candidates_ranking[method] = all_candidates_ranking
        results[method] = cal_metrics(expected_candidates,ranked_candidates)

    return [candidates_ranking,results,original_seeds]



# =============================================
#import multiprocessing as mp
#import warnings
#import os
# warnings.filterwarnings("ignore")
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"


#if __name__ == '__main__':


    # # Multiprocessing variant (faster, but the output order is scrambled,
    # # which is inconvenient for analysis during experiments):
    # p = mp.Pool(10)
    # # Collect all parameter dicts in a list so they can be mapped in:
    # params_list = []
    # for new_user in new_users:
    #     params = {}
    #     params['new_user'] = new_user
    #     params['pool_size'] = args['pool_size']
    #     params['seed_size'] = args['seed_size']
    #     params['iterations'] = args['iterations']
    #     params['init_extend_base_size'] = args['init_extend_base_size']
    #     params_list.append(params)
    # # Should return a list of length 100:
    # all_users_results = p.map(brec,params_list)
    # print(all_users_results)
    # p.close()# close the pool; accept no more tasks
    # p.join()# wait for all pool tasks to finish, then reclaim the pool
logs = {}  # experiment log: params_text -> all_users_results

# ================ Defining parameters:===================
args = {}
# Default values:
args['iterations'] = 1
args['pool_size'] = 100 # how many users to recommend to each new user
args['seed_size'] = 5 # number of seeds used in each iteration
args['init_extend_base_size'] = 20 # how many users are used initially to generate the seeds
params_text = "[i=%d, ps=%d, ss=%d, bs=%d,4methods]"%(args['iterations'],args['pool_size'],args['seed_size'],args['init_extend_base_size'])
# The variant below is intended for runs with given seeds:
#params_text = "[i=%d, ps=%d, ss=%d]"%(args['iterations'],args['pool_size'],args['seed_size'])

# =======================================================


all_users_results = {}
for method in ['b','f','rw','n2v','gcn']:
    all_users_results[method] = {}
    for metric in ['hit_rate','mrr','topn']:
        all_users_results[method][metric] = []
""" 形成这样的结构：
{'b': {'hit_rate': [], 'mrr': [], 'topn': []},
 'f': {'hit_rate': [], 'mrr': [], 'topn': []},
 'rw': {'hit_rate': [], 'mrr': [], 'topn': []},
 'n2v': {'hit_rate': [], 'mrr': [], 'topn': []},
 'gcn': {'hit_rate': [], 'mrr': [], 'topn': []}}
"""
import time
t1 = time.time()
for n,new_user in enumerate(new_users):
    print("****",n)
    params = {}
    params['new_user'] = new_user
    params['pool_size'] = args['pool_size']
    params['seed_size'] = args['seed_size']
    params['iterations'] = args['iterations']
    params['init_extend_base_size'] = args['init_extend_base_size']
    
    # 初始化种子：分别是好中坏
    friends_seeds = new_users_friends_dict[new_user]
    friends_seeds = [x for x in friends_seeds if x in old_users]
    second_seeds = []
    for f in friends_seeds:
        second_seeds += old_users_friends_dict[f]
    second_seeds = [x for x in second_seeds if x not in friends_seeds]
    third_seeds = []
    for f in second_seeds:
        third_seeds += old_users_friends_dict[f]
    third_seeds = [x for x in third_seeds if x not in friends_seeds+second_seeds]
    random_seeds = random.sample(old_users,100)
    random_seeds = [x for x in random_seeds if x not in friends_seeds+second_seeds]
    
    
    [candidates_ranking,results,all_seeds] = brec(params,given_seeds=[])
    
    for i,method in enumerate(['b','f','rw','n2v','gcn']): 
        for metric in ['hit_rate','mrr','topn']:
            all_users_results[method][metric].append(results[method][metric])
t2 = time.time()
print('Runnig all new users cost time:',t2-t1,'s')
logs[params_text] = all_users_results


#all_users_results = {}
#all_users_results = logs['[i=1, ps=100, ss=5, bs=15]']

# Plot the per-user distribution of one chosen metric:
metric = 'mrr'
print(metric,':')


# Split the new users into two groups by friend count (<50 vs >=50):
group1_index = []
group2_index = []
for i,each in enumerate(new_users):
    c = len(new_users_friends_dict[each])
    if c<50:
        group1_index.append(i)
    else:
        group2_index.append(i)


# Y[i] holds the chosen metric's per-user values for method i
# (order: 'b','f','rw','n2v','gcn'):
x_axix = [i for i in range(len(new_users))]
Y = [[] for i in range(5)]
for i,method in enumerate(['b','f','rw','n2v','gcn']):
    Y[i] = np.array(all_users_results[method][metric])
if metric == 'topn':
    for i,method in enumerate(['b','f','rw','n2v','gcn']):
        Y[i] = [l[0] for l in np.array(all_users_results[method][metric])] # top10
        

# =============== Individual analysis: =================
ym = np.array(Y) # shape: (5 methods, num_new_users)

# For each new user (column), record the indices of the method(s) achieving
# the best non-zero metric value:
winners = []
for i in range(ym.shape[1]):
    col = ym[:,i]
    winner = []
    maxi = max(list(col))
    if maxi != 0:
        for idx,each in enumerate(list(col)):
            if maxi == each:
                winner.append(idx)
    winners.append(winner)

# Count sole champions per method; index 0 (baseline 'b') is deliberately
# excluded from the tally:
champions = {'f':0,'rw':0,'n2v':0,'gcn':0}
champions_idx = {'f':[],'rw':[],'n2v':[],'gcn':[]} # used to recover the corresponding new-user index
for i,winner in enumerate(winners):
    if len(winner) == 1: # == means sole champion; >= would also count ties
        if 1 in winner:
            champions['f'] += 1
            champions_idx['f'].append(i)
        if 2 in winner:
            champions['rw'] += 1
            champions_idx['rw'].append(i)
        if 3 in winner:
            champions['n2v'] += 1
            champions_idx['n2v'].append(i)
        if 4 in winner:
            champions['gcn'] += 1
            champions_idx['gcn'].append(i)
print('champions for ',metric,':')
print(champions)

# Average friend count of each method's best cases:
#for method in ['f','rw','n2v','gcn']:
#    idxs = champions_idx[method]
#    num_friends = [len(new_users_friends_dict[new_users[idx]]) for idx in idxs]
#    print(method,'best cases avg num of friends:',sum(num_friends)/len(num_friends))

# ============= Overall comparison plot: =============
plt.figure(figsize=(10,5))
for y,c,l,s in zip(Y,['pink','black','red','green','blue'],\
                 ['b','f','rw','n2v','gcn'],['-','-','--','-.',':']):
    plt.plot(range(len(x_axix)),sorted(y),color=c,label=l,linewidth=2.5)
#    plt.plot(x_axix,y,color=c,label=l)
    # Flat line at this method's mean metric value:
    plt.plot(x_axix,[sum(y)/len(y) for i in y],color=c,linestyle=s,linewidth=1.5)
    print(l,'%f'%(sum(y)/len(y)))
plt.legend() # show the legend
plt.xlabel('New Users\' Indexs')
plt.ylabel(metric)
#plt.xticks(x_axix)
fontdict={'family': 'Arial', 'color': 'black', 'weight': 'normal', 'size': 20}
#plt.title(metric+'\n'+params_text,fontdict=fontdict)
plt.title(metric+'\n(twitch datasets)',fontdict=fontdict)
plt.show()



# Individual result visualization:
#new_user_idx = champions_idx['gcn'][0]
#print(len(new_users_friends_dict[new_users[new_user_idx]]))
#params = {}
#params['new_user'] = new_users[29]
#params['pool_size'] = 100
#params['seed_size'] = 5
#params['iterations'] = 2
#params['init_extend_base_size'] = 15
#[candidates_ranking,nothing,original_seeds] = brec(params)
#for method in ['rw','n2v','gcn']:
##    brec_viz(G,params['new_user'],list(candidates_ranking[method].id),original_seeds,"[%s]"%method,plot_ego_graph=True)
#    brec_viz_iteractive(G,params['new_user'],list(candidates_ranking[method].id),\
#                        original_seeds,method,plot_ego_graph=False,params_text=params_text)
#





