# -*- coding: utf-8 -*-
"""
Created on Mon Aug 19 16:44:53 2019

@author: gby

random walk跟通过friends直接拓展的区别在于拓展同阶的好友存在一个概率分布，而这个分布可以自己定义

这里有两种写法，一个是纯粹random walk，借用graphsage中的random walk的方法。
这种自然可以保证一个共同节点多的节点会更多地被遍历。
但这种方法不方便我们快速取出walks的节点。

第二种方法，借用TrustRank的方法，构造一个权重矩阵，然后在矩阵上传播。
这种方法的难点和耗时之处就是权重矩阵的生成。
我有40多万节点，所以必须使用矩阵压缩

"""
import json
from scipy import sparse as sp
import numpy as np
import pandas as pd
import networkx as nx
from networkx import readwrite
from networkx.readwrite import json_graph

# Load the graph to process (node-link JSON dump produced earlier in the pipeline).
name = 'twitch_es'
with open('../data/subgraphs/%s_graph.json' % name, 'r') as f:
    data = json.loads(f.read())
g = readwrite.json_graph.node_link_graph(data)

# "New" users are held out; the walk matrix is built over the remaining
# ("old") users only.
new_users = list(pd.read_csv('../data/subgraphs/%s_new_users.csv' % name).id)
all_users = list(g.nodes)
# Membership test against a set is O(1); the original list scan made this
# filter quadratic over the ~400k-node graph.
new_user_set = set(new_users)
old_users = [u for u in all_users if u not in new_user_set]
old_graph = g.subgraph(nodes=old_users)
# Adjacency restricted to the old-user subgraph: node -> list of neighbors.
old_users_friends_dict = {user: list(old_graph.neighbors(user)) for user in old_users}
friends_dict = old_users_friends_dict

# --------------------------------------------
# Bidirectional lookup between node name and matrix row index.
# (The original loop variable shadowed the builtin `id`.)
id_map = {node: row for row, node in enumerate(old_users)}      # node name -> row index
rev_id_map = {row: node for row, node in enumerate(old_users)}  # row index -> node name


# Triplet (COO) buffers for the sparse transition-weight matrix.
row_idx, col_idx, data = [], [], []

def co_f(node1, node2, friends=None):
    """Edge weight between two adjacent nodes: number of common friends + 1.

    The +1 keeps the weight of a friendship with zero shared neighbors
    strictly positive; otherwise such edges would normalize to 0 and be
    dropped from the walk matrix entirely.

    ``friends`` maps node -> list of neighbors; it defaults to the
    module-level ``friends_dict`` so existing two-argument calls behave
    exactly as before.
    """
    if friends is None:
        friends = friends_dict
    f1 = set(friends[node1])
    f2 = set(friends[node2])
    return len(f1 & f2) + 1  # this '1' is important (see docstring)

# Fill the COO buffers — one source row per old user. Each row's weights
# are normalized to sum to 1 (row-stochastic before the transpose below).
# Runs in about a minute on the full graph.
for count, src in enumerate(old_users):
    if count % 1000 == 0:
        print('num', count)
    weights = []
    total = 0
    for nbr in friends_dict[src]:
        row_idx.append(id_map[src])
        col_idx.append(id_map[nbr])
        # Self-loops get weight 0 so a walk never "stays" via its own edge.
        w = 0 if nbr == src else co_f(src, nbr)
        weights.append(w)
        total += w
    if total > 0:
        weights = np.array(weights) / total
    data += list(weights)

# Assemble the weight matrix. Pass the shape explicitly: without it,
# coo_matrix infers the size from the largest index present, so trailing
# nodes with no edges would silently shrink the matrix.
n = len(old_users)
mat = sp.coo_matrix((data, (row_idx, col_idx)), shape=(n, n))
# TrustRank-style propagation multiplies from the left (M @ r), so store
# the transpose: columns of M sum to 1.
M = mat.T
sp.save_npz('../rw_data/%s_rw_base_matrix.npz' % name, M)
with open('../rw_data/%s_rw_id_map.json' % name, 'w') as f:
    json.dump(id_map, f)
with open('../rw_data/%s_rw_rev_id_map.json' % name, 'w') as f:
    json.dump(rev_id_map, f)


# ====================== external-use section: ===============
# ============== random walk data prepare: ======================
M = sp.load_npz('../rw_data/%s_rw_base_matrix.npz' % name)
# Use context managers so the file handles are closed deterministically
# (the original json.load(open(...)) leaked them).
with open('../rw_data/%s_rw_id_map.json' % name) as f:
    id_map = json.load(f)
with open('../rw_data/%s_rw_rev_id_map.json' % name) as f:
    rev_id_map = json.load(f)
# json.load turns every dict key into a str — hence the str(...) lookup.
nodes = np.array([int(rev_id_map[str(x)]) for x in range(len(rev_id_map))])

# 基础矩阵已经生成，现在就可以开始random walk了：
#
## given a seed:
#def random_walks_old(node,num,iterations=3,alpha=0.9): # 实验发现，alpha越趋近于0（不能等于0），拓展的范围就越趋近于直接寻找friends
#    # node is str type
#    e = np.zeros(len(id_map))
#    e[id_map[node]] = 1
#    r = e[:]
#    e = sp.csc_matrix(e).T
#    r = sp.csc_matrix(r).T
#    for i in range(iterations):
#        r = alpha*np.dot(M,r) + (1-alpha)*e
#    # r.shape : (431930, 1)
#    nonzero_idx = r.nonzero()[0] # node row index
#    nonzero_values = r[nonzero_idx].toarray() # node probs
#    ids = [rev_id_map[str(x)] for x in nonzero_idx] # node names
#    value_df = pd.DataFrame(nonzero_values,columns=['prob'])
#    id_df = pd.DataFrame(ids,columns=['id'])
#    df = pd.concat([id_df,value_df],axis=1).sort_values(by='prob',ascending=False)
#    return list(df.id)[:num]
#
#
#def random_walks(node,num,iterations=3,alpha=0.9): # 实验发现，alpha越趋近于0（不能等于0），拓展的范围就越趋近于直接寻找friends
#    # node is str type
#    e = np.zeros(len(id_map))
#    e[id_map[node]] = 1
#    r = e[:]
#    e = sp.csc_matrix(e).T
#    r = sp.csc_matrix(r).T
#    for i in range(iterations):
#        r = alpha*np.dot(M,r) + (1-alpha)*e
#    # r.shape : (431930, 1)
#    nonzero_idx = r.nonzero()[0] # node row index
#    nonzero_values = r[nonzero_idx].toarray() # node probs
#    ids = nodes[nonzero_idx] # node names
#    value_df = pd.DataFrame(nonzero_values,columns=['prob'])
#    id_df = pd.DataFrame(ids,columns=['id'])
#    df = pd.concat([id_df,value_df],axis=1).sort_values(by='prob',ascending=False)
#    return list(df.id)[:num]
#
#import time
#t1 = time.time()
#l1 = random_walks_old('256',1000)
#l1 = [int(x) for x in l1]
#print(time.time()-t1,'s')
#t2 = time.time()
#l2 = random_walks('256',1000)
#l2 = [x for x in l2]
#print(time.time()-t2,'s')





