import json
import numpy as np
import torch
import os
from tqdm import tqdm
from multiprocessing import Pool
import torch.nn as nn

def multiprocess(func,li,pool_size=16):
    """Map ``func`` over ``li`` with a process pool; results keep input order.

    Args:
        func: picklable callable applied to each element.
        li: iterable of inputs.
        pool_size: number of worker processes.
    """
    pool = Pool(pool_size)
    results = pool.map(func, li)
    # No more tasks; wait for the workers to finish and exit.
    pool.close()
    pool.join()
    return results


# Linearly rescale an array's values into [0, 1] (min-max normalization).
def norm(array):
    """Return ``array`` min-max normalized; a constant array is returned
    unchanged, since rescaling it would divide by zero."""
    lo = np.min(array)
    hi = np.max(array)
    if hi == lo:
        return array
    return (array - lo) / (hi - lo)

# Read every *.json file under root_path (JSON-lines format: one dict per line).
def load_json(root_path="/home/yujia_zhou/long_doc/anchors-master/data/train_2048/"):
    """Load all JSON-lines files in a directory.

    Args:
        root_path: directory containing ``*.json`` files.

    Returns:
        papers: every parsed line-dict, concatenated across files in sorted
            filename order.
        length_list: per-file ``[start, end)`` index ranges into ``papers``.
    """
    file_names = sorted(os.listdir(root_path))
    papers, length_list = [], []
    start = 0
    print("--------------------下面开始读取文件---------------------")
    for name in file_names:
        if name[-4:] != "json":
            continue

        # `with` guarantees the handle is closed (the original leaked it);
        # os.path.join replaces fragile string concatenation.
        with open(os.path.join(root_path, name), 'r', encoding='utf-8') as file:
            cnt = 0
            for line in tqdm(file):
                papers.append(json.loads(line))
                cnt += 1
        length_list.append([start, start + cnt])
        start += cnt

    print("-------------------------------------------------------")

    return papers, length_list

def change_num_edges(w,k):
    """Rescale probability matrix ``w`` so the expected edge count is k*n*n.

    Every entry is multiplied by ``u = k * n^2 / sum(w)`` and clipped to 1.0
    (probabilities cannot exceed 1).  Returns a new array; ``w`` is untouched.

    NOTE(review): a zero-sum ``w`` makes ``u`` divide by zero — callers appear
    to guarantee positive mass (post-``norm`` weights); confirm upstream.
    """
    n = w.shape[0]
    u = k * (n * n) / np.sum(w)
    # Vectorized replacement of the original O(n^2) Python double loop.
    return np.minimum(w * u, 1.0)

# k is the target sparsity of the graph.
def cal_probability(k,weight_list):
    """Build a symmetric edge-probability matrix from per-node weights.

    Entry (i, j) is weight_i * weight_j, min-max normalized, then rescaled
    so the expected number of edges matches sparsity ``k``.
    """
    weights = np.asarray(weight_list, dtype=float)
    # Outer product yields exactly w[i][j] = w[j][i] = weight_i * weight_j.
    w = np.outer(weights, weights)
    # Min-max normalization.
    w = norm(w)
    # Control sparsity via the expected edge count.
    return change_num_edges(w, k)


def calLaplacianMatrix(adjacentMatrix):
    """Return the symmetrically normalized Laplacian D^(-1/2) (D - A) D^(-1/2).

    Args:
        adjacentMatrix: square (n, n) adjacency/weight matrix.

    Isolated nodes (degree 0) keep a 0 on the normalization diagonal instead
    of 1/sqrt(0).

    Fix: the inverse square roots are computed into a separate float array.
    The original wrote 1/sqrt(degree) back into the degree array itself, so an
    integer adjacency matrix truncated every factor to 0 and the function
    returned an all-zero matrix.
    """
    # Degree of each node: D = sum over rows of A.
    degrees = np.sum(adjacentMatrix, axis=1)

    # Unnormalized Laplacian L = D - A.
    laplacian = np.diag(degrees) - adjacentMatrix

    # D^(-1/2) as a float vector; zero-degree entries stay 0.
    inv_sqrt = np.zeros(len(degrees), dtype=float)
    nonzero = degrees != 0
    inv_sqrt[nonzero] = 1.0 / np.sqrt(degrees[nonzero])

    d_half = np.diag(inv_sqrt)
    return np.dot(np.dot(d_half, laplacian), d_half)

# O(n) preprocessing for the alias method.
def create_alias_table(area_ratio):
    """Build accept/alias tables for Vose's alias sampling method.

    Args:
        area_ratio: probabilities pre-scaled to average 1 (i.e. prob_i * n).
            Fix: no longer mutated — the original destructively updated the
            caller's list while balancing the columns.

    Returns:
        (accept, alias): lists consumed by ``alias_sample``.
    """
    area_ratio = list(area_ratio)  # work on a copy; keep the caller's list intact
    l = len(area_ratio)
    accept, alias = [0] * l, [0] * l
    small, large = [], []

    # Partition columns by whether they under- or over-fill their slot.
    for i, prob in enumerate(area_ratio):
        if prob < 1.0:
            small.append(i)
        else:
            large.append(i)

    # Pair each under-full column with an over-full one; the donor's leftover
    # mass decides which bucket it re-enters.
    while small and large:
        small_idx, large_idx = small.pop(), large.pop()
        accept[small_idx] = area_ratio[small_idx]
        alias[small_idx] = large_idx
        area_ratio[large_idx] = area_ratio[large_idx] - (1 - area_ratio[small_idx])
        if area_ratio[large_idx] < 1.0:
            small.append(large_idx)
        else:
            large.append(large_idx)

    # Whatever remains (in either bucket, due to float rounding) is full.
    while large:
        large_idx = large.pop()
        accept[large_idx] = 1
    while small:
        small_idx = small.pop()
        accept[small_idx] = 1

    return accept,alias

# O(1) sampling via the alias method.
def alias_sample(accept, alias):
    """Draw one index from the distribution encoded by (accept, alias)."""
    n = len(accept)
    # Pick a column uniformly, then flip a biased coin to keep it or
    # fall through to its alias.
    column = int(np.random.random() * n)
    coin = np.random.random()
    return column if coin < accept[column] else alias[column]

def load_static_weight(length=2048):
    """Static positional weights: 1 / (i // 10 + 1)^2 for positions 0..length-1.

    Positions are bucketed in groups of 10; the weight decays quadratically
    with the bucket index (positions 0-9 -> 1.0, 10-19 -> 0.25, ...).

    Args:
        length: number of positions; the default 2048 preserves the original
            hard-coded size.
    """
    # Vectorized replacement for the original per-index Python loop.
    buckets = np.arange(length) // 10 + 1
    return 1.0 / buckets.astype(float) ** 2

# Compute the cosine similarity of `target` against every row of `behaviored`;
# return all cosine weights (zero-padded to a fixed length) and the top-k indices.
def get_att_dis(target, behaviored,top_k,padding):
    """Per-row cosine similarity with fixed-length outputs for batching.

    Args:
        target: tensor compared against each row of `behaviored` — assumes it
            matches the row dimension; TODO confirm shapes at the call site.
        behaviored: 2-D tensor of candidate vectors, one per row.
        top_k: number of highest-similarity row indices to return.
        padding: fixed length the similarity vector is padded out to.

    Returns:
        (attention_distribution, indices): padded similarity tensor and a
        length-top_k index tensor (zero-filled when there are fewer rows).
    """
    attention_distribution = []
 
    for i in range(behaviored.size(0)):
        attention_score = torch.cosine_similarity(target, behaviored[i],dim=0)  # cosine similarity of this row with the target
        attention_distribution.append(attention_score)
    attention_distribution = torch.Tensor(attention_distribution)

    if len(behaviored)>top_k:
        values, indices=attention_distribution.topk(top_k,largest=True,sorted=True)
    else:
        # Fewer rows than top_k: keep every index and pad with 0 so the
        # returned index list always has length top_k.
        indices=[i for i in range(len(behaviored))]
        for i in range(len(indices),top_k):
            indices.append(0)


    delta=padding-attention_distribution.shape[0]
    # All batch elements must share one length, so zero-pad the similarity
    # vector out to `padding`.
    # NOTE(review): ZeroPad2d with a 2-tuple delegates to F.pad on the last
    # dim, which also accepts 1-D input; a negative delta (more rows than
    # `padding`) would truncate — confirm callers guarantee rows <= padding.
    pad = nn.ZeroPad2d(padding=(0, delta))  
    attention_distribution=pad(attention_distribution)

    indices=torch.tensor(np.array(indices))

    return attention_distribution,indices


def get_path(args):
    """Build (and create) the results directory for this run.

    Layout: ./results/<mod>/s1_<sparsity1>_s2_<sparsity2>/strategy_<strategy>

    Args:
        args: namespace with ``mod``, ``sparsity1``, ``sparsity2``,
            ``strategy`` attributes.

    Returns:
        The strategy-level directory path, created if missing.
    """
    root_path="./results/"+args.mod
    root_path=os.path.join(root_path,"s1_"+str(args.sparsity1)+"_s2_"+str(args.sparsity2))

    # makedirs creates missing parents (./results, ./results/<mod>) and
    # exist_ok avoids the exists()/mkdir race — the original os.mkdir failed
    # on both counts.
    os.makedirs(root_path, exist_ok=True)

    path=os.path.join(root_path,"strategy_"+args.strategy)
    os.makedirs(path, exist_ok=True)

    return path
    
def load_json_file(root_path="/home/yujia_zhou/long_doc/anchors-master/data/train_2048/000.group.json", max_lines=11):
    """Read up to ``max_lines`` JSON objects from a single JSON-lines file.

    Args:
        root_path: path to a file with one JSON dict per line.
        max_lines: stop after this many lines; the default 11 preserves the
            original hard-coded ``cnt > 10`` cap.

    Returns:
        List of parsed dicts.
    """
    print("--------------------下面开始读取文件---------------------")
    papers=[]
    # `with` guarantees the handle is closed (the original leaked it).
    with open(root_path, 'r', encoding='utf-8') as file:
        for line in tqdm(file):
            papers.append(json.loads(line))
            if len(papers) >= max_lines:
                break

    print("-------------------------------------------------------")

    return papers

if __name__=="__main__":
    # Fix: the original called load_json(10), passing an int as root_path;
    # os.listdir would interpret 10 as a file descriptor and fail. Use the
    # function's default data directory instead.
    load_json()