import networkx as nx
import numpy as np
import os
from scipy import stats
from tqdm import tqdm
import torch
import random 
import shelve


def metrics_spearmanr_rho(true, predication):
    """Spearman's rank correlation coefficient (rho) between truth and prediction."""
    assert true.shape == predication.shape
    correlation, _p_value = stats.spearmanr(true, predication)
    return correlation


def metrics_kendall_tau(true, predication):
    """Kendall's rank correlation coefficient (tau) between truth and prediction."""
    assert true.shape == predication.shape
    correlation, _p_value = stats.kendalltau(true, predication)
    return correlation


def metrics_mean_square_error(true, predication):
    """Mean squared error between ground truth and prediction arrays."""
    assert true.shape == predication.shape
    diff = true - predication
    return np.mean(diff ** 2)


def create_dir_if_not_exists(directory):
    """Create *directory* (including parents) if it does not already exist.

    :param directory: path of the directory to create
    :return: a human-readable status string describing what happened

    Uses EAFP (try/except FileExistsError) instead of the original
    exists-then-create check, which had a time-of-check/time-of-use race
    when two processes created the directory concurrently.
    """
    try:
        os.makedirs(directory)
        return 'Make dirs of # {} '.format(directory)
    except FileExistsError:
        return "the dirs already exist! Cannot be created"


# Write a log entry to the specified file location
def write_log_file(file_name_path, log_str, print_flag=True):
    """Append *log_str* plus a newline to the log file at *file_name_path*.

    :param file_name_path: path of the log file (created if missing)
    :param log_str: text to log; None is written as the literal 'None'
    :param print_flag: when True, also echo log_str to stdout
    """
    if print_flag:
        print(log_str)
    if log_str is None:
        log_str = 'None'
    # Mode 'a' creates the file when it does not exist, so the original
    # isfile check and its duplicated 'a+'/'w+' branches were redundant.
    with open(file_name_path, 'a') as log_file:
        log_file.write(log_str + '\n')


def save_data_by_shelve(path, fileName, saveName, data):
    """Persist *data* under key *saveName* in a shelve DB *fileName* inside *path*.

    :param path: directory holding the shelve database files
    :param fileName: base name of the shelve database
    :param saveName: key under which the data is stored
    :param data: any picklable object

    The context manager guarantees the shelf is flushed and closed even if
    the write raises; the original leaked the handle on error.
    """
    with shelve.open(os.path.join(path, fileName)) as db:
        db[saveName] = data

def print_args(args, file_path):
    """Dump every attribute of *args* to *file_path* as aligned 'key : value' lines."""
    width = max(len(key) for key in args.__dict__) + 1
    with open(file_path, 'w') as out:
        for key, value in args.__dict__.items():
            out.write(key.ljust(width) + ': ' + str(value) + '\n')


def read_all_gexf_graphs(dir):
    """
    read all the files with .gexf to networkx graph
    :param dir: directory to scan (non-recursive)
    :return: list of graphs, in os.listdir order
    """
    return [
        nx.readwrite.gexf.read_gexf(os.path.join(dir, fname))
        for fname in os.listdir(dir)
        if fname.rsplit('.')[-1] == 'gexf'
    ]


class graph(object):
    """Lightweight directed-graph container: per-node feature lists plus
    successor/predecessor adjacency lists, all index-aligned by node id."""

    def __init__(self, node_num=0, label=None, name=None, prefix_name_label=None):
        self.node_num = node_num
        self.label = label
        self.name = name
        self.prefix_name_label = prefix_name_label
        # One empty slot per node (range() is empty for node_num <= 0).
        self.features = [[] for _ in range(node_num)]  # node feature matrix
        self.succs = [[] for _ in range(node_num)]
        self.preds = [[] for _ in range(node_num)]

    def add_edge(self, u, v):
        """Record a directed edge u -> v in both adjacency lists."""
        self.succs[u].append(v)
        self.preds[v].append(u)

def load_save_train_index(path, a):
    """Look up a cached training-pair shelf named 'trainData_random{a}' in *path*.

    Detects an existing shelf by scanning for the backend's '.dir'
    companion file (NOTE(review): some dbm backends do not create a
    '.dir' file, in which case the cache is never found — verify on the
    target platform).

    :param path: directory that holds the shelve database files
    :param a: random-seed suffix used when the cache was written
    :return: (True, cached data) on a hit, (False, []) otherwise
    """
    for entry in os.listdir(path):
        stem, _, ext = entry.partition(".")
        # Skip anything without a '.dir' suffix; this also guards against
        # names with no dot at all, which crashed the original split()[1].
        if ext != "dir":
            continue
        if stem == "trainData_random{}".format(a):
            # Close the shelf deterministically instead of leaking it.
            with shelve.open(os.path.join(path, stem)) as db:
                return True, db["trainData"]
    return False, []

#------------------------------------------------------------------------------
# A graph is a pair [a, b]: a is n*dim (n = node count, dim = feature length);
# b is the n*n adjacency matrix.
def generate_epoch_pair(graphs, classes, batch, train=False, save_id_path="False"):
    """Build one epoch of positive/negative graph-pair batches.

    :param graphs: list of [features, adjacency] pairs (indexed by graph id)
    :param classes: list of graph-id lists, one per class
    :param batch: batch size (number of anchor graphs per batch)
    :param train: when True, the epoch is cached (and reloaded) via shelve
        under a per-seed name to speed up later runs
    :param save_id_path: any value other than the string "False" also
        records the sampled pair indices under this name prefix
    :return: list of (input1, input2, adj1, adj2, y) batch tuples
    """
    pairIndexPath = "/home/chenyongwei/project/malSim/data/functionSim/pair_index"
    a = 0
    if train:
        # Seed numpy so the sampled pairs are reproducible for this run.
        a = random.randint(1, 20)
        np.random.seed(a)
        print("训练的随机数种子为{}".format(a))
        is_save, data = load_save_train_index(pairIndexPath, a)
        if is_save:
            return data

    mydict = {}
    lableDict = {}
    # Map each graph id to its class index.
    for i in range(len(graphs)):
        for j in range(len(classes)):
            if i in classes[j]:
                lableDict[i] = j
    lthDict = {}
    # Map each graph id to its node count (rows of the adjacency matrix).
    for i in range(len(graphs)):
        lthDict[i] = len(graphs[i][1])
    mydict["lable"] = lableDict
    mydict["lth"] = lthDict
    epoch_data = []
    ids = []
    st = 0
    while st < len(graphs):
        if save_id_path != "False":
            res = get_pair1(graphs, classes, batch, mydict, st=st, save_id=True)
            input1, input2, adj1, adj2, y, id = res
            ids.append(id)
        else:
            # BUG FIX: the original line ended with a trailing comma, which
            # wrapped the returned 5-tuple in a 1-tuple and made this
            # unpacking raise ValueError at runtime.
            input1, input2, adj1, adj2, y = get_pair1(graphs, classes, batch, mydict, st=st, save_id=False)
        epoch_data.append((input1, input2, adj1, adj2, y))
        st += batch

    # Cache the training pairs to speed up later training runs.
    if train:
        with shelve.open(os.path.join(pairIndexPath, "trainData_random{}".format(a))) as file:
            file["trainData"] = epoch_data
    # Persist the sampled pair-index data (shelf now closed properly).
    if save_id_path != "False":
        with shelve.open(os.path.join(pairIndexPath, "{}_index_{}".format(save_id_path, a))) as file:
            file["ids"] = ids
    return epoch_data


# GPU memory was insufficient, so adjacency is handled as sparse edge indices.
# save_id result format: [(a, b), (c, d)]
def get_pair1(graphs, classes, batch, myDict, st=-1, save_id=False):
    """Sample one batch of positive/negative graph pairs starting at graph *st*.

    For each anchor graph id in [st, st+batch), one same-class partner
    (positive, y = 1) and one different-class partner (negative, y = -1)
    are drawn with numpy's global RNG.  Feature matrices are zero-padded
    to the batch-wide maximum node count on each side, and the adjacency
    matrices are converted to zero-padded sparse edge-index tensors.

    :param graphs: list of [feature_matrix, adjacency] pairs
        (NOTE(review): graphs[i][1] appears to be a torch sparse tensor —
        .to_dense() is called on it below; confirm against the loader)
    :param classes: list of graph-id lists, one per class
    :param batch: number of anchor graphs to pair up (clamped at the end)
    :param myDict: {"lable": graph_id -> class idx, "lth": graph_id -> node count}
    :param st: start index into *graphs*
    :param save_id: when True, additionally return the sampled pair ids and labels
    :return: (x1_input, x2_input, adj1, adj2, y_input[, id_inf]) where
        x*_input are (2*batch, max_nodes, feature_dim) float arrays,
        adj* are (2*batch, 2, max_edges) long tensors of edge indices,
        and y_input is a (2*batch,) array of +1/-1 labels.
    """
    len_class = len(classes)
    # Clamp the final, possibly partial, batch.
    if st + batch > len(graphs):
        batch = len(graphs) - st
    ed = st + batch

    pos_ids = []
    neg_ids = []  
    for g_id in range(st, ed):
        cls = myDict["lable"][g_id]  # fam class index
        tot_g = len(classes[cls])
        # positive pair: another graph from the same class
        if tot_g >= 2:
            g1_id = classes[cls][np.random.randint(tot_g)]
            while g_id == g1_id:
                g1_id = classes[cls][np.random.randint(tot_g)]
            pos_ids.append((g_id, g1_id))
        else:
            pos_ids.append((g_id, g_id))# singleton class: pair the graph with itself
        # negative pair: a graph from a different, non-empty class
        cls2 = np.random.randint(len_class)
        while (len(classes[cls2]) == 0) or (cls2 == cls):
            cls2 = np.random.randint(len_class)
        tot_g2 = len(classes[cls2])
        g2_id = classes[cls2][np.random.randint(tot_g2)]
        neg_ids.append((g_id, g2_id))

    batch_pos = len(pos_ids)
    batch_neg = len(neg_ids)
    batch = batch_pos + batch_neg
    # ------------------------------------------------------------------
    # Unify matrix sizes: find the maximum node count on each pair side.
    max_num_1 = 0
    max_num_2 = 0
    for pair in pos_ids:   
        max_num_1 = max(max_num_1, myDict["lth"][pair[0]])
        max_num_2 = max(max_num_2, myDict["lth"][pair[1]])
    for pair in neg_ids:
        max_num_1 = max(max_num_1, myDict["lth"][pair[0]])
        max_num_2 = max(max_num_2, myDict["lth"][pair[1]])

    feature_dim = len(graphs[0][0][0])

    # Feature/adjacency buffers; this batch is positives plus negatives combined.
    x1_input = np.zeros((batch, max_num_1, feature_dim))
    x2_input = np.zeros((batch, max_num_2, feature_dim))
    adj1 = np.zeros((batch, max_num_1, max_num_1))
    adj2 = np.zeros((batch, max_num_2, max_num_2))
    y_input = np.zeros(batch)
    
    # Allocated but never filled or returned below.
    x1_labels = np.zeros(batch)
    x2_labels = np.zeros(batch)
    
    for i in range(batch_pos):
        y_input[i] = 1
        g1 = graphs[pos_ids[i][0]]
        g2 = graphs[pos_ids[i][1]]
        for j in range(myDict["lth"][pos_ids[i][0]]):
            x1_input[i,j]=np.array(g1[0][j])
        # Adjacency is stored sparse; densify it to copy into the padded buffer.
        temp = g1[1].to_dense().numpy()
        adj1[i,0:len(temp),0:len(temp[0])]=temp

        for j in range(myDict["lth"][pos_ids[i][1]]):
            x2_input[i,j]=np.array(g2[0][j])
        temp = g2[1].to_dense().numpy()
        adj2[i,0:len(temp),0:len(temp[0])]=temp

    for i in range(batch_pos, batch_pos + batch_neg):
        y_input[i] = -1
        g1 = graphs[neg_ids[i - batch_pos][0]]
        g2 = graphs[neg_ids[i - batch_pos][1]]

        for j in range(myDict["lth"][neg_ids[i - batch_pos][0]]):
            x1_input[i,j]=np.array(g1[0][j])
        temp = g1[1].to_dense().numpy()
        adj1[i,0:len(temp),0:len(temp[0])]=temp
        for j in range(myDict["lth"][neg_ids[i - batch_pos][1]]):
            x2_input[i,j]=np.array(g2[0][j])
        temp = g2[1].to_dense().numpy()
        adj2[i,0:len(temp),0:len(temp[0])]=temp


    # Convert the padded dense adjacencies back to sparse edge-index form
    # (to keep GPU memory low), zero-padding every edge-index list to a
    # common length per side so they can be stacked into one tensor.
    # NOTE(review): padding with (0, 0) entries injects spurious edges at
    # node 0 — confirm that downstream code masks or tolerates them.
    res1,res2=[],[]
    lth1,lth2=0,0
    tempData=[]
    # Find the largest edge count across the batch (side 1).
    for i in range(len(adj1)):
        temp=torch.FloatTensor(adj1[i]).to_sparse().indices().numpy()
        size=len(temp[0])
        if size>lth1:
            lth1=size
        tempData.append(temp)
    for i in range(len(adj1)):
        a=torch.zeros(2,lth1).numpy()
        a[:,:len(tempData[i][0])]=tempData[i]
        res1.append(a)  


    tempData=[]
    # Same procedure for side 2.
    for i in range(len(adj2)):
        temp=torch.FloatTensor(adj2[i]).to_sparse().indices().numpy()
        size=len(temp[0])
        if size>lth2:
            lth2=size
        tempData.append(temp)
    for i in range(len(adj2)):
        a=torch.zeros(2,lth2).numpy()
        a[:,:len(tempData[i][0])]=tempData[i]
        res2.append(a)  

    # Now uniform in shape, so the lists can be stacked into single ndarrays.
    res1=np.array(res1)
    res2=np.array(res2)
    adj1=torch.FloatTensor(res1).long()
    adj2=torch.FloatTensor(res2).long()

    id_inf={}
    id_inf["y"]=y_input
    id_inf["ids"]=pos_ids+neg_ids
    if save_id:
        return (x1_input, x2_input, adj1, adj2, y_input, id_inf)
    return x1_input, x2_input, adj1, adj2, y_input

