import os
import math
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
import datetime
import dgl
import errno
import numpy as np
import os
import random
from scipy import sparse
import pandas as pd
#GAN
def set_random_seed(seed=0):
    """Seed every random number generator used in this project.

    Parameters
    ----------
    seed : int
        Random seed to use (default 0).
    """
    # Seed Python's, NumPy's and PyTorch's CPU generators alike.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    # Also seed the current CUDA device when a GPU is present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)


def mkdir_p(path, log=True):
    """Create a directory for the specified path, tolerating existing dirs.

    Parameters
    ----------
    path : str
        Path name.
    log : bool
        Whether to print the result of the directory creation.

    Raises
    ------
    OSError
        If creation fails for any reason other than the directory
        already existing.
    """
    try:
        os.makedirs(path)
        if log:
            print('Created directory {}'.format(path))
    except OSError as exc:
        # BUG FIX: previously `log` was part of this condition, so calling
        # mkdir_p(path, log=False) on an existing directory re-raised the
        # EEXIST error. `log` must gate only the print, not the tolerance.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            if log:
                print('Directory {} already exists.'.format(path))
        else:
            raise


def get_date_postfix():
    """Get a date-based postfix for a directory name.

    Returns
    -------
    str
        Timestamp formatted as 'YYYY-MM-DD_HH-MM-SS'.
    """
    # strftime produces the same zero-padded layout as the original
    # '{date}_{hour:02d}-{minute:02d}-{second:02d}' format call.
    return datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')


def setup_log_dir(args, sampling=False):
    """Name and create the directory used for logging this run.

    Parameters
    ----------
    args : dict
        Configuration; must provide 'log_dir' and 'dataset'.
    sampling : bool
        Whether sampling-based training is used; appends a suffix.

    Returns
    -------
    str
        Path of the (created) logging directory.
    """
    run_name = '{}_{}'.format(args['dataset'], get_date_postfix())
    if sampling:
        run_name += '_sampling'
    log_dir = os.path.join(args['log_dir'], run_name)
    mkdir_p(log_dir)
    return log_dir


# The configuration below is from the paper.
default_configure = {
    'lr': 0.001,  # Learning rate
    'num_heads': [8],  # Number of attention heads for node-level attention
    'hidden_units': 8,  # Hidden units per attention head
    'dropout': 0.4,  # Dropout probability
    'weight_decay': 0.001,  # L2 regularization coefficient
    'num_epochs': 1000,  # Training epochs
    'k_cv': 5,  # Number of cross-validation folds
    'sample_times': 1,  # Repetitions of negative sampling / training
    'in_size': 512,  # Input feature dimension
    'out_size': 128,  # Output embedding dimension
    'W_size': 256,  # Intermediate projection dimension
    'Gat_layers': 1,  # Number of GAT layers
    'alpha': 0.6,  # Loss-balancing weight — presumably; confirm against the training loop
}

# Extra settings used only for sampling-based (mini-batch) training.
sampling_configure = {
    'batch_size': 2048
}


def setup(args):
    """Merge default hyper-parameters into ``args`` and prepare a run.

    Parameters
    ----------
    args : dict
        Command-line configuration; must contain 'seed' and 'data'.
        Updated in place with the defaults, device and log directory.

    Returns
    -------
    dict
        The same dict, augmented.
    """
    args.update(default_configure)
    set_random_seed(args['seed'])
    if args['data'] == 'MAN':
        args['dataset'] = 'MAN'
    # BUG FIX: the device was hard-coded to 'cuda:0', which crashes on
    # CPU-only machines; fall back to CPU when no GPU is available.
    args['device'] = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    args['log_dir'] = setup_log_dir(args)
    return args


def load_data(network_path, r):
    """Load the MAN drug-target dataset and build its heterographs.

    Parameters
    ----------
    network_path : str
        Dataset root directory; only './dataset/MAN/' is supported.
    r : str
        Negative-to-positive sampling ratio: 'ten', 'one' or 'all'.

    Returns
    -------
    tuple
        (data_set, graph, num_drug, num_protein) where ``data_set`` is an
        (n, 3) int array of [drug_idx, protein_idx, label] rows, ``graph``
        is [drug_graph, protein_graph] (DGL heterographs), and the counts
        are taken from the DTI matrix shape.

    Raises
    ------
    ValueError
        If ``network_path`` or ``r`` is not one of the supported values.
    """
    # BUG FIX: an unsupported path previously fell through and crashed
    # later with a NameError; fail fast with a clear message instead.
    if network_path != './dataset/MAN/':
        raise ValueError('unsupported network_path: {}'.format(network_path))

    drug_drug = pd.read_csv(network_path + 'ddi.csv')
    protein_protein = pd.read_csv(network_path + 'ppi.csv')
    # The DTI matrix was previously loaded twice (into two unused aliases);
    # one load suffices.
    dti_o = np.loadtxt(network_path + 'dti_matrix.txt')
    edges = pd.read_csv(network_path + 'edges.csv')

    # Drug-centric heterograph: drug-drug similarity plus DTI edges.
    dg = dgl.heterograph({
        ('drug', 'similarity', 'drug'): (drug_drug['Drug0'], drug_drug['Drug1']),
        ('drug', 'dp', 'protein'): (edges['Drug'], edges['Target']),
        ('protein', 'pd', 'drug'): (edges['Target'], edges['Drug']),
    })
    # Protein-centric heterograph: protein-protein similarity plus DTI edges.
    pg = dgl.heterograph({
        ('protein', 'similarity', 'protein'): (protein_protein['Protein0'], protein_protein['Protein1']),
        ('protein', 'pd', 'drug'): (edges['Target'], edges['Drug']),
        ('drug', 'dp', 'protein'): (edges['Drug'], edges['Target']),
    })
    graph = [dg, pg]
    num_drug = dti_o.shape[0]
    num_protein = dti_o.shape[1]

    # Collect known interactions (1) and known non-interactions (0),
    # in row-major order; any other value is skipped.
    whole_positive_index = []
    whole_negative_index = []
    for i in range(dti_o.shape[0]):
        for j in range(dti_o.shape[1]):
            if int(dti_o[i][j]) == 1:
                whole_positive_index.append([i, j])
            elif int(dti_o[i][j]) == 0:
                whole_negative_index.append([i, j])

    if r == 'ten':
        neg_sample_size = 10 * len(whole_positive_index)
    elif r == 'one':
        neg_sample_size = len(whole_positive_index)
    elif r == 'all':
        neg_sample_size = len(whole_negative_index)
    else:
        # BUG FIX: previously only printed a warning and then crashed with
        # a NameError on negative_sample_index.
        raise ValueError('wrong positive negative ratio: {}'.format(r))
    negative_sample_index = np.random.choice(
        np.arange(len(whole_negative_index)), size=neg_sample_size, replace=False)

    # Assemble [drug, protein, label] rows: positives first, then the
    # sampled negatives.
    data_set = np.zeros((len(negative_sample_index) + len(whole_positive_index), 3), dtype=int)
    count = 0
    for i in whole_positive_index:
        data_set[count][0] = i[0]
        data_set[count][1] = i[1]
        data_set[count][2] = 1
        count += 1
    for i in negative_sample_index:
        data_set[count][0] = whole_negative_index[i][0]
        data_set[count][1] = whole_negative_index[i][1]
        data_set[count][2] = 0
        count += 1

    # BUG FIX: this message compared against 'dataset/MAN/' (no leading
    # './'), so it could never print; the path is validated above.
    print('MAN dataset loaded')

    return data_set, graph, num_drug, num_protein

def get_metrics(real_score, predict_score):
    """Compute AUPR, AUC and threshold-based metrics for binary predictions.

    Sweeps 999 score quantiles as decision thresholds, builds the ROC and
    precision-recall curves from the resulting confusion counts, and picks
    the single threshold maximising |TPR - FPR| (Youden-style) for the
    point metrics.

    Parameters
    ----------
    real_score : array-like
        Ground-truth binary labels; assumed to behave as a 1-row matrix
        (e.g. np.mat) — TODO confirm against callers.
    predict_score : array-like
        Predicted scores, same length/orientation as ``real_score``.

    Returns
    -------
    list
        [aupr, auc, f1_score, accuracy, recall, precision].
    """
    sorted_predict_score = np.array(
        sorted(list(set(np.array(predict_score).flatten())))) # .flatten() makes it 1-D; sorted() returns an ascending list
    sorted_predict_score_num = len(sorted_predict_score)
    # 999 thresholds taken at the 1/1000 ... 999/1000 quantiles of the
    # unique score values.
    thresholds = sorted_predict_score[np.int32(
        sorted_predict_score_num*np.arange(1, 1000)/1000)]
    thresholds = np.mat(thresholds)
    thresholds_num = thresholds.shape[1]

    # One copy of the score vector per threshold; binarize each row
    # against its own threshold (>= threshold -> 1, else 0).
    predict_score_matrix = np.tile(predict_score, (thresholds_num, 1))
    negative_index = np.where(predict_score_matrix < thresholds.T)
    positive_index = np.where(predict_score_matrix >= thresholds.T)
    predict_score_matrix[negative_index] = 0
    predict_score_matrix[positive_index] = 1
    # Confusion counts per threshold (each a thresholds_num x 1 column).
    TP = predict_score_matrix.dot(real_score.T)
    FP = predict_score_matrix.sum(axis=1)-TP
    FN = real_score.sum()-TP
    TN = len(real_score.T)-TP-FP-FN

    # ROC curve: sort (fpr, tpr) pairs, pin the endpoints to (0,0) and
    # (1,1), then integrate with the trapezoidal rule.
    fpr = FP/(FP+TN)
    tpr = TP/(TP+FN)
    ROC_dot_matrix = np.mat(sorted(np.column_stack((fpr, tpr)).tolist())).T
    ROC_dot_matrix.T[0] = [0, 0]
    ROC_dot_matrix = np.c_[ROC_dot_matrix, [1, 1]]
    x_ROC = ROC_dot_matrix[0].T
    y_ROC = ROC_dot_matrix[1].T
    auc = 0.5*(x_ROC[1:]-x_ROC[:-1]).T*(y_ROC[:-1]+y_ROC[1:])

    # PR curve: same construction with endpoints (0,1) and (1,0).
    recall_list = tpr
    precision_list = TP/(TP+FP)
    PR_dot_matrix = np.mat(sorted(np.column_stack(
        (recall_list, precision_list)).tolist())).T
    PR_dot_matrix.T[0] = [0, 1]
    PR_dot_matrix = np.c_[PR_dot_matrix, [1, 0]]
    x_PR = PR_dot_matrix[0].T
    y_PR = PR_dot_matrix[1].T
    aupr = 0.5*(x_PR[1:]-x_PR[:-1]).T*(y_PR[:-1]+y_PR[1:])

    # Point metrics at the threshold maximising |TPR - FPR|.
    f1_score_list = 2*TP/(len(real_score.T)+TP-TN)
    accuracy_list = (TP+TN)/len(real_score.T)
    specificity_list = TN/(TN+FP)
    max_index = np.argmax((abs(tpr - fpr)))
    # max_index = np.argmax(f1_score_list)
    f1_score = f1_score_list[max_index]
    accuracy = accuracy_list[max_index]
    specificity = specificity_list[max_index]
    recall = recall_list[max_index]
    precision = precision_list[max_index]
    return [aupr[0, 0], auc[0, 0], f1_score, accuracy, recall, precision]

