import pandas as pd
import torch
from utils import load_data,get_metrics
import torch.nn as nn

from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.metrics import average_precision_score,precision_recall_curve
import numpy as np
import dgl
from Student_model import HAN_DTI
import torch.nn.functional as F

# Module-level dataset tables for the MAN benchmark, loaded at import time.
# NOTE(review): neither `my_drug` nor `final_targets` appears to be read
# anywhere else in this file — confirm whether these loads are still needed.
my_drug = pd.read_csv('./dataset/MAN/my_drug.csv')
final_targets = pd.read_csv('./dataset/MAN/final_targets.tsv', sep='\t')

class PKT(nn.Module):
    """Probabilistic Knowledge Transfer for deep representation learning.

    Matches the pairwise cosine-similarity distributions of two feature
    sets (student vs. teacher) with a KL divergence.
    Code from author: https://github.com/passalis/probabilistic_kt
    """

    def __init__(self):
        super(PKT, self).__init__()

    def forward(self, f_s, f_t):
        """Return the PKT loss between student features f_s and teacher features f_t."""
        return self.cosine_similarity_loss(f_s, f_t)

    @staticmethod
    def cosine_similarity_loss(output_net, target_net, eps=0.0000001):
        def _unit_rows(feats):
            # Divide each row by its L2 norm (eps guards against division by
            # zero), then zero any NaN entries that slipped through.
            row_norms = torch.sqrt(torch.sum(feats ** 2, dim=1, keepdim=True))
            feats = feats / (row_norms + eps)
            feats[feats != feats] = 0
            return feats

        output_net = _unit_rows(output_net)
        target_net = _unit_rows(target_net)

        # Pairwise cosine similarities, rescaled from [-1, 1] into [0, 1].
        sim_student = (torch.mm(output_net, output_net.t()) + 1.0) / 2.0
        sim_teacher = (torch.mm(target_net, target_net.t()) + 1.0) / 2.0

        # Row-normalize so every row forms a probability distribution.
        sim_student = sim_student / torch.sum(sim_student, dim=1, keepdim=True)
        sim_teacher = sim_teacher / torch.sum(sim_teacher, dim=1, keepdim=True)

        # KL divergence between teacher and student similarity distributions.
        return torch.sum(sim_teacher * torch.log((sim_teacher + eps) / (sim_student + eps)))

class DistillKL(nn.Module):
    """Distilling the Knowledge in a Neural Network.

    Softens student and teacher logits with temperature T and penalizes
    their KL divergence, scaled by T**2 and averaged over the batch.
    """

    def __init__(self, T):
        super(DistillKL, self).__init__()
        self.T = T  # softmax temperature

    def forward(self, y_s, y_t):
        """KL(teacher || student) on temperature-softened row distributions."""
        # F.kl_div expects log-probabilities for the input and probabilities
        # for the target.
        log_p_student = F.log_softmax(y_s / self.T, dim=1)
        p_teacher = F.softmax(y_t / self.T, dim=1)
        batch_size = y_s.shape[0]
        # The T**2 factor keeps gradient magnitudes comparable across temperatures.
        return F.kl_div(log_p_student, p_teacher, reduction='sum') * (self.T ** 2) / batch_size


class teacher_Rloss(nn.Module):
    """Representation-level distillation loss.

    Projects the 128-d student drug/protein embeddings up to the teacher's
    hidden size and matches their pairwise-similarity distributions against
    the teacher embeddings with PKT.

    Parameters
    ----------
    hidden_dim : teacher embedding dimensionality (default 1000).
    device : device to place the projection on; defaults to the module-level
        args['device'] for backward compatibility.

    NOTE(review): the projection weights are freshly initialized with this
    module; they are only trained if this module is created before the
    optimizer and its parameters are registered with it.
    """

    def __init__(self, hidden_dim=1000, device=None):
        super(teacher_Rloss, self).__init__()
        if device is None:
            # Fall back to the script-level config, as the original did.
            device = args['device']
        self.PKT = PKT()
        # Student (128-d) -> teacher (hidden_dim) projection.
        # (Removed: the original also built a hidden_dim->128 `proj` network
        # that was initialized but never used in forward — dead parameters.)
        self.proj_s = nn.Sequential(
            nn.Linear(128, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        ).to(device)
        for layer in self.proj_s:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_normal_(layer.weight, gain=1.414)

    def forward(self, hd, hp, t_hd, t_hp):
        """Average the PKT losses of projected drug (hd) and protein (hp)
        embeddings against the teacher embeddings t_hd / t_hp."""
        hd = self.proj_s(hd)
        hp = self.proj_s(hp)
        loss_d = self.PKT(hd, t_hd)
        loss_p = self.PKT(hp, t_hp)
        return 0.5 * loss_d + 0.5 * loss_p


class teacher_Lloss(nn.Module):
    """Logit-level distillation loss.

    Standardizes teacher and student logit matrices (z-score over all
    entries) and computes the temperature-softened KL divergence (T=2).
    """

    def __init__(self):
        super(teacher_Lloss, self).__init__()
        self.KL = DistillKL(T=2)

    def forward(self, teacher_logits, logits):
        def _standardize(x):
            # Z-score over the whole matrix puts both logit sets on a
            # comparable scale before softening.
            return (x - torch.mean(x)) / torch.std(x)

        student_z = _standardize(logits)
        teacher_z = _standardize(teacher_logits)
        return self.KL(student_z, teacher_z)

class DTI_PU_loss(nn.Module):
    """Positive-unlabeled reconstruction loss for the DTI matrix.

    Mixes the element-wise MSE over the known positive pairs (weighted
    (1 - alpha) / 2) and over the negative/unlabeled pairs (weighted
    alpha / 2).
    """

    def __init__(self):
        super(DTI_PU_loss, self).__init__()
        # Element-wise MSE; reduction is performed manually per index set.
        # (Hoisted here — the original rebuilt the module on every call and
        # also contained a no-op `alpha = alpha` statement.)
        self.mse = torch.nn.MSELoss(reduction='none')

    def forward(self, drug_protein_reconstruct, drug_protein, pos_x_index, pos_y_index, neg_x_index,
                neg_y_index, alpha):
        """Weighted PU loss.

        Parameters
        ----------
        drug_protein_reconstruct : predicted interaction matrix.
        drug_protein : ground-truth interaction matrix.
        pos_x_index, pos_y_index : row/column indices of positive pairs.
        neg_x_index, neg_y_index : row/column indices of negative pairs.
        alpha : mixing weight between the two index sets.
        """
        loss_mat = self.mse(drug_protein_reconstruct, drug_protein)
        pos_loss = loss_mat[pos_x_index, pos_y_index].mean()
        neg_loss = loss_mat[neg_x_index, neg_y_index].mean()
        return pos_loss * ((1 - alpha) / 2) + neg_loss * (alpha / 2)

def evaluate(model, g, features_d, features_p, best_aupr, DTItest, DTIvalid):
    """Score the model on the validation split and, when validation AUPR
    improves on `best_aupr`, also on the test split.

    Parameters
    ----------
    model : HAN_DTI model; called as model(g, features_d, features_p).
    g : graph(s) forwarded unchanged to the model.
    features_d, features_p : drug / protein node feature tensors.
    best_aupr : best validation AUPR seen so far; test metrics are computed
        only when the current validation AUPR is >= this value.
    DTItest, DTIvalid : tensors of (drug_idx, protein_idx, label) triples.

    Returns
    -------
    (valid_auc, valid_aupr, test_auc, test_aupr, fpr, tpr, precision, recall).
    When validation AUPR does not improve, test metrics are 0.0 and the
    curve arrays are empty (the original code left them unbound and raised
    NameError at the final print/return).
    """
    model.eval()
    pred_list = []
    ground_truth = []
    with torch.no_grad():
        d_x, p_x, logits = model(g, features_d, features_p)
        logits = logits.cpu().numpy()
        d_x = d_x.cpu().numpy()
        p_x = p_x.cpu().numpy()
    DTIvalid = DTIvalid.cpu().numpy()
    DTItest = DTItest.cpu().numpy()

    # Score every validation pair with the reconstructed interaction matrix.
    for ele in DTIvalid:
        pred_list.append(logits[ele[0], ele[1]])
        ground_truth.append(ele[2])

    valid_auc = roc_auc_score(ground_truth, pred_list)
    valid_aupr = average_precision_score(ground_truth, pred_list)

    # Bind every returned name up front so the no-improvement path is safe.
    best_valid_auc, best_valid_aupr = valid_auc, valid_aupr
    test_auc = 0.0
    test_aupr = 0.0
    fpr = tpr = precision = recall = np.array([])

    if valid_aupr >= best_aupr:
        # Validation improved: compute and report full test-set metrics.
        pred_list = []
        ground_truth = []
        for ele in DTItest:
            pred_list.append(logits[ele[0], ele[1]])
            ground_truth.append(ele[2])

        aupr, auc, f1_score, accuracy, recall, precision = get_metrics(np.array(ground_truth), np.array(pred_list))
        print('--------------Link Prediction Test--------------')
        print('AUC = {:.4f}'.format(auc))
        print('AP = {:.4f}'.format(aupr))
        print('f1_score = {:.4f}'.format(f1_score))
        print('accuracy = {:.4f}'.format(accuracy))
        print('recall = {:.4f}'.format(recall))
        print('precision = {:.4f}'.format(precision))

        test_auc = roc_auc_score(ground_truth, pred_list)
        fpr, tpr, thresholds = roc_curve(ground_truth, pred_list)
        test_aupr = average_precision_score(ground_truth, pred_list)
        # Deliberately overwrites the scalar precision/recall printed above
        # with the full precision-recall curves for the return value.
        precision, recall, thresholds = precision_recall_curve(ground_truth, pred_list)
    print('valid auc aupr,', valid_auc, valid_aupr, 'test auc aupr', test_auc, test_aupr)

    return best_valid_auc, best_valid_aupr, test_auc, test_aupr, fpr, tpr, precision, recall



def train_and_evaluate(teacher_logits,DTItrain, DTIvalid, DTItest, graph, pos_x_index, pos_y_index, neg_x_index, neg_y_index,
                       drug_protein_train, train_mask, features_d, features_p, t_hd, t_hp, epochs, in_size, out_size, loss_alpha):
    """Train the HAN_DTI student under teacher supervision; evaluate every epoch.

    Total loss = PU reconstruction loss (DTI_PU_loss)
               + 1    * logit distillation vs. `teacher_logits` (teacher_Lloss)
               + 0.01 * representation distillation vs. `t_hd`/`t_hp` (teacher_Rloss).

    Reads hyperparameters and the device from the module-level `args` dict.
    Returns the metric tuple from the final `evaluate` call.

    NOTE(review): `out_size` is accepted but the model reads args['out_size'];
    `DTItrain` and `train_mask` are moved to the device but never read after
    that in this function — confirm whether they are still needed.
    """
    # Index lists -> long tensors for advanced indexing into the loss matrix.
    pos_x_index = torch.tensor(pos_x_index, dtype=torch.long)
    pos_y_index = torch.tensor(pos_y_index, dtype=torch.long)
    neg_x_index = torch.tensor(neg_x_index, dtype=torch.long)
    neg_y_index = torch.tensor(neg_y_index, dtype=torch.long)
    DTItrain = torch.from_numpy(DTItrain).long()
    DTIvalid = torch.from_numpy(DTIvalid).long()
    DTItest = torch.from_numpy(DTItest).long()
    drug_protein_train = torch.from_numpy(drug_protein_train).float()
    train_mask = torch.from_numpy(train_mask).float()
    num_drug = features_d.shape[0]
    num_protein = features_p.shape[0]

    # Binary adjacency built from the positive pairs.
    # NOTE(review): DTIadj is never used after construction — dead code?
    DTIadj = np.zeros((num_drug, num_protein))
    DTIadj[pos_x_index, pos_y_index] = 1
    DTIadj = torch.tensor(DTIadj)

    # Student model: heterogeneous attention network over drug/protein meta-paths.
    model = HAN_DTI(
        all_meta_paths=[[['similarity'], ['dp', 'pd']], [['similarity'], ['pd', 'dp']]],
        in_size=in_size,  # features_d.shape[1],
        hidden_size=args['hidden_units'],
        out_size=args['out_size'],
        num_heads=args['num_heads'],
        dropout=args['dropout'],
        GAT_Layers=args['Gat_layers'],
        W_size=args['W_size']).to(args['device'])

    loss_fcn = DTI_PU_loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'], weight_decay=args['weight_decay'])

    # Move all split tensors and index tensors to the configured device.
    DTItrain = DTItrain.to(args['device'])
    DTIvalid = DTIvalid.to(args['device'])
    DTItest = DTItest.to(args['device'])
    train_mask = train_mask.to(args['device'])
    drug_protein = drug_protein_train.to(args['device'])
    pos_x_index = pos_x_index.to(args['device'])
    pos_y_index = pos_y_index.to(args['device'])
    neg_x_index = neg_x_index.to(args['device'])
    neg_y_index = neg_y_index.to(args['device'])

    # Move every meta-path graph to the device (in place in the list).
    for i in range(len(graph)):
        graph[i] = graph[i].to(args['device'])
        print(graph[i])

    best_valid_aupr = 0
    best_valid_auc = 0

    for epoch in range(epochs):
        model.train()
        d, p, logits = model(graph, features_d, features_p)
        # Reconstruction (PU) loss on the observed positive/negative pairs.
        loss_student = loss_fcn(logits, drug_protein, pos_x_index, pos_y_index, neg_x_index, neg_y_index,
                        loss_alpha)
        # NOTE(review): both distillation-loss modules are re-created every
        # epoch, so the projection inside teacher_Rloss is re-randomized each
        # epoch and its parameters are never registered with `optimizer` —
        # confirm whether they should be built once before the loop instead.
        loss_e = teacher_Rloss()
        loss_teacherR = loss_e(d, p, t_hd, t_hp)

        loss_t = teacher_Lloss()
        loss_teacherL = loss_t(teacher_logits, logits)

        # Fixed weights: 1 for logit distillation, 0.01 for representation distillation.
        loss = loss_student + 1*loss_teacherL + 0.01*loss_teacherR
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # `evaluate` compares valid_aupr against best_valid_aupr internally;
        # the returned validation metrics become the new running best.
        valid_auc, valid_aupr, test_auc, test_aupr, fpr, tpr, precision, recall = evaluate(model, graph,
                                                                                                     features_d,
                                                                                                     features_p,
                                                                                                     best_valid_aupr, DTItest,
                                                                                                     DTIvalid)
        best_valid_aupr = valid_aupr
        best_valid_auc = valid_auc
        print('loss_t: {}'.format(loss_teacherL.item()))
        print('loss_s: {}'.format(loss_student.item()))
        print('loss_e: {}'.format(loss_teacherR.item()))
        print('Epoch {:d} | Train Loss {:.4f} | best_valid_auc {:.4f} | best_valid_aupr {:.4f} |'
              'test_auc {:.4f} |test_aupr {:.4f}'.format(
            epoch + 1, loss.item(), best_valid_auc, best_valid_aupr, test_auc, test_aupr))
    return best_valid_auc, best_valid_aupr, test_auc, test_aupr, fpr, tpr, precision, recall

def get_train(DTItrain, num_drug, num_protein):
    """Build training matrices and index lists from DTI triples.

    Parameters
    ----------
    DTItrain : iterable of (drug_idx, protein_idx, label) triples, label in {0, 1}.
    num_drug, num_protein : dimensions of the interaction matrices.

    Returns
    -------
    pos_x_index, pos_y_index : lists of drug/protein indices with label 1.
    neg_x_index, neg_y_index : lists of drug/protein indices with label 0.
    drug_protein : (num_drug, num_protein) matrix holding the labels.
    train_mask : (num_drug, num_protein) 0/1 matrix marking observed pairs.
    """
    # (Removed dead code from the original: an unused `protein_drug`
    # transpose and the redundant `train_mask = mask` alias.)
    drug_protein = np.zeros((num_drug, num_protein))
    mask = np.zeros((num_drug, num_protein))
    pos_x_index = []
    pos_y_index = []
    neg_x_index = []
    neg_y_index = []
    for drug_idx, protein_idx, label in DTItrain:
        drug_protein[drug_idx, protein_idx] = label
        mask[drug_idx, protein_idx] = 1
        if label == 1:
            pos_x_index.append(drug_idx)
            pos_y_index.append(protein_idx)
        elif label == 0:
            neg_x_index.append(drug_idx)
            neg_y_index.append(protein_idx)
    return pos_x_index, pos_y_index, neg_x_index, neg_y_index, drug_protein, mask

def main(args):
    """Run 5-fold cross-validation of the distilled student model.

    For each fold: load the teacher's entity embeddings and logits from disk,
    build the heterogeneous graph and node features, load the train/valid/test
    DTI splits, then train and evaluate the student.

    NOTE(review): `k_CV` and `sample_times` are read from args but unused
    here; the scalar `in_size` is overwritten by the per-node-type dimension
    list before training; per-fold results accumulate in `test_auc_round` /
    `test_aupr_round` but are neither returned nor saved — confirm intended.
    """

    teacher_path = 'model/5Fold/'
    dataset_path ='dataset/5_Fold/'
    test_auc_round = []
    test_aupr_round = []

    for fold in range(5):

        # Teacher model: select the rows of the KG entity-embedding matrix
        # that correspond to drugs and targets via their kg_id columns.
        tea_path = teacher_path + f'{fold+1}'+'/entity_embedding.npy'
        entity_embedding = np.load(tea_path)
        drug_kgid = pd.read_csv('./dataset/MAN/drug_kgid.csv')
        target_kgid = pd.read_csv('./dataset/MAN/target_kgid.csv')
        rows = drug_kgid['kg_id'].tolist()
        t_hd = entity_embedding[rows]
        rows = target_kgid['kg_id'].tolist()
        t_hp = entity_embedding[rows]
        t_hd = torch.from_numpy(t_hd)
        t_hp = torch.from_numpy(t_hp)
        t_hd = t_hd.to(args['device']) #(934,1000)
        t_hp = t_hp.to(args['device']) #(826,1000)
        tea_path2 = teacher_path + f'{fold+1}' + '/teacher_logits.txt'

        # Pre-computed teacher predictions for logit distillation.
        teacher_logits = np.loadtxt(tea_path2)
        teacher_logits = torch.from_numpy(teacher_logits).to(args['device'])
        teacher_logits = teacher_logits.to(torch.float32)
        in_size = args['in_size']
        alpha = args['alpha']
        out_size = args['out_size']
        k_CV = args['k_cv']
        sample_times = args['sample_times']

        data_set, graph, num_drug, num_protein = load_data(args['network_path'],args['ratio'] )
        # hd = torch.FloatTensor(np.loadtxt(args['network_path'] + '/drug_vector_d100.txt'))
        # hp = torch.FloatTensor(np.loadtxt(args['network_path'] + '/protein_vector_d400.txt'))
        # Node features are random-initialized here (the pretrained vectors
        # above are disabled), so results vary with the RNG state.
        hd = torch.randn((num_drug, in_size))
        hp = torch.randn((num_protein, in_size))
        features_d = hd.to(args['device'])
        features_p = hp.to(args['device'])


        # Replace the scalar in_size with per-node-type feature dimensions.
        in_size = [features_d.shape[1], features_p.shape[1]]

        data_path1 = dataset_path + f'{fold+1}' + '/DTItrain.npy'
        data_path2 = dataset_path + f'{fold+1}' + '/DTIvalid.npy'
        data_path3 = dataset_path + f'{fold+1}' + '/DTItest.npy'
        DTItrain = np.load(data_path1)
        DTIvalid = np.load(data_path2)
        DTItest = np.load(data_path3)
        pos_x_index, pos_y_index, neg_x_index, neg_y_index, drug_protein_train, train_mask = get_train(DTItrain,
                                                                                                               num_drug,
                                                                                                               num_protein)
        best_valid_auc, best_valid_aupr, test_auc, test_aupr, fpr, tpr, precision, recall = train_and_evaluate(
            teacher_logits,DTItrain, DTIvalid, DTItest, graph, pos_x_index, pos_y_index, neg_x_index, neg_y_index,
            drug_protein_train, train_mask, features_d, features_p, t_hd, t_hp, args['num_epochs'], in_size, out_size, alpha)

        test_auc_round.append(test_auc)
        test_aupr_round.append(test_aupr)
        # plt.plot(fpr, tpr)
        # plt.xlabel('False Positive Rate')
        # plt.ylabel('True Positive Rate')
        # plt.title('ROC Curve')
        # plt.show()
if __name__ == '__main__':
    import argparse
    from utils import setup

    # Command-line configuration. `setup` (from utils) augments the parsed
    # dict with the remaining hyperparameters (device, lr, hidden_units, ...)
    # that this module reads through the global `args` dict.
    parser = argparse.ArgumentParser('HANDTI')
    parser.add_argument('-s', '--seed', type=int, default=1,
                        help='Random seed')
    parser.add_argument('-ld', '--log-dir', type=str, default='results',
                        help='Dir for saving training results')
    parser.add_argument('--pre_trained', action='store_true',
                        help='pre_trained hetero_data drug and protein feature ')
    parser.add_argument('-data', '--data', type=str, default='MAN',
                        help='different dataset. eg.hetero,Es,ICs,GPCRs,Ns ')
    parser.add_argument('-r', '--ratio', type=str, default='all',
                        help='posive :negative.one,ten,all')
    parser.add_argument('-path', '--network_path', type=str, default='./dataset/MAN/',
                        help='different dataset path.eg.hetero_data,Es,ICs,GPCRs,Ns')
    # Teacher-model options.
    parser.add_argument("--n-bases", type=int, default=4)
    parser.add_argument("--d_epoch", type=int, default=100)

    parser.add_argument('--dlr', default=0.01, type=float)
    parser.add_argument('--ddrop', default=0.1, type=float)
    parser.add_argument('--ddecay', default=1e-04, type=float, help='weight decay')
    # NOTE(review): the help text below says 'weight decay' — likely a
    # copy-paste slip for an alpha/mixing parameter.
    parser.add_argument('--dalpha', default=0.1, type=float, help='weight decay')
    parser.add_argument('--n_runs', type=int, default=5, help='batch size')
    parser.add_argument('--timesteps', type=int, default=10, help='diffusion of iteration')

    # Convert the Namespace to a plain dict; setup() fills in the rest.
    args = parser.parse_args().__dict__
    args = setup(args)
    print(args)
    main(args)