# -*- coding: utf-8 -*-
# Author: SMF
# Date: 2022.08.11
import argparse
import copy
import csv
import math
import os

import numpy
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.metrics import f1_score, roc_curve, auc, precision_recall_curve
from torch import nn, save
import torch.nn.functional as F
from src.models import LP
from src.prepareData import prepare_data
from src.utils import neighborhood, show_auc, set_seed

# Pin the visible GPU and work around the "duplicate OpenMP runtime" crash
# that occurs when MKL and libomp are both loaded.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Global device used throughout the script (GPU 0 when available, else CPU).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Command-line hyper-parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=1, help='Random seed.')
parser.add_argument('--epochs', type=int, default=300,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Learning rate.')
parser.add_argument('--weight_decay', type=float, default=1e-7,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=64,
                    help='Dimension of representations')
parser.add_argument('--alpha', type=float, default=0.5,
                    help='Weight between miRNA space and disease space')
parser.add_argument('--data', type=int, default=1, choices=[1, 2],
                    help='Dataset')
parser.add_argument('--root', type=str, default='../dataset_MDA_old/', help='数据集位置')

args = parser.parse_args()
# Effective CUDA flag: only true when requested AND actually available.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed the RNGs (python / numpy / torch) for reproducibility.
set_seed(args.seed, args.cuda)


# gdi, ldi, rnafeat, gl, gd = load_data(args.data,args.cuda)

def read_csv(path):
    """Load a CSV file of numbers as a 2-D numpy array (one row per CSV row)."""
    with open(path, 'r', newline='') as fh:
        matrix = [[float(cell) for cell in row] for row in csv.reader(fh)]
    return numpy.array(matrix)


def scaley(ymat):
    """Shift scores so the minimum is zero, then divide by the original maximum.

    NOTE(review): this is not a standard min-max normalization (the divisor is
    ymat.max(), not ymat.max() - ymat.min()), so the output is not guaranteed
    to lie in [0, 1]; behavior preserved as-is.
    """
    lo = ymat.min()
    hi = ymat.max()
    return (ymat - lo) / hi


def normalized(wmat):
    """Symmetrically normalize a similarity/adjacency matrix: D^-1/2 W D^-1/2.

    Degrees are column sums; zero-degree nodes produce inf in D^-1/2, which is
    replaced by 0 so isolated nodes contribute nothing.
    """
    inv_sqrt_deg = np.power(np.diag(np.sum(wmat, axis=0)), -0.5)
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0
    return inv_sqrt_deg.dot(wmat).dot(inv_sqrt_deg)


def norm_adj(feat):
    """Build a symmetrically normalized k-NN graph (k=10) from feature matrix
    `feat` and return it as a float torch tensor."""
    knn = neighborhood(feat.T, k=10)
    # Symmetrize the k-NN adjacency and add self-loops before normalizing.
    sym = knn.T * knn + np.eye(knn.shape[0])
    return torch.from_numpy(normalized(sym)).float()


class GNNp(nn.Module):
    """Pair of label-propagation (LP) branches over the two similarity graphs.

    Relies on module-level globals: ``mdi`` (miRNA-disease association matrix,
    used only for its shape), ``gm`` (normalized miRNA graph) and ``gd``
    (normalized disease graph).
    """

    def __init__(self):
        super(GNNp, self).__init__()
        # miRNA branch reconstructs rows of mdi; disease branch its transpose.
        self.gnnpl = LP(args.hidden, mdi.shape[1])
        self.gnnpd = LP(args.hidden, mdi.shape[0])

    def forward(self, y0):
        # yl / yd: reconstructed association matrices from each branch;
        # zl / zd: latent embeddings (dimension args.hidden).
        yl, zl = self.gnnpl(gm, y0)
        yd, zd = self.gnnpd(gd, y0.t())
        return yl, zl, yd, zd


print("Dataset {}, 5-fold CV".format(args.data))


def criterion(output, target, msg, n_nodes, mu, logvar):
    """VAE-style loss: a reconstruction term plus a KL-divergence penalty.

    The reconstruction term is binary cross-entropy when ``msg == 'disease'``
    and mean-squared error otherwise.  ``logvar`` is treated as log(std):
    the KL term uses 2*logvar for the log-variance and exp(logvar)^2 for the
    variance, scaled by 1 / n_nodes.
    """
    if msg == 'disease':
        recon = F.binary_cross_entropy(output, target)
    else:
        recon = F.mse_loss(output, target)

    # KL(q(z|x) || N(0, I)), averaged over the batch and divided by n_nodes.
    kl = -0.5 / n_nodes * torch.mean(
        torch.sum(1 + 2 * logvar - mu.pow(2) - logvar.exp().pow(2), 1))
    return recon + kl


def train(gnnp, y0, epoch, alpha):
    """Fit the two LP branches of `gnnp` on association matrix y0.

    Runs `epoch` Adam steps of a loss combining per-branch BCE reconstruction
    with an attention-weighted MSE consistency term, then returns the
    alpha-blended prediction computed in eval mode under no_grad.
    """
    beta = 1.0
    optimizer = torch.optim.Adam(gnnp.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    for _ in range(epoch):
        gnnp.train()
        yl, zl, yd, zd = gnnp(y0)
        # Reconstruction losses: miRNA branch vs y0, disease branch vs y0^T.
        loss_m = F.binary_cross_entropy(yl, y0)
        loss_d = F.binary_cross_entropy(yd, y0.t())
        blended = alpha * yl + (1 - alpha) * yd.t()
        # Scaled dot-product attention between the two latent spaces, applied
        # multiplicatively to the blended prediction.
        attention = torch.softmax(
            torch.mm(zl, zd.t()) / math.sqrt(args.hidden), dim=-1) * blended
        loss = beta * (alpha * loss_m + (1 - alpha) * loss_d) \
            + F.mse_loss(attention, y0)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Refresh outputs in eval mode so the returned prediction carries no
        # gradient history.
        gnnp.eval()
        with torch.no_grad():
            yl, zl, yd, zd = gnnp(y0)

    return alpha * yl + (1 - alpha) * yd.t()


def trainres(A0, alpha, epochs):
    """Train a fresh GNNp on (possibly row-masked) association matrix A0 and
    return the alpha-blended score matrix."""
    model = GNNp()
    if args.cuda:
        model = model.cuda()

    train(model, A0, epochs, alpha)
    model.eval()
    yl_hat, _, yd_hat, _ = model(A0)
    return alpha * yl_hat + (1 - alpha) * yd_hat.t()


def fivefoldcv(A, alpha=0.5):
    """5-fold cross-validation over the rows of association matrix A.

    For each fold, the held-out rows are zeroed before training; AUROC/AUPR
    are computed per fold via show_auc, and the score matrix of the
    best-AUROC fold is returned as a numpy array.
    """
    n_rows = A.shape[0]
    order = np.arange(n_rows)
    np.random.shuffle(order)
    scores = torch.zeros(5, A.shape[0], A.shape[1])
    auroc_per_fold = np.zeros(5)
    aupr_per_fold = np.zeros(5)
    for fold in range(5):
        print("Fold {}".format(fold + 1))
        masked = A.clone()
        # Zero this fold's rows so they become "unknown" associations.
        for j in range(fold * n_rows // 5, (fold + 1) * n_rows // 5):
            masked[order[j], :] = torch.zeros(A.shape[1])

        fold_scores = trainres(masked, alpha, epochs=args.epochs)
        scores[fold] = fold_scores

        if args.cuda:
            fold_scores = fold_scores.cpu().detach().numpy()
        else:
            fold_scores = fold_scores.detach().numpy()

        auroc, aupr = show_auc(fold_scores, args.data)
        auroc_per_fold[fold] = auroc
        aupr_per_fold[fold] = aupr

    # Keep the score matrix of the fold with the best AUROC.
    best = scores[auroc_per_fold.argmax()]
    print("===Final result===")
    print('AUROC= %.4f +- %.4f | AUPR= %.4f +- %.4f' % (
        auroc_per_fold.mean(), auroc_per_fold.std(),
        aupr_per_fold.mean(), aupr_per_fold.std()))
    if args.cuda:
        return best.cpu().detach().numpy()
    return best.detach().numpy()


class Config(object):
    """Hyper-parameters for the train_old pipeline."""

    def __init__(self):
        # Root directory of the MDA dataset.
        self.data_path = '../dataset_MDA_old'
        # Number of validation evaluations run per fold.
        self.validation = 5
        # Directory where model checkpoints are written.
        self.save_path = '../save'
        # Total training epochs for train_old.
        self.epoch = 1000
        # NOTE(review): unused blend weight (the module-level `alpha` is used
        # instead) — kept for interface compatibility.
        self.alpha = 0.1


# Global configuration for the train_old pipeline, and the blend weight
# between the miRNA-space and disease-space predictions.
opt = Config()
alpha = 0.5


def vision(EPOCH, F1_score, AUC_ROC, AUPR, SUM):
    """Plot the combined metric (SUM) against the recorded epochs.

    F1_score / AUC_ROC / AUPR are accepted for interface compatibility but
    are not plotted (their individual plots were disabled in the original).
    """
    plt.plot(EPOCH, SUM)
    plt.xlabel('epochs')
    plt.ylabel('SUM')
    plt.title('SUM随epochs变化曲线', fontsize=20)
    plt.show()


def train_old(model, optimizer, opt, cha_index, cha_index0, F1_score, AUC_ROC, AUPR, EPOCH,
              SUM, y0):
    """Train `model` on the association matrix `y0` for opt.epoch epochs.

    Every 100 epochs the current score matrix is evaluated on the known
    positive pairs (`cha_index`) and negative pairs (`cha_index0`), and
    F1/AUROC/AUPR plus their mean are appended to the accumulator lists.
    After epoch 800 the model is checkpointed whenever the combined metric
    stops improving; the SUM curve is plotted on the final epoch.

    Args:
        model: GNNp instance; forward(y0) returns (yl, zl, yd, zd).
        optimizer: optimizer over model's parameters, stepped once per epoch.
        opt: Config providing the total epoch count (opt.epoch).
        cha_index: (row, col) index pairs of positive samples.
        cha_index0: (row, col) index pairs of negative samples.
        F1_score, AUC_ROC, AUPR, EPOCH, SUM: output lists, mutated in place.
        y0: miRNA-disease association matrix (float tensor).
    """
    model.to(device)
    model.train()
    beta = 1.0

    def train_epoch(y0):
        # One optimization step; returns (loss, blended score matrix).
        model.zero_grad()
        # BUGFIX: the original called y0.cuda(), which crashes on CPU-only
        # machines even though the module defines a CPU fallback `device`.
        y0 = y0.to(device)
        yl, zl, yd, zd = model(y0)
        # Per-branch BCE reconstruction losses.
        losspl = F.binary_cross_entropy(yl, y0)
        losspd = F.binary_cross_entropy(yd, y0.t())
        value = alpha * yl + (1 - alpha) * yd.t()
        # Scaled dot-product attention between the two latent spaces.
        att = torch.softmax(torch.mm(zl, zd.t()) / math.sqrt(args.hidden), dim=-1) * value
        loss = beta * (alpha * losspl + (1 - alpha) * losspd) + F.mse_loss(att, y0)
        scores_new = alpha * yl + (1 - alpha) * yd.t()
        loss.backward()
        optimizer.step()
        return loss, scores_new

    for epoch in range(1, opt.epoch + 1):
        if epoch > 800:
            # Checkpoint once the combined metric has plateaued/declined.
            # (SUM holds one entry per 100 epochs, so it has >= 8 entries here.)
            if SUM[-1] <= SUM[-2]:
                save(model.state_dict(), '../save/NIMGSA_MDA_all5.pt')
        if epoch == opt.epoch:
            # NOTE(review): plotted before this epoch's metrics are appended,
            # so the final point is absent from the curve; preserved as-is.
            vision(EPOCH, F1_score, AUC_ROC, AUPR, SUM)
        train_reg_loss, score = train_epoch(y0)
        if epoch % 100 == 0:
            # Collect predicted scores for the known positive and negative
            # pairs, with labels 1 / 0 respectively.
            dataPre = []
            dataAct = []
            for ind in cha_index:
                dataAct.append(1)
                dataPre.append(score[ind[0], ind[1]].data.cpu().numpy())
            for ind0 in cha_index0:
                dataAct.append(0)
                dataPre.append(score[ind0[0], ind0[1]].data.cpu().numpy())
            act = np.array(dataAct)
            pre = np.array(dataPre)
            # ROC / AUROC on the raw scores.
            FPR, TPR, thresholds = roc_curve(act, pre)
            AUC = auc(FPR, TPR)
            print('AUC:', AUC)
            # AUPR from the full precision-recall curve.
            precision, recall, thresholds = precision_recall_curve(dataAct, dataPre)
            PR = auc(recall, precision)
            print("PR: ", PR)
            # Threshold scores at 0.5 for the F1 computation.
            dataPre = np.around(dataPre, 0).astype(int)
            f1_weighted = f1_score(dataAct, dataPre, average='weighted')
            f1_macro = f1_score(dataAct, dataPre, average='macro')
            print("f1-score: 考虑类别的不平衡性为{}, 不考虑类别的不平衡性为{}".format(f1_weighted, f1_macro))

            # Combined metric: mean of weighted F1, AUROC and AUPR.
            Sum = (f1_weighted + AUC + PR) / 3

            F1_score.append(f1_weighted)
            AUC_ROC.append(AUC)
            AUPR.append(PR)
            EPOCH.append(epoch)
            SUM.append(Sum)


if __name__ == '__main__':
    # Load the pre-split dataset plus index lists:
    #   cha_index / cha_index0  - positive / negative pairs for training-time eval
    #   cha_index1 / cha_index2 - positive / negative pairs for held-out eval
    # (exact semantics come from src.prepareData.prepare_data - TODO confirm)
    dataset, cha_index, cha_index0, cha_index1, cha_index2 = prepare_data(opt)
    # mdi = np.loadtxt(args.root + 'm-d.txt', delimiter=',')
    # mdi = read_csv(args.root + 'm-d.csv')  # (495, 383)
    # mdit = torch.from_numpy(mdi).float()
    # mm = np.loadtxt(args.root + 'm-m.txt', delimiter=',')
    # dd = np.loadtxt(args.root + 'd-d.txt', delimiter=',')
    # mm = read_csv(args.root + 'm-m.csv')  # (495, 495)
    # dd = read_csv(args.root + 'd-d.csv')  # (383, 383)
    # Metric accumulators shared across all folds / validation runs.
    F1_score = []
    AUC_ROC = []
    AUPR = []
    SUM = []

    # 5-fold split
    for k in range(5):
        print("第 {} 次折划分".format(k))
        # train_data = Dataset(opt, dataset[k])
        # Association matrix (mdi) and miRNA / disease similarity matrices
        # for this fold.
        mdi = copy.deepcopy(dataset[k]['md_true'])
        mdit = torch.tensor(mdi, dtype=torch.float32)
        mm = copy.deepcopy(dataset[k]['mm']['data'])
        dd = copy.deepcopy(dataset[k]['dd']['data'])
        # train_data[0][0]['data'] = torch.Tensor(train_data[0][0]['data']).to(device)
        # train_data[0][1]['data'] = torch.Tensor(train_data[0][1]['data']).to(device)
        # mm = torch.Tensor(mm).to(device)
        # dd = torch.Tensor(dd).to(device)
        # NOTE(review): assumes mm/dd arrive as torch tensors (.cpu() below);
        # verify against prepare_data.
        mm = np.array(mm.cpu())
        dd = np.array(dd.cpu())
        # Symmetrically normalized similarity graphs; these become the
        # module-level globals gm / gd consumed by GNNp.forward.
        gm = normalized(mm)
        gd = normalized(dd)
        gm = torch.from_numpy(gm).float().to(device)
        gd = torch.from_numpy(gd).float().to(device)
        mm = torch.Tensor(mm).to(device)
        dd = torch.Tensor(dd).to(device)


        # Per-fold metric accumulators fed to train_old.
        F1_score1 = []
        AUC_ROC1 = []
        AUPR1 = []
        EPOCH1 = []
        SUM1 = []

        title = '../save/result--dataset' + str(args.data)
        A0 = mdit.clone()

        gnnp = GNNp()
        # Adam optimizer over gnnp's parameters. Built before train_old moves
        # the model to `device`; Module.to() moves parameters in place, so the
        # optimizer still references them — presumably intended, verify.
        optp = torch.optim.Adam(gnnp.parameters(), lr=args.lr, weight_decay=args.weight_decay)

        # resi = trainres(A0, alpha, epochs=args.epochs)  # obtain the score matrix
        # ymat = fivefoldcv(mdit, alpha=args.alpha)
        # title += '--fivefoldcv'
        # ymat = scaley(ymat)
        # np.savetxt(title + '.txt', ymat, fmt='%10.5f', delimiter=',')
        # np.savetxt('nimgsa.txt', ymat, fmt='%10.5f', delimiter=',')
        # print("===Max result===")
        # show_auc(ymat, args.data)
        # optimizer = optim.Adam(model1.parameters(), lr=0.001)
        train_old(gnnp, optp, opt, cha_index=cha_index[k],
                  cha_index0=cha_index0[k],
                  F1_score=F1_score1, AUC_ROC=AUC_ROC1, AUPR=AUPR1, EPOCH=EPOCH1, SUM=SUM1, y0=A0)

        # F1_score = []
        # AUC_ROC = []
        # AUPR = []
        # SUM = []
        # Evaluate the checkpointed model on the held-out pairs several times.
        for i in range(1, opt.validation + 1):
            print('-' * 50)
            print("Training, {} dataset".format(i))
            # print("Starting Testing, {}".format(k))
            # model = Model(sizes)
            # optimizer = optim.Adam(model.parameters(), lr=0.001)
            # train_data[i][0]['data'] = torch.Tensor(train_data[i][0]['data']).to(device)
            # train_data[i][1]['data'] = torch.Tensor(train_data[i][1]['data']).to(device)
            # train(model, train_data[i], optimizer, opt, G1_Sum, G2_Sum, cha_index=cha_index, cha_index0=cha_index0,
            #       F1_score=F1_score, AUC_ROC=AUC_ROC, AUPR=AUPR, EPOCH=EPOCH, SUM=SUM)
            # print(G2)
            # print(G1)
            # model = HGNN()
            # Reload the best checkpoint written by train_old and score the
            # full association matrix.
            model = GNNp()
            model = model.to(device)
            m_state_dict = torch.load('../save/NIMGSA_MDA_all5.pt')
            model.load_state_dict(m_state_dict)
            B0 = mdit.clone().to(device)
            yl, zl, yd, zd = model(B0)
            # Blend the miRNA-side and disease-side reconstructions.
            scores = alpha * yl + (1 - alpha) * yd.t()
            # scores = model(train_data[i], G1_Sum, G2_Sum)
            dataPre = []
            dataAct = []
            for ind in cha_index1[k]:
                dataAct.append(1)
                dataPre = np.append(dataPre, scores[ind[0], ind[1]].data.cpu().numpy())
            for ind0 in cha_index2[k]:
                dataAct.append(0)
                dataPre = np.append(dataPre, scores[ind0[0], ind0[1]].data.cpu().numpy())
            # Plot ROC / AUC
            act = np.array(dataAct)
            pre = np.array(dataPre)
            FPR, TPR, thresholds = roc_curve(act, pre)
            AUC = auc(FPR, TPR)
            print('AUC:', AUC)
            if i == 5:
                plt.rc('font', family='Arial Unicode MS', size=14)
                plt.plot(FPR, TPR, label="AUC={:.2f}".format(AUC), marker='o', color='b', linestyle='--')
                plt.legend(loc=4, fontsize=10)
                plt.title('ROC曲线', fontsize=20)
                plt.xlabel('FPR', fontsize=14)
                plt.ylabel('TPR', fontsize=14)
                plt.show()

            # print("Precision: {}, Recall: {}, F1: {}, Accuracy: {}".format(P, R, F1, Acc))

            # AUPR computed strictly from the precision-recall curve.
            precision, recall, thresholds = precision_recall_curve(dataAct, dataPre)
            PR = auc(recall, precision)
            print("PR: ", PR)
            if i == 5:
                plt.xlabel('Recall')
                plt.ylabel('Precision')
                # plt.grid()  # draw grid lines

                plt.plot(recall, precision)
                plt.figure("P-R Curve")
                plt.show()

            # Threshold scores at 0.5 for the F1 computation.
            dataPre = np.around(dataPre, 0).astype(int)
            f1_weighted = f1_score(dataAct, dataPre, average='weighted')
            f1_macro = f1_score(dataAct, dataPre, average='macro')
            print("f1-score: 考虑类别的不平衡性为{}, 不考虑类别的不平衡性为{}".format(f1_weighted, f1_macro))

            F1_score.append(f1_weighted)
            AUC_ROC.append(AUC)
            AUPR.append(PR)
            Sum = (f1_weighted + PR + AUC) / 3
            SUM = np.append(SUM, Sum)
        # Per-fold summary over this fold's 5 validation runs.
        print('Mean: F1_score: {}, AUC_ROC: {}, AUPR: {}, SUM: {}'.format(np.mean(F1_score[k * 5:(k + 1) * 5]),
                                                                          np.mean(AUC_ROC[k * 5:(k + 1) * 5]),
                                                                          np.mean(AUPR[k * 5:(k + 1) * 5]),
                                                                          np.mean(SUM[k * 5:(k + 1) * 5])))
        print('Median: F1_score: {}, AUC_ROC: {}, AUPR: {}, SUM: {}'.format(np.median(F1_score[k * 5:(k + 1) * 5]),
                                                                            np.median(AUC_ROC[k * 5:(k + 1) * 5]),
                                                                            np.median(AUPR[k * 5:(k + 1) * 5]),
                                                                            np.median(SUM[k * 5:(k + 1) * 5])))
        print('F1_score: {}, AUC_ROC: {}, AUPR: {}, SUM: {}'.format(F1_score, AUC_ROC, AUPR, SUM))
    # gm = normalized(mm)
    # gd = normalized(dd)
    # gm = torch.from_numpy(gm).float()
    # gd = torch.from_numpy(gd).float()
    # if args.cuda:
    #     mdit = mdit.cuda()
    #     gm = gm.cuda()
    #     gd = gd.cuda()

    # title = '../save/result--dataset' + str(args.data)
    # ymat = fivefoldcv(mdit, alpha=args.alpha)
    # title += '--fivefoldcv'
    # ymat = scaley(ymat)
    # np.savetxt(title + '.txt', ymat, fmt='%10.5f', delimiter=',')
    # # np.savetxt('nimgsa.txt', ymat, fmt='%10.5f', delimiter=',')
    # print("===Max result===")
    # show_auc(ymat, args.data)
