# -*- coding: utf-8 -*-
# Author: SMF
# Date: 2022.08.11
import argparse
import csv
import math
import os

import numpy
import numpy as np
import torch
from sklearn.metrics import f1_score, precision_recall_curve, auc, roc_curve
from torch import nn, save, optim
import torch.nn.functional as F

from datasets import load_feature_construct_H
from models import HGNN
from src.main import Myloss, opt
from src.prepareData import prepare_data
from src.trainData import Dataset
from src.utils_src import neighborhood, show_auc, set_seed
from utils import hypergraph_utils as hgut

# Restrict CUDA to GPU 0 and allow duplicate OpenMP runtimes (works around
# the MKL/PyTorch "libiomp5 already initialized" crash on some platforms).
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Single compute device for the whole script; falls back to CPU without CUDA.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Experiment hyper-parameters, configurable from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=1, help='Random seed.')
parser.add_argument('--epochs', type=int, default=300,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Learning rate.')
parser.add_argument('--weight_decay', type=float, default=1e-7,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=64,
                    help='Dimension of representations')
parser.add_argument('--alpha', type=float, default=0.5,
                    help='Weight between miRNA space and disease space')
parser.add_argument('--data', type=int, default=1, choices=[1, 2],
                    help='Dataset')
# NOTE(review): the help string below is Chinese for "dataset location".
parser.add_argument('--root', type=str, default='../datasets/MDAData/data(383-495)/', help='数据集位置')

args = parser.parse_args()
# Use CUDA only when it is available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed the RNGs for reproducibility (project helper from src.utils_src).
set_seed(args.seed, args.cuda)


# gdi, ldi, rnafeat, gl, gd = load_data(args.data,args.cuda)

def read_csv(path):
    """Read a purely numeric CSV file into a 2-D float ndarray.

    Each file row becomes one array row; every cell is converted with
    float(), so non-numeric cells raise ValueError.

    Args:
        path: path to the CSV file.

    Returns:
        numpy.ndarray of shape (n_rows, n_cols), dtype float64.
    """
    with open(path, 'r', newline='') as csv_file:
        reader = csv.reader(csv_file)
        # Build the matrix in one pass; use the file-wide `np` alias for
        # consistency (the original mixed `numpy` and `np`).
        return np.array([[float(cell) for cell in row] for row in reader])


def scaley(ymat):
    """Shift scores so the minimum becomes 0, then divide by the original max.

    NOTE(review): this is not a textbook min-max normalisation — the divisor
    is ymat.max(), not (max - min); kept as-is to preserve behaviour.
    """
    lowest = ymat.min()
    return (ymat - lowest) / ymat.max()

def normalized(wmat):
    """Symmetrically normalise an adjacency matrix: D^{-1/2} W D^{-1/2}.

    Degrees are column sums; zero-degree nodes get a scaling factor of 0
    (instead of the inf produced by 0 ** -0.5).
    """
    degree = np.sum(wmat, axis=0)
    scale = np.power(degree, -0.5)
    scale[np.isinf(scale)] = 0
    d_inv_sqrt = np.diag(scale)
    return d_inv_sqrt.dot(wmat).dot(d_inv_sqrt)


def norm_adj(feat):
    """Build a normalised k-NN (k=10) adjacency graph from a feature matrix.

    Uses the project helper `neighborhood` to get a 0/1 neighbour matrix,
    symmetrises it elementwise, adds self-loops, normalises, and returns the
    result as a float torch tensor.
    """
    adjacency = neighborhood(feat.T, k=10)
    with_loops = adjacency.T * adjacency + np.eye(adjacency.shape[0])
    return torch.from_numpy(normalized(with_loops)).float()


# class GNNp(nn.Module):
#     def __init__(self):
#         super(GNNp, self).__init__()
#         self.gnnpl = LP(args.hidden, mdi.shape[1])
#         self.gnnpd = LP(args.hidden, mdi.shape[0])
#
#     def forward(self, y0):
#         yl, zl = self.gnnpl(gm, y0)
#         yd, zd = self.gnnpd(gd, y0.t())
#         return yl, zl, yd, zd


# Announce which dataset the 5-fold cross-validation will run on.
print(f"Dataset {args.data}, 5-fold CV")


# def criterion(output, target, msg, n_nodes, mu, logvar):
#     if msg == 'disease':
#         cost = F.binary_cross_entropy(output, target)
#     else:
#         cost = F.mse_loss(output, target)
#
#     KL = -0.5 / n_nodes * torch.mean(torch.sum(
#         1 + 2 * logvar - mu.pow(2) - logvar.exp().pow(2), 1))
#     return cost + KL


# def train(gnnp, y0, epoch, alpha):
#     beta = 1.0
#     optp = torch.optim.Adam(gnnp.parameters(), lr=args.lr, weight_decay=args.weight_decay)
#     for e in range(epoch):
#         gnnp.train()
#         yl, zl, yd, zd = gnnp(y0)  # y0(495, 383), yl(495, 383), zl(495, 64), yd(383, 495), zd(383, 64)
#         losspl = F.binary_cross_entropy(yl, y0)
#         losspd = F.binary_cross_entropy(yd, y0.t())
#         value = alpha * yl + (1 - alpha) * yd.t()  # value(495, 383)
#         att = torch.softmax(torch.mm(zl, zd.t()) / math.sqrt(args.hidden), dim=-1) * value  # att(495, 383)
#         lossp = beta * (alpha * losspl + (1 - alpha) * losspd) + F.mse_loss(att,
#                                                                             y0)  # F.mse_loss(torch.mm(zl,zd.t()),
#         # y0) + F.mse_loss(yl,yd.t())
#         optp.zero_grad()
#         lossp.backward()
#         optp.step()
#         gnnp.eval()
#         with torch.no_grad():
#             yl, zl, yd, zd = gnnp(y0)
#
#         if e % 20 == 0 and e != 0:
#             print('Epoch %d | Lossp: %.4f' % (e, lossp.item()))
#
#     return alpha * yl + (1 - alpha) * yd.t()


# def trainres(A0, alpha, epochs):
#     # gnnq = GNNq()
#     gnnp = GNNp()
#     if args.cuda:
#         # gnnq = gnnq.cuda()
#         gnnp = gnnp.cuda()
#
#     train(gnnp, A0, epochs, alpha)
#     # gnnq.eval()
#     gnnp.eval()
#     yli, _, ydi, _ = gnnp(A0)
#     resi = alpha * yli + (1 - alpha) * ydi.t()
#     return resi

def train(model, train_data, optimizer, opt, G1_Sum, G2_Sum, cha_index, cha_index0, F1_score, AUC_ROC, AUPR, EPOCH, SUM):
    """Train `model` for `opt.epoch` epochs and return the final score matrix.

    Args:
        model: HGNN model mapping (train_data, G1_Sum, G2_Sum) to a score
            matrix of association predictions.
        train_data: dataset container; index 2 holds the (positive, negative)
            cell-coordinate tensors, index 4 holds the target matrix.
        optimizer: torch optimizer over `model`'s parameters.
        opt: options object; only `opt.epoch` is read here.
        G1_Sum, G2_Sum: dicts of precomputed hypergraph matrices (keyed by k),
            forwarded to the model on every forward pass.
        cha_index, cha_index0, F1_score, AUC_ROC, AUPR, EPOCH, SUM: currently
            unused; kept so the call signature stays compatible (they fed a
            now-removed in-training evaluation loop).

    Returns:
        The score tensor from the last completed epoch, or None when
        opt.epoch < 1.
    """
    model.train()
    regression_crit = Myloss()
    # Coordinate lists of known-positive / known-negative training cells.
    # NOTE(review): switched from unconditional .cuda() to .to(device) so the
    # function also runs on CPU-only hosts; on GPU the behaviour is unchanged.
    one_index = train_data[2][0].to(device).t().tolist()
    zero_index = train_data[2][1].to(device).t().tolist()
    score1 = None

    def train_epoch():
        # One optimisation step: forward, loss on supervised cells, backward.
        model.zero_grad()
        scores = model(train_data, G1_Sum, G2_Sum)
        loss = regression_crit(one_index, zero_index, train_data[4].to(device), scores)
        loss.backward()
        optimizer.step()
        return loss, scores

    for epoch in range(1, opt.epoch + 1):
        if epoch > 800:
            # Checkpoint once training passes 800 epochs, then stop early.
            save(model.state_dict(), '../save/HGNN_MDA_all5.pt')
            return score1
        train_reg_loss, score1 = train_epoch()
        if epoch % 10 == 0:
            # Report mean loss per supervised cell.
            print(train_reg_loss.item() / (len(one_index[0]) + len(zero_index[0])))

    # BUG FIX: the original fell off the end (implicitly returning None)
    # whenever opt.epoch <= 800, crashing the caller's `res[i] = resi`.
    # Always hand back the last score matrix.
    return score1

def fivefoldcv(A):
    """Run 5-fold cross-validation over association matrix A.

    Relies on module-level globals set in __main__: `dataset`, `cha_index`,
    `cha_index0`, plus `opt`, `args` and `device` from import time.

    Returns the score matrix of the fold with the highest AUROC, as a
    numpy array.
    """
    N = A.shape[0]
    idx = np.arange(N)
    np.random.shuffle(idx)
    # Per-fold score matrices and metric accumulators.
    res = torch.zeros(5, A.shape[0], A.shape[1])
    # res = dict()
    aurocl = np.zeros(5)
    auprl = np.zeros(5)
    # aurocl = dict()
    # auprl = dict()
    # Precompute hypergraph matrices for k-NN hyperedges, k = 1..7,
    # for both the miRNA (m-m) and disease (d-d) similarity files.
    G1_Sum = {}
    G2_Sum = {}
    for k in range(1, 8):
        fts1, H1 = \
            load_feature_construct_H('../datasets/MDAData/data(383-495)/m-m.csv', K_neigs=[k])
        G1 = hgut.generate_G_from_H(H1, variable_weight=False)

        fts2, H2 = \
            load_feature_construct_H('../datasets/MDAData/data(383-495)/d-d.csv', K_neigs=[k])
        G2 = hgut.generate_G_from_H(H2, variable_weight=False)
        G1 = torch.Tensor(G1).to(device)
        G2 = torch.Tensor(G2).to(device)
        G1_Sum[k] = G1
        G2_Sum[k] = G2
    for i in range(5):
        print("Fold {}".format(i + 1))
        A0 = A.clone()
        # Zero out this fold's rows (the held-out test rows) in the
        # training copy of the association matrix.
        # NOTE(review): the dataset writes inside this loop look redundant —
        # they are overwritten immediately after the loop; confirm intent.
        for j in range(i * N // 5, (i + 1) * N // 5):
            A0[idx[j], :] = torch.zeros(A.shape[1])
            dataset['md_p'] = A0
            dataset['md_true'] = dataset['md_p']

        # mdi = read_csv(args.root + 'm-d.csv')  # (495, 383)
        dataset['md_p'] = A0.float()
        # dataset['md_p'] = torch.from_numpy(dataset['md_p']).float()
        dataset['md_true'] = dataset['md_p']
        # mm = read_csv(args.root + 'm-m.csv')  # (495, 495)
        # dd = read_csv(args.root + 'd-d.csv')  # (383, 383)
        # Round-trip the similarity matrices through numpy so they can be
        # symmetrically normalised, then back to float tensors.
        dataset['mm']['data'] = np.array(dataset['mm']['data'].cpu())
        dataset['dd']['data'] = np.array(dataset['dd']['data'].cpu())
        dataset['mm']['data'] = normalized(dataset['mm']['data'])
        dataset['dd']['data'] = normalized(dataset['dd']['data'])
        dataset['mm']['data'] = torch.from_numpy(dataset['mm']['data']).float()
        dataset['dd']['data'] = torch.from_numpy(dataset['dd']['data']).float()
        train_data = Dataset(opt, dataset)

        # Fresh model and optimizer per fold.
        model1 = HGNN()
        model1 = model1.to(device)
        train_data[0][0]['data'] = torch.Tensor(train_data[0][0]['data']).to(device)
        train_data[0][1]['data'] = torch.Tensor(train_data[0][1]['data']).to(device)
        # Metric accumulators passed to train() (currently unused there).
        F1_score1 = []
        AUC_ROC1 = []
        AUPR1 = []
        EPOCH1 = []
        SUM1 = []

        optimizer = optim.Adam(model1.parameters(), lr=0.001)
        # NOTE(review): assumes train() returns the score tensor — verify it
        # cannot return None for the configured opt.epoch.
        resi = train(model1, train_data[1], optimizer, opt, G1_Sum, G2_Sum, cha_index=cha_index, cha_index0=cha_index0,
              F1_score=F1_score1, AUC_ROC=AUC_ROC1, AUPR=AUPR1, EPOCH=EPOCH1, SUM=SUM1)

        # resi = train()  # (495, 383) 得到score矩阵
        # resi = scaley(resi)
        # Store this fold's scores in the CPU result buffer.
        res[i] = resi

        if args.cuda:
            resi = resi.cpu().detach().numpy()
        else:
            resi = resi.detach().numpy()

        # Fold-level AUROC / AUPR via the project helper.
        auroc, aupr = show_auc(resi, args.data)
        aurocl[i] = auroc
        auprl[i] = aupr

    # Keep the score matrix of the best-AUROC fold.
    ymat = res[aurocl.argmax()]
    print("===Final result===")
    print('AUROC= %.4f +- %.4f | AUPR= %.4f +- %.4f' % (aurocl.mean(), aurocl.std(), auprl.mean(), auprl.std()))
    if args.cuda:
        return ymat.cpu().detach().numpy()
    else:
        return ymat.detach().numpy()


if __name__ == '__main__':
    # Globals consumed by fivefoldcv() / train().
    dataset, cha_index, cha_index0, cha_index1, cha_index2 = prepare_data(opt)
    ymat = fivefoldcv(dataset['md_p'])
    ymat = scaley(ymat)
    # Same output path as before: ../save/result--dataset<N>--fivefoldcv.txt
    title = '../save/result--dataset' + str(args.data) + '--fivefoldcv'
    np.savetxt(title + '.txt', ymat, fmt='%10.5f', delimiter=',')
    print("===Max result===")
    show_auc(ymat, args.data)
