import numpy as np
import pandas as pd
import sys, os
from random import shuffle
import torch
import torch.nn as nn
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
# from modelsi.ginconv_4_4_fc import GINConvNet
from models.ginconv_4_4_1_Wattention import GINConvNet
from utils import *


# training function at each epoch
# training function at each epoch
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and return the last batch's loss value.

    Relies on the module-level ``loss_fn`` (BCELoss) and ``LOG_INTERVAL``.

    Args:
        model: network to train; set to train mode so Dropout/BatchNorm behave.
        device: torch.device batches and labels are moved to.
        train_loader: iterable of torch_geometric batches carrying ``.y`` labels.
        optimizer: optimizer zeroed and stepped once per batch.
        epoch: 1-based epoch number, used only for the progress log.

    Returns:
        float: loss of the final batch (0.0 if the loader yields no batches).
    """
    print('Training on {} samples...'.format(len(train_loader.dataset)))
    model.train()
    last_loss = 0.0  # FIX: was unset before the loop -> UnboundLocalError on an empty loader
    n_seen = 0       # number of samples processed so far, for the progress log
    for batch_idx, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data)
        # labels reshaped to (batch, 1) to match the model's sigmoid output
        loss = loss_fn(output, data.y.view(-1, 1).float().to(device))
        loss.backward()
        optimizer.step()
        n_seen += data.y.size(0)
        if batch_idx % LOG_INTERVAL == 0:
            # FIX: progress used `batch_idx * len(data.x)`, which counts graph
            # *nodes*, not samples; report the true number of processed samples.
            print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                n_seen,
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item()))
        last_loss = loss.item()
    return last_loss

def predicting(model, device, loader):
    """Collect ground-truth labels and model predictions over ``loader``.

    Switches the model to evaluation mode (equivalent to ``train(False)``,
    which affects Dropout/BatchNorm) and runs inference with gradient
    tracking disabled.

    Returns:
        tuple: ``(labels, predictions)`` — two flat 1-D numpy arrays of
        equal length, accumulated on the CPU.
    """
    model.eval()
    # Accumulate per-batch results in Python lists and concatenate once at
    # the end; outputs are moved to the CPU before being stored.
    label_chunks = []
    pred_chunks = []
    print('Make prediction for {} samples...'.format(len(loader.dataset)))
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)
            out = model(batch)
            pred_chunks.append(out.cpu())
            # reshape labels to (batch, 1) to mirror the prediction layout
            label_chunks.append(batch.y.view(-1, 1).cpu())
    total_preds = torch.cat(pred_chunks, 0) if pred_chunks else torch.Tensor()
    total_labels = torch.cat(label_chunks, 0) if label_chunks else torch.Tensor()
    return total_labels.numpy().flatten(), total_preds.numpy().flatten()

"""
python training_validation.py 0 0 0
the first argument is for the index of the datasets, 0/1 for 'davis' or 'kiba', respectively;
the second argument is for the index of the models, 0/1/2/3 for GINConvNet, GATNet, GAT_GCN, or GCNNet, respectively; 
and the third argument is for the index of the cuda, 0/1 for 'cuda:0' or 'cuda:1', respectively. 
"""
# ['Enzyme_under_n2_1','Enzyme_under_n10_1','Enzyme_under_n10_5fold_1','Enzyme_under_n20_1','Enzyme_under_n30_1']
# ['GPCR_under_n2_1','GPCR_under_n10_1','GPCR_under_n20_1','GPCR_full_1']
# ['IonChannel_under_n2_1','IonChannel_under_n10_1','IonChannel_under_n20_1','IonChannel_full_1']
# ['NR_full_n1_s1','NuclearReceptor_under_n2_1','NuclearReceptor_under_n10_1','NuclearReceptor_full_1','NuclearReceptor_full_3']
# ['2017used_under_n2_1','2017used_under_n10_1','2017used_under_n20_1','2017used_under_n30_1']
# ['DBimb_under_n2_1','DBimb_under_n10_1','DBimb_under_n20_1']
# ['Enzyme_under_n10_5fold_1',
#              'GPCR_under_n10_5fold_1',
#              'GPCR_full_5fold_1',
#              'IonChannel_under_n10_5fold_1',
#              'NuclearReceptor_full_5fold_1',
#              'NuclearReceptor_under_n2_5fold_1',
#              'NuclearReceptor_under_n10_5fold_1',
#              'NuclearReceptor_under_n10_5fold_2'
#              ]

# datasets = [['NuclearReceptor_under_n2_5fold_1'][int(sys.argv[1])]]
# modeling = [GINConvNet,GATNet, GAT_GCN, GCNNet][int(sys.argv[2])]
# Dataset and model are hard-coded here instead of being read from sys.argv.
datasets = ['NuclearReceptor_under_n2_5fold_1']
modeling = [GINConvNet, GATNet, GAT_GCN, GCNNet][0]
model_st = modeling.__name__

# Hyperparameters
TRAIN_BATCH_SIZE = 16
TEST_BATCH_SIZE = 32
LR = 0.0005
LOG_INTERVAL = 20  # log every LOG_INTERVAL batches, i.e. every TRAIN_BATCH_SIZE*LOG_INTERVAL samples
NUM_EPOCHS = 1

# cuda_name = "cuda:0"
# if len(sys.argv) > 3:
#     cuda_name = ["cuda:0", "cuda:1"][int(sys.argv[3])]
# 动态选择 CUDA 设备
if torch.cuda.is_available():
    num_gpus = torch.cuda.device_count()
    if len(sys.argv) > 3:
        cuda_index = int(sys.argv[3])
        if cuda_index < num_gpus:
            cuda_name = f"cuda:{cuda_index}"
        else:
            print(f"指定的 CUDA 设备索引 {cuda_index} 超出可用设备数量，使用默认设备 cuda:0")
            cuda_name = "cuda:0"
    else:
        cuda_name = "cuda:0" if num_gpus > 0 else "cpu"
else:
    cuda_name = "cpu"

print('cuda_name:', cuda_name)
print('Learning rate: ', LR)
print('Epochs: ', NUM_EPOCHS)
print('Train batch size:',TRAIN_BATCH_SIZE)
print('Test batch size:',TEST_BATCH_SIZE)

# Main program: iterate over different datasets
for dataset in datasets:
    print('\nrunning on ', model_st + '_' + dataset)
    processed_data_file_train = 'data/processed/' + dataset + '_train.pt'
    processed_data_file_test = 'data/processed/' + dataset + '_test.pt'
    processed_data_file_val = 'data/processed/' + dataset + '_val.pt'
    # FIX: the validation file was loaded below but never existence-checked,
    # so a missing *_val.pt crashed instead of printing the hint.
    if not (os.path.isfile(processed_data_file_train)
            and os.path.isfile(processed_data_file_test)
            and os.path.isfile(processed_data_file_val)):
        print('please run create_data.py to prepare data in pytorch format!')
    else:
        train_data = TestbedDataset(root='data', dataset=dataset + '_train')
        test_data = TestbedDataset(root='data', dataset=dataset + '_test')
        valid_data = TestbedDataset(root='data', dataset=dataset + '_val')

        # Wrap the splits in torch_geometric DataLoaders for mini-batching;
        # only the training split is shuffled.
        train_loader = DataLoader(train_data, batch_size=TRAIN_BATCH_SIZE, shuffle=True)
        valid_loader = DataLoader(valid_data, batch_size=TEST_BATCH_SIZE, shuffle=False)
        test_loader = DataLoader(test_data, batch_size=TEST_BATCH_SIZE, shuffle=False)

        # Build the model on the selected device; all subsequent tensor ops
        # run there (GPU if available, otherwise CPU).
        device = torch.device(cuda_name)
        print("test_train_v_145:", cuda_name)
        model = modeling().to(device)

        # Use DataParallel for multi-GPU training.
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)

        # Binary cross-entropy; torch.nn losses average over the mini-batch.
        loss_fn = nn.BCELoss()

        optimizer = torch.optim.Adam(model.parameters(), lr=LR)

        # Track the best validation loss and corresponding test metrics.
        # FIX: use +inf instead of the magic sentinel 1000 so the first epoch
        # always saves a checkpoint even if its validation loss exceeds 1000.
        best_loss = float('inf')
        best_test_loss = float('inf')
        best_test_acc = 0
        best_epoch = -1
        best_test_auc = 0

        # Per-epoch losses, kept for plotting the loss curves at the end.
        history_loss = []
        history_val_loss = []

        model_file_name = 'model_' + model_st + '_' + dataset + '.model'
        result_file_name = 'result_' + model_st + '_' + dataset + '.csv'
        result_testpred_name = 'pred_' + model_st + '_' + dataset + '.csv'

        for epoch in range(NUM_EPOCHS):
            e_loss = train(model, device, train_loader, optimizer, epoch + 1)
            history_loss.append(e_loss)

            print('predicting for valid data')
            G, P = predicting(model, device, valid_loader)
            # If validation loss improved, re-evaluate on the test set and
            # overwrite the saved model/results; otherwise just report the
            # previously best test metrics.
            val = binary_crossentropy(G, P)
            history_val_loss.append(val)
            if val < best_loss:
                best_loss = val
                best_epoch = epoch + 1
                # Save the (unwrapped) weights whenever validation improves,
                # overwriting the previous best checkpoint of the same name.
                if isinstance(model, nn.DataParallel):
                    torch.save(model.module.state_dict(), model_file_name)
                else:
                    torch.save(model.state_dict(), model_file_name)
                print('predicting for test data')
                G, P = predicting(model, device, test_loader)

                ret = [binary_crossentropy(G, P), accuracy(G, P), precision(G, P),
                       recall(G, P), mcc(G, P), auROC(G, P), auPR(G, P)]

                with open(result_file_name, 'w') as f:
                    f.write('loss,accuracy,precision,recall,mcc,auROC,auPR\n')
                    f.write(','.join(map(str, ret)))

                filen_curve = model_st + '_' + dataset
                paintRoc(G, P, filen_curve)
                paintPR(G, P, filen_curve)

                # Also dump the raw test-set labels/predictions as CSV.
                with open(result_testpred_name, 'w') as f:
                    f.write('y_true,y_pred\n')
                    for i in range(len(G)):
                        f.write(','.join(map(str, [G[i], P[i]])))
                        f.write('\n')

                best_test_loss = ret[0]
                best_test_acc = ret[1]
                best_test_auc = ret[-2]
                print('loss improved at epoch ', best_epoch, '; best_test_loss=', best_test_loss, 'best_test_acc=',
                      best_test_acc, 'best_test_auc=', best_test_auc, model_st, dataset)
            else:
                print('No improvement since epoch ', best_epoch, '; best_test_loss=', best_test_loss, 'best_test_acc=',
                      best_test_acc, 'best_test_auc=', best_test_auc, model_st, dataset)

        foldind = model_st + '_' + dataset
        plotLoss(history_loss, history_val_loss, foldind)

