import numpy as np
import pandas as pd
import sys, os
from random import shuffle
import torch
import torch.nn as nn
import torch.functional as F
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *


"""
python training_validation.py 0 0 0
['E10_n1_s1','E20_n1_s1','GPCR10_n1_s1','GPCR20_n1_s1','IC10_n1_s1','IC20_n1_s1','NR10_n1_s1','NR_full_n1_s1']
the first argument is for the index of the datasets, 0/1 for 'davis' or 'kiba', respectively;

the second argument is for the index of the models, 0/1/2/3 for GINConvNet, GATNet, GAT_GCN, or GCNNet, respectively; 
and the third argument is for the index of the cuda, 0/1 for 'cuda:0' or 'cuda:1', respectively. 
目前第3位只能写0
"""

# training function at each epoch
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and return the loss of the last batch.

    Args:
        model: network being trained (already moved to `device` by the caller).
        device: torch.device each batch is moved to.
        train_loader: DataLoader yielding graph batches with a `.y` label field.
        optimizer: optimizer updating `model.parameters()`.
        epoch: 1-based epoch number, used only for the progress log line.

    Returns:
        float: loss of the final batch, or 0.0 if the loader was empty.

    NOTE(review): relies on the module-level globals `loss_fn` and
    `LOG_INTERVAL` defined later in this script.
    """
    print('Training on {} samples...'.format(len(train_loader.dataset)))
    # Enable training-mode behaviour (affects Dropout, BatchNorm, ...).
    model.train()
    # Fix: previously this was only assigned inside the loop, raising
    # UnboundLocalError on an empty loader.
    last_loss = 0.0
    for batch_idx, data in enumerate(train_loader):
        data = data.to(device)
        # Clear gradients accumulated by the previous step.
        optimizer.zero_grad()
        output = model(data)
        # CrossEntropyLoss expects raw logits and a 1-D tensor of integer
        # class indices, hence the squeeze + long cast.
        loss = loss_fn(output, data.y.view(-1, 1).squeeze().long().to(device))
        loss.backward()
        optimizer.step()
        if batch_idx % LOG_INTERVAL == 0:
            # Fix: the progress count used len(data.x) (number of graph
            # *nodes* in the batch), which far overshot the dataset size;
            # count processed samples via the loader's batch size instead.
            seen = batch_idx * train_loader.batch_size
            print('Train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                seen,
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item()))
        last_loss = loss.item()
    return last_loss

def predicting(model, device, loader):
    """Collect model predictions and ground-truth labels over a loader.

    Puts the model into evaluation mode (freezing Dropout/BatchNorm) and,
    with gradient tracking disabled, runs every batch through the model,
    accumulating results on the CPU.

    Returns:
        (total_labels, total_preds): two CPU float tensors; labels are
        reshaped to a column vector, predictions keep the model's output
        shape.
    """
    model.eval()
    # Accumulators start as empty float tensors and grow batch by batch.
    preds_acc = torch.Tensor()
    labels_acc = torch.Tensor()
    print('Make prediction for {} samples...'.format(len(loader.dataset)))
    # no_grad: inference only, skip autograd bookkeeping.
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)
            out = model(batch)
            # Copy results to CPU before concatenating.
            preds_acc = torch.cat((preds_acc, out.cpu()), 0)
            labels_acc = torch.cat((labels_acc, batch.y.view(-1, 1).cpu()), 0)
    return labels_acc, preds_acc

# ['Enzyme_under_n2_1','Enzyme_under_n10_1','Enzyme_under_n20_1']
# ['2017used_under_n2_1','2017used_under_n10_1','2017used_under_n20_1']
# ['GPCR_under_n2_1','GPCR_under_n10_1','GPCR_under_n20_1']
# ['IonChannel_under_n2_1','IonChannel_under_n10_1','IonChannel_under_n20_1']
# ['NR_full_n1_s1','NuclearReceptor_under_n2_1','NuclearReceptor_under_n10_1','NuclearReceptor_full_1']
# Command-line selection of dataset, model, and CUDA device.
_DATASET_NAMES = ['2017used_under_n2_1', '2017used_under_n10_1', '2017used_under_n20_1']
datasets = [_DATASET_NAMES[int(sys.argv[1])]]

_MODEL_CLASSES = (GINConvNet, GATNet, GAT_GCN, GCNNet)
modeling = _MODEL_CLASSES[int(sys.argv[2])]
model_st = modeling.__name__

# Default to cuda:0; an optional third argument can select cuda:1.
cuda_name = "cuda:0"
if len(sys.argv) > 3:
    cuda_name = ("cuda:0", "cuda:1")[int(sys.argv[3])]
print('cuda_name:', cuda_name)

# Hyperparameters.
TRAIN_BATCH_SIZE = 512
TEST_BATCH_SIZE = 32
LR = 0.0005
LOG_INTERVAL = 20
NUM_EPOCHS = 50

print('Learning rate: ', LR)
print('Epochs: ', NUM_EPOCHS)
print('Train batch size:', TRAIN_BATCH_SIZE)
print('Test batch size:', TEST_BATCH_SIZE)

# Main program: iterate over different datasets
for dataset in datasets:
    print('\nrunning on ', model_st + '_' + dataset)
    processed_data_file_train = 'data/processed/' + dataset + '_train.pt'
    processed_data_file_test = 'data/processed/' + dataset + '_test.pt'
    processed_data_file_val = 'data/processed/' + dataset + '_val.pt'
    if ((not os.path.isfile(processed_data_file_train)) or (not os.path.isfile(processed_data_file_test))):
        print('please run create_data.py to prepare data in pytorch format!')
    else:
        # 读取pt文件
        train_data = TestbedDataset(root='data', dataset=dataset + '_train')
        test_data = TestbedDataset(root='data', dataset=dataset + '_test')
        valid_data = TestbedDataset(root='data', dataset=dataset + '_val')

        # make data PyTorch mini-batch processing ready
        """
        用来定义批处理（batch size）大小
        DataLoader(dataset, batch_size=1, shuffle=False, follow_batch=[], **kwargs)
        Data loader which merges(合并) data objects from a torch_geometric.data.dataset to a mini-batch.
        dataset (Dataset) – The dataset from which to load the data.
        batch_size (int, optional) – How many samples per batch to load. (default: 1)
        """
        train_loader = DataLoader(train_data, batch_size=TRAIN_BATCH_SIZE, shuffle=True)
        valid_loader = DataLoader(valid_data, batch_size=TEST_BATCH_SIZE, shuffle=False)
        test_loader = DataLoader(test_data, batch_size=TEST_BATCH_SIZE, shuffle=False)

        # training the model
        """
        将所有最开始读取数据时的tensor变量copy一份到device所指定的GPU上去，之后的运算都在GPU上进行。
        否则在cpu上
        """
        device = torch.device(cuda_name if torch.cuda.is_available() else "cpu")
        print("test_train_v_145:", cuda_name)
        model = modeling().to(device)

        # 定义损失函数。torch.nn的Loss函数计算出来的结果已经对mini-batch取了平均。
        # loss_fn = nn.MSELoss()
        """keras中对loss的默认参数与pytorch的相同"""
        loss_fn = nn.CrossEntropyLoss()
        """
        class torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
        params (iterable) – 待优化参数的iterable或者是定义了参数组的dict
        """
        optimizer = torch.optim.Adam(model.parameters(), lr=LR)
        """
        控制epoch和保存训练中的最优model
        """
        best_loss = 1000
        best_test_loss = 1000
        best_test_acc = 0
        best_epoch = -1

        best_test_auc = 0
        """绘制loss曲线"""
        history_loss = []
        history_val_loss = []


        model_file_name = 'model_' + model_st + '_' + dataset + '.model'
        result_file_name = 'result_' + model_st + '_' + dataset + '.csv'
        for epoch in range(NUM_EPOCHS):
            e_loss=train(model, device, train_loader, optimizer, epoch + 1)
            history_loss.append(e_loss)

            print('predicting for valid data')
            G, P = predicting(model, device, valid_loader)
            """
            如果loss在val上有提升，就计算test，输出test结果，反之只输出上一次的test
            """
            val = crossentropy(G, P)
            history_val_loss.append(val)
            if val < best_loss:
                best_loss = val
                best_epoch = epoch + 1
                """
                pytorch通过torch.save和torch.load记录每epoch训练出的权重参数
                model和result.csv会在val_loss比上一个最优值下降时保存，并同名覆盖上一个
                """
                torch.save(model.state_dict(), model_file_name)
                print('predicting for test data')
                G, P = predicting(model, device, test_loader)
                """评价标准"""
                ret = [crossentropy(G, P),accuracy(G,P),precision(G,P),recall(G,P),auROC(G,P),auPR(G,P)]
                with open(result_file_name, 'w') as f:
                    f.write('loss,accuracy,precision,recall,auROC,auPR\n')
                    f.write(','.join(map(str, ret)))
                best_test_loss = ret[0]
                best_test_acc = ret[1]
                best_test_auc=ret[-2]
                print('loss improved at epoch ', best_epoch, '; best_test_loss=', best_test_loss, 'best_test_acc=',
                      best_test_acc, 'best_test_auc=', best_test_auc, model_st, dataset)
            else:
                print('No improvement since epoch ', best_epoch, '; best_test_loss=', best_test_loss, 'best_test_acc=',
                      best_test_acc, 'best_test_auc=', best_test_auc, model_st, dataset)

        foldind = model_st + '_' + dataset
        plotLoss(history_loss, history_val_loss, foldind)

