from torch.utils.data import DataLoader, Subset
import torch
from torch import nn
import argparse
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import copy

from mydataset import MyDataset
from mymodel import *


# Command-line configuration. Defaults reproduce the original experiment setup.
parser = argparse.ArgumentParser()
parser.add_argument('--data-file', type=str,
                    default='data/intersected.csv', help='data file')
parser.add_argument('--feature-start', type=int, default=4, help='特征的起始列数')
parser.add_argument('--feature-end', type=int,
                    default=1266, help='特征的终止列数，注意区间左闭右开')
parser.add_argument('--label-name', type=str,
                    default='PathTstage', help='label name')
parser.add_argument('--label-class-num', type=int,
                    default=7, help='标签中的分类类别数。比如如果标签范围是0-6，那么类别数就是7。')
parser.add_argument('--model-name', type=str,
                    default='gtvt', help='model name')

parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--epochs', type=int, default=100, help='epochs')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--device', type=str, default=('cuda:1' if torch.cuda.is_available() else 'cpu'), help='device')
parser.add_argument('--num-workers', type=int, default=4, help='DataLoader num workers')
# Bug fix: `type=bool` converts ANY non-empty string (including "False") to
# True, so `--kf False` silently enabled k-fold. A store_true flag keeps the
# False default and enables k-fold with a bare `--kf`.
parser.add_argument('--kf', action='store_true', help='是否使用k折交叉验证')
args = parser.parse_args()

# 特征从哪一行开始
# Unpack parsed arguments into module-level config used throughout the script.
data_file = args.data_file
# First feature column (original comment said "row", but the --feature-start
# help text says column — the latter matches the name; TODO confirm).
features_start = args.feature_start
# One past the last feature column (half-open interval, per --feature-end help).
features_end = args.feature_end
label_name = args.label_name
label_class_num = args.label_class_num
model_name = args.model_name

batch_size = args.batch_size
epochs = args.epochs
lr = args.lr
device = torch.device(args.device)
num_workers = args.num_workers
kf = args.kf  # whether to use k-fold cross-validation instead of a single split


# MLP classifier: input width = number of feature columns, output width =
# number of classes, with hidden layers 512/256/128.
# (MyMlp is project-local; signature inferred from this call — TODO confirm.)
mymodel = MyMlp(features_end - features_start, label_class_num, [512, 256, 128])
mymodel.to(device)


# 初始化权重
def init_weights_norm(model):
    """Re-initialize every parameter of *model* in place from N(0, 0.01)."""
    for p in model.parameters():
        nn.init.normal_(p, mean=0, std=0.01)


def init_weights_xavier(m):
    """Xavier-initialize Linear layers: weights xavier-normal, biases zero.

    Intended for ``model.apply(init_weights_xavier)``; modules other than
    ``nn.Linear`` are left untouched.
    """
    # isinstance (not `type(m) ==`) is the idiomatic type check and also
    # covers nn.Linear subclasses.
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        # Guard: Linear(bias=False) has bias=None; zeroing it would crash.
        if m.bias is not None:
            nn.init.zeros_(m.bias)


# Apply Xavier initialization to all Linear layers of the model.
mymodel.apply(init_weights_xavier)


loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(mymodel.parameters(), lr=lr)
# optimizer = torch.optim.SGD(mymodel.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)


# Per-epoch history of average loss/accuracy (one entry appended per epoch).
train_losses = []
train_acces = []
eval_losses = []
eval_acces = []

# Confusion-matrix buffers for the CURRENT (i.e. last finished) epoch only;
# cleared at the start of each epoch by the epoch functions.
train_y_pred_list = []
train_y_list = []
validate_y_pred_list = []
validate_y_list = []

# Snapshots of the buffers above taken at the best-validation-accuracy epoch.
train_y_pred_list_best = []
train_y_list_best = []
validate_y_pred_list_best = []
validate_y_list_best = []

# Best validation accuracy seen so far and the epoch that achieved it.
validate_avg_acc_best = -1.0
validate_avg_acc_best_epoch = None


def train_one_epoch(mymodel, epoch, mydataloader_train):
    """Train *mymodel* for one epoch over *mydataloader_train*.

    Side effects: appends the epoch's average loss/accuracy to the
    module-level ``train_losses``/``train_acces``, refreshes the
    confusion-matrix buffers ``train_y_pred_list``/``train_y_list`` (current
    epoch only), and saves the latest weights to
    ``output/model/{model_name}_latest.pth``.
    """
    train_loss = 0.0
    train_acc = 0.0
    mymodel = mymodel.train()
    # Keep only the current epoch's predictions/labels for the confusion matrix.
    global train_y_pred_list, train_y_list
    train_y_pred_list.clear()
    train_y_list.clear()
    for x, y in mydataloader_train:
        x, y = x.to(device), y.to(device)

        # forward
        y_pred = mymodel(x)
        loss = loss_fn(y_pred, y)

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # record loss
        train_loss += loss.item()
        # record classification accuracy
        _, pred = y_pred.max(1)

        # Bug fix: the old `.squeeze()` turned a size-1 batch into a 0-d array,
        # which list.extend() cannot iterate; view(-1) keeps it 1-D.
        train_y_pred_list.extend(pred.detach().cpu().view(-1).tolist())
        train_y_list.extend(y.detach().cpu().view(-1).tolist())

        num_correct = (pred == y).sum().item()
        train_acc += num_correct / x.shape[0]

    if epoch % 10 == 0:
        # Format fix: the original `:4f` is field-width 4 (precision 6);
        # `.4f` matches the validation log.
        print(
            f'epoch: {epoch}, train_loss: {train_loss / len(mydataloader_train):.4f}, acc: {train_acc / len(mydataloader_train):.4f}')

    train_losses.append(train_loss / len(mydataloader_train))
    train_acces.append(train_acc / len(mydataloader_train))
    torch.save(mymodel.state_dict(), f'output/model/{model_name}_latest.pth')


def validate_one_epoch(mymodel, epoch, mydataloader_validate):
    """Evaluate *mymodel* for one epoch over *mydataloader_validate*.

    Side effects: appends the epoch's average loss/accuracy to
    ``eval_losses``/``eval_acces``, refreshes the confusion-matrix buffers
    ``validate_y_pred_list``/``validate_y_list``, and — when this epoch sets a
    new best validation accuracy — snapshots the train/validate buffers into
    the ``*_best`` lists and saves the weights to
    ``output/model/{model_name}_best.pth``.
    """
    mymodel = mymodel.eval()
    eval_loss = 0.0
    eval_acc = 0.0
    # Keep only the current epoch's predictions/labels for the confusion matrix.
    global validate_y_pred_list, validate_y_list
    validate_y_pred_list.clear()
    validate_y_list.clear()
    with torch.no_grad():
        for x, y in mydataloader_validate:
            x, y = x.to(device), y.to(device)
            y_pred = mymodel(x)
            eval_loss += loss_fn(y_pred, y).item()

            _, pred = y_pred.max(1)

            # Bug fix: the old `.squeeze()` turned a size-1 batch into a 0-d
            # array, which list.extend() cannot iterate; view(-1) stays 1-D.
            validate_y_pred_list.extend(pred.detach().cpu().view(-1).tolist())
            validate_y_list.extend(y.detach().cpu().view(-1).tolist())

            eval_acc += (pred == y).sum().item() / x.shape[0]

    avg_loss = eval_loss / len(mydataloader_validate)
    avg_acc = eval_acc / len(mydataloader_validate)

    if epoch % 10 == 0:
        print(f'epoch: {epoch}: val_loss: {avg_loss:.4f}, acc: {avg_acc:.4f}')

    eval_losses.append(avg_loss)
    eval_acces.append(avg_acc)

    # Track the best validation accuracy seen so far.
    global validate_avg_acc_best, validate_avg_acc_best_epoch
    if avg_acc > validate_avg_acc_best:
        validate_avg_acc_best = avg_acc
        validate_avg_acc_best_epoch = epoch

        # Snapshot the current-epoch confusion-matrix buffers; deepcopy so
        # later clear() calls don't wipe the saved snapshots.
        global validate_y_pred_list_best, validate_y_list_best, train_y_pred_list_best, train_y_list_best
        train_y_pred_list_best = copy.deepcopy(train_y_pred_list)
        train_y_list_best = copy.deepcopy(train_y_list)
        validate_y_pred_list_best = copy.deepcopy(validate_y_pred_list)
        validate_y_list_best = copy.deepcopy(validate_y_list)

        torch.save(mymodel.state_dict(), f'output/model/{model_name}_best.pth')


def train_k_fold(train_one_epoch, validate_one_epoch):
    """Train with 10-fold cross-validation.

    Uses the module-level ``mymodel``/``optimizer`` for every fold, so model
    state carries over between folds (weights are NOT re-initialized per
    fold — TODO confirm whether that is intended).

    Args:
        train_one_epoch: callable ``(model, epoch, dataloader)`` that trains
            one epoch.
        validate_one_epoch: callable ``(model, epoch, dataloader)`` that
            evaluates one epoch.
    """
    # Renamed from `kf`: the original local shadowed the module-level `kf`
    # flag that selects k-fold mode.
    splitter = KFold(n_splits=10, shuffle=True, random_state=43)
    mydataset = MyDataset(data_file, features_start=features_start,
                          features_end=features_end, label_name=label_name)
    for fold_idx, (train_idx, validate_idx) in enumerate(splitter.split(mydataset)):
        mydataset_train = Subset(mydataset, train_idx)
        mydataset_validate = Subset(mydataset, validate_idx)

        mydataloader_train = DataLoader(
            mydataset_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        mydataloader_validate = DataLoader(
            mydataset_validate, batch_size=batch_size, shuffle=True, num_workers=num_workers)

        # Print the fold banner once per fold, not once per epoch.
        print(f'=== fold {fold_idx} ===')
        for epoch in range(epochs):
            # Bug fix: the epoch functions take the model as their first
            # argument; the original call omitted it and would raise a
            # TypeError. Epochs are 1-based to match train_common.
            train_one_epoch(mymodel, epoch + 1, mydataloader_train)
            validate_one_epoch(mymodel, epoch + 1, mydataloader_validate)


def _plot_curves():
    """Redraw and save the train/validate loss & accuracy curves to output/fig/acc.png."""
    plt.figure()

    ax1 = plt.subplot(2, 2, 1)
    ax1.plot(train_losses)
    ax1.set_title('Train Loss', fontsize=10, color='r')
    ax2 = plt.subplot(2, 2, 2)
    ax2.plot(train_acces)
    ax2.set_title('Train Acc', fontsize=10, color='b')
    ax3 = plt.subplot(2, 2, 3)
    ax3.plot(eval_losses)
    ax3.set_title('Validate Loss', fontsize=10, color='r')
    ax4 = plt.subplot(2, 2, 4)
    ax4.plot(eval_acces)
    ax4.set_title('Validate Acc', fontsize=10, color='b')
    plt.subplots_adjust(hspace=0.3)
    plt.savefig('output/fig/acc.png')
    # Close to avoid accumulating one open figure per epoch.
    plt.close('all')


def train_common(mymodel, train_one_epoch, validate_one_epoch):
    """Train without cross-validation, using a stratified 80/20 split.

    After training, prints confusion matrices for the final epoch and for the
    best-validation-accuracy epoch, plus the best accuracy itself.

    Args:
        mymodel: the model to train (moved to ``device`` by the caller).
        train_one_epoch: callable ``(model, epoch, dataloader)``.
        validate_one_epoch: callable ``(model, epoch, dataloader)``.
    """
    # Build the dataset and split it with stratified sampling so each class
    # keeps the same proportion in train and validation sets.
    mydataset = MyDataset(data_file, features_start=features_start,
                          features_end=features_end, label_name=label_name)
    train_indice, validate_indice, _, _ = train_test_split(
        range(len(mydataset)), mydataset.get_targets(),
        stratify=mydataset.get_targets(), train_size=0.8)
    mydataset_train = Subset(mydataset, train_indice)
    mydataset_validate = Subset(mydataset, validate_indice)
    mydataloader_train = DataLoader(
        mydataset_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    mydataloader_validate = DataLoader(
        mydataset_validate, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    # Training loop; curves are re-plotted each epoch for live monitoring.
    for epoch in range(epochs):
        train_one_epoch(mymodel, epoch + 1, mydataloader_train)
        validate_one_epoch(mymodel, epoch + 1, mydataloader_validate)
        _plot_curves()

    print('训练时混淆矩阵(最后一个epoch)：')
    print(confusion_matrix(train_y_list, train_y_pred_list))
    print('验证时混淆矩阵(最后一个epoch)：')
    print(confusion_matrix(validate_y_list, validate_y_pred_list))
    print('训练时混淆矩阵(验证精度最高的epoch)：')
    print(confusion_matrix(train_y_list_best, train_y_pred_list_best))
    print('验证时混淆矩阵(验证精度最高的epoch)：')
    print(confusion_matrix(validate_y_list_best, validate_y_pred_list_best))
    # Format fix: the original `:4f` is field-width 4 (precision 6), not
    # 4 decimal places.
    print(f'最高验证精确度：{validate_avg_acc_best:.4f}，对应epoch为{validate_avg_acc_best_epoch}')


def train(mymodel, train_one_epoch, validate_one_epoch):
    """Dispatch to k-fold or single-split training based on the `kf` flag."""
    if kf:
        return train_k_fold(train_one_epoch, validate_one_epoch)
    return train_common(mymodel, train_one_epoch, validate_one_epoch)


if __name__ == '__main__':
    # Script entry point: train with either a stratified single split or
    # 10-fold cross-validation, depending on the --kf flag.
    train(mymodel, train_one_epoch, validate_one_epoch)