from data.dataset import getDataLoader, getClassesName
import pandas as pd
from models.classifier import Classifier, getConfig
import torch.nn as nn
import torch
from utils.evaluation import logger, ClassificationLogger, ClassificationEvaluator
from train import Trainer
import numpy as np


def train(model_save_dir, epochs=50):
    """Fine-tune the classifier head on CIFAR-10 and record per-epoch metrics.

    Loads a pretrained checkpoint from ``model_save_dir``, freezes the encoder,
    and trains only the ViT pooler and the classification head. After every
    epoch the model is evaluated on the validation and test splits; the
    checkpoint with the best validation F1 is saved as ``best_model.pt`` and
    the final-epoch weights as ``last_model.pt``. Train/val/test loss,
    accuracy and F1 per epoch are persisted to ``./result_record.npy``.

    Args:
        model_save_dir: Directory containing ``best_model.pt`` to resume from;
            new checkpoints are written back into the same directory.
            NOTE(review): ``best_model.pt`` is overwritten by the best epoch —
            confirm this is intended before reusing the directory.
        epochs: Number of training epochs.
    """
    # K-fold style split: folds 1-4 for training, fold 5 for validation.
    train_dataset_index = [1, 2, 3, 4]
    val_dataset_index = 5
    # Batch sizes per split.
    train_batch_size = 32
    val_batch_size = 4
    test_batch_size = 4
    train_loader = getDataLoader(train_dataIndex=train_dataset_index,
                                 is_train_dataset=True,
                                 batch_size=train_batch_size,
                                 shuffle=True)
    val_loader = getDataLoader(train_dataIndex=val_dataset_index,
                               is_train_dataset=True,
                               batch_size=val_batch_size,
                               shuffle=False)
    test_loader = getDataLoader(is_train_dataset=False,
                                batch_size=test_batch_size,
                                shuffle=False)

    # Training hyper-parameters.
    base_lr = 1e-3
    step_size = 2  # epochs between learning-rate decays

    num_classes = 10  # CIFAR-10 class count

    config = getConfig()
    model = Classifier(config=config, num_classes=num_classes)
    model.load(model_save_dir + '/best_model.pt')
    model.freeze_encoder()  # only pooler + classifier remain trainable

    # Cross-entropy loss for multi-class classification.
    loss_func = nn.CrossEntropyLoss()
    # Separate parameter groups so the pooler and the head could use
    # different learning rates (currently identical).
    params = [{
        'params':
        [p for p in model.vit.pooler.parameters() if p.requires_grad],
        'lr':
        1e-3
    }, {
        'params':
        [p for p in model.classifier.parameters() if p.requires_grad],
        'lr':
        1e-3
    }]
    optimizer = torch.optim.Adam(params=params, lr=base_lr)
    # Decay every `step_size` epochs to 0.1x the previous learning rate.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size,
                                                gamma=0.1,
                                                last_epoch=-1)

    classes_name = getClassesName()
    classes = list(range(0, 10))
    classes_to_idx = {c: classes_name[c] for c in classes}

    # Logging setup.
    logger.add("F:/学习/人工智能综合实践/ViT-cifar10/run.log")
    log_period = 10  # log every 10 steps
    my_logger = ClassificationLogger(log_period=log_period)

    # Trainer wraps the forward/backward/step cycle on the given device.
    device = torch.device("cuda:0")
    trainer = Trainer(model, loss_func, optimizer, device)

    global_step = 0  # total optimizer steps across all epochs
    f1_score_history = []  # validation F1 per epoch, used for best-model pick

    # Rows: 0-2 train loss/acc/F1, 3-5 val, 6-8 test; one column per epoch.
    # -1 marks epochs not yet reached.
    result_record = -np.ones((9, epochs))

    for epoch_now in range(1, epochs + 1):
        # ---- training pass ----
        evaluator = ClassificationEvaluator(classes, classes_to_idx)
        running_loss = 0
        for step_now, samples in enumerate(train_loader):
            lr = scheduler.get_last_lr()[0]  # current learning rate
            trainer_returned_obj = trainer.train(samples)
            evaluator.update(trainer_returned_obj)
            running_loss += trainer_returned_obj.losses
            # BUG FIX: original `step_now + 1 % 10 == 0` parsed as
            # `step_now + (1 % 10) == 0` and never fired.
            if (step_now + 1) % log_period == 0:
                print(f"loss of batch {step_now} : {running_loss/log_period/trainer_returned_obj.num:.6f}")
                running_loss = 0
            global_step += 1
            my_logger.training_log(evaluator, lr, step_now, len(train_loader),
                                   epoch_now, epochs)
        scheduler.step()  # advance the learning-rate schedule once per epoch

        result_record[0, epoch_now - 1] = evaluator.loss()
        result_record[1, epoch_now - 1] = evaluator.accuracy()
        result_record[2, epoch_now - 1] = evaluator.f1()

        # ---- validation pass ----
        evaluator = ClassificationEvaluator(classes, classes_to_idx)
        for step_now, samples in enumerate(val_loader):
            trainer_returned_obj = trainer.validate(samples)
            evaluator.update(trainer_returned_obj)
        my_logger.validation_log(evaluator, epoch_now, epochs)

        result_record[3, epoch_now - 1] = evaluator.loss()
        result_record[4, epoch_now - 1] = evaluator.accuracy()
        result_record[5, epoch_now - 1] = evaluator.f1()

        # BUG FIX: best-model selection now uses the *validation* F1 compared
        # against the best seen so far (the original compared the test F1
        # against a history that already contained the current score, so the
        # check could never pass after epoch 1).
        val_f1 = evaluator.f1()
        if not f1_score_history or val_f1 > max(f1_score_history):
            model.save(model_save_dir + r"/best_model.pt")
        f1_score_history.append(val_f1)

        # ---- test pass (monitoring only; never drives model selection) ----
        evaluator = ClassificationEvaluator(classes, classes_to_idx)
        for step_now, samples in enumerate(test_loader):
            test_returned_obj = trainer.validate(samples)
            evaluator.update(test_returned_obj)
        my_logger.validation_log(evaluator, epoch_now, epochs)

        result_record[6, epoch_now - 1] = evaluator.loss()
        result_record[7, epoch_now - 1] = evaluator.accuracy()
        result_record[8, epoch_now - 1] = evaluator.f1()

        # BUG FIX: was `elif`, which skipped the final checkpoint whenever the
        # last epoch was also the best one.
        if epoch_now == epochs:
            model.save(model_save_dir + r"/last_model.pt")
        # Saved every epoch so the record survives an interrupted run.
        # BUG FIX: np.save appends ".npy", so the old ".np" name actually
        # produced "result_record.np.npy".
        np.save('./result_record.npy', result_record)

def test(model_save_dir):
    """Evaluate a saved checkpoint on the test split and print its metrics.

    Loads ``train1234v5.pt`` from ``model_save_dir``, runs a full pass over
    the test loader, then prints the confusion matrix, the classification
    report and the mean loss.
    """
    num_classes = 10
    model = Classifier(config=getConfig(), num_classes=num_classes)
    model.load(model_save_dir + '/train1234v5.pt')

    # Map class indices to their human-readable names.
    names = getClassesName()
    class_ids = list(range(0, 10))
    id_to_name = {i: names[i] for i in class_ids}

    loader = getDataLoader(is_train_dataset=False,
                           batch_size=4,
                           shuffle=False)
    evaluator = ClassificationEvaluator(class_ids, id_to_name)

    # The trainer is used only for its validate() forward pass, so no
    # optimizer is supplied.
    device = torch.device("cuda:0")
    trainer = Trainer(model, nn.CrossEntropyLoss(), None, device)

    for batch in loader:
        evaluator.update(trainer.validate(batch))

    print(evaluator.confusion_matrix())
    print(evaluator.report())
    print(evaluator.loss())



if __name__ == '__main__':
    # Fixed seed for reproducible runs.
    torch.manual_seed(55555)
    save_dir = './saved_models'
    # train(model_save_dir=save_dir, epochs=10)
    test(save_dir)
