# Standard library
import logging
import os
import shutil

# Third-party
import matplotlib
matplotlib.use('agg')  # select a non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import optim  # fixes NameError: main() uses optim.SGD but optim was never imported
from torch.utils.data import DataLoader
from torchvision.transforms import transforms, InterpolationMode

# Project-local
from autoaugment import AutoAugImageNetPolicy
from center_loss import CenterLoss
from config_hyperparam import cfg
from dataset import CustomImageDataset
from model import swin_base_patch4_window12_384_in22k as Model
from scheduler import WarmupCosineSchedule
from utils import train_one_epoch, evaluate, test

# Basic logging configuration: INFO level, timestamped format,
# written both to training.log and to the console.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    handlers=[logging.FileHandler('training.log'),  # persist log to file
                              logging.StreamHandler()])  # also echo to the console


def backup_config_file():
    """Copy the hyper-parameter config file into the checkpoint directory.

    Failures are logged rather than raised so that training can proceed even
    when the backup cannot be made.
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # `os.path.exists` / `os.makedirs` pair.
        os.makedirs(cfg.check_path, exist_ok=True)
        shutil.copy2("config_hyperparam.py", cfg.check_path)
        logging.info("配置文件备份成功")
    except shutil.Error as e:
        logging.error("配置文件备份失败，shutil.Error: {}".format(e))
    except OSError as e:
        logging.error("配置文件备份失败，OSError: {}".format(e))


def load_model_weights(model, cfg):
    """Load pretrained weights into *model* from ``cfg.pth`` (if set).

    Keys containing ``'head'`` are dropped so a checkpoint trained with a
    different number of classes can still be loaded (``strict=False``).
    Any load failure is logged and the model is returned unchanged.

    :param model: torch.nn.Module to load weights into (mutated in place)
    :param cfg: config object; only ``cfg.pth`` is read ('' means skip loading)
    :return: the same ``model`` instance
    """
    if cfg.pth != "":
        try:
            if not os.path.exists(cfg.pth):
                # Raise instead of `assert` so the handler below actually runs:
                # the original assert raised AssertionError, which no except
                # clause caught (and asserts are stripped under `python -O`).
                raise FileNotFoundError("weights file: '{}' not exist.".format(cfg.pth))
            # map_location='cpu' lets the checkpoint load on machines without
            # the GPU it was saved from; the model is moved to the target
            # device by the caller afterwards.
            weights_dict = torch.load(cfg.pth, map_location="cpu")['model']
            weights_dict = {k: v for k, v in weights_dict.items() if 'head' not in k}
            model.load_state_dict(weights_dict, strict=False)
            logging.info("模型权重文件加载成功")
        except FileNotFoundError:
            logging.error("模型权重文件不存在，请检查文件路径")
        except KeyError as e:
            logging.error("在权重字典中找不到对应的键，KeyError: {}".format(e))
        except RuntimeError as e:
            logging.error("加载模型权重时出现运行时错误，RuntimeError: {}".format(e))
    return model


def plot_and_save_curve(data_list, x_label, y_label, title, save_path):
    """Plot a single curve and save the figure to disk.

    :param data_list: sequence of y-values (x is the index, i.e. the epoch)
    :param x_label: x-axis label
    :param y_label: y-axis label
    :param title: figure title (also used as the legend label)
    :param save_path: output image path
    """
    plt.figure()
    # Label the curve so plt.legend() has an artist to display; the original
    # called legend() with no labelled artists, producing a warning and an
    # empty legend.
    plt.plot(data_list, label=title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend()
    plt.savefig(save_path)
    plt.close()


def main():
    """Train the Swin-Transformer model on the custom dataset: evaluate after
    every epoch, checkpoint the best model, and plot loss/accuracy/lr curves.
    """
    backup_config_file()

    # ImageNet per-channel normalization statistics (mean / std, RGB order).
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]

    train_transform = transforms.Compose([
        transforms.Resize((256, 256), InterpolationMode.BILINEAR),
        transforms.RandomCrop((cfg.resize, cfg.resize)),
        transforms.RandomHorizontalFlip(),
        AutoAugImageNetPolicy(),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_mean, imagenet_std)
    ])

    test_transform = transforms.Compose([
        transforms.Resize((256, 256), InterpolationMode.BILINEAR),
        transforms.CenterCrop((cfg.resize, cfg.resize)),
        transforms.ToTensor(),
        # Fixed: the original passed a 3-element mean with a wrong third value
        # (0.229 instead of 0.406) and only a 2-element std, which fails at
        # runtime and would not match the training normalization anyway.
        transforms.Normalize(imagenet_mean, imagenet_std)
    ])

    train_dataset = CustomImageDataset(root_dir=cfg.dataset_path, transform=train_transform, train=True)
    test_dataset = CustomImageDataset(root_dir=cfg.dataset_path, transform=test_transform, train=False)

    nw = min([os.cpu_count(), cfg.batch_size if cfg.batch_size > 1 else 0, 8])
    print("Using {} num_workers for dataloader".format(nw))

    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True,
                              num_workers=nw)
    # Evaluation loaders: no shuffling and keep the last partial batch.
    # The original used shuffle=True / drop_last=True here, which silently
    # skipped up to batch_size-1 samples and made the reported accuracy
    # depend on iteration order.
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.batch_size,
                             shuffle=False,
                             pin_memory=True,
                             drop_last=False,
                             num_workers=nw)
    predict_loader = DataLoader(test_dataset,
                                batch_size=cfg.batch_size,
                                shuffle=False,
                                pin_memory=True,
                                drop_last=False,
                                num_workers=nw)

    model = Model()
    model = load_model_weights(model, cfg)
    model = model.to(cfg.device)
    # For multi-GPU training, wrap with:
    # model = nn.DataParallel(model, device_ids=cfg.gpu_idx)

    num_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of parameters: {:.2f} M".format(num_parameters / 1000000))

    # Optionally freeze everything except the classification head ("fc").
    if cfg.freeze:
        for name, value in model.named_parameters():
            value.requires_grad = "fc" in name

    pg = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.SGD(pg, lr=cfg.lr, momentum=0.9, weight_decay=0.001)
    scheduler = WarmupCosineSchedule(optimizer, warmup_steps=cfg.warmup, t_total=cfg.epochs)
    centerloss = CenterLoss(cfg.num_class, 1024).to(cfg.device)
    optimzer4center = optim.SGD(centerloss.parameters(), lr=0.05)

    # ------------------------------------------ training ------------------------------------------#
    start_epoch = 0
    # Initialize BEFORE the resume block: the original reset best_acc to 0
    # *after* resuming, discarding the best accuracy restored from the
    # checkpoint and causing the first post-resume epoch to always overwrite
    # the saved best model.
    best_acc = 0
    if cfg.resume:
        try:
            if not os.path.isfile(cfg.best_model_path):
                # Raise instead of `assert` so the handler below catches a
                # missing checkpoint (the original assert raised an uncaught
                # AssertionError and is stripped under `python -O`).
                raise FileNotFoundError(cfg.best_model_path)
            checkpoint = torch.load(cfg.best_model_path)
            best_acc = checkpoint["best_acc"]
            start_epoch = checkpoint["epoch"]
            model.load_state_dict(checkpoint["model"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            logging.info("成功加载检查点，当前起始 epoch 为 {}".format(start_epoch))
            logging.info("目前最佳准确率为 {}".format(best_acc))
        except FileNotFoundError:
            logging.error("检查点文件不存在，请检查文件路径")
        except KeyError as e:
            logging.error("在检查点文件中找不到对应的键，KeyError: {}".format(e))
        except RuntimeError as e:
            logging.error("加载检查点时出现运行时错误，RuntimeError: {}".format(e))

    train_loss_list = []
    test_loss_list = []
    train_acc_list = []
    test_acc_list = []
    lr_list = []

    for epoch in range(start_epoch, cfg.epochs):
        # Train one epoch; on failure log the error and move to the next epoch.
        try:
            train_loss, train_acc = train_one_epoch(model=model, optimizer=optimizer, data_loader=train_loader,
                                                    device=cfg.device, epoch=epoch, center_loss=centerloss,
                                                    optimzer4center=optimzer4center)
            train_loss_list.append(train_loss)
            train_acc_list.append(train_acc)
            logging.info("Epoch {} 训练完成，训练损失为 {}，训练准确率为 {}".format(epoch, train_loss, train_acc))
        except Exception as e:
            logging.error("Epoch {} 训练过程出现错误，错误信息为: {}".format(epoch, e))
            continue

        scheduler.step()

        # Evaluate on the held-out split; on failure skip checkpointing/plots.
        try:
            test_loss, test_acc = evaluate(model=model, data_loader=test_loader, device=cfg.device, epoch=epoch,
                                           center_loss=centerloss)
            test_loss_list.append(test_loss)
            test_acc_list.append(test_acc)
            logging.info("Epoch {} 测试完成，测试损失为 {}，测试准确率为 {}".format(epoch, test_loss, test_acc))
        except Exception as e:
            logging.error("Epoch {} 测试过程出现错误，错误信息为: {}".format(epoch, e))
            continue

        lr_list.append(optimizer.param_groups[0]["lr"])

        # NOTE(review): the train and test curves for loss (and likewise for
        # accuracy) are written to the same path, so the second call
        # overwrites the first each epoch — confirm whether cfg.result_loss /
        # cfg.result_acc were meant to be per-split paths.
        plot_and_save_curve(train_loss_list, 'Epoch', 'Loss', 'Train Loss', cfg.result_loss)
        plot_and_save_curve(test_loss_list, 'Epoch', 'Loss', 'Test Loss', cfg.result_loss)
        plot_and_save_curve(train_acc_list, 'Epoch', 'Accuracy', 'Train Acc', cfg.result_acc)
        plot_and_save_curve(test_acc_list, 'Epoch', 'Accuracy', 'Test Acc', cfg.result_acc)
        plot_and_save_curve(lr_list, 'Epoch', 'Learning Rate', 'LR', cfg.result_lr)

        # Checkpoint whenever the test accuracy improves.
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        checkpoint = {
            "best_acc": best_acc,
            "epoch": epoch + 1,
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
        }

        if is_best:
            try:
                torch.save(checkpoint, cfg.best_model_path)
                logging.info("保存新的最佳模型，当前最佳准确率为 {}".format(best_acc))
            except Exception as e:
                logging.error("保存最佳模型时出现错误，错误信息为: {}".format(e))

    # Final pass over the test split with the last model state.
    test(model=model, data_loader=predict_loader, device=cfg.device)


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()