import os
from dataclasses import dataclass
from typing import Optional

import matplotlib
import tensorboardX
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

import models.barcode_model_small as models
import utils.datasets_barcode as data_utils
import utils.loss as loss_utils

# Force the non-interactive Agg backend so the script runs on headless machines.
matplotlib.use("agg")


@dataclass
class TrainParams:
    """
    Hyper-parameters for training the network.

    :param device: device used for training/validation, "cpu" or "cuda"
    :param pretrained_model_data_path: path to a checkpoint dict holding model
        parameters and training state, used to resume training; None trains
        from scratch
    :param learning_rate: optimizer learning rate
    :param patience_epochs: stop training once the validation loss has not
        improved for this many consecutive epochs (default 10)
    :param workers: number of dataloader worker processes
    :param epochs: total number of training epochs
    :param batch_size: number of images per batch
    :param train_dataset_dir: path of the training dataset
    :param validation_dataset_dir: path of the validation dataset
    :param test_datasets_dir: path of the test dataset
    :param input_size: (width, height) of images fed to the network
    :param output_size: (width, height) of the heatmaps the network outputs
    :param initial_heatmap_sigma: standard deviation of the Gaussian kernel
        used to render target heatmaps; decayed as training progresses
    :param result_save_dir: directory where trained models and loss curves
        are saved
    """

    device: str = "cuda:0" if torch.cuda.is_available() else "cpu"
    # Annotated Optional: None means "no checkpoint, train from scratch".
    pretrained_model_data_path: Optional[str] = None
    learning_rate: float = 0.002
    patience_epochs: int = 10
    workers: int = 8
    epochs: int = 8
    batch_size: int = 5
    train_dataset_dir: str = "barcode_datasets"
    validation_dataset_dir: str = "barcode_val_datasets"
    test_datasets_dir: str = "test_datasets"
    input_size: tuple = (128, 128)
    output_size: tuple = (128, 128)
    initial_heatmap_sigma: float = 4
    result_save_dir: str = "runs_barcode"


def create_dataloader(train_params: TrainParams, heatmap_sigma: float = 4) -> tuple:
    """
    Build the training and validation dataloaders from the training parameters.

    :param train_params: TrainParams instance holding all training settings
    :param heatmap_sigma: standard deviation of the Gaussian kernel used to
        render the target heatmaps (default 4)
    :return: (train_dataloader, val_dataloader)
    """
    # Both datasets convert images to tensors the same way.
    to_tensor = transforms.ToTensor()

    def _make_dataset(root_dir):
        # The two datasets share every setting except their root directory.
        return data_utils.BarcodeKeypointDataset(
            root_dir,
            transform=to_tensor,
            input_size=train_params.input_size,
            output_size=train_params.output_size,
            sigma=heatmap_sigma,
        )

    # Training loader: shuffled, incomplete trailing batch dropped.
    train_loader = DataLoader(
        _make_dataset(train_params.train_dataset_dir),
        batch_size=train_params.batch_size,
        shuffle=True,
        collate_fn=data_utils.collate_fn,
        num_workers=train_params.workers,
        drop_last=True,
    )

    # Validation loader: deterministic order, every sample kept.
    val_loader = DataLoader(
        _make_dataset(train_params.validation_dataset_dir),
        batch_size=train_params.batch_size,
        shuffle=False,
        collate_fn=data_utils.collate_fn,
        num_workers=train_params.workers,
    )

    return train_loader, val_loader


if __name__ == "__main__":
    train_params = TrainParams()

    # Create a fresh "runs<N>" directory under the result directory for this run.
    os.makedirs(train_params.result_save_dir, exist_ok=True)
    exist_train_directory_num = len(os.listdir(train_params.result_save_dir))
    runs_dir = os.path.join(
        train_params.result_save_dir, "runs" + str(exist_train_directory_num + 1)
    )
    os.makedirs(runs_dir, exist_ok=False)

    # TensorBoard event files live under <runs_dir>/summary.
    summary_save_dir = os.path.join(runs_dir, "summary")
    os.makedirs(summary_save_dir, exist_ok=False)
    tensorboard_writer = tensorboardX.SummaryWriter(
        log_dir=summary_save_dir, flush_secs=10
    )

    heatmap_sigma = train_params.initial_heatmap_sigma
    # Build the training and validation dataloaders.
    train_dataloader, val_dataloader = create_dataloader(
        train_params, train_params.initial_heatmap_sigma
    )
    # Total number of training samples; constant across sigma-driven rebuilds
    # because the underlying dataset does not change size.
    train_images_num = len(train_dataloader.sampler)

    # Select the device to run on.
    device = torch.device(train_params.device)

    # Initialize the model.
    model = models.GrayDemoNet2().to(device)

    # For multi-GPU training:
    # model = torch.nn.DataParallel(model).to(device)

    # Initialize the loss function.
    compute_loss = loss_utils.CustomLoss().to(device)

    # Initialize the optimizer.
    optimizer = optim.Adam(model.parameters(), lr=train_params.learning_rate)

    # Resume from a checkpoint when one is provided.
    if train_params.pretrained_model_data_path:
        checkpoint = torch.load(train_params.pretrained_model_data_path)
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        trained_epoch = checkpoint["trained_epoch"]
        best_fitness = checkpoint["best_loss"]
        print(f"Load model: {train_params.pretrained_model_data_path}")
        print(f"Trained epoch: {trained_epoch}, Best fitness: {best_fitness}")
    else:
        trained_epoch = 0
        best_fitness = float("inf")
        print("No pre-trained model path.")

    # Number of consecutive epochs without validation-loss improvement.
    epochs_without_improvement = 0

    # Epoch numbering resumes right after the last trained epoch.
    for epoch in range(trained_epoch + 1, train_params.epochs + 1):
        if epoch % 2 == 0:
            # Decay sigma every other epoch: sigma = initial * 0.8**(epoch//2),
            # rounded to 2 decimals and floored at 1.
            heatmap_sigma = round(
                train_params.initial_heatmap_sigma * 0.8 ** (epoch // 2), 2
            )
            if heatmap_sigma < 1:
                heatmap_sigma = 1

            # Rebuild the dataloaders so targets use the new sigma.
            train_dataloader, val_dataloader = create_dataloader(
                train_params, heatmap_sigma
            )

        # Switch to training mode and show progress with tqdm.
        model.train()
        tqdm_instance_train = tqdm(
            enumerate(train_dataloader),
            total=len(train_dataloader),
            desc=f"Train Epoch {epoch}/{train_params.epochs}",
        )

        # Running sum of per-sample losses over the whole epoch.
        total_train_loss = 0

        # Iterate over all training batches.
        for batch_idx, (
            images,
            heatmaps,
        ) in tqdm_instance_train:
            images, heatmaps = (
                images.to(device),
                heatmaps.to(device),
            )
            real_batch_size = images.size(0)  # actual number of images in this batch
            optimizer.zero_grad()
            outputs = model(images)

            # Angle (sin/cos) losses are only evaluated where the keypoint
            # heatmap is positive.
            mask = (heatmaps[:, 0, :, :] > 0).float()
            keypoint_loss = compute_loss(outputs[:, 0, :, :], heatmaps[:, 0, :, :])
            sin_loss = compute_loss(
                outputs[:, 1, :, :] * mask, heatmaps[:, 1, :, :] * mask
            )
            cos_loss = compute_loss(
                outputs[:, 2, :, :] * mask, heatmaps[:, 2, :, :] * mask
            )
            loss = keypoint_loss + sin_loss + cos_loss

            tensorboard_writer.add_scalar(
                "train/keypoint_loss",
                keypoint_loss.item(),
                epoch * len(train_dataloader) + batch_idx,
            )
            tensorboard_writer.add_scalar(
                "train/sin_loss",
                sin_loss.item(),
                epoch * len(train_dataloader) + batch_idx,
            )
            tensorboard_writer.add_scalar(
                "train/cos_loss",
                cos_loss.item(),
                epoch * len(train_dataloader) + batch_idx,
            )
            tensorboard_writer.add_scalar(
                "train/total_loss",
                loss.item(),
                epoch * len(train_dataloader) + batch_idx,
            )

            loss.backward()
            optimizer.step()
            total_train_loss += loss.item() * real_batch_size
            # Refresh the progress-bar title.
            tqdm_instance_train.set_description(
                f"Train Epoch {epoch}/{train_params.epochs}"
            )
        train_average_loss = total_train_loss / train_images_num
        print(f"Train  Loss: {train_average_loss}")

        # Switch to evaluation mode and compute the loss on the validation set.
        model.eval()
        with torch.no_grad():
            total_val_loss = 0
            val_images_num = len(val_dataloader.sampler)
            tqdm_instance_val = tqdm(
                enumerate(val_dataloader), total=len(val_dataloader), desc="Val"
            )
            # Iterate over all validation batches with a tqdm progress bar.
            for batch_idx, (images, heatmaps) in tqdm_instance_val:
                real_batch_size = images.size(0)  # actual number of images in this batch
                images, heatmaps = (images.to(device), heatmaps.to(device))
                outputs = model(images)

                mask = (heatmaps[:, 0, :, :] > 0).float()
                keypoint_loss = compute_loss(outputs[:, 0, :, :], heatmaps[:, 0, :, :])
                sin_loss = compute_loss(
                    outputs[:, 1, :, :] * mask, heatmaps[:, 1, :, :] * mask
                )
                cos_loss = compute_loss(
                    outputs[:, 2, :, :] * mask, heatmaps[:, 2, :, :] * mask
                )
                loss = keypoint_loss + sin_loss + cos_loss

                tensorboard_writer.add_scalar(
                    "val/keypoint_loss",
                    keypoint_loss.item(),
                    epoch * len(val_dataloader) + batch_idx,
                )
                tensorboard_writer.add_scalar(
                    "val/sin_loss",
                    sin_loss.item(),
                    epoch * len(val_dataloader) + batch_idx,
                )
                tensorboard_writer.add_scalar(
                    "val/cos_loss",
                    cos_loss.item(),
                    epoch * len(val_dataloader) + batch_idx,
                )
                tensorboard_writer.add_scalar(
                    "val/total_loss",
                    loss.item(),
                    epoch * len(val_dataloader) + batch_idx,
                )

                total_val_loss += loss.item() * real_batch_size
            val_average_loss = total_val_loss / val_images_num
            print(f"Val Loss: {val_average_loss}")
            print(">==============================================================<")

        # Track the best model by validation loss.
        fitness = val_average_loss
        if fitness < best_fitness:
            best_fitness = fitness
            # NOTE(review): saves the whole model object (not a state_dict);
            # downstream code appears to expect this format, so it is kept.
            torch.save(
                model,
                os.path.join(
                    runs_dir,
                    "best_model.pt",
                ),
            )
            epochs_without_improvement = 0
        else:
            epochs_without_improvement += 1

        # Save a resumable checkpoint every 2 epochs.
        if epoch % 2 == 0:
            model_epoch_save_path = os.path.join(
                runs_dir,
                "sigma_" + str(heatmap_sigma) + "epoch_" + str(epoch) + ".pt",
            )
            torch.save(
                {
                    "trained_epoch": epoch,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "best_loss": best_fitness,
                },
                model_epoch_save_path,
            )
        # Early stopping: abort when the validation loss has stagnated.
        if epochs_without_improvement >= train_params.patience_epochs:
            print("Stopping early due to no improvement!")
            break

    # BUGFIX: the writer used to be closed inside the epoch loop, so every
    # epoch after the first logged to a closed writer. Close it exactly once,
    # after training (including early-stopping exits) has finished.
    tensorboard_writer.close()