import glob
import os
import time
import torch
import random
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets
from torchvision.utils import save_image
from models.content_loss import *
from models.style_loss import *
from models.transformer_arch import *
from options.train_options import TrainOptions
from utils.utils import *

def main():
    """Train a fast neural style-transfer network (TransformerNet) on an
    ImageFolder dataset, logging losses to TensorBoard and periodically
    saving stylized samples and model checkpoints.

    All configuration comes from ``TrainOptions`` command-line arguments;
    the function takes no parameters and returns nothing.
    """
    opt = TrainOptions().parse()  # parse training command-line options
    # One torch.device per requested GPU id; negative ids fall back to CPU.
    devices = [torch.device(f"cuda:{i}" if i >= 0 else "cpu") for i in opt.gpu_ids]

    # Set up the workspace directory tree (make_dirs presumably creates the
    # directory if missing and returns its path — project helper).
    make_dirs(opt.checkpoints_dir)  # checkpoints root
    workspace_dir = make_dirs(f"{opt.checkpoints_dir}/{opt.name}")  # per-experiment workspace
    sample_dir = make_dirs(f"{workspace_dir}/sample")  # stylized-sample outputs
    model_dir = make_dirs(f"{workspace_dir}/model")  # saved model weights
    writer = SummaryWriter(log_dir=workspace_dir, comment=opt.name)  # TensorBoard logger

    # Training dataloader. shuffle=True fixes a defect in the original code:
    # without it the model sees images in fixed folder order every epoch.
    train_dataset = datasets.ImageFolder(opt.dataset, content_image_transform(opt.content_size))
    train_dataloader = DataLoader(train_dataset, batch_size=opt.batch_size,
                                  shuffle=True, num_workers=opt.num_workers)
    train_data_count = len(train_dataset)  # total number of training images
    iter_count_per_epoch = train_data_count // opt.batch_size  # full batches per epoch

    # Build the networks and move them to the primary device ONCE, instead of
    # calling .to(devices[0]) on every batch (a repeated no-op in the loop).
    transformer_net = data_parallel_network(TransformerNet(), devices).to(devices[0])
    features_net = features_extract_network(opt.arch, opt.content_layers, opt.style_layers)
    features_net = data_parallel_network(features_net, devices).to(devices[0])
    if opt.checkpoint_model:
        # Resume from a previous checkpoint if one was supplied.
        transformer_net.load_state_dict(data_parallel_state_dict(opt.checkpoint_model))

    # Optimizer and weighted perceptual losses.
    optimizer = torch.optim.Adam(transformer_net.parameters(), opt.lr)
    content_loss_func, style_loss_func = ContentLoss(opt.content_weight), StyleLoss(opt.style_weight)

    # Precompute the Gram matrices of the style image's features once.
    # convert("RGB") guards against grayscale/palette/RGBA style images,
    # which would otherwise break the 3-channel normalization transform.
    style_image = Image.open(opt.style_image).convert("RGB")
    style_image = style_image_transform(opt.style_size)(style_image)
    style_image = style_image.repeat(opt.batch_size, 1, 1, 1).to(devices[0])
    # features_net returns (content_features, style_features); we only need
    # the style branch here.
    style_features = [gram_matrix(feature) for feature in features_net(style_image)[1]]

    for epoch in range(opt.epoch_count, opt.num_epochs):
        print(f"# Epoch-{epoch + 1} 训练开始")
        transformer_net.train()  # enable training mode
        epoch_begin_time = time.time()  # epoch timer
        metrics = {"content": [], "style": [], "total": []}  # per-epoch running metrics
        for batch_idx, (batch_images, _) in enumerate(train_dataloader):
            optimizer.zero_grad()
            # NOTE(review): per-batch empty_cache() is expensive and normally
            # unnecessary; kept to preserve the original memory behavior.
            torch.cuda.empty_cache()

            # Forward pass: stylize the batch and extract perceptual features
            # of both the originals and the stylized outputs.
            batch_images = batch_images.to(devices[0])
            transformed_images = transformer_net(batch_images)
            images_content_features, _ = features_net(batch_images)
            transformed_content_features, transformed_style_features = features_net(transformed_images)

            # Weighted perceptual losses.
            content_loss = content_loss_func(transformed_content_features, images_content_features)
            style_loss = style_loss_func(transformed_style_features, style_features)
            total_loss = content_loss + style_loss

            # Backward pass and parameter update.
            total_loss.backward()
            optimizer.step()

            metrics["content"].append(content_loss.item())
            metrics["style"].append(style_loss.item())
            metrics["total"].append(total_loss.item())

            # Console / TensorBoard logging every opt.log_interval rounds.
            train_rounds = epoch * iter_count_per_epoch + batch_idx + 1
            if train_rounds % opt.log_interval == 0:
                message = f"[Epoch {epoch + 1}/{opt.num_epochs}] Batch[{batch_idx + 1}/{iter_count_per_epoch}] "
                message += "[content: %.2f (%.2f) style: %.2f (%.2f) total: %.2f (%.2f)]" % (
                    content_loss.item(), float(np.mean(metrics["content"])),
                    style_loss.item(), float(np.mean(metrics["style"])),
                    total_loss.item(), float(np.mean(metrics["total"])))
                print(message)

                writer.add_scalar("content loss", content_loss.item(), train_rounds)
                writer.add_scalar("style loss", style_loss.item(), train_rounds)
                writer.add_scalar("total loss", total_loss.item(), train_rounds)
                scalar_dict = {"content": content_loss.item(), "style": style_loss.item(), "total": total_loss.item()}
                writer.add_scalars("loss", scalar_dict, train_rounds)

            # Periodic visual check: stylize a few random dataset images.
            if train_rounds % opt.sample_interval == 0:
                sample_paths = glob.glob(f"{opt.dataset}/*/*")
                # min(8, ...) avoids random.sample raising ValueError on
                # datasets with fewer than 8 images; convert("RGB") guards
                # against non-RGB files, as with the style image above.
                image_samples = [
                    style_image_transform(opt.content_size)(Image.open(path).convert("RGB"))
                    for path in random.sample(sample_paths, min(8, len(sample_paths)))
                ]
                image_samples = torch.stack(image_samples)

                # Run inference without tracking gradients.
                transformer_net.eval()
                with torch.no_grad():
                    output = transformer_net(image_samples.to(devices[0]))

                # Stack originals above outputs (concat along height, dim=2)
                # and save both to disk and to TensorBoard.
                image_grid = denormalize_image(torch.cat((image_samples.cpu(), output.cpu()), 2))
                save_image(image_grid, f"{sample_dir}/sample_{opt.batch_size}_{train_rounds}.jpg", nrow=4)
                writer.add_images("sample", image_grid, train_rounds)
                transformer_net.train()  # back to training mode

            # Periodic checkpoint. NOTE(review): .module assumes the
            # data_parallel_network wrapper exposes the inner model there —
            # verify against the project helper.
            if train_rounds % opt.checkpoint_interval == 0:
                torch.save(transformer_net.module.state_dict(),
                           f"{model_dir}/{opt.name}_{opt.batch_size}_{train_rounds}.pth")

        epoch_end_time = time.time()
        print(f"# 总耗时：{epoch_end_time - epoch_begin_time} sec.")

# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
