import os
import math
import time

import torch
import numpy as np
from collections import OrderedDict
from tqdm import tqdm

from skimage.metrics import structural_similarity as calculate_ssim
from skimage.metrics import peak_signal_noise_ratio as calculate_psnr
import lpips
from torchmetrics.image.fid import FrechetInceptionDistance

from options.train_options import TrainOptions
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer


def lcm(a, b):
    """Return the least common multiple of two integers (0 if either is 0).

    Uses floor division: ``math.gcd(a, b)`` always divides ``a * b`` exactly,
    and callers (e.g. ``opt.print_freq``) rely on the result being an ``int``
    so that integer modulo comparisons stay exact.
    """
    return abs(a * b) // math.gcd(a, b) if a and b else 0


def main():
    """Train the image-synthesis model with per-epoch validation.

    Runs the standard GAN training loop (generator + discriminator updates),
    then after every epoch evaluates the generator on a held-out validation
    set with SSIM, PSNR, FID and LPIPS, logs the metrics, and checkpoints the
    model whenever the validation FID improves.
    """
    opt = TrainOptions().parse()

    # --- Validation dataset setup ---
    # Reuse TestOptions to build a deterministic, single-image loader.
    val_opt = TestOptions().parse(save=False)
    val_opt.nThreads = 1
    val_opt.batchSize = 1
    val_opt.serial_batches = True  # no shuffle
    val_opt.no_flip = True  # no flip
    val_opt.phase = "val"

    val_loader = CreateDataLoader(val_opt)
    val_dataset = val_loader.load_data()
    val_dataset_size = len(val_loader)
    print(f'#validation images = {val_dataset_size}')

    # Resume bookkeeping: iter.txt stores "epoch,image_iter" of the last save.
    iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
    if opt.continue_train:
        try:
            start_epoch, start_image_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
        except (OSError, ValueError):
            # Missing or malformed iter.txt -> start from scratch.
            start_epoch, start_image_iter = 1, 0
        print('Resuming from epoch %d at iteration %d' % (start_epoch, start_image_iter))
    else:
        start_epoch, start_image_iter = 1, 0

    # Align print_freq with batchSize so the image-counting modulo checks
    # below fire exactly on batch boundaries.
    opt.print_freq = lcm(opt.print_freq, opt.batchSize)
    if opt.debug:
        opt.display_freq = 1
        opt.print_freq = 1
        opt.niter = 1
        opt.niter_decay = 0
        opt.max_dataset_size = 10

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size_images = len(data_loader)
    num_batches_per_epoch = dataset_size_images // opt.batchSize
    print('#training images = %d' % dataset_size_images)

    model = create_model(opt)

    # Perceptual/statistical metrics live on the same device as the model.
    device = next(model.parameters()).device
    lpips_model = lpips.LPIPS(net='alex', spatial=False).to(device)
    lpips_model.eval()
    # normalize=True configures the metric for float input in [0, 1];
    # we feed uint8 [0, 255] tensors below, which torchmetrics also accepts.
    fid_metric = FrechetInceptionDistance(feature=2048, normalize=True).to(device)

    visualizer = Visualizer(opt)
    if opt.fp16:
        from apex import amp

        model, [optimizer_G, optimizer_D] = amp.initialize(model, [model.optimizer_G, model.optimizer_D],
                                                           opt_level='O1')
        model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
    else:
        # Non-fp16 path: create_model wrapped the model in DataParallel,
        # so optimizers hang off .module.
        optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D

    # Global step counts *images* (not batches) processed since training began.
    total_steps = (start_epoch - 1) * dataset_size_images + start_image_iter

    # Phase offsets so the periodic checks fire on the same schedule after a resume.
    display_delta = total_steps % opt.display_freq
    print_delta = total_steps % opt.print_freq
    save_delta = total_steps % opt.save_latest_freq

    # Best validation metrics seen so far.
    # SSIM: higher is better (-1.0 is below any reachable value).
    # FID: lower is better.
    best_val_ssim = -1.0
    best_val_fid = float('inf')
    best_epoch_ssim = 0
    best_epoch_fid = 0

    for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
        if epoch == start_epoch:
            # Resuming mid-epoch: skip the batches already processed and
            # continue the in-epoch image counter from the restored value.
            current_epoch_start_batch_idx = start_image_iter // opt.batchSize
            current_epoch_image_iter = start_image_iter
        else:
            # Fresh epoch: start from batch 0 / image 0.
            current_epoch_start_batch_idx = 0
            current_epoch_image_iter = 0

        epoch_tqdm = tqdm(enumerate(dataset, start=current_epoch_start_batch_idx),
                          total=num_batches_per_epoch,  # batches per epoch
                          initial=current_epoch_start_batch_idx,  # starting batch index
                          desc=f'Epoch {epoch}/{opt.niter + opt.niter_decay}')

        # Ensure training mode; unwrap DataParallel on the non-fp16 multi-GPU path.
        if len(opt.gpu_ids) > 0 and not opt.fp16:
            model.module.train()
        else:
            model.train()

        for i, data in epoch_tqdm:
            if total_steps % opt.print_freq == print_delta:
                iter_start_time = time.time()

            total_steps += opt.batchSize
            current_epoch_image_iter += opt.batchSize

            # whether to collect output images
            save_fake = total_steps % opt.display_freq == display_delta

            ############## Forward Pass ######################
            losses, generated = model(data['label'], data['inst'], data['image'], data['feat'], infer=save_fake)

            # sum per device losses
            losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
            loss_dict = dict(zip(model.module.loss_names, losses))

            # calculate final loss scalar
            loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
            loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0)

            ############### Backward Pass ####################
            # update generator weights
            optimizer_G.zero_grad()
            if opt.fp16:
                with amp.scale_loss(loss_G, optimizer_G) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_G.backward()
            optimizer_G.step()

            # update discriminator weights
            optimizer_D.zero_grad()
            if opt.fp16:
                with amp.scale_loss(loss_D, optimizer_D) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_D.backward()
            optimizer_D.step()

            ############## Display results and errors ##########
            ### print out errors
            loss_info = {
                k: v.data.item() if isinstance(v, torch.Tensor) and v.numel() > 0 else (v if isinstance(v, int) else 0)
                for k, v in loss_dict.items()
            }
            loss_info_for_tqdm = {k: f"{v:.3f}" for k, v in loss_info.items() if v != 0}
            epoch_tqdm.set_postfix(loss_info_for_tqdm)

            if total_steps % opt.print_freq == print_delta:
                t_interval = time.time() - iter_start_time
                visualizer.log(
                    epoch=epoch,
                    i=current_epoch_image_iter,  # images processed so far this epoch
                    errors=loss_info,  # numeric loss dictionary
                    t=t_interval,  # wall-clock time since the interval started
                    step=total_steps,  # global image count (TensorBoard step)
                )

            ### display output images
            if save_fake:
                visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
                                       ('synthesized_image', util.tensor2im(generated.data[0])),
                                       ('real_image', util.tensor2im(data['image'][0]))])
                visualizer.display_current_results(visuals, epoch, total_steps)

            ### save latest model
            if total_steps % opt.save_latest_freq == save_delta:
                print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
                model.module.save('latest')
                np.savetxt(iter_path, (epoch, current_epoch_image_iter), delimiter=',', fmt='%d')

        # --- End-of-epoch validation ---
        # Switch to eval mode (unwrap DataParallel on the non-fp16 path).
        if len(opt.gpu_ids) > 0 and not opt.fp16:
            model.module.eval()
        else:
            model.eval()

        # FID accumulates statistics over the whole validation set, so reset
        # it at the start of every validation pass.
        fid_metric.reset()

        val_ssims = []
        val_psnrs = []
        val_lpips = []

        val_tqdm = tqdm(enumerate(val_dataset), total=val_dataset_size, desc=f'Validation Epoch {epoch}')
        with torch.no_grad():  # crucial for evaluation: disable gradient tracking
            for val_i, val_data in val_tqdm:
                val_label = val_data['label'].to(device)
                val_inst = val_data['inst'].to(device) if val_data['inst'] is not None else None
                val_image = val_data['image'].to(device)  # real image
                val_feat = val_data['feat'].to(device) if val_data['feat'] is not None else None

                # The model expects 4-D (B, C, H, W) tensors even at
                # batchSize=1, so restore a dropped batch dimension.
                if val_label.dim() == 3:
                    val_label = val_label.unsqueeze(0)
                if val_image.dim() == 3:
                    val_image = val_image.unsqueeze(0)
                if val_inst is not None and val_inst.dim() == 3:
                    val_inst = val_inst.unsqueeze(0)
                if val_feat is not None and val_feat.dim() == 3:
                    val_feat = val_feat.unsqueeze(0)

                # Inference pass: infer must be True to obtain the generated image.
                _, val_generated = model(val_label, val_inst, val_image, val_feat, infer=True)

                # SSIM / PSNR on [0, 1] float numpy images.
                # util.tensor2im maps a [-1, 1] tensor to a [0, 255] (H, W, C) array.
                real_img_np = util.tensor2im(val_image.cpu().data[0])
                gen_img_np = util.tensor2im(val_generated.cpu().data[0])
                real_img_norm = real_img_np.astype(np.float32) / 255.0
                gen_img_norm = gen_img_np.astype(np.float32) / 255.0

                ssim_val = calculate_ssim(real_img_norm, gen_img_norm, data_range=1.0, channel_axis=-1)
                val_ssims.append(ssim_val)

                psnr_val = calculate_psnr(real_img_norm, gen_img_norm, data_range=1.0)
                val_psnrs.append(psnr_val)

                # FID: map the [-1, 1] model tensors to uint8 [0, 255]
                # (B, C, H, W); +0.5 before clamp rounds to nearest.
                real_img_for_fid = (val_image * 0.5 + 0.5).mul(255).add_(0.5).clamp_(0, 255).to(torch.uint8)
                gen_img_for_fid = (val_generated * 0.5 + 0.5).mul(255).add_(0.5).clamp_(0, 255).to(torch.uint8)
                fid_metric.update(real_img_for_fid, real=True)
                fid_metric.update(gen_img_for_fid, real=False)

                # LPIPS expects [-1, 1] tensors, which these already are.
                lpips_val = lpips_model(val_generated, val_image).item()
                val_lpips.append(lpips_val)

        avg_val_ssim = np.mean(val_ssims)
        avg_val_psnr = np.mean(val_psnrs)
        avg_val_fid = fid_metric.compute().item()  # final FID over the whole set
        avg_val_lpips = np.mean(val_lpips)

        # Log validation metrics to TensorBoard
        visualizer.log_validation_metrics(epoch, {
            'val_SSIM↑': avg_val_ssim,
            'val_PSNR↑': avg_val_psnr,
            'val_FID↓': avg_val_fid,
            'val_LPIPS↓': avg_val_lpips
        }, total_steps)

        # Track the best-SSIM epoch (bookkeeping only; no checkpoint).
        if avg_val_ssim > best_val_ssim:
            best_val_ssim = avg_val_ssim
            best_epoch_ssim = epoch

        # Checkpoint on FID (lower is better).
        if avg_val_fid < best_val_fid:
            print(f'Validation FID improved from {best_val_fid:.4f} to {avg_val_fid:.4f}. Saving best FID model.')
            best_val_fid = avg_val_fid
            best_epoch_fid = epoch
            model.module.save('best', best_epoch_fid)
        # Always record the resume point at epoch end, even when no best
        # model was saved this epoch.
        np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')

        ### instead of only training the local enhancer, train the entire network after certain iterations
        if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
            model.module.update_fixed_params()

        ### linearly decay learning rate after certain iterations
        if epoch > opt.niter:
            model.module.update_learning_rate()


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
