"""
UNet训练脚本
支持K折交叉验证、断点续训、自动保存最佳模型
"""
import os
import sys
import argparse
import time
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim

# 添加项目根目录到路径
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

from baselines.unet.model import UNet
from baselines.unet.config import *
from datasets import create_kfold_dataloaders
from metrics import SegmentationMetrics, CombinedLoss
from utils import (
    set_seed, setup_logger, log_config,
    save_checkpoint, load_checkpoint, save_train_state,
    get_scheduler, AverageMeter
)


def train_one_epoch(model, train_loader, criterion, optimizer, device, epoch, logger):
    """
    Run one full training pass over the training set.

    Args:
        model: network being trained (set to train mode here)
        train_loader: DataLoader yielding (images, masks) batches
        criterion: combined loss; returns (total, bce, dice) components
        optimizer: optimizer updating the model parameters
        device: torch device the batches are moved to
        epoch: current epoch index, used for display/logging only
        logger: logger for the end-of-epoch summary line

    Returns:
        Tuple of (average total loss over the epoch, dict of segmentation metrics).
    """
    model.train()

    running_loss = AverageMeter()
    epoch_metrics = SegmentationMetrics()

    progress = tqdm(train_loader, desc=f'Epoch {epoch} [Train]')

    for images, masks in progress:
        images, masks = images.to(device), masks.to(device)

        # Forward pass and loss computation
        outputs = model(images)
        total_loss, bce_loss, dice_loss = criterion(outputs, masks)

        # Backward pass and parameter update
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # Loss average is weighted by batch size
        running_loss.update(total_loss.item(), images.size(0))

        # Metrics are computed on sigmoid probabilities, without gradients
        with torch.no_grad():
            epoch_metrics.update(torch.sigmoid(outputs), masks)

        progress.set_postfix({
            'loss': f'{running_loss.avg:.4f}',
            'bce': f'{bce_loss.item():.4f}',
            'dice': f'{dice_loss.item():.4f}',
            'lr': f'{optimizer.param_groups[0]["lr"]:.6f}'
        })

    metrics_dict = epoch_metrics.get_all_metrics()

    logger.info(f'Epoch {epoch} [Train] - Loss: {running_loss.avg:.4f}, '
                f'IoU: {metrics_dict["iou"]:.4f}, '
                f'Dice: {metrics_dict["dice"]:.4f}, '
                f'PA: {metrics_dict["pixel_accuracy"]:.4f}')

    return running_loss.avg, metrics_dict


def validate(model, val_loader, criterion, device, epoch, logger):
    """
    Evaluate the model on the validation set for one epoch.

    Args:
        model: network to evaluate (set to eval mode here)
        val_loader: DataLoader yielding (images, masks) batches
        criterion: combined loss; returns (total, bce, dice) components
        device: torch device the batches are moved to
        epoch: current epoch index, used for display/logging only
        logger: logger for the end-of-epoch summary line

    Returns:
        Tuple of (average total loss over the epoch, dict of segmentation metrics).
    """
    model.eval()

    running_loss = AverageMeter()
    epoch_metrics = SegmentationMetrics()

    progress = tqdm(val_loader, desc=f'Epoch {epoch} [Val]')

    # No gradients are needed anywhere during evaluation
    with torch.no_grad():
        for images, masks in progress:
            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            total_loss, bce_loss, dice_loss = criterion(outputs, masks)

            # Loss average is weighted by batch size
            running_loss.update(total_loss.item(), images.size(0))

            # Metrics are computed on sigmoid probabilities
            epoch_metrics.update(torch.sigmoid(outputs), masks)

            progress.set_postfix({
                'loss': f'{running_loss.avg:.4f}',
            })

    metrics_dict = epoch_metrics.get_all_metrics()

    logger.info(f'Epoch {epoch} [Val] - Loss: {running_loss.avg:.4f}, '
                f'IoU: {metrics_dict["iou"]:.4f}, '
                f'Dice: {metrics_dict["dice"]:.4f}, '
                f'mIoU: {metrics_dict["miou"]:.4f}, '
                f'PA: {metrics_dict["pixel_accuracy"]:.4f}, '
                f'Precision: {metrics_dict["precision"]:.4f}, '
                f'Recall: {metrics_dict["recall"]:.4f}')

    return running_loss.avg, metrics_dict


def train_fold(fold_idx, args):
    """
    Train the model on a single cross-validation fold.

    Args:
        fold_idx: index of the fold to train (0-based)
        args: parsed command-line arguments (only args.resume is used here)

    Returns:
        dict: validation metrics of the best checkpoint for this fold
    """
    # Fix all RNG seeds for reproducibility
    set_seed(TRAIN_CONFIG['seed'])

    # Per-fold logger
    log_dir = os.path.join('logs', 'unet')
    logger = setup_logger(log_dir, 'unet', fold_idx)

    # Record the full effective configuration of this run
    config_dict = {
        'fold': fold_idx,
        'model': 'UNet',
        **DATA_CONFIG,
        **TRAIN_CONFIG,
        **MODEL_CONFIG,
        **LOSS_CONFIG,
        **OPTIMIZER_CONFIG,
        **SCHEDULER_CONFIG,
        **OTHER_CONFIG
    }
    log_config(logger, config_dict)

    # Device selection comes from the config file
    device = torch.device(OTHER_CONFIG['device'])
    logger.info(f'使用设备: {device}')

    # Build train/val loaders for this fold's split
    logger.info('创建数据加载器...')
    train_loader, val_loader, train_files, val_files = create_kfold_dataloaders(
        images_dir=DATA_CONFIG['images_dir'],
        masks_dir=DATA_CONFIG['masks_dir'],
        k_folds=TRAIN_CONFIG['k_folds'],
        fold_idx=fold_idx,
        batch_size=TRAIN_CONFIG['batch_size'],
        num_workers=TRAIN_CONFIG['num_workers'],
        to_rgb=DATA_CONFIG['to_rgb'],
        augment_train=True,
        seed=TRAIN_CONFIG['seed']
    )

    # Instantiate the model on the target device
    logger.info('创建模型...')
    model = UNet(
        in_channels=MODEL_CONFIG['in_channels'],
        out_channels=MODEL_CONFIG['out_channels'],
        features=MODEL_CONFIG['features']
    ).to(device)

    # Log parameter count for reference
    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'模型参数量: {total_params / 1e6:.2f}M')

    # Weighted BCE + Dice loss
    criterion = CombinedLoss(
        bce_weight=LOSS_CONFIG['bce_weight'],
        dice_weight=LOSS_CONFIG['dice_weight']
    )

    # Optimizer: Adam by default, SGD with momentum otherwise
    if OPTIMIZER_CONFIG['type'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            weight_decay=TRAIN_CONFIG['weight_decay']
        )
    else:
        optimizer = optim.SGD(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            momentum=0.9,
            weight_decay=TRAIN_CONFIG['weight_decay']
        )

    # Learning-rate scheduler as configured
    scheduler = get_scheduler(
        optimizer,
        scheduler_type=SCHEDULER_CONFIG['type'],
        **SCHEDULER_CONFIG
    )

    # Resume support: restore model/optimizer/scheduler from last.pth if present
    start_epoch = 0
    best_metric = 0.0
    checkpoint_dir = os.path.join('checkpoints', 'unet', f'fold_{fold_idx}')

    if args.resume:
        resume_path = os.path.join(checkpoint_dir, 'last.pth')
        if os.path.exists(resume_path):
            start_epoch, best_metric = load_checkpoint(
                resume_path, model, optimizer, scheduler
            )
            logger.info(f'从 epoch {start_epoch} 继续训练')
        else:
            # FIX: was silently restarting from scratch; make it explicit
            logger.warning(f'--resume requested but {resume_path} not found; '
                           f'training from scratch')

    # Main training loop
    logger.info('开始训练...')
    for epoch in range(start_epoch, TRAIN_CONFIG['num_epochs']):
        epoch_start_time = time.time()

        # One training pass
        train_loss, train_metrics = train_one_epoch(
            model, train_loader, criterion, optimizer, device, epoch, logger
        )

        # One validation pass
        val_loss, val_metrics = validate(
            model, val_loader, criterion, device, epoch, logger
        )

        # Step the scheduler once per epoch
        scheduler.step()

        # Model selection is based on validation mIoU
        is_best = val_metrics['miou'] > best_metric
        if is_best:
            best_metric = val_metrics['miou']
            logger.info(f'*** 新的最佳模型！mIoU: {best_metric:.4f} ***')

        # Full state needed to resume or evaluate later
        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'best_metric': best_metric,
            'val_metrics': val_metrics
        }

        # Always overwrite last.pth; copy to best.pth on improvement
        save_checkpoint(state, checkpoint_dir, 'last.pth')
        if is_best:
            save_checkpoint(state, checkpoint_dir, 'best.pth')

        # Lightweight training-state file for monitoring / resume bookkeeping
        save_train_state(checkpoint_dir, epoch, best_metric, config_dict)

        epoch_time = time.time() - epoch_start_time
        logger.info(f'Epoch {epoch} 完成，用时: {epoch_time:.2f}秒\n')

    logger.info(f'Fold {fold_idx} 训练完成！最佳 mIoU: {best_metric:.4f}')

    # Load the best checkpoint and return its validation metrics.
    best_checkpoint_path = os.path.join(checkpoint_dir, 'best.pth')
    if not os.path.exists(best_checkpoint_path):
        # FIX: best.pth may never have been written (e.g. resuming past the
        # final epoch, or no epoch improved on the resumed best_metric);
        # fall back to last.pth instead of crashing with FileNotFoundError.
        logger.warning('best.pth not found; falling back to last.pth')
        best_checkpoint_path = os.path.join(checkpoint_dir, 'last.pth')
    # FIX: map_location so a GPU-saved checkpoint loads on a CPU-only host
    checkpoint = torch.load(best_checkpoint_path, map_location=device)
    best_metrics = checkpoint['val_metrics']

    return best_metrics


def main():
    """
    CLI entry point: parse arguments, apply config overrides, and run
    training for one fold or for all k folds with a summary at the end.
    """
    parser = argparse.ArgumentParser(description='UNet训练脚本')
    # FIX: help text previously hard-coded "0-7" although k_folds is configurable
    parser.add_argument('--fold', type=int, default=None,
                        help='指定训练哪一折（0 到 k_folds-1），不指定则训练所有折')
    parser.add_argument('--resume', action='store_true',
                        help='是否从断点继续训练')
    parser.add_argument('--epochs', type=int, default=None,
                        help='训练轮数（覆盖配置文件）')
    parser.add_argument('--batch_size', type=int, default=None,
                        help='批大小（覆盖配置文件）')
    parser.add_argument('--lr', type=float, default=None,
                        help='学习率（覆盖配置文件）')

    args = parser.parse_args()

    # Command-line overrides take precedence over the config file
    if args.epochs is not None:
        TRAIN_CONFIG['num_epochs'] = args.epochs
    if args.batch_size is not None:
        TRAIN_CONFIG['batch_size'] = args.batch_size
    if args.lr is not None:
        TRAIN_CONFIG['learning_rate'] = args.lr

    # FIX: reject an out-of-range fold index up front instead of failing
    # deep inside the dataloader split
    if args.fold is not None and not 0 <= args.fold < TRAIN_CONFIG['k_folds']:
        parser.error(f'--fold must be in [0, {TRAIN_CONFIG["k_folds"] - 1}]')

    if args.fold is not None:
        # Train a single specified fold
        print(f'训练 Fold {args.fold}')
        best_metrics = train_fold(args.fold, args)
        print(f'Fold {args.fold} 最佳指标: {best_metrics}')
    else:
        # Train every fold in sequence and aggregate the results
        print(f'训练所有 {TRAIN_CONFIG["k_folds"]} 折')
        all_results = {}

        for fold_idx in range(TRAIN_CONFIG['k_folds']):
            print(f'\n{"=" * 80}')
            print(f'开始训练 Fold {fold_idx}')
            print(f'{"=" * 80}\n')

            best_metrics = train_fold(fold_idx, args)
            all_results[f'fold_{fold_idx}'] = best_metrics

        # Cross-fold summary
        print(f'\n{"=" * 80}')
        print('所有折训练完成！汇总结果：')
        print(f'{"=" * 80}\n')

        import numpy as np

        # FIX: coerce every metric to a built-in float — numpy scalars
        # (e.g. np.float32) are not JSON serializable and would make the
        # json.dump below raise TypeError after hours of training.
        metrics_names = list(all_results['fold_0'].keys())
        summary = {}

        for metric_name in metrics_names:
            values = [float(all_results[f'fold_{i}'][metric_name])
                      for i in range(TRAIN_CONFIG['k_folds'])]
            summary[metric_name] = {
                'mean': float(np.mean(values)),
                'std': float(np.std(values)),
                'values': values
            }

        # Print per-metric mean / std / raw values
        for metric_name, stats in summary.items():
            print(f'{metric_name}:')
            print(f'  Mean: {stats["mean"]:.4f}')
            print(f'  Std:  {stats["std"]:.4f}')
            print(f'  Values: {[f"{v:.4f}" for v in stats["values"]]}')
            print()

        # Persist both raw per-fold results and the summary as JSON
        import json
        serializable_results = {
            fold_name: {k: float(v) for k, v in fold_metrics.items()}
            for fold_name, fold_metrics in all_results.items()
        }
        summary_path = os.path.join('logs', 'unet', 'summary_results.json')
        os.makedirs(os.path.dirname(summary_path), exist_ok=True)
        with open(summary_path, 'w', encoding='utf-8') as f:
            json.dump({'all_results': serializable_results, 'summary': summary},
                      f, indent=4, ensure_ascii=False)

        print(f'汇总结果已保存到: {summary_path}')

# Script entry point: only run training when executed directly, not on import
if __name__ == '__main__':
    main()
