"""
通用训练模板
所有baseline模型都可以使用这个模板进行训练
"""
import os
import sys
import argparse
import time
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim

from datasets import create_kfold_dataloaders
from metrics import SegmentationMetrics, CombinedLoss
from utils import (
    set_seed, setup_logger, log_config,
    create_json_logger,
    save_checkpoint, load_checkpoint, save_train_state,
    get_scheduler, AverageMeter
)


def train_one_epoch(model, train_loader, criterion, optimizer, device, epoch, logger):
    """Run one training epoch and return (average_loss, metrics_dict).

    Executes the standard forward/backward/step cycle per batch, maintains a
    running loss average, and accumulates segmentation metrics computed on the
    sigmoid-activated model outputs.
    """
    model.train()
    running_loss = AverageMeter()
    seg_metrics = SegmentationMetrics()

    progress = tqdm(train_loader, desc=f'Epoch {epoch} [Train]')

    for step, (images, masks) in enumerate(progress):
        images, masks = images.to(device), masks.to(device)

        # Forward pass; criterion returns (total, bce, dice) components.
        logits = model(images)
        total_loss, bce_loss, dice_loss = criterion(logits, masks)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        running_loss.update(total_loss.item(), images.size(0))

        # Metrics are computed on probabilities, outside the autograd graph.
        with torch.no_grad():
            seg_metrics.update(torch.sigmoid(logits), masks)

        progress.set_postfix({
            'loss': f'{running_loss.avg:.4f}',
            'bce': f'{bce_loss.item():.4f}',
            'dice': f'{dice_loss.item():.4f}',
            'lr': f'{optimizer.param_groups[0]["lr"]:.6f}'
        })

    results = seg_metrics.get_all_metrics()
    logger.info(f'Epoch {epoch} [Train] - Loss: {running_loss.avg:.4f}, '
                f'IoU: {results["iou"]:.4f}, Dice: {results["dice"]:.4f}')

    return running_loss.avg, results


def validate(model, val_loader, criterion, device, epoch, logger):
    """Evaluate the model on the validation set; return (average_loss, metrics_dict)."""
    model.eval()
    running_loss = AverageMeter()
    seg_metrics = SegmentationMetrics()

    progress = tqdm(val_loader, desc=f'Epoch {epoch} [Val]')

    # No gradients are needed during evaluation.
    with torch.no_grad():
        for images, masks in progress:
            images, masks = images.to(device), masks.to(device)

            logits = model(images)
            total_loss, bce_loss, dice_loss = criterion(logits, masks)

            running_loss.update(total_loss.item(), images.size(0))

            # Metrics operate on sigmoid probabilities, matching training.
            seg_metrics.update(torch.sigmoid(logits), masks)

            progress.set_postfix({'loss': f'{running_loss.avg:.4f}'})

    results = seg_metrics.get_all_metrics()
    logger.info(f'Epoch {epoch} [Val] - Loss: {running_loss.avg:.4f}, '
                f'IoU: {results["iou"]:.4f}, Dice: {results["dice"]:.4f}, '
                f'mIoU: {results["miou"]:.4f}')

    return running_loss.avg, results


def train_model(model_name, model_class, config_module, fold_idx, args):
    """Generic training driver for one cross-validation fold.

    Builds the dataloaders, model, loss, optimizer and scheduler from the
    given config module, runs the epoch loop (with checkpointing, optional
    resume, and JSON logging), and returns the validation metrics of the
    best checkpoint.

    Args:
        model_name: Model identifier (e.g. 'unet', 'fcn') used for log and
            checkpoint directory names.
        model_class: Model class, instantiated as ``model_class(**MODEL_CONFIG)``.
        config_module: Module exposing DATA/TRAIN/MODEL/LOSS/OPTIMIZER/
            SCHEDULER/OTHER config dicts.
        fold_idx: Index of the k-fold split to train on.
        args: Parsed CLI args; ``epochs``/``batch_size``/``lr`` override the
            config when not None, ``resume`` restarts from ``last.pth``.

    Returns:
        dict: Validation metrics stored in the best checkpoint, or the last
        epoch's metrics if no best checkpoint was ever written (empty dict
        when zero epochs ran).
    """
    # Pull the per-aspect config dicts from the config module.
    DATA_CONFIG = config_module.DATA_CONFIG
    TRAIN_CONFIG = config_module.TRAIN_CONFIG
    MODEL_CONFIG = config_module.MODEL_CONFIG
    LOSS_CONFIG = config_module.LOSS_CONFIG
    OPTIMIZER_CONFIG = config_module.OPTIMIZER_CONFIG
    SCHEDULER_CONFIG = config_module.SCHEDULER_CONFIG
    OTHER_CONFIG = config_module.OTHER_CONFIG

    # CLI arguments take precedence over the static config.
    if args.epochs is not None:
        TRAIN_CONFIG['num_epochs'] = args.epochs
    if args.batch_size is not None:
        TRAIN_CONFIG['batch_size'] = args.batch_size
    if args.lr is not None:
        TRAIN_CONFIG['learning_rate'] = args.lr

    set_seed(TRAIN_CONFIG['seed'])

    log_dir = os.path.join('logs', model_name)
    logger = setup_logger(log_dir, model_name, fold_idx)

    # Structured (JSON) logging alongside the plain-text logger.
    json_logger = create_json_logger(log_dir, model_name, fold_idx)

    config_dict = {
        'fold': fold_idx,
        'model': model_name,
        **DATA_CONFIG,
        **TRAIN_CONFIG,
        **MODEL_CONFIG,
        **LOSS_CONFIG,
        **OPTIMIZER_CONFIG,
        **SCHEDULER_CONFIG,
        **OTHER_CONFIG
    }
    log_config(logger, config_dict)
    json_logger.log_config(config_dict)

    device = torch.device(OTHER_CONFIG['device'])
    logger.info(f'使用设备: {device}')

    logger.info('创建数据加载器...')
    train_loader, val_loader, _, _ = create_kfold_dataloaders(
        images_dir=DATA_CONFIG['images_dir'],
        masks_dir=DATA_CONFIG['masks_dir'],
        k_folds=TRAIN_CONFIG['k_folds'],
        fold_idx=fold_idx,
        batch_size=TRAIN_CONFIG['batch_size'],
        num_workers=TRAIN_CONFIG['num_workers'],
        to_rgb=DATA_CONFIG['to_rgb'],
        augment_train=True,
        seed=TRAIN_CONFIG['seed']
    )

    logger.info('创建模型...')
    model = model_class(**MODEL_CONFIG).to(device)

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'模型参数量: {total_params / 1e6:.2f}M')

    criterion = CombinedLoss(
        bce_weight=LOSS_CONFIG['bce_weight'],
        dice_weight=LOSS_CONFIG['dice_weight']
    )

    if OPTIMIZER_CONFIG['type'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            weight_decay=TRAIN_CONFIG['weight_decay']
        )
    else:
        # Any non-'adam' type falls back to SGD with momentum.
        optimizer = optim.SGD(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            momentum=0.9,
            weight_decay=TRAIN_CONFIG['weight_decay']
        )

    # NOTE(review): SCHEDULER_CONFIG['type'] is forwarded twice — once as
    # scheduler_type and once inside **SCHEDULER_CONFIG. Confirm get_scheduler
    # tolerates the extra 'type' kwarg; otherwise filter it out here.
    scheduler = get_scheduler(optimizer, scheduler_type=SCHEDULER_CONFIG['type'], **SCHEDULER_CONFIG)

    start_epoch = 0
    best_metric = 0.0
    checkpoint_dir = os.path.join('checkpoints', model_name, f'fold_{fold_idx}')

    if args.resume:
        resume_path = os.path.join(checkpoint_dir, 'last.pth')
        if os.path.exists(resume_path):
            start_epoch, best_metric = load_checkpoint(resume_path, model, optimizer, scheduler)
        else:
            # Fix: previously a missing checkpoint silently restarted training
            # from scratch; make that decision visible in the log.
            logger.warning(f'未找到检查点 {resume_path}，从头开始训练')

    logger.info('开始训练...')
    training_start_time = time.time()

    # Track the last validation metrics so we can return something sensible
    # even if best.pth is never written.
    val_metrics = {}

    for epoch in range(start_epoch, TRAIN_CONFIG['num_epochs']):
        epoch_start_time = time.time()

        train_loss, train_metrics = train_one_epoch(model, train_loader, criterion, optimizer, device, epoch, logger)
        val_loss, val_metrics = validate(model, val_loader, criterion, device, epoch, logger)

        scheduler.step()

        epoch_time = time.time() - epoch_start_time

        # Record per-epoch results in the structured log.
        train_metrics_for_json = {'loss': train_loss, **train_metrics}
        val_metrics_for_json = {'loss': val_loss, **val_metrics}
        json_logger.log_epoch(
            epoch=epoch,
            train_metrics=train_metrics_for_json,
            val_metrics=val_metrics_for_json,
            lr=optimizer.param_groups[0]['lr'],
            epoch_time=epoch_time
        )

        # Model selection criterion: validation mIoU.
        is_best = val_metrics['miou'] > best_metric
        if is_best:
            best_metric = val_metrics['miou']
            logger.info(f'*** 新的最佳模型！mIoU: {best_metric:.4f} ***')

            checkpoint_path = os.path.join(checkpoint_dir, 'best.pth')
            json_logger.log_best_model(
                epoch=epoch,
                metrics=val_metrics,
                checkpoint_path=checkpoint_path
            )

        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'best_metric': best_metric,
            'val_metrics': val_metrics
        }

        # Always refresh 'last.pth'; additionally snapshot 'best.pth' on improvement.
        save_checkpoint(state, checkpoint_dir, 'last.pth')
        if is_best:
            save_checkpoint(state, checkpoint_dir, 'best.pth')

        save_train_state(checkpoint_dir, epoch, best_metric, config_dict)

        logger.info(f'Epoch {epoch} 完成，用时: {epoch_time:.2f}秒\n')

    total_training_time = time.time() - training_start_time

    json_logger.log_summary(
        total_epochs=TRAIN_CONFIG['num_epochs'],
        total_time=total_training_time,
        best_miou=best_metric
    )

    logger.info(f'Fold {fold_idx} 训练完成！最佳 mIoU: {best_metric:.4f}')
    logger.info(f'JSON日志已保存: {json_logger.get_log_path()}')

    best_checkpoint_path = os.path.join(checkpoint_dir, 'best.pth')
    if os.path.exists(best_checkpoint_path):
        # Fix: map_location keeps the load valid regardless of the device the
        # checkpoint was saved from (e.g. resuming a GPU run on CPU).
        checkpoint = torch.load(best_checkpoint_path, map_location=device)
        return checkpoint['val_metrics']
    # Fix: previously this crashed with FileNotFoundError when no best
    # checkpoint was ever written (zero epochs, or mIoU never exceeded 0.0).
    logger.warning(f'未找到最佳检查点 {best_checkpoint_path}，返回最后一轮的验证指标')
    return val_metrics
