"""
DeepLabv3+ 训练脚本
与UNet训练脚本结构相同，只是模型不同
"""
import os
import sys

# 添加项目根目录到路径
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

# 导入UNet的训练脚本作为模板，替换模型
from baselines.unet.train import train_one_epoch, validate, train_fold as _train_fold_template
import argparse
import time
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim

from baselines.deeplabv3plus.model import DeepLabV3Plus
from baselines.deeplabv3plus.config import *
from datasets import create_kfold_dataloaders
from metrics import SegmentationMetrics, CombinedLoss
from utils import (
    set_seed, setup_logger, log_config,
    save_checkpoint, load_checkpoint, save_train_state,
    get_scheduler, AverageMeter
)


def train_fold(fold_idx, args):
    """Train a single cross-validation fold of DeepLabv3+.

    Args:
        fold_idx: 0-based index of the fold to train.
        args: Parsed CLI namespace; only ``args.resume`` is read here.

    Returns:
        dict: Validation metrics of the best checkpoint for this fold
        (falls back to the last checkpoint if no "best" was ever saved).
    """
    set_seed(TRAIN_CONFIG['seed'])

    log_dir = os.path.join('logs', 'deeplabv3plus')
    logger = setup_logger(log_dir, 'deeplabv3plus', fold_idx)

    # Flatten every config section into one dict so the full run
    # configuration is logged and persisted for reproducibility.
    config_dict = {
        'fold': fold_idx,
        'model': 'DeepLabv3plus',
        **DATA_CONFIG,
        **TRAIN_CONFIG,
        **MODEL_CONFIG,
        **LOSS_CONFIG,
        **OPTIMIZER_CONFIG,
        **SCHEDULER_CONFIG,
        **OTHER_CONFIG
    }
    log_config(logger, config_dict)

    device = torch.device(OTHER_CONFIG['device'])
    logger.info(f'使用设备: {device}')

    logger.info('创建数据加载器...')
    train_loader, val_loader, train_files, val_files = create_kfold_dataloaders(
        images_dir=DATA_CONFIG['images_dir'],
        masks_dir=DATA_CONFIG['masks_dir'],
        k_folds=TRAIN_CONFIG['k_folds'],
        fold_idx=fold_idx,
        batch_size=TRAIN_CONFIG['batch_size'],
        num_workers=TRAIN_CONFIG['num_workers'],
        to_rgb=DATA_CONFIG['to_rgb'],
        augment_train=True,
        seed=TRAIN_CONFIG['seed']
    )

    logger.info('创建模型...')
    model = DeepLabV3Plus(
        in_channels=MODEL_CONFIG['in_channels'],
        out_channels=MODEL_CONFIG['out_channels'],
        pretrained=MODEL_CONFIG['pretrained']
    ).to(device)

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'模型参数量: {total_params / 1e6:.2f}M')

    criterion = CombinedLoss(
        bce_weight=LOSS_CONFIG['bce_weight'],
        dice_weight=LOSS_CONFIG['dice_weight']
    )

    # Any optimizer type other than 'adam' falls back to SGD with momentum.
    if OPTIMIZER_CONFIG['type'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            weight_decay=TRAIN_CONFIG['weight_decay']
        )
    else:
        optimizer = optim.SGD(
            model.parameters(),
            lr=TRAIN_CONFIG['learning_rate'],
            momentum=0.9,
            weight_decay=TRAIN_CONFIG['weight_decay']
        )

    scheduler = get_scheduler(
        optimizer,
        scheduler_type=SCHEDULER_CONFIG['type'],
        **SCHEDULER_CONFIG
    )

    start_epoch = 0
    best_metric = 0.0
    checkpoint_dir = os.path.join('checkpoints', 'deeplabv3plus', f'fold_{fold_idx}')

    # Optionally resume from the most recent checkpoint of this fold.
    if args.resume:
        resume_path = os.path.join(checkpoint_dir, 'last.pth')
        if os.path.exists(resume_path):
            start_epoch, best_metric = load_checkpoint(
                resume_path, model, optimizer, scheduler
            )
            logger.info(f'从 epoch {start_epoch} 继续训练')

    logger.info('开始训练...')
    for epoch in range(start_epoch, TRAIN_CONFIG['num_epochs']):
        epoch_start_time = time.time()

        train_loss, train_metrics = train_one_epoch(
            model, train_loader, criterion, optimizer, device, epoch, logger
        )

        val_loss, val_metrics = validate(
            model, val_loader, criterion, device, epoch, logger
        )

        scheduler.step()

        # Model selection is driven by validation mIoU.
        is_best = val_metrics['miou'] > best_metric
        if is_best:
            best_metric = val_metrics['miou']
            logger.info(f'*** 新的最佳模型！mIoU: {best_metric:.4f} ***')

        state = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'best_metric': best_metric,
            'val_metrics': val_metrics
        }

        # 'last.pth' is overwritten every epoch to support --resume;
        # 'best.pth' is only written when validation mIoU improves.
        save_checkpoint(state, checkpoint_dir, 'last.pth')

        if is_best:
            save_checkpoint(state, checkpoint_dir, 'best.pth')

        save_train_state(checkpoint_dir, epoch, best_metric, config_dict)

        epoch_time = time.time() - epoch_start_time
        logger.info(f'Epoch {epoch} 完成，用时: {epoch_time:.2f}秒\n')

    logger.info(f'Fold {fold_idx} 训练完成！最佳 mIoU: {best_metric:.4f}')

    # Reload the best checkpoint to report its metrics. map_location keeps
    # this working even when the checkpoint was saved on a different device
    # (e.g. GPU-trained, evaluated on a CPU-only machine).
    best_checkpoint_path = os.path.join(checkpoint_dir, 'best.pth')
    if not os.path.exists(best_checkpoint_path):
        # No epoch ever improved on the initial best_metric (e.g. mIoU stayed
        # at 0.0), so 'best.pth' was never written — fall back to 'last.pth'
        # instead of crashing with FileNotFoundError.
        best_checkpoint_path = os.path.join(checkpoint_dir, 'last.pth')
    checkpoint = torch.load(best_checkpoint_path, map_location=device)
    best_metrics = checkpoint['val_metrics']

    return best_metrics


def main():
    parser = argparse.ArgumentParser(description='DeepLabv3+ 训练脚本')
    parser.add_argument('--fold', type=int, default=None,
                        help='指定训练哪一折（0-7），不指定则训练所有折')
    parser.add_argument('--resume', action='store_true',
                        help='是否从断点继续训练')
    parser.add_argument('--epochs', type=int, default=None,
                        help='训练轮数（覆盖配置文件）')
    parser.add_argument('--batch_size', type=int, default=None,
                        help='批大小（覆盖配置文件）')
    parser.add_argument('--lr', type=float, default=None,
                        help='学习率（覆盖配置文件）')

    args = parser.parse_args()

    if args.epochs is not None:
        TRAIN_CONFIG['num_epochs'] = args.epochs
    if args.batch_size is not None:
        TRAIN_CONFIG['batch_size'] = args.batch_size
    if args.lr is not None:
        TRAIN_CONFIG['learning_rate'] = args.lr

    if args.fold is not None:
        print(f'训练 Fold {args.fold}')
        best_metrics = train_fold(args.fold, args)
        print(f'Fold {args.fold} 最佳指标: {best_metrics}')
    else:
        print(f'训练所有 {TRAIN_CONFIG["k_folds"]} 折')
        all_results = {}

        for fold_idx in range(TRAIN_CONFIG['k_folds']):
            print(f'\n{"=" * 80}')
            print(f'开始训练 Fold {fold_idx}')
            print(f'{"=" * 80}\n')

            best_metrics = train_fold(fold_idx, args)
            all_results[f'fold_{fold_idx}'] = best_metrics

        print(f'\n{"=" * 80}')
        print('所有折训练完成！汇总结果：')
        print(f'{"=" * 80}\n')

        import numpy as np
        import json

        metrics_names = list(all_results['fold_0'].keys())
        summary = {}

        for metric_name in metrics_names:
            values = [all_results[f'fold_{i}'][metric_name] for i in range(TRAIN_CONFIG['k_folds'])]
            summary[metric_name] = {
                'mean': np.mean(values),
                'std': np.std(values),
                'values': values
            }

        for metric_name, stats in summary.items():
            print(f'{metric_name}:')
            print(f'  Mean: {stats["mean"]:.4f}')
            print(f'  Std:  {stats["std"]:.4f}')
            print(f'  Values: {[f"{v:.4f}" for v in stats["values"]]}')
            print()

        summary_path = os.path.join('logs', 'deeplabv3plus', 'summary_results.json')
        os.makedirs(os.path.dirname(summary_path), exist_ok=True)
        with open(summary_path, 'w', encoding='utf-8') as f:
            json.dump({'all_results': all_results, 'summary': summary}, f, indent=4, ensure_ascii=False)

        print(f'汇总结果已保存到: {summary_path}')


if __name__ == '__main__':
    main()
