import argparse
import json
import os

import matplotlib.pyplot as plt
import torch

from data_loader import DataLoaderManager
from metrics import SegmentationMetrics
from nestedunet import NestedUNet, NestedUNetTrainer
from preprocessing import DataPreprocessor
from resunet import AttentionResUNet, ResUNet, ResUNetTrainer


class TrainingConfig:
    """Configuration container for training runs.

    Holds data paths, preprocessing parameters, training hyperparameters,
    dataset split ratios, and output directories. Output directories are
    created on construction if they do not already exist.
    """

    def __init__(self):
        # Input data locations
        self.image_dir = "data/brainskull"
        self.mask_dir = "data/venmask12"

        # Preprocessing parameters
        self.target_size = (256, 256)
        self.target_spacing = (1.0, 1.0, 1.0)

        # Training hyperparameters
        self.batch_size = 16
        self.epochs = 50
        self.learning_rate = 1e-4
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Dataset split ratios (the remainder is the test split)
        self.train_ratio = 0.7
        self.val_ratio = 0.15

        # Output locations
        self.checkpoint_dir = "run/checkpoints"
        self.log_dir = "run/logs"
        self.result_dir = "run/results"

        # Make sure every output directory exists up front.
        for directory in (self.checkpoint_dir, self.log_dir, self.result_dir):
            os.makedirs(directory, exist_ok=True)


def train_model(model_type="nestedunet", config=None):
    """Train one segmentation model, track metrics, and evaluate on the test split.

    Args:
        model_type: One of 'nestedunet', 'resunet', 'attention_resunet'.
        config: Optional TrainingConfig; a default one is created when None.

    Returns:
        Tuple of (trainer, per-epoch history dict, test-results dict).

    Raises:
        ValueError: If model_type is not a recognized model name.
    """
    if config is None:
        config = TrainingConfig()

    print(f"训练{model_type}模型...")
    print(f"设备: {config.device}")

    # 1. Build the train/val/test data loaders from the raw directories.
    print("准备数据...")
    preprocessor = DataPreprocessor(
        target_spacing=config.target_spacing, target_size=config.target_size
    )
    loader_manager = DataLoaderManager(config)
    train_loader, val_loader, test_loader = loader_manager.prepare_data_loaders(
        config.image_dir,
        config.mask_dir,
        preprocessor,
        config.train_ratio,
        config.val_ratio,
        config.batch_size,
    )

    # 2. Instantiate the requested network and its matching trainer class.
    print(f"创建{model_type}模型...")
    factories = {
        "nestedunet": (
            lambda: NestedUNet(in_channels=1, out_channels=1, deep_supervision=False),
            NestedUNetTrainer,
        ),
        "resunet": (lambda: ResUNet(in_channels=1, out_channels=1), ResUNetTrainer),
        "attention_resunet": (
            lambda: AttentionResUNet(in_channels=1, out_channels=1),
            ResUNetTrainer,
        ),
    }
    if model_type not in factories:
        raise ValueError(f"Unknown model type: {model_type}")
    build_net, trainer_cls = factories[model_type]
    model = build_net()
    trainer = trainer_cls(model, config.device, config.learning_rate)

    # 3. Training loop with per-epoch validation.
    print("开始训练...")
    metrics_calculator = SegmentationMetrics()

    # Per-epoch history; key order matters for the JSON dump below.
    metric_keys = ("dice", "jaccard", "ppv", "hd95")
    metric_labels = ("Dice", "Jaccard", "PPV", "HD95")
    history = {"train_loss": [], "val_loss": []}
    for key in metric_keys:
        history[f"val_{key}"] = []

    best_dice = 0
    best_epoch = 0

    for epoch in range(config.epochs):
        print(f"\nEpoch {epoch + 1}/{config.epochs}")

        # One optimization pass over the training split.
        train_loss = trainer.train_epoch(train_loader)
        history["train_loss"].append(train_loss)

        # Validation pass plus metric bookkeeping.
        val_loss, val_metrics = trainer.validate(val_loader, metrics_calculator)
        history["val_loss"].append(val_loss)
        for key in metric_keys:
            history[f"val_{key}"].append(val_metrics[key])

        print(f"Train Loss: {train_loss:.4f}")
        print(f"Val Loss: {val_loss:.4f}")
        for label, key in zip(metric_labels, metric_keys):
            print(f"Val {label}: {val_metrics[key]:.4f}")

        # Keep only the checkpoint with the best validation Dice so far.
        if val_metrics["dice"] > best_dice:
            best_dice = val_metrics["dice"]
            best_epoch = epoch
            ckpt_path = os.path.join(
                config.checkpoint_dir, f"{model_type}_best.pth"
            )
            trainer.save_checkpoint(epoch, val_loss, val_metrics, ckpt_path)
            print(f"保存最佳模型 (Dice: {best_dice:.4f})")

    print(f"\n训练完成! 最佳Dice: {best_dice:.4f} (Epoch {best_epoch + 1})")

    # 4. Persist the per-epoch history as JSON.
    history_path = os.path.join(config.log_dir, f"{model_type}_history.json")
    with open(history_path, "w") as f:
        json.dump(history, f, indent=4)

    # 5. Render the training curves to disk.
    plot_training_history(history, model_type, config.result_dir)

    # 6. Evaluate on the held-out test split.
    # NOTE(review): this evaluates the model state after the LAST epoch, not
    # the best checkpoint saved above — confirm that is intended.
    print("\n在测试集上评估...")
    test_loss, test_metrics = trainer.validate(test_loader, metrics_calculator)

    print(f"Test Loss: {test_loss:.4f}")
    for label, key in zip(metric_labels, metric_keys):
        print(f"Test {label}: {test_metrics[key]:.4f}")

    # Persist the test-set summary alongside the best-epoch bookkeeping.
    test_results = {
        "model_type": model_type,
        "test_loss": test_loss,
        "test_metrics": test_metrics,
        "best_epoch": best_epoch + 1,
        "best_dice": best_dice,
    }
    results_path = os.path.join(
        config.result_dir, f"{model_type}_test_results.json"
    )
    with open(results_path, "w") as f:
        json.dump(test_results, f, indent=4)

    return trainer, history, test_results


def plot_training_history(history, model_type, save_dir):
    """Plot the training/validation curves and save them as a PNG.

    Renders a 2x3 grid: loss, Dice, Jaccard, PPV, HD95, and a combined
    metrics panel, then writes `{model_type}_training_history.png` into
    save_dir.
    """
    # Panel spec: (row, col), y-label, title, [(history key, legend label)],
    # whether to draw a legend. A None label means an unlabeled line.
    panels = [
        ((0, 0), "Loss", "Loss",
         [("train_loss", "Train Loss"), ("val_loss", "Val Loss")], True),
        ((0, 1), "Dice", "Validation Dice", [("val_dice", None)], False),
        ((0, 2), "Jaccard", "Validation Jaccard", [("val_jaccard", None)], False),
        ((1, 0), "PPV", "Validation PPV", [("val_ppv", None)], False),
        ((1, 1), "HD95", "Validation HD95", [("val_hd95", None)], False),
        ((1, 2), "Score", "All Metrics",
         [("val_dice", "Dice"), ("val_jaccard", "Jaccard"), ("val_ppv", "PPV")],
         True),
    ]

    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle(f"{model_type} Training History", fontsize=16)

    for (row, col), ylabel, title, series, with_legend in panels:
        ax = axes[row, col]
        for key, label in series:
            if label is None:
                ax.plot(history[key])
            else:
                ax.plot(history[key], label=label)
        ax.set_xlabel("Epoch")
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        if with_legend:
            ax.legend()
        ax.grid(True)

    plt.tight_layout()
    save_path = os.path.join(save_dir, f"{model_type}_training_history.png")
    plt.savefig(save_path, dpi=300)
    plt.close()
    print(f"训练曲线保存至: {save_path}")


def compare_models(config=None):
    """Train every supported model and report a side-by-side comparison.

    Trains each model in turn via train_model, prints a comparison table of
    the test-set metrics, and saves the combined results to
    `model_comparison.json` in the result directory.

    Args:
        config: Optional shared TrainingConfig. When None a default config is
            created, preserving the original no-argument behavior. Accepting
            a config lets callers (e.g. the CLI) apply epoch/batch-size/lr
            overrides to comparison runs too, instead of silently ignoring
            them.
    """
    if config is None:
        config = TrainingConfig()
    models = ["nestedunet", "resunet", "attention_resunet"]
    results = {}

    for model_type in models:
        print(f"\n{'=' * 50}")
        print(f"训练 {model_type}")
        print(f"{'=' * 50}")

        trainer, history, test_results = train_model(model_type, config)
        results[model_type] = test_results

    # Print a fixed-width comparison table of the test metrics.
    print("\n\n模型性能比较:")
    print(f"{'Model':<20} {'Dice':<10} {'Jaccard':<10} {'PPV':<10} {'HD95':<10}")
    print("-" * 60)

    for model_type, result in results.items():
        metrics = result["test_metrics"]
        print(
            f"{model_type:<20} {metrics['dice']:<10.4f} {metrics['jaccard']:<10.4f} "
            f"{metrics['ppv']:<10.4f} {metrics['hd95']:<10.4f}"
        )

    # Persist the combined results for later inspection.
    comparison_path = os.path.join(config.result_dir, "model_comparison.json")
    with open(comparison_path, "w") as f:
        json.dump(results, f, indent=4)


if __name__ == "__main__":
    # Command-line entry point: train one model, or compare all of them.
    parser = argparse.ArgumentParser(description="训练脑室分割模型")
    parser.add_argument(
        "--model",
        default="nestedunet",
        type=str,
        choices=["nestedunet", "resunet", "attention_resunet", "compare"],
        help="选择模型类型或比较所有模型",
    )
    parser.add_argument("--epochs", default=50, type=int, help="训练轮数")
    parser.add_argument("--batch_size", default=16, type=int, help="批次大小")
    parser.add_argument("--lr", default=1e-4, type=float, help="学习率")

    cli_args = parser.parse_args()

    if cli_args.model == "compare":
        # NOTE(review): compare_models() builds its own default config, so the
        # --epochs/--batch_size/--lr overrides do not apply in compare mode.
        compare_models()
    else:
        # Apply CLI overrides on top of the default configuration.
        run_config = TrainingConfig()
        run_config.epochs = cli_args.epochs
        run_config.batch_size = cli_args.batch_size
        run_config.learning_rate = cli_args.lr

        train_model(cli_args.model, run_config)