#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main entry point for the medical MRI brain-ventricle segmentation system.

Supports training the models and launching the web service.
"""

import argparse
import logging
import os

# Configure root logging once for the whole process.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def setup_environment():
    """Create the required directory layout and verify the datasets exist.

    Returns:
        bool: True when both dataset directories contain files; False when
        either ``data/brainskull`` or ``data/venmask12`` is empty, in which
        case the caller should abort.
    """
    # Directories the pipeline expects to be present.
    directories = [
        "data/brainskull",
        "data/venmask12",
        "run/uploads",
        "run/results",
        "run/checkpoints",
        "run/logs",
        "run/temp",
    ]

    for dir_name in directories:
        if not os.path.exists(dir_name):
            # exist_ok=True avoids a crash if the directory appears between
            # the exists() check and makedirs() (TOCTOU race).
            os.makedirs(dir_name, exist_ok=True)
            logger.info(f"创建目录: {dir_name}")

    # The datasets cannot be generated automatically; warn and signal
    # failure so the caller stops before training/serving with no data.
    if not os.listdir("data/brainskull"):
        logger.warning("警告: brainskull目录为空，请添加MRI图像数据")
        return False

    if not os.listdir("data/venmask12"):
        logger.warning("警告: venmask12目录为空，请添加标注数据")
        return False

    return True


def train_models(models=None, epochs=50):
    """Train one or more segmentation models sequentially.

    Args:
        models: List of model names to train. When None, all three
            architectures are trained.
        epochs: Number of training epochs applied to every model.
    """
    from train import TrainingConfig, train_model

    if models is None:
        models = ["nestedunet", "resunet", "attention_resunet"]

    config = TrainingConfig()
    config.epochs = epochs

    logger.info(f"开始训练模型: {', '.join(models)}")

    banner = "=" * 50
    for model_name in models:
        logger.info(f"\n{banner}")
        logger.info(f"训练 {model_name}")
        logger.info(f"{banner}")
        train_model(model_name, config)


def start_web_server(host="0.0.0.0", port=5000, debug=False):
    """Start the Flask web server, reporting which trained models exist.

    Args:
        host: Interface to bind the server to.
        port: TCP port to listen on.
        debug: Enable Flask debug mode.
    """
    logger.info("启动Web服务器...")

    # Discover which "best" checkpoints are present on disk.
    candidate_checkpoints = (
        "run/checkpoints/nestedunet_best.pth",
        "run/checkpoints/resunet_best.pth",
        "run/checkpoints/attention_resunet_best.pth",
    )
    available_models = [
        os.path.basename(path).replace("_best.pth", "")
        for path in candidate_checkpoints
        if os.path.exists(path)
    ]

    if available_models:
        logger.info(f"可用模型: {', '.join(available_models)}")
    else:
        logger.warning("未找到训练好的模型，某些功能可能不可用")
        logger.info("请先运行: python main.py --train")

    # Import lazily so Flask is only required when serving.
    from app import app, load_models

    logger.info("加载模型...")
    load_models()

    logger.info(f"Web服务器运行在: http://{host}:{port}")
    app.run(host=host, port=port, debug=debug)


def test_models():
    """Evaluate every trained model on the held-out test split.

    Loads each available checkpoint, runs it over the test loader, and logs
    the mean of every segmentation metric.

    Returns:
        dict: model name -> {metric name: mean value} for each model whose
        checkpoint file was found (and whose test loader was non-empty).
    """
    # All imports are function-local (consistent with inference()) so the
    # function works even when this module is imported rather than run as a
    # script — TrainingConfig/np were previously only bound in __main__.
    import numpy as np
    import torch

    from data_loader import DataLoaderManager
    from metrics import SegmentationMetrics
    from nestedunet import NestedUNet
    from preprocessing import DataPreprocessor
    from resunet import AttentionResUNet, ResUNet
    from train import TrainingConfig

    logger.info("测试模型性能...")

    # Build the test split with the same ratios used during training.
    config = TrainingConfig()
    preprocessor = DataPreprocessor()
    data_manager = DataLoaderManager(config)

    _, _, test_loader = data_manager.prepare_data_loaders(
        "data/brainskull",
        "data/venmask12",
        preprocessor,
        config.train_ratio,
        config.val_ratio,
        config.batch_size,
    )

    # Model class and expected checkpoint path for each architecture.
    models_to_test = {
        "nestedunet": (NestedUNet, "run/checkpoints/nestedunet_best.pth"),
        "resunet": (ResUNet, "run/checkpoints/resunet_best.pth"),
        "attention_resunet": (
            AttentionResUNet,
            "run/checkpoints/attention_resunet_best.pth",
        ),
    }

    metrics_calculator = SegmentationMetrics()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    results = {}
    for model_name, (model_class, checkpoint_path) in models_to_test.items():
        if not os.path.exists(checkpoint_path):
            logger.warning(f"未找到{model_name}的检查点文件")
            continue

        # Load the trained weights (weights_only=False for consistency with
        # inference(); checkpoints are trusted local files).
        model = model_class(in_channels=1, out_channels=1).to(device)
        checkpoint = torch.load(
            checkpoint_path, map_location=device, weights_only=False
        )
        model.load_state_dict(checkpoint["model_state_dict"])
        model.eval()

        # Accumulate per-batch metrics over the whole test set.
        all_metrics = []
        with torch.no_grad():
            for batch in test_loader:
                images = batch["image"].to(device)
                masks = batch["mask"].to(device)

                outputs = model(images)
                # Deep-supervision models return a list; keep the final map.
                if isinstance(outputs, list):
                    outputs = outputs[-1]

                pred = torch.sigmoid(outputs)
                _, batch_metrics = metrics_calculator.compute_metrics_batch(pred, masks)
                all_metrics.append(batch_metrics)

        if not all_metrics:
            # Empty test loader: nothing to average; previously this raised
            # IndexError on all_metrics[0].
            logger.warning(f"未找到{model_name}的测试数据")
            continue

        # Average each metric across batches.
        avg_metrics = {
            key: np.mean([m[key] for m in all_metrics]) for key in all_metrics[0]
        }

        results[model_name] = avg_metrics

        logger.info(f"\n{model_name} 测试结果:")
        for metric, value in avg_metrics.items():
            logger.info(f"  {metric}: {value:.4f}")

    return results

def inference(model_name="nestedunet"):
    """Run inference on a random test-set sample and visualize the result.

    Args:
        model_name: Model name ('nestedunet', 'resunet', 'attention_resunet').

    Returns:
        dict of metric name -> value for the sampled slice, or None when the
        checkpoint is missing, the test set is empty, or the name is unknown.
    """
    import random

    import matplotlib.pyplot as plt
    import numpy as np
    import torch

    from data_loader import DataLoaderManager
    from metrics import SegmentationMetrics
    from nestedunet import NestedUNet
    from preprocessing import DataPreprocessor
    from resunet import AttentionResUNet, ResUNet
    from train import TrainingConfig

    logger.info(f"使用{model_name}模型对随机测试样本进行推理...")

    # Make sure a trained checkpoint exists before loading any data.
    checkpoint_path = f"run/checkpoints/{model_name}_best.pth"
    if not os.path.exists(checkpoint_path):
        logger.error(f"模型文件不存在: {checkpoint_path}")
        logger.info("请先训练模型: python main.py --train")
        return

    # Prepare data — load only the test split.
    config = TrainingConfig()
    preprocessor = DataPreprocessor()
    data_manager = DataLoaderManager(config)

    # Batch size 1 keeps visualization simple (one slice per batch).
    test_loader = data_manager.prepare_test_loader_only(
        "data/brainskull",
        "data/venmask12",
        preprocessor,
        config.train_ratio,
        config.val_ratio,
        1,
    )

    # Pick one random sample from the test set.
    test_data = list(test_loader)
    if not test_data:
        logger.error("测试集为空")
        return

    sample = random.choice(test_data)

    # Build the requested architecture and load its weights.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model_name == "nestedunet":
        model = NestedUNet(in_channels=1, out_channels=1)
    elif model_name == "resunet":
        model = ResUNet(in_channels=1, out_channels=1)
    elif model_name == "attention_resunet":
        model = AttentionResUNet(in_channels=1, out_channels=1)
    else:
        logger.error(f"未知模型类型: {model_name}")
        return

    model = model.to(device)
    checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=False)
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()

    # Unpack the sampled batch.
    image = sample["image"].to(device)
    mask = sample["mask"].to(device)
    filename = sample["filename"][0]
    slice_idx = sample["slice_idx"].item()

    # Forward pass.
    with torch.no_grad():
        output = model(image)
        # Deep-supervision models return a list; use the final output.
        if isinstance(output, list):
            output = output[-1]

        pred = torch.sigmoid(output)

    # Compute evaluation metrics for this slice.
    metrics_calculator = SegmentationMetrics()
    _, metrics = metrics_calculator.compute_metrics_batch(pred, mask)

    # Convert to NumPy arrays for visualization.
    image_np = image.cpu().numpy()[0, 0]
    mask_np = mask.cpu().numpy()[0, 0]
    pred_np = pred.cpu().numpy()[0, 0]
    pred_binary = (pred_np > 0.5).astype(np.uint8)

    # Show input / prediction / ground truth side by side.
    plt.figure(figsize=(15, 5))

    plt.subplot(131)
    plt.title('Input Image')
    plt.imshow(image_np, cmap='gray')
    plt.axis('off')

    plt.subplot(132)
    plt.title('Model Prediction')
    plt.imshow(pred_binary, cmap='gray')
    plt.axis('off')

    plt.subplot(133)
    plt.title('Ground Truth')
    plt.imshow(mask_np, cmap='gray')
    plt.axis('off')

    # FIX: use the sample's actual filename (it was extracted but unused;
    # the title previously contained a literal placeholder).
    plt.suptitle(f'File: {filename}, Slice: {slice_idx}', fontsize=16)

    # Annotate the figure with the evaluation metrics.
    metrics_text = "\n".join([
        f"Dice: {metrics['dice']:.4f}",
        f"Jaccard: {metrics['jaccard']:.4f}",
        f"PPV: {metrics['ppv']:.4f}",
        f"Sensitivity: {metrics['sensitivity']:.4f}",
        f"HD95: {metrics['hd95']:.4f}"
    ])

    plt.figtext(0.02, 0.02, metrics_text, wrap=True, fontsize=12,
                bbox=dict(facecolor='white', alpha=0.8))

    # Save the figure; include the source filename in the output name.
    result_dir = "run/results"
    os.makedirs(result_dir, exist_ok=True)
    result_path = os.path.join(
        result_dir, f"inference_{model_name}_{filename}_{slice_idx}.png"
    )
    plt.savefig(result_path, dpi=150, bbox_inches='tight')

    logger.info(f"推理结果已保存至: {result_path}")

    # Display the figure interactively.
    plt.tight_layout()
    plt.show()

    # Log the metrics.
    logger.info("\n评估指标:")
    for metric, value in metrics.items():
        logger.info(f"  {metric}: {value:.4f}")

    return metrics


def _build_parser():
    """Build the command-line argument parser for the segmentation system."""
    parser = argparse.ArgumentParser(
        description="医学MRI脑室分割系统",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
使用示例:
  # 训练所有模型
  python main.py --train
  
  # 训练特定模型
  python main.py --train --models nestedunet resunet
  
  # 启动Web服务
  python main.py --web
  
  # 测试模型性能
  python main.py --test
  
  # 随机样本推理
  python main.py --inference --model nestedunet
  
  # 设置环境
  python main.py --setup
        """,
    )

    parser.add_argument("--train", action="store_true", help="训练模型")
    parser.add_argument(
        "--models",
        nargs="+",
        choices=["nestedunet", "resunet", "attention_resunet"],
        help="指定要训练的模型",
    )
    parser.add_argument("--epochs", type=int, default=50, help="训练轮数 (默认: 50)")
    parser.add_argument("--web", action="store_true", help="启动Web服务器")
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="Web服务器地址 (默认: 0.0.0.0)"
    )
    parser.add_argument(
        "--port", type=int, default=5000, help="Web服务器端口 (默认: 5000)"
    )
    parser.add_argument("--test", action="store_true", help="测试模型性能")
    parser.add_argument("--setup", action="store_true", help="设置环境和检查依赖")
    parser.add_argument("--debug", action="store_true", help="调试模式")
    parser.add_argument("--inference", action="store_true", help="对随机测试样本进行推理")
    parser.add_argument("--model", type=str, default="nestedunet", 
                        choices=["nestedunet", "resunet", "attention_resunet"],
                        help="用于推理的模型 (默认: nestedunet)")

    return parser


def main():
    """Entry point: parse CLI flags and dispatch the requested actions."""
    parser = _build_parser()
    args = parser.parse_args()

    # With no action flag at all, just show usage and exit.
    requested_any = any(
        [args.train, args.web, args.test, args.setup, args.inference]
    )
    if not requested_any:
        parser.print_help()
        return

    # Verify directories/datasets before doing anything else.
    if not setup_environment():
        logger.error("环境设置失败")
        return

    # Actions are independent and may be combined on one invocation.
    if args.train:
        train_models(args.models, args.epochs)

    if args.test:
        test_models()

    if args.inference:
        inference(args.model)

    if args.web:
        start_web_server(args.host, args.port, args.debug)


if __name__ == "__main__":
    # NOTE(review): these names are only bound when the file is run as a
    # script. test_models() references `np` and `TrainingConfig` from module
    # scope, so importing this module elsewhere and calling test_models()
    # would raise NameError — confirm whether that path is ever used.
    import numpy as np

    from train import TrainingConfig

    main()