#!/usr/bin/env python3
"""
医学图像分割评估脚本

使用方法:
    python evaluation.py
    python evaluation.py --config custom_config.yaml
    python evaluation.py --model_path ./saved_models/best_model.pth
    python evaluation.py --dataset test  # 评估测试集
"""

import argparse
import yaml
import os
import sys
import json
import torch
from tqdm import tqdm

from data.data_loader import create_data_loaders, get_num_classes
from utils.model_utils import load_model_for_inference
from utils.metrics_utils import evaluate_metrics
from utils.plot_utils import plot_confusion_matrix, plot_metrics_comparison

def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Returns:
        argparse.Namespace with: config, model_path, dataset,
        batch_size, save_predictions.
    """
    parser = argparse.ArgumentParser(description="医学图像分割评估脚本")

    # (flag, keyword arguments) table keeps the option list compact.
    option_specs = [
        ("--config",
         dict(type=str, default="config.yaml", help="配置文件路径 (默认: config.yaml)")),
        ("--model_path",
         dict(type=str, default=None, help="模型权重路径 (默认: 使用config中的路径)")),
        ("--dataset",
         dict(type=str, choices=["val", "test"], default="test", help="评估数据集 (默认: test)")),
        ("--batch_size",
         dict(type=int, default=None, help="批次大小 (默认: 使用config中的设置)")),
        ("--save_predictions",
         dict(action="store_true", help="保存预测结果")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)

    return parser.parse_args()


def load_config(config_path):
    """Load a YAML configuration file.

    Exits the process with status 1 (after printing a message) when the
    file is missing or cannot be parsed, so callers never see an
    exception from here.

    Args:
        config_path: Path to the YAML file.

    Returns:
        The parsed configuration object (typically a dict).
    """
    if not os.path.exists(config_path):
        print(f"错误: 配置文件不存在: {config_path}")
        sys.exit(1)

    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            return yaml.safe_load(fh)
    except Exception as exc:
        # Deliberately broad: any parse/IO problem is fatal for a CLI run.
        print(f"错误: 无法加载配置文件: {exc}")
        sys.exit(1)


def evaluate_model(model, data_loader, device, num_classes, save_predictions=False, output_dir=None):
    """Run the model over a data loader and compute segmentation metrics.

    Args:
        model: Segmentation network; outputs per-class logits (N, C, H, W).
        data_loader: Yields (images, masks) batches.
        device: Device to run inference on.
        num_classes: Number of segmentation classes.
        save_predictions: When True (and output_dir is set), save each
            predicted and ground-truth mask as a PNG.
        output_dir: Directory for the saved PNGs; must already exist.

    Returns:
        (metrics, all_pred_masks, all_true_masks) where metrics comes from
        evaluate_metrics and the mask lists hold per-image CPU tensors.
    """
    model.eval()
    all_pred_masks = []
    all_true_masks = []

    if save_predictions and output_dir:
        # Hoisted out of the loop: importing per saved image is wasteful.
        from PIL import Image
        # Spread class IDs across 0-255 so the PNGs are visible and, unlike
        # the naive `astype("uint8") * 255`, do not wrap for class IDs >= 2
        # (uint8 arithmetic overflows). For binary masks this is still 0/255.
        scale = 255 // max(num_classes - 1, 1)

    with torch.no_grad():
        for batch_idx, (images, masks) in enumerate(tqdm(data_loader, desc="评估中")):
            images = images.to(device)
            masks = masks.to(device)

            # Forward pass; argmax over the class dimension gives label maps.
            outputs = model(images)
            pred_masks = torch.argmax(outputs, dim=1)

            # Move to CPU once and reuse the copies for both collection
            # and (optionally) saving, instead of calling .cpu() twice.
            pred_cpu = [m.cpu() for m in pred_masks]
            true_cpu = [m.cpu() for m in masks]
            all_pred_masks.extend(pred_cpu)
            all_true_masks.extend(true_cpu)

            if save_predictions and output_dir:
                for i, (pred_mask, true_mask) in enumerate(zip(pred_cpu, true_cpu)):
                    pred_path = os.path.join(output_dir, f"pred_batch{batch_idx}_img{i}.png")
                    true_path = os.path.join(output_dir, f"true_batch{batch_idx}_img{i}.png")
                    # Multiply before the uint8 cast to avoid overflow.
                    Image.fromarray((pred_mask.numpy() * scale).astype("uint8")).save(pred_path)
                    Image.fromarray((true_mask.numpy() * scale).astype("uint8")).save(true_path)

    # Aggregate all collected masks into the final metric dictionary.
    metrics = evaluate_metrics(all_pred_masks, all_true_masks, num_classes)

    return metrics, all_pred_masks, all_true_masks


def save_metrics_report(metrics, save_path, class_names):
    """Write evaluation metrics to disk as JSON plus a text report.

    Args:
        metrics: Dict with "accuracy" (float) and per-class lists under
            "dice", "iou", "precision", "recall", "f1_score".
        save_path: Destination for the JSON file; the text report is
            written next to it with a "_report.txt" suffix.
        class_names: Human-readable name per class index.
    """
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    # Detailed metrics, machine-readable.
    with open(save_path, "w", encoding="utf-8") as fh:
        json.dump(metrics, fh, indent=2, ensure_ascii=False)

    # (label, values) table drives both the per-class and the average
    # sections, so each metric is listed exactly once.
    stat_table = [
        ("Dice", metrics["dice"]),
        ("IoU", metrics["iou"]),
        ("Precision", metrics["precision"]),
        ("Recall", metrics["recall"]),
        ("F1-Score", metrics["f1_score"]),
    ]

    lines = [
        "医学图像分割评估报告\n",
        "=" * 50 + "\n\n",
        f"整体准确率: {metrics['accuracy']:.4f}\n\n",
        "各类别指标:\n",
        "-" * 30 + "\n",
    ]
    for idx in range(len(metrics["dice"])):
        lines.append(f"类别 {class_names[idx]} (ID: {idx}):\n")
        for label, values in stat_table:
            lines.append(f"  {label}: {values[idx]:.4f}\n")
        lines.append("\n")

    lines.append("平均指标:\n")
    lines.append("-" * 30 + "\n")
    for label, values in stat_table:
        lines.append(f"平均 {label}: {sum(values) / len(values):.4f}\n")

    report_path = save_path.replace(".json", "_report.txt")
    with open(report_path, "w", encoding="utf-8") as fh:
        fh.write("".join(lines))

    print(f"指标报告已保存到: {save_path}")
    print(f"文本报告已保存到: {report_path}")


def generate_visualizations(metrics, config, dataset_name):
    """Render evaluation plots under results/plots.

    Produces a confusion-matrix image (when enabled in
    config["evaluation"]["plot_confusion_matrix"]) and a per-class
    metrics comparison chart, both named after dataset_name.

    Args:
        metrics: Metric dict; must contain "confusion_matrix".
        config: Loaded configuration dict.
        dataset_name: "val" or "test"; used in file names and titles.
    """
    plots_dir = os.path.join("results", "plots")
    os.makedirs(plots_dir, exist_ok=True)

    num_classes = get_num_classes(config)
    data_cfg = config["data"]
    if "name_classes" in data_cfg:
        class_names = data_cfg["name_classes"]
    else:
        # Fall back to generic labels when the config names none.
        class_names = [f"Class {i}" for i in range(num_classes)]

    # Confusion matrix is optional, gated by the config flag.
    if config["evaluation"]["plot_confusion_matrix"]:
        cm_path = os.path.join(plots_dir, f"confusion_matrix_{dataset_name}.png")
        plot_confusion_matrix(
            metrics["confusion_matrix"],
            class_names,
            cm_path,
            title=f"Confusion Matrix - {dataset_name.upper()}",
        )
        print(f"混淆矩阵已保存到: {cm_path}")

    # Metrics comparison chart is always produced.
    metrics_path = os.path.join(plots_dir, f"metrics_comparison_{dataset_name}.png")
    plot_metrics_comparison(
        metrics,
        metrics_path,
        title=f"Metrics Comparison - {dataset_name.upper()}",
        class_names=class_names,
    )
    print(f"指标对比图已保存到: {metrics_path}")


def print_metrics_summary(metrics, class_names):
    """Print a console summary of the evaluation results.

    Args:
        metrics: Dict with "accuracy" (float) and per-class lists under
            "dice", "iou", "f1_score".
        class_names: Human-readable name per class index.
    """
    def _mean(values):
        # Plain arithmetic mean of a per-class metric list.
        return sum(values) / len(values)

    banner = "=" * 50
    print("\n" + banner)
    print("评估结果摘要")
    print(banner)
    print(f"整体准确率: {metrics['accuracy']:.4f}")
    print(f"平均 Dice: {_mean(metrics['dice']):.4f}")
    print(f"平均 IoU: {_mean(metrics['iou']):.4f}")
    print(f"平均 F1-Score: {_mean(metrics['f1_score']):.4f}")

    print("\n各类别详细指标:")
    print("-" * 30)
    for idx in range(len(metrics["dice"])):
        print(f"类别 {class_names[idx]}: Dice={metrics['dice'][idx]:.4f}, IoU={metrics['iou'][idx]:.4f}")
    print(banner + "\n")


def main():
    """Script entry point: load config, model and data, then evaluate.

    Exits with status 1 on any failure (missing config/model, or an
    exception during evaluation).
    """
    # Parse CLI arguments and load the YAML configuration.
    args = parse_args()
    config = load_config(args.config)

    # Resolve class count and display names once, up front.
    num_classes = get_num_classes(config)
    data_cfg = config["data"]
    if "name_classes" in data_cfg:
        class_names = data_cfg["name_classes"]
    else:
        class_names = [f"Class {i}" for i in range(num_classes)]

    # CLI path overrides the config; .get avoids a raw KeyError before
    # the friendly error message can be printed.
    model_path = args.model_path or config["evaluation"].get("model_path")
    if not model_path or not os.path.exists(model_path):
        print(f"错误: 模型文件不存在或未指定: {model_path}. 请在config.yaml中设置evaluation.model_path或通过--model_path参数指定。")
        sys.exit(1)

    # `is not None` so a falsy-but-explicit value is not silently ignored.
    if args.batch_size is not None:
        config["training"]["batch_size"] = args.batch_size

    print(f"开始评估模型: {model_path}")
    print(f"评估数据集: {args.dataset}")

    try:
        # Load the model onto its device.
        model, device = load_model_for_inference(model_path, config)
        if model is None:
            print("模型加载失败")
            sys.exit(1)  # SystemExit is not an Exception; the handler below won't swallow it.

        # Build loaders and pick the requested split.
        _, val_loader, test_loader = create_data_loaders(config)
        data_loader = val_loader if args.dataset == "val" else test_loader

        # Optional directory for saved prediction images.
        output_dir = None
        if args.save_predictions:
            output_dir = os.path.join(config["inference"]["output_dir"], f"evaluation_{args.dataset}")
            os.makedirs(output_dir, exist_ok=True)

        # Run evaluation; mask lists are not needed beyond the metrics here.
        metrics, _, _ = evaluate_model(
            model, data_loader, device, num_classes,
            save_predictions=args.save_predictions, output_dir=output_dir
        )

        print_metrics_summary(metrics, class_names)

        # Save the report next to the configured path, tagged by split.
        metrics_save_path = config["evaluation"]["metric_save_path"].replace(
            ".json", f"_{args.dataset}.json"
        )
        save_metrics_report(metrics, metrics_save_path, class_names)

        generate_visualizations(metrics, config, args.dataset)

        print("\n评估完成！结果已保存到 results/ 目录")

    except Exception as e:
        # Surface the full traceback so failures are debuggable, instead of
        # discarding it and showing only the message; then exit non-zero.
        import traceback
        traceback.print_exc()
        print(f"评估过程中发生错误: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()

