#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
评估指标工具
"""

import numpy as np
import logging
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    confusion_matrix,
    classification_report
)


def compute_metrics(y_true, y_pred, average='macro'):
    """
    Compute classification evaluation metrics.

    Args:
        y_true: Ground-truth labels.
        y_pred: Predicted labels.
        average: Averaging mode, one of 'micro', 'macro', 'weighted', 'samples'.

    Returns:
        dict: Keys 'accuracy', 'precision', 'recall', 'f1_score' (floats)
            and 'confusion_matrix' (ndarray). On any failure, zeroed
            metrics and a 1x1 zero matrix are returned as a best-effort
            fallback so an evaluation run does not crash mid-way.
    """
    try:
        accuracy = accuracy_score(y_true, y_pred)
        # zero_division=0 avoids warnings/NaN when a class is never predicted
        precision = precision_score(y_true, y_pred, average=average, zero_division=0)
        recall = recall_score(y_true, y_pred, average=average, zero_division=0)
        f1 = f1_score(y_true, y_pred, average=average, zero_division=0)

        cm = confusion_matrix(y_true, y_pred)

        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1_score': f1,
            'confusion_matrix': cm
        }
    except Exception:
        # Deliberate best-effort fallback: log the full traceback
        # (logger.exception) instead of only str(e), then return neutral
        # metrics so callers keep running.
        logger = logging.getLogger(__name__)
        logger.exception("计算指标时出错")
        return {
            'accuracy': 0.0,
            'precision': 0.0,
            'recall': 0.0,
            'f1_score': 0.0,
            'confusion_matrix': np.zeros((1, 1))
        }


def print_metrics(metrics, class_names=None):
    """
    Log evaluation metrics and a formatted confusion matrix.

    Args:
        metrics: Dict produced by compute_metrics, with keys 'accuracy',
            'precision', 'recall', 'f1_score', 'confusion_matrix'.
        class_names: Optional list of class display names; defaults to
            stringified class indices derived from the matrix size.
    """
    logger = logging.getLogger(__name__)

    logger.info("评估指标:")
    logger.info(f"准确率: {metrics['accuracy']:.4f}")
    logger.info(f"精确率: {metrics['precision']:.4f}")
    logger.info(f"召回率: {metrics['recall']:.4f}")
    logger.info(f"F1分数: {metrics['f1_score']:.4f}")

    cm = metrics['confusion_matrix']
    logger.info("混淆矩阵:")

    if class_names is None:
        class_names = [str(i) for i in range(cm.shape[0])]

    # Pad all row labels to a common width so the matrix columns line up
    # regardless of class-name length (previously a hard-coded 7 spaces,
    # which misaligned any name that was not exactly 7 characters wide).
    corner = "真实\\预测"
    label_width = max(len(name) for name in class_names + [corner])

    header = corner.ljust(label_width) + " | " + " | ".join(class_names)
    logger.info(header)
    logger.info("-" * len(header))

    for i, row in enumerate(cm):
        cells = " | ".join(f"{x:4d}" for x in row)
        logger.info(class_names[i].ljust(label_width) + " | " + cells)


def evaluate_model(model, data_loader, config):
    """
    Run a full evaluation pass over data_loader and report metrics.

    Args:
        model: Torch module; moved to the configured device and switched
            to eval mode.
        data_loader: Iterable yielding dict batches with 'frames' (inputs)
            and 'label' (targets) tensors.
        config: Config dict; reads system.device, evaluation.save_predictions
            and output_dir (all optional, with fallbacks).

    Returns:
        dict: Metrics as produced by compute_metrics.
    """
    import os
    import torch

    logger = logging.getLogger(__name__)
    logger.info("开始评估模型...")

    # Resolve device: explicit config value wins, else CUDA when available.
    device = torch.device(config.get('system', {}).get('device', 'cuda' if torch.cuda.is_available() else 'cpu'))
    model = model.to(device)

    # Eval mode: disables dropout and freezes batch-norm statistics.
    model.eval()

    all_targets = []
    all_predictions = []
    all_probabilities = []

    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch in data_loader:
            inputs = batch['frames'].to(device)
            targets = batch['label'].to(device)

            outputs = model(inputs)

            # Per-class probabilities and hard (argmax) predictions.
            probabilities = torch.softmax(outputs, dim=1)
            _, predicted = torch.max(outputs, 1)

            all_targets.extend(targets.cpu().numpy())
            all_predictions.extend(predicted.cpu().numpy())
            all_probabilities.append(probabilities.cpu().numpy())

    all_targets = np.array(all_targets)
    all_predictions = np.array(all_predictions)
    all_probabilities = np.concatenate(all_probabilities, axis=0)

    metrics = compute_metrics(all_targets, all_predictions)

    # Detailed per-class report via sklearn.
    logger.info("\n" + classification_report(all_targets, all_predictions))

    print_metrics(metrics)

    # Optionally persist raw predictions for offline analysis.
    if config.get('evaluation', {}).get('save_predictions', False):
        output_dir = config.get('output_dir', './output')
        # Fix: np.savez does not create missing directories; ensure the
        # output directory exists before writing.
        os.makedirs(output_dir, exist_ok=True)
        np.savez(
            os.path.join(output_dir, 'predictions.npz'),
            targets=all_targets,
            predictions=all_predictions,
            probabilities=all_probabilities
        )

    return metrics