"""
audio_classification_evaluation_npz.py

支持NPZ格式的音频分类模型评估完整脚本

功能增强：
1. 专门针对.npz格式音频特征文件优化
2. 自动检测并提取.npz文件中的特征数据
3. 支持多特征键名的灵活处理
4. 增强错误处理和日志记录


完整评估    python a05_model_evaluation.py --config configs/default.yaml
单样本预测   python a05_model_evaluation.py --config configs/default.yaml --sample data/processed/sample_1743671102_aug0.npz --feature-key mel    ## 支持自动检测常见特征键名(mel, mfcc等)
指定特征键名 python a05_model_evaluation.py --config configs/default.yaml --feature-key my_feature
"""

import torch
import numpy as np
import pandas as pd
from pathlib import Path
import yaml
from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List, Dict, Any, Tuple, Optional, Union
import argparse
import time
from tqdm import tqdm
import warnings
import logging

from utils.audio_cnn import AudioCNN

# Logging setup: stream to console and append to logs/evaluation.log.
# Create the log directory first -- logging.FileHandler raises
# FileNotFoundError when the parent directory does not exist.
Path('logs').mkdir(parents=True, exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/evaluation.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Silence noisy UserWarnings (e.g. from torch / plotting backends).
warnings.filterwarnings('ignore', category=UserWarning)


class NPZAudioDataset:
    """Dataset over audio features stored in ``.npz`` files.

    Duck-typed for ``torch.utils.data.DataLoader`` via ``__len__`` /
    ``__getitem__``.

    Args:
        file_list: NPZ file paths, relative to ``data_root``.
        label_list: integer labels, parallel to ``file_list``.
        data_root: root directory containing the NPZ files.
        feature_key: key of the feature array inside each NPZ archive;
            when ``None``, common key names are auto-detected.
        target_length: number of time frames each feature is padded or
            cropped to (default 1000).
        max_retries: load attempts per file before falling back to
            dummy data (default 3).
    """

    def __init__(
            self,
            file_list: List[str],
            label_list: List[int],
            data_root: str,
            feature_key: Optional[str] = None,
            target_length: int = 1000,
            max_retries: int = 3
    ):
        self.file_list = file_list
        self.label_list = label_list
        self.data_root = Path(data_root)
        self.feature_key = feature_key
        self.target_length = target_length
        self.max_retries = max_retries
        # Key names tried, in order, when feature_key is not given.
        self.common_feature_keys = ['mel', 'mfcc', 'spectrogram', 'features']

        assert len(file_list) == len(label_list), "文件列表和标签列表长度不一致"

    def __len__(self) -> int:
        return len(self.file_list)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return ``(features, label)``; features shape is
        ``[1, n_mels, target_length]`` (float32).

        On repeated load failure, returns zero features and label 0 so
        the evaluation loop is not interrupted.
        """
        retry_count = 0
        while retry_count < self.max_retries:
            try:
                npz_path = self.data_root / self.file_list[idx]

                # np.load on an .npz returns an NpzFile usable as a
                # context manager; it is closed automatically on exit.
                with np.load(npz_path, allow_pickle=True) as data:
                    # Auto-detect the feature key when none was given.
                    if self.feature_key is None:
                        found_keys = [k for k in self.common_feature_keys if k in data]
                        if not found_keys:
                            raise KeyError(f"未找到标准特征键，可用键: {list(data.keys())}")
                        feature_key = found_keys[0]
                        if retry_count == 0:  # log only on the first attempt
                            logger.info(f"自动检测到特征键: {feature_key}")
                    else:
                        feature_key = self.feature_key

                    features = data[feature_key]

                    # Normalize to a 2D array [n_mels, time].
                    if features.ndim == 1:
                        features = np.expand_dims(features, 0)
                    elif features.ndim == 3:
                        # NOTE(review): assumes the leading axis has size 1
                        # (e.g. a stored channel dim) -- confirm upstream.
                        features = np.squeeze(features, 0)

                    # Pad or crop the time axis to target_length.
                    features = self._adjust_length(features)

                    # To tensor with a leading channel dim: [1, n_mels, time].
                    features = torch.FloatTensor(features).unsqueeze(0)
                    label = torch.tensor(self.label_list[idx], dtype=torch.long)

                    return features, label

            except Exception as e:
                retry_count += 1
                if retry_count == self.max_retries:
                    logger.error(f"加载文件{self.file_list[idx]}失败(尝试{self.max_retries}次): {str(e)}")
                    # Return dummy data to avoid interrupting evaluation.
                    # NOTE(review): assumes 128 mel bins -- confirm against
                    # the feature-extraction config.
                    dummy_feat = torch.zeros(1, 128, self.target_length)
                    dummy_label = torch.tensor(0, dtype=torch.long)
                    return dummy_feat, dummy_label
                time.sleep(1)  # wait 1s before retrying

    def _adjust_length(self, features: np.ndarray) -> np.ndarray:
        """Pad (zeros on the right) or randomly crop the time axis."""
        _, time_steps = features.shape

        if time_steps < self.target_length:
            # Preserve the input dtype (np.zeros defaults to float64,
            # which silently upcast the features before).
            padded = np.zeros((features.shape[0], self.target_length),
                              dtype=features.dtype)
            padded[:, :time_steps] = features
            return padded
        elif time_steps > self.target_length:
            # Random crop. high is exclusive in np.random.randint, so use
            # diff + 1 to allow the final valid start offset as well
            # (the original excluded it: off-by-one).
            start = np.random.randint(0, time_steps - self.target_length + 1)
            return features[:, start:start + self.target_length]
        else:
            return features


# class AudioCNN(nn.Module):
#     """音频分类CNN模型（与训练脚本保持一致）"""
#
#     def __init__(self, num_classes: int):
#         super(AudioCNN, self).__init__()
#         self.features = nn.Sequential(
#             nn.Conv2d(1, 32, kernel_size=3, padding=1),
#             nn.BatchNorm2d(32),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2),
#
#             nn.Conv2d(32, 64, kernel_size=3, padding=1),
#             nn.BatchNorm2d(64),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2),
#
#             nn.Conv2d(64, 128, kernel_size=3, padding=1),
#             nn.BatchNorm2d(128),
#             nn.ReLU(),
#             nn.MaxPool2d(2, 2)
#         )
#
#         self.classifier = nn.Sequential(
#             nn.Linear(128 * 16 * 125, 512),  # 假设输入为128x1000
#             nn.ReLU(),
#             nn.Dropout(0.5),
#             nn.Linear(512, num_classes)
#         )
#
#     def forward(self, x: torch.Tensor) -> torch.Tensor:
#         x = self.features(x)
#         x = torch.flatten(x, 1)
#         x = self.classifier(x)
#         return x


def load_config(config_path: str) -> Dict[str, Any]:
    """Read the YAML config, validate required keys and apply defaults."""
    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            config = yaml.safe_load(fh)

        # These keys have no sensible defaults and must be present.
        required = {'data_dir', 'batch_size', 'model_save_path'}
        missing = required - set(config.keys())
        if missing:
            raise ValueError(f"缺少必要配置项: {missing}")

        # Optional settings fall back to their defaults.
        defaults = {
            'feature_key': None,
            'target_length': 1000,
            'processed_dir': 'processed',
        }
        for key, value in defaults.items():
            config.setdefault(key, value)

        return config
    except Exception as e:
        logger.error(f"加载配置文件失败: {str(e)}")
        raise


def load_metadata(cfg: Dict[str, Any]) -> Tuple[List[str], List[int], np.ndarray]:
    """Load the metadata CSV, encode labels and validate NPZ files.

    Args:
        cfg: config dict; uses 'data_dir', 'processed_dir',
            'model_save_path' and optional 'metadata_file'.

    Returns:
        file_list: relative NPZ paths that exist on disk.
        label_list: encoded integer labels parallel to ``file_list``.
        classes: original class names (``label_encoder.classes_``).

    Side effects:
        Saves the fitted LabelEncoder next to the model checkpoint.
    """
    meta_path = Path(cfg['data_dir']) / cfg.get('metadata_file', 'metadata.csv')

    try:
        df = pd.read_csv(meta_path)
        logger.info(f"成功加载元数据: {meta_path}")
    except Exception as e:
        logger.error(f"无法加载元数据文件 {meta_path}: {str(e)}")
        raise

    # Encode string labels to integer ids.
    label_encoder = LabelEncoder()
    df['label_encoded'] = label_encoder.fit_transform(df['label'])

    # Strip a leading 'data/processed/' (either separator) so paths are
    # relative to the processed directory.
    df['relative_path'] = df['file_path'].str.replace(
        r'^data[\\/]processed[\\/]',
        '',
        regex=True
    )

    # Keep only entries whose NPZ file actually exists on disk.
    file_list: List[str] = []
    label_list: List[int] = []
    processed_dir = Path(cfg['data_dir']) / cfg['processed_dir']
    valid_extensions = ('.npz',)

    for _, row in df.iterrows():
        file_path = processed_dir / row['relative_path']

        if file_path.exists() and file_path.suffix.lower() in valid_extensions:
            file_list.append(row['relative_path'])
            # Cast the numpy integer to a plain int for downstream use.
            label_list.append(int(row['label_encoded']))
        else:
            logger.warning(f"文件不存在或格式不支持: {file_path}")

    # Persist the label encoder next to the model checkpoint.
    encoder_path = Path(cfg['model_save_path']).parent / 'label_encoder.pth'
    # Ensure the target directory exists -- torch.save does not create it.
    encoder_path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(label_encoder, encoder_path)
    logger.info(f"标签编码器已保存到 {encoder_path}")

    return file_list, label_list, label_encoder.classes_


def load_model(
        cfg: Dict[str, Any],
        num_classes: int,
        device: torch.device
) -> Tuple[nn.Module, List[str]]:
    """Load the trained model and the class-name list.

    Args:
        cfg: config dict; uses 'model_save_path'.
        num_classes: number of output classes for the model head.
        device: device the model is moved to.

    Returns:
        (model in eval mode, class names from the saved label encoder).

    Raises:
        FileNotFoundError: if the saved label encoder is missing.
    """
    try:
        # The label encoder is stored next to the model checkpoint.
        encoder_path = Path(cfg['model_save_path']).parent / 'label_encoder.pth'
        if not encoder_path.exists():
            raise FileNotFoundError(f"标签编码器文件不存在: {encoder_path}")

        label_encoder = torch.load(encoder_path)
        class_names = list(label_encoder.classes_)

        # Rebuild the architecture, then load the trained weights.
        model = AudioCNN(num_classes=num_classes)
        state_dict = torch.load(cfg['model_save_path'], map_location=device)

        # Checkpoints saved from DataParallel prefix every key with
        # 'module.'. Strip only that leading prefix -- str.replace would
        # also mangle a 'module.' occurring mid-key.
        if all(k.startswith('module.') for k in state_dict.keys()):
            state_dict = {k[len('module.'):]: v for k, v in state_dict.items()}

        model.load_state_dict(state_dict)
        model = model.to(device)
        model.eval()

        logger.info(f"模型加载成功 | 设备: {device} | 类别数: {num_classes}")
        return model, class_names
    except Exception as e:
        logger.error(f"加载模型失败: {str(e)}")
        raise


def create_test_loader(
        cfg: Dict[str, Any],
        test_files: Optional[List[str]] = None,
        test_labels: Optional[List[int]] = None
) -> DataLoader:
    """Build a DataLoader over the NPZ test set.

    When explicit file/label lists are not supplied, they are read from
    the metadata CSV via ``load_metadata``.
    """
    try:
        # Fall back to the metadata file when no explicit split is given.
        if test_files is None or test_labels is None:
            test_files, test_labels, _ = load_metadata(cfg)

        root = Path(cfg['data_dir']) / cfg['processed_dir']

        dataset = NPZAudioDataset(
            file_list=test_files,
            label_list=test_labels,
            data_root=root,
            feature_key=cfg.get('feature_key'),
            target_length=cfg.get('target_length', 1000)
        )

        # Cap worker count at 4; no shuffling for evaluation.
        workers = min(4, cfg.get('num_workers', 4))
        loader = DataLoader(
            dataset,
            batch_size=cfg['batch_size'],
            shuffle=False,
            num_workers=workers,
            pin_memory=torch.cuda.is_available()
        )

        logger.info(f"测试集加载完成 | 样本数: {len(test_files)}")
        return loader
    except Exception as e:
        logger.error(f"创建测试加载器失败: {str(e)}")
        raise


def evaluate_model(
        model: nn.Module,
        test_loader: DataLoader,
        class_names: List[str],
        device: torch.device
) -> Dict[str, Any]:
    """Evaluate the model on the test set.

    Args:
        model: trained classifier (switched to eval mode here).
        test_loader: DataLoader yielding (features, labels) batches.
        class_names: ordered class names for the report/matrix axes.
        device: device to run inference on.

    Returns:
        Dict with overall accuracy (%), sklearn classification report
        (dict form), confusion matrix, average inference time in ms
        per sample, and the raw predictions/labels.

    Raises:
        ValueError: if the loader yields no samples.
    """
    try:
        model.eval()
        all_preds: List[int] = []
        all_labels: List[int] = []
        total_correct = 0
        total_samples = 0
        total_inference_ms = 0.0

        with torch.no_grad():
            for inputs, labels in tqdm(test_loader, desc="评估进度"):
                inputs, labels = inputs.to(device), labels.to(device)

                # Time the forward pass only.
                # NOTE(review): on CUDA this is approximate -- kernels are
                # asynchronous and no torch.cuda.synchronize() is done.
                start_time = time.perf_counter()
                outputs = model(inputs)
                total_inference_ms += (time.perf_counter() - start_time) * 1000

                _, preds = torch.max(outputs, 1)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                total_correct += (preds == labels).sum().item()
                total_samples += labels.size(0)

        if total_samples == 0:
            raise ValueError("测试集为空，无法评估")

        accuracy = 100 * total_correct / total_samples
        # Per-sample average. The original averaged per *batch* but
        # reported the figure as ms/sample.
        avg_inference_time = total_inference_ms / total_samples

        # Pin the label set explicitly so the report/matrix stay aligned
        # with class_names even when a class is absent from the test split
        # (otherwise target_names length mismatches and raises).
        label_ids = list(range(len(class_names)))
        report = classification_report(
            all_labels,
            all_preds,
            labels=label_ids,
            target_names=class_names,
            output_dict=True,
            zero_division=0
        )

        confusion = confusion_matrix(all_labels, all_preds, labels=label_ids)

        logger.info("评估完成")
        return {
            'accuracy': accuracy,
            'report': report,
            'confusion': confusion,
            'inference_time': avg_inference_time,
            'predictions': all_preds,
            'labels': all_labels
        }
    except Exception as e:
        logger.error(f"评估过程中出错: {str(e)}")
        raise


def save_evaluation_results(
        results: Dict[str, Any],
        output_dir: Path,
        class_names: List[str]
) -> Dict[str, Path]:
    """Persist evaluation results: a text report plus two PNG charts.

    Args:
        results: dict produced by ``evaluate_model`` with keys
            'accuracy', 'report', 'confusion', 'inference_time',
            'predictions', 'labels'.
        output_dir: directory to write into (created if missing).
        class_names: ordered class names used for axis/row labels.

    Returns:
        Dict mapping a descriptive key to each generated file path.

    Raises:
        Re-raises any exception after logging it.
    """
    try:
        output_dir.mkdir(parents=True, exist_ok=True)

        # Plain-text report: overall metrics, per-class report, and the
        # confusion matrix as a table.
        report_path = output_dir / "evaluation_report.txt"
        with open(report_path, "w", encoding="utf-8") as f:
            f.write(f"模型评估报告\n{'=' * 30}\n")
            f.write(f"整体准确率: {results['accuracy']:.2f}%\n")
            f.write(f"平均推理时间: {results['inference_time']:.2f} ms/样本\n\n")

            # Per-class precision/recall/F1 from the sklearn report dict.
            f.write("分类报告:\n")
            df_report = pd.DataFrame(results['report']).transpose()
            f.write(df_report.to_string())

            # Confusion matrix rendered as a labeled DataFrame.
            f.write("\n\n混淆矩阵:\n")
            df_cm = pd.DataFrame(
                results['confusion'],
                index=class_names,
                columns=class_names
            )
            f.write(df_cm.to_string())

        # Confusion-matrix heatmap (PNG).
        # NOTE(review): Chinese titles/labels need a CJK-capable
        # matplotlib font configured elsewhere -- confirm rendering.
        plt.figure(figsize=(12, 10))
        sns.heatmap(
            results['confusion'],
            annot=True,
            fmt="d",
            cmap="Blues",
            xticklabels=class_names,
            yticklabels=class_names
        )
        plt.title("混淆矩阵", fontsize=14)
        plt.xlabel("预测标签", fontsize=12)
        plt.ylabel("真实标签", fontsize=12)
        plt.xticks(rotation=45, ha='right')
        plt.yticks(rotation=0)
        plt.tight_layout()
        cm_path = output_dir / "confusion_matrix.png"
        plt.savefig(cm_path, dpi=300, bbox_inches='tight')
        plt.close()

        # Per-class accuracy bar chart (PNG).
        class_acc = []
        for i, class_name in enumerate(class_names):
            # Accuracy restricted to samples whose true label is class i;
            # 0 when the class has no samples in the test set.
            mask = np.array(results['labels']) == i
            correct = (np.array(results['predictions'])[mask] == i).sum()
            total = mask.sum()
            class_acc.append(100 * correct / total if total > 0 else 0)

        plt.figure(figsize=(12, 6))
        bars = plt.bar(class_names, class_acc, color='skyblue')
        # Dashed reference line at the overall accuracy.
        plt.axhline(results['accuracy'], color='red', linestyle='--',
                    label=f'平均准确率: {results["accuracy"]:.1f}%')
        plt.ylim(0, 100)
        plt.title("各类别准确率", fontsize=14)
        plt.xlabel("类别", fontsize=12)
        plt.ylabel("准确率 (%)", fontsize=12)
        plt.xticks(rotation=45, ha='right')

        # Annotate each bar with its percentage value.
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width() / 2., height,
                     f'{height:.1f}%',
                     ha='center', va='bottom')

        plt.legend()
        plt.tight_layout()
        acc_path = output_dir / "class_accuracy.png"
        plt.savefig(acc_path, dpi=300, bbox_inches='tight')
        plt.close()

        logger.info(f"评估结果已保存到 {output_dir}")
        return {
            'report_path': report_path,
            'confusion_matrix_path': cm_path,
            'accuracy_plot_path': acc_path
        }
    except Exception as e:
        logger.error(f"保存评估结果失败: {str(e)}")
        raise


def predict_single_npz(
        model: nn.Module,
        npz_path: Path,
        cfg: Dict[str, Any],
        device: torch.device
) -> Tuple[str, float, Dict[str, float]]:
    """Predict the class of a single NPZ sample.

    Args:
        model: trained classifier in eval mode.
        npz_path: path to the sample's .npz file.
        cfg: config dict ('data_dir', 'processed_dir',
            'model_save_path', optional 'feature_key'/'target_length').
        device: device to run inference on.

    Returns:
        predicted_class: predicted class name.
        confidence: highest softmax probability.
        class_probabilities: probability for every class.
    """
    try:
        processed_root = Path(cfg['data_dir']) / cfg['processed_dir']

        # Load through NPZAudioDataset so preprocessing (key detection,
        # length adjustment) matches evaluation exactly. Fall back to the
        # file's own directory when the sample lies outside the processed
        # dir -- Path.relative_to raises ValueError there, which
        # previously crashed single-sample prediction.
        try:
            file_entry = str(npz_path.relative_to(processed_root))
            data_root = processed_root
        except ValueError:
            file_entry = npz_path.name
            data_root = npz_path.parent

        temp_dataset = NPZAudioDataset(
            file_list=[file_entry],
            label_list=[0],  # dummy label, ignored
            data_root=data_root,
            feature_key=cfg.get('feature_key'),
            target_length=cfg.get('target_length', 1000)
        )

        # Fetch the preprocessed sample and add the batch dimension.
        sample, _ = temp_dataset[0]
        sample = sample.unsqueeze(0).to(device)

        with torch.no_grad():
            output = model(sample)
            probabilities = torch.softmax(output, dim=1)
            confidence, pred_idx = torch.max(probabilities, 1)
            confidence = confidence.item()
            # squeeze(0) removes only the batch dim (plain squeeze()
            # would also drop a size-1 class dim).
            prob_dist = probabilities.squeeze(0).cpu().numpy()

        # Map indices back to class names via the saved encoder.
        encoder_path = Path(cfg['model_save_path']).parent / 'label_encoder.pth'
        label_encoder = torch.load(encoder_path)
        class_names = label_encoder.classes_

        class_prob = {class_names[i]: float(prob_dist[i]) for i in range(len(class_names))}

        return class_names[pred_idx.item()], confidence, class_prob
    except Exception as e:
        logger.error(f"单样本预测失败: {str(e)}")
        raise


def main():
    """CLI entry: run full evaluation, or single-sample prediction."""
    parser = argparse.ArgumentParser(description="NPZ音频分类模型评估")
    parser.add_argument("--config", type=str, default="configs/default.yaml",
                        help="配置文件路径")
    parser.add_argument("--sample", type=str, default=None,
                        help="(可选)要预测的NPZ样本路径")
    parser.add_argument("--feature-key", type=str, default=None,
                        help="(可选)手动指定NPZ文件中的特征键名")
    args = parser.parse_args()

    try:
        cfg = load_config(args.config)

        # A command-line feature key overrides the config file.
        if args.feature_key:
            cfg['feature_key'] = args.feature_key

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"使用设备: {device}")

        encoder_path = Path(cfg['model_save_path']).parent / 'label_encoder.pth'

        # --- single-sample prediction mode ---
        if args.sample:
            sample_path = Path(args.sample)
            if not sample_path.exists():
                raise FileNotFoundError(f"样本文件不存在: {sample_path}")

            if sample_path.suffix.lower() != '.npz':
                logger.warning(f"警告: 样本文件不是NPZ格式 ({sample_path.suffix})")

            encoder = torch.load(encoder_path)
            model, _ = load_model(cfg, len(encoder.classes_), device)

            pred_class, confidence, class_prob = predict_single_npz(
                model, sample_path, cfg, device)

            print("\n预测结果:")
            print(f"- 预测类别: {pred_class}")
            print(f"- 置信度: {confidence:.2%}")
            print("\n类别概率分布:")
            ranked = sorted(class_prob.items(), key=lambda item: item[1],
                            reverse=True)
            for cls, prob in ranked:
                print(f"  {cls}: {prob:.2%}")
            return

        # --- full evaluation mode ---
        logger.info("开始完整模型评估流程...")

        encoder = torch.load(encoder_path)
        model, class_names = load_model(cfg, len(encoder.classes_), device)

        test_loader = create_test_loader(cfg)
        results = evaluate_model(model, test_loader, class_names, device)

        print("\n评估结果摘要:")
        print(f"- 准确率: {results['accuracy']:.2f}%")
        print(f"- 平均推理时间: {results['inference_time']:.2f} ms/样本")

        output_dir = Path(cfg['model_save_path']).parent / "evaluation_results"
        saved_files = save_evaluation_results(results, output_dir, class_names)

        print("\n生成的结果文件:")
        for name, path in saved_files.items():
            print(f"- {name}: {path}")

    except Exception as e:
        logger.error(f"程序运行出错: {str(e)}")
        raise


# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()