"""
音频特征处理增强版
功能：支持多种特征类型、数据增强、并行处理、完善错误日志的通用音频特征提取框架
"""

import concurrent.futures
import logging
import sys
import warnings
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import librosa
import numpy as np
import pandas as pd
import soundfile as sf
import yaml
from tqdm import tqdm

# --- Logging setup ---------------------------------------------------------
# logging.FileHandler raises FileNotFoundError if the target directory is
# missing, so make sure "logs/" exists before configuring handlers.
Path("logs").mkdir(parents=True, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("logs/feature_extraction.log"),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

# Ignore non-critical UserWarnings (librosa emits several of these).
warnings.filterwarnings("ignore", category=UserWarning)


def load_config(config_path: Path) -> Dict:
    """Load a YAML configuration file and validate required parameters.

    Exits the process with status 1 on any failure: unreadable file,
    malformed YAML, or missing required keys. The error is logged first.
    """
    required = {'sample_rate', 'n_fft', 'hop_length', 'n_mels', 'data_dir'}
    try:
        with open(config_path, "r", encoding="utf-8") as fh:
            config = yaml.safe_load(fh)

        # Fail fast when any mandatory parameter is absent.
        missing = required - config.keys()
        if missing:
            raise ValueError(f"配置文件缺少必要参数: {missing}")

        return config
    except Exception as exc:
        logger.error(f"配置文件加载失败: {str(exc)}")
        sys.exit(1)


def preprocess_audio(
        y: np.ndarray,
        sr: int,
        cfg: Dict
) -> np.ndarray:
    """Audio preprocessing pipeline.

    Applies, in order, each step gated by its config flag:
    silence trimming, peak normalization, and optional additive
    Gaussian-noise augmentation.
    """
    # Trim leading/trailing silence (on by default).
    if cfg.get('trim_silence', True):
        trimmed, _ = librosa.effects.trim(
            y,
            top_db=cfg.get('trim_threshold', 20),
            frame_length=cfg.get('trim_frame_length', 2048),
            hop_length=cfg.get('trim_hop_length', 512),
        )
        y = trimmed

    # Peak-normalize the waveform (on by default).
    if cfg.get('normalize', True):
        y = librosa.util.normalize(y)

    # Optional noise augmentation (off by default).
    if cfg.get('noise_augmentation', False):
        y = y + np.random.normal(0, cfg.get('noise_level', 0.001), len(y))

    return y


def extract_features(
        y: np.ndarray,
        sr: int,
        cfg: Dict
) -> Dict[str, np.ndarray]:
    """Extract multiple feature types from one audio signal.

    Always computes a log-Mel spectrogram; MFCC, chroma and time-domain
    statistics are added when the corresponding config flags are set.

    Returns:
        Dict mapping feature name ('mel', 'mfcc', 'chroma', 'stats') to
        its numpy array.
    """
    features = {}

    # Log-Mel spectrogram (always extracted).
    mel = librosa.feature.melspectrogram(
        y=y, sr=sr,
        n_fft=cfg['n_fft'],
        hop_length=cfg['hop_length'],
        n_mels=cfg['n_mels'],
        fmin=cfg.get('fmin', 20),
        fmax=cfg.get('fmax', sr // 2)
    )
    features['mel'] = librosa.power_to_db(mel)

    # MFCC — reuse the dB-scaled Mel spectrogram already stored above
    # instead of calling power_to_db a second time on the same data.
    if cfg.get('extract_mfcc', False):
        features['mfcc'] = librosa.feature.mfcc(
            S=features['mel'],
            n_mfcc=cfg.get('n_mfcc', 20)
        )

    # Chroma features from the STFT.
    if cfg.get('extract_chroma', False):
        features['chroma'] = librosa.feature.chroma_stft(
            y=y, sr=sr,
            n_fft=cfg['n_fft'],
            hop_length=cfg['hop_length']
        )

    # Simple time-domain summary statistics (on by default).
    if cfg.get('extract_stats', True):
        features['stats'] = np.array([
            np.mean(y), np.std(y),
            np.max(y), np.min(y),
            librosa.feature.rms(y=y).mean()
        ])

    return features


def process_single_file(
        wav_path: Path,
        output_dir: Path,
        cfg: Dict
) -> Optional[Tuple[str, str]]:
    """Process one audio file end-to-end.

    Loads the file, downmixes to mono, resamples to the configured rate,
    runs the preprocessing pipeline, extracts features and saves them as
    a compressed .npz archive.

    Returns:
        (feature_path, label) on success — label is the name of the
        file's parent directory — or None when processing failed (the
        error is logged, not raised). The original annotation claimed
        Tuple[str, str] but the failure path returns None, hence
        Optional.
    """
    try:
        # Load with soundfile for consistent float32 decoding.
        y, sr = sf.read(wav_path, dtype='float32')
        # Multi-channel audio: transpose to (channels, frames) for to_mono.
        if y.ndim > 1:
            y = librosa.to_mono(y.T)

        # Resample only when the file's native rate differs from target.
        if sr != cfg['sample_rate']:
            y = librosa.resample(y, orig_sr=sr, target_sr=cfg['sample_rate'])
            sr = cfg['sample_rate']

        # Preprocess, then extract the configured feature set.
        y = preprocess_audio(y, sr, cfg)
        features = extract_features(y, sr, cfg)

        # One compressed archive per input file, keyed by feature name.
        save_path = output_dir / f"{wav_path.stem}.npz"
        np.savez_compressed(save_path, **features)

        return str(save_path), wav_path.parent.name

    except Exception as e:
        logger.error(f"处理文件失败 {wav_path}: {str(e)}", exc_info=True)
        return None


def process_dataset(cfg: Dict):
    """Batch-process the whole dataset in parallel.

    Walks ``<data_dir>/<raw_subdir>/<class>/*.wav``, extracts features
    for every file using a thread pool, writes per-file .npz archives to
    ``<data_dir>/processed`` and a metadata CSV listing feature path,
    label and original audio path.
    """
    data_dir = Path(cfg['data_dir'])
    # Input sub-directory is configurable; defaults to the augmented set
    # (previous behavior was hard-coded to "augmented").
    raw_dir = data_dir / cfg.get('raw_subdir', 'augmented')
    processed_dir = data_dir / "processed"
    processed_dir.mkdir(parents=True, exist_ok=True)

    # Collect (wav_path, label) pairs; one sub-directory per class.
    file_list = [
        (wav, class_dir.name)
        for class_dir in raw_dir.iterdir() if class_dir.is_dir()
        for wav in class_dir.glob("*.wav")
    ]

    metadata = []
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cfg.get('num_workers', 4)
    ) as executor:
        # Map each future back to its source path directly; the previous
        # futures.index(future) lookup was an O(n) scan per completed
        # task (O(n^2) overall).
        future_to_wav = {
            executor.submit(process_single_file, wav, processed_dir, cfg): wav
            for wav, _ in file_list
        }

        # Progress-bar monitoring over completed futures.
        with tqdm(total=len(future_to_wav), desc="处理进度") as pbar:
            for future in concurrent.futures.as_completed(future_to_wav):
                result = future.result()
                if result:
                    feat_path, label = result
                    metadata.append({
                        "file_path": feat_path,
                        "label": label,
                        "original_path": str(future_to_wav[future])
                    })
                pbar.update(1)

    # Persist the metadata index for downstream training code.
    meta_df = pd.DataFrame(metadata)
    meta_path = data_dir / "metadata.csv"
    meta_df.to_csv(meta_path, index=False)
    logger.info(f"元数据保存至 {meta_path} ({len(meta_df)} 条记录)")


if __name__ == "__main__":
    # Load run configuration and record it for reproducibility.
    cfg = load_config(Path("configs/default.yaml"))
    logger.info("启动特征提取任务")
    logger.info(f"配置参数：\n{yaml.dump(cfg, indent=2)}")

    try:
        process_dataset(cfg)
    except KeyboardInterrupt:
        # Conventional exit code for SIGINT.
        logger.warning("用户中断任务")
        sys.exit(130)
    except Exception as e:
        logger.error(f"任务异常终止: {str(e)}", exc_info=True)
        sys.exit(1)
    else:
        logger.info("任务成功完成")