"""
音频分类模型训练增强版
功能：带完整类型注解、自动混合精度训练、梯度裁剪、学习率调度、模型检查点保存的完整训练流程
"""

import torch
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
import yaml
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from typing import List, Tuple, Dict, Any, Optional
from tqdm import tqdm
import torch.cuda.amp as amp  # 自动混合精度

# 自定义模块
from utils.audio_cnn import AudioCNN


class AudioDataset(Dataset):
    """Dataset of precomputed audio spectrogram features.

    Responsibilities:
    - Load preprocessed audio feature files (.npy or .npz).
    - Normalize the time axis to a fixed length (zero-pad or random crop).
    - Convert features to PyTorch tensors with a leading channel dimension.

    Args:
        file_list: Feature file paths relative to ``processed_dir``.
        label_list: Integer class label for each file.
        data_dir: Root directory of the raw data (kept for reference).
        processed_dir: Directory holding the preprocessed feature files.
        target_length: Target number of time frames (default 1000).
        n_mels: Number of mel bins; used only for the fallback dummy
            sample returned when a file fails to load (default 128,
            matching the previous hard-coded value).
    """

    def __init__(
            self,
            file_list: List[str],
            label_list: List[int],
            data_dir: str,
            processed_dir: str,
            target_length: int = 1000,
            n_mels: int = 128
    ):
        self.file_list = file_list
        self.label_list = label_list
        self.data_dir = Path(data_dir)
        self.processed_dir = Path(processed_dir)
        self.target_length = target_length
        self.n_mels = n_mels

        # Fail fast on mismatched inputs. A real exception instead of
        # `assert`, which is stripped under `python -O`.
        if len(file_list) != len(label_list):
            raise ValueError("文件列表和标签列表长度不一致")

    def __len__(self) -> int:
        """Return the total number of samples."""
        return len(self.file_list)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Fetch one sample.

        Returns:
            features: Spectrogram tensor of shape [1, n_mels, target_length].
            label: Class label as a 0-dim long tensor.
        """
        try:
            # Load the feature file; expected layout is [n_mels, time].
            data = np.load(self.processed_dir / self.file_list[idx])

            # Handle both save formats:
            if isinstance(data, np.lib.npyio.NpzFile):
                # .npz archive: extract the 'mel' array, then close the handle.
                features = data['mel']
                data.close()
            else:
                # Plain .npy file: the loaded array itself.
                features = data

            # Normalize the time-axis length.
            features = self._adjust_length(features)

            # To tensor with a channel dim prepended: [1, n_mels, time].
            features = torch.FloatTensor(features).unsqueeze(0)
            label = torch.tensor(self.label_list[idx], dtype=torch.long)

            return features, label

        except Exception as e:
            print(f"加载文件{self.file_list[idx]}失败: {str(e)}")
            # Best-effort fallback: a zero sample keeps training alive.
            dummy_feat = torch.zeros(1, self.n_mels, self.target_length)
            dummy_label = torch.tensor(0, dtype=torch.long)
            return dummy_feat, dummy_label

    def _adjust_length(self, features: np.ndarray) -> np.ndarray:
        """Zero-pad (on the right) or randomly crop the time axis."""
        current = features.shape[1]
        if current < self.target_length:
            padded = np.zeros((features.shape[0], self.target_length),
                              dtype=features.dtype)
            padded[:, :current] = features
            return padded
        # Random crop. Upper bound is +1 because np.random.randint excludes
        # `high`; previously `current == target_length` raised ValueError
        # (randint(0, 0)), silently turning valid samples into zero dummies.
        start = np.random.randint(0, current - self.target_length + 1)
        return features[:, start:start + self.target_length]

def load_metadata(cfg: Dict[str, Any]) -> Tuple[List[str], List[int], np.ndarray]:
    """Load the metadata CSV and keep only rows whose feature file exists.

    Also fits a LabelEncoder on the 'label' column and persists it next to
    the model save path for later inference-time decoding.

    Args:
        cfg: Configuration dict; reads 'data_dir', 'metadata_file',
            'processed_dir' and 'model_save_path'.

    Returns:
        file_list: Relative feature-file paths that exist on disk.
        label_list: Encoded integer label for each kept file.
        class_names: Array of original class names in encoder order.

    Raises:
        ValueError: If the metadata CSV cannot be read.
    """
    meta_path = Path(cfg['data_dir']) / cfg['metadata_file']

    # Read the metadata table.
    try:
        df = pd.read_csv(meta_path)
    except Exception as e:
        # Chain the original cause for easier debugging.
        raise ValueError(f"无法加载元数据文件 {meta_path}: {str(e)}") from e

    # Encode string labels to contiguous integers.
    label_encoder = LabelEncoder()
    df['label_encoded'] = label_encoder.fit_transform(df['label'])

    # Strip the leading 'data/processed' prefix (either slash style) so
    # paths are relative to the processed directory.
    df['relative_path'] = df['file_path'].str.replace(
        r'^data[\\/]processed[\\/]',
        '',
        regex=True
    )

    # Keep only rows whose feature file actually exists.
    file_list: List[str] = []
    label_list: List[int] = []
    processed_dir = Path(cfg['data_dir']) / cfg['processed_dir']

    for _, row in df.iterrows():
        npy_path = processed_dir / row['relative_path']
        if npy_path.exists():
            file_list.append(row['relative_path'])
            # Cast numpy integer to a plain int, matching the annotation.
            label_list.append(int(row['label_encoded']))
        else:
            print(f"警告: 文件不存在 {npy_path}")

    # Persist the label encoder next to the model. Create the directory
    # first so a fresh checkout does not crash inside torch.save.
    encoder_path = Path(cfg['model_save_path']).parent / 'label_encoder.pth'
    encoder_path.parent.mkdir(parents=True, exist_ok=True)
    torch.save(label_encoder, encoder_path)
    print(f"标签编码器已保存到 {encoder_path}")

    return file_list, label_list, label_encoder.classes_


def create_data_loaders(
        cfg: Dict[str, Any],
        file_list: List[str],
        label_list: List[int]
) -> Tuple[DataLoader, DataLoader]:
    """Create the training and validation data loaders.

    Args:
        cfg: Configuration dict; reads 'val_ratio', 'random_seed',
            'data_dir', 'processed_dir', 'batch_size', 'num_workers'.
        file_list: Feature file paths (relative to the processed dir).
        label_list: Corresponding labels.

    Returns:
        train_loader: Shuffled training-set loader.
        val_loader: Validation-set loader.
    """
    # Stratified split preserves the class distribution in both sets.
    X_train, X_val, y_train, y_val = train_test_split(
        file_list,
        label_list,
        test_size=cfg.get('val_ratio', 0.2),
        stratify=label_list,
        random_state=cfg.get('random_seed', 42)
    )

    processed_dir = Path(cfg['data_dir']) / cfg['processed_dir']

    num_workers = int(cfg.get('num_workers', 4))
    pin_memory = torch.cuda.is_available()
    # DataLoader raises ValueError if persistent_workers=True while
    # num_workers == 0, so only keep workers alive when there are workers.
    persistent = num_workers > 0

    # Training loader (with shuffling).
    train_loader = DataLoader(
        AudioDataset(X_train, y_train, cfg['data_dir'], processed_dir),
        batch_size=cfg['batch_size'],
        shuffle=True,
        num_workers=num_workers,
        pin_memory=pin_memory,
        persistent_workers=persistent
    )

    # Validation loader (no shuffling needed).
    val_loader = DataLoader(
        AudioDataset(X_val, y_val, cfg['data_dir'], processed_dir),
        batch_size=cfg['batch_size'],
        num_workers=num_workers,
        pin_memory=pin_memory,
        persistent_workers=persistent
    )

    print(f"数据加载器创建完成 | 训练集: {len(X_train)} | 验证集: {len(X_val)}")
    return train_loader, val_loader


def setup_training(cfg: Dict[str, Any], num_classes: int) -> Tuple[
    nn.Module, optim.Optimizer, amp.GradScaler, optim.lr_scheduler._LRScheduler]:
    """Initialize all training components.

    Args:
        cfg: Configuration dict; reads 'use_amp', 'lr', 'weight_decay',
            'epochs', 'min_lr'. May set cfg['use_amp'] = False when CUDA
            is unavailable.
        num_classes: Number of output classes for the model head.

    Returns:
        model: Model moved to the selected device.
        optimizer: AdamW optimizer with weight decay.
        scaler: Mixed-precision gradient scaler.
        scheduler: Cosine-annealing LR scheduler.
    """
    # Resolve AMP once so the CUDA guard and the scaler agree. Previously
    # the guard defaulted use_amp to False while GradScaler defaulted it to
    # True, enabling the scaler on CPU when the key was absent from cfg.
    use_amp = bool(cfg.get('use_amp', True))
    if use_amp and not torch.cuda.is_available():
        print("警告: 当前环境不支持CUDA，已自动禁用混合精度训练")
        use_amp = False
        cfg['use_amp'] = False

    # Initialize the model.
    model = AudioCNN(num_classes=num_classes)

    # Move to GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    print(f"使用设备: {device}")

    # Coerce values that may arrive as strings from YAML (e.g. "1e-3").
    weight_decay = float(cfg.get('weight_decay', 1e-4))
    lr = float(cfg['lr'])

    # Optimizer with decoupled weight decay. Fix: use the validated `lr`;
    # the raw cfg['lr'] previously bypassed the float cast above.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=lr,
        weight_decay=weight_decay
    )

    # Cosine-annealing learning-rate schedule over the full run.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=int(cfg['epochs']),
        eta_min=float(cfg.get('min_lr', 1e-6))
    )

    # Mixed-precision gradient scaler (no-op when disabled).
    scaler = amp.GradScaler(enabled=use_amp)

    return model, optimizer, scaler, scheduler


def train_epoch(
        model: nn.Module,
        train_loader: DataLoader,
        optimizer: optim.Optimizer,
        criterion: nn.Module,
        scaler: amp.GradScaler,
        device: torch.device,
        grad_clip: float = 1.0
) -> float:
    """Run a single training epoch.

    Returns:
        The mean per-batch training loss.
    """
    model.train()
    loss_sum = 0.0

    bar = tqdm(train_loader, desc="训练", leave=False)
    for batch_x, batch_y in bar:
        batch_x = batch_x.to(device)
        batch_y = batch_y.to(device)

        # Forward pass under autocast; a no-op when AMP is disabled.
        with amp.autocast(enabled=scaler.is_enabled()):
            loss = criterion(model(batch_x), batch_y)

        # Backward pass with loss scaling.
        optimizer.zero_grad()
        scaler.scale(loss).backward()

        # Clip on the unscaled gradients.
        if grad_clip > 0:
            scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(model.parameters(), grad_clip)

        # Optimizer step, then refresh the scale factor.
        scaler.step(optimizer)
        scaler.update()

        # Track the running loss and surface it in the progress bar.
        batch_loss = loss.item()
        loss_sum += batch_loss
        bar.set_postfix(loss=batch_loss)

    return loss_sum / len(train_loader)


def validate(
        model: nn.Module,
        val_loader: DataLoader,
        criterion: nn.Module,
        device: torch.device
) -> Tuple[float, float]:
    """Evaluate the model on the validation set.

    Returns:
        avg_loss: Mean per-batch validation loss.
        accuracy: Top-1 accuracy as a percentage.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for batch_x, batch_y in tqdm(val_loader, desc="验证", leave=False):
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            logits = model(batch_x)
            loss_sum += criterion(logits, batch_y).item()

            preds = logits.argmax(dim=1)
            n_seen += batch_y.size(0)
            n_correct += (preds == batch_y).sum().item()

    return loss_sum / len(val_loader), 100 * n_correct / n_seen


def save_checkpoint(
        model: nn.Module,
        optimizer: optim.Optimizer,
        scheduler: optim.lr_scheduler._LRScheduler,
        epoch: int,
        best_acc: float,
        cfg: Dict[str, Any],
        is_best: bool = False
):
    """Save a training checkpoint.

    Always writes 'last_checkpoint.pth' (full resume state: model,
    optimizer, scheduler, epoch, best accuracy, config) next to
    cfg['model_save_path']. When is_best is True, additionally writes the
    bare model state dict to cfg['model_save_path'] itself.
    """
    checkpoint = {
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'best_acc': best_acc,
        'config': cfg
    }

    # Ensure the target directory exists; a fresh run otherwise crashes
    # on the first torch.save.
    save_dir = Path(cfg['model_save_path']).parent
    save_dir.mkdir(parents=True, exist_ok=True)

    # Keep the latest checkpoint for resuming interrupted runs.
    torch.save(checkpoint, save_dir / 'last_checkpoint.pth')

    # Best model gets weights only (smaller file, for inference).
    if is_best:
        torch.save(model.state_dict(), cfg['model_save_path'])

def train():
    """Main training entry point.

    Loads the YAML config, builds data loaders and training components,
    then runs the train/validate loop with checkpointing.
    """
    # Load the configuration file.
    with open("configs/default.yaml", "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)

    # Verify that every required key is present.
    required_keys = {'data_dir', 'batch_size', 'lr', 'epochs', 'model_save_path'}
    missing = required_keys - cfg.keys()
    if missing:
        raise ValueError(f"缺少必要配置项: {missing}")

    # Seed torch and numpy for reproducibility.
    seed = cfg.get('random_seed', 42)
    torch.manual_seed(seed)
    np.random.seed(seed)

    # Load metadata and validate feature files.
    file_list, label_list, class_names = load_metadata(cfg)
    print(f"数据加载完成 | 总样本: {len(file_list)} | 类别: {class_names}")

    # Build train/validation loaders.
    train_loader, val_loader = create_data_loaders(cfg, file_list, label_list)

    # Model, optimizer, AMP scaler and LR scheduler.
    model, optimizer, scaler, scheduler = setup_training(cfg, len(class_names))

    # Loss function; infer the device from the model parameters.
    criterion = nn.CrossEntropyLoss()
    device = next(model.parameters()).device

    # Main training loop.
    best_acc = 0.0
    for epoch in range(1, cfg['epochs'] + 1):
        print(f"\nEpoch {epoch}/{cfg['epochs']}")

        # One training pass.
        train_loss = train_epoch(
            model, train_loader, optimizer, criterion,
            scaler, device, cfg.get('grad_clip', 1.0)
        )

        # One validation pass.
        val_loss, val_acc = validate(model, val_loader, criterion, device)

        # Advance the LR schedule once per epoch.
        scheduler.step()

        print(f"训练损失: {train_loss:.4f} | 验证损失: {val_loss:.4f} | 验证准确率: {val_acc:.2f}%")

        # Track the best accuracy and persist checkpoints every epoch.
        is_best = val_acc > best_acc
        if is_best:
            best_acc = val_acc
            print(f"新高准确率! 保存最佳模型到 {cfg['model_save_path']}")

        save_checkpoint(
            model, optimizer, scheduler, epoch,
            best_acc, cfg, is_best
        )

    print(f"\n训练完成! 最佳验证准确率: {best_acc:.2f}%")


# Script entry point: run the full training pipeline.
if __name__ == "__main__":
    train()