import os
import torch
import torchaudio
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
import numpy as np
import librosa

audio_sr = 16000
audio_MFCC_num = 13


class SpeechDataset(Dataset):
    """Map-style dataset of keyword audio clips yielding (MFCC tensor, label) pairs.

    Subclasses ``torch.utils.data.Dataset`` (the import was previously unused
    and the class defined the map-style protocol without inheriting it).
    Each clip is loaded at ``max_len`` Hz and padded/truncated to exactly
    ``max_len`` samples — i.e. one second of audio when ``max_len`` equals
    the sample rate, which is the default.
    """

    def __init__(self, file_paths, labels, num_mfcc=audio_MFCC_num, max_len=audio_sr):
        """
        :param file_paths: list of audio file paths
        :param labels: list of integer class labels, parallel to ``file_paths``
        :param num_mfcc: number of MFCC coefficients to extract per frame
        :param max_len: sample rate to load audio at AND the fixed number of
                        samples each clip is padded/truncated to (deliberately
                        the same value: one second of audio)
        """
        self.file_paths = file_paths
        self.labels = labels
        self.num_mfcc = num_mfcc
        self.max_len = max_len

    def __len__(self):
        """Return the number of samples; DataLoader uses this to size iteration."""
        return len(self.file_paths)

    def __getitem__(self, idx):
        """Load clip ``idx``, normalize it to a fixed length, and featurize it.

        :param idx: sample index
        :return: (mfcc, label) where mfcc is a float32 tensor of shape
                 [1, num_mfcc, time_steps]
        """
        file_path = self.file_paths[idx]
        label = self.labels[idx]

        # y: 1-D waveform amplitudes; sr: the sample rate actually used.
        y, sr = librosa.load(file_path, sr=self.max_len)

        # Pad with trailing zeros or truncate so every clip is exactly
        # max_len samples long (fixed-size model input).
        if len(y) < self.max_len:
            y = np.pad(y, (0, self.max_len - len(y)), 'constant')
        else:
            y = y[:self.max_len]

        # Extract MFCC features: shape [num_mfcc, time_steps].
        mfcc = librosa.feature.mfcc(y=y, sr=self.max_len, n_mfcc=self.num_mfcc)
        # Add a channel dimension -> [1, num_mfcc, time_steps] as the model expects.
        mfcc = np.expand_dims(mfcc, axis=0)
        mfcc = torch.tensor(mfcc, dtype=torch.float32)

        return mfcc, label


def prepare_dataloaders(data_dir, keywords, test_size=0.2, batch_size=32):
    """Download Speech Commands, index the requested keywords, and build loaders.

    :param data_dir: root directory the dataset is downloaded/extracted into
    :param keywords: keyword folder names to include; their order defines the labels
    :param test_size: fraction of samples held out for validation
    :param batch_size: batch size for both loaders (previously hard-coded to 32)
    :return: (train_loader, val_loader, label_map) where label_map maps
             keyword -> integer class index
    """
    # Trigger the download/extraction (no-op if already present); we only need
    # the extracted files on disk, not the returned dataset object.
    torchaudio.datasets.SPEECHCOMMANDS(root=data_dir, url='speech_commands_v0.02',
                                       folder_in_archive='SpeechCommands', download=True)
    # Point at the extracted dataset directory.
    data_dir = os.path.join(data_dir, 'SpeechCommands', 'speech_commands_v0.02')

    # Collect file paths and integer labels for each requested keyword.
    label_map = {word: i for i, word in enumerate(keywords)}
    file_paths = []
    labels = []

    for keyword in keywords:
        keyword_dir = os.path.join(data_dir, keyword)
        for file_name in os.listdir(keyword_dir):
            # Only index audio files; skip any stray/hidden files in the folder.
            if not file_name.endswith('.wav'):
                continue
            file_paths.append(os.path.join(keyword_dir, file_name))
            labels.append(label_map[keyword])

    # Stratify on the labels so every keyword keeps the same proportion in
    # the train and validation splits.
    train_files, val_files, train_labels, val_labels = train_test_split(
        file_paths, labels, test_size=test_size, random_state=42, stratify=labels)

    # Wrap the splits in datasets and loaders.
    train_dataset = SpeechDataset(train_files, train_labels)
    val_dataset = SpeechDataset(val_files, val_labels)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, val_loader, label_map


class LSTM_Model_SpeechRecognizer(nn.Module):
    """LSTM-based keyword classifier operating on MFCC feature sequences."""

    def __init__(self, input_dim, hidden_dim, num_layers, num_classes):
        super().__init__()
        # Recurrent feature encoder followed by a linear classification head.
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Map an input of shape [batch, 1, num_mfcc, time] to class logits.

        :param x: MFCC batch as produced by SpeechDataset
        :return: logits of shape [batch, num_classes]
        """
        # Drop the channel axis and put time first among the sequence dims:
        # [batch, 1, num_mfcc, time] -> [batch, time, num_mfcc].
        sequence = x.squeeze(1).transpose(1, 2)
        lstm_out, _ = self.lstm(sequence)
        # Classify from the hidden state of the final time step.
        last_step = lstm_out[:, -1, :]
        return self.fc(last_step)


def train_model(train_loader, val_loader, model, criterion, optimizer, num_epochs=10, model_path='best_model.pth',
                device=None):
    """Train the model, validating each epoch and checkpointing the best weights.

    :param train_loader: DataLoader yielding (inputs, labels) training batches
    :param val_loader: DataLoader yielding (inputs, labels) validation batches
    :param model: the nn.Module to optimize (already on its target device)
    :param criterion: loss function, e.g. nn.CrossEntropyLoss
    :param optimizer: optimizer constructed over model.parameters()
    :param num_epochs: number of full passes over train_loader
    :param model_path: path where the lowest-val-loss state_dict is saved
    :param device: device to move batches to; defaults to the device of the
                   model's parameters. (Previously this read a module-level
                   ``device`` global, which raised NameError when the function
                   was called from an importing module rather than the script.)
    """
    if device is None:
        device = next(model.parameters()).device
    best_val_loss = float('inf')
    for epoch in range(num_epochs):

        # --- Training phase ---
        model.train()
        train_loss = 0.0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # --- Validation phase (no gradient tracking) ---
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                _, predicted = outputs.max(1)
                correct += (predicted == labels).sum().item()
                total += labels.size(0)

        # Average the per-batch losses and compute validation accuracy.
        train_loss /= len(train_loader)
        val_loss /= len(val_loader)
        accuracy = correct / total

        print(f"Epoch {epoch + 1}, Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Accuracy: {accuracy:.4f}")

        # Checkpoint whenever validation loss improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), model_path)
            print("Saved Best Model!")


def predict(model, audio_path, label_map, device=None):
    """Classify a single audio file and return its keyword string.

    :param model: trained classifier (should already be in eval mode)
    :param audio_path: path to a wav file
    :param label_map: dict mapping keyword -> class index, as returned by
                      prepare_dataloaders
    :param device: device for the input tensor; defaults to the device of the
                   model's parameters (previously read a module-level ``device``
                   global, which failed outside the ``__main__`` script path)
    :return: the keyword whose class index the model predicted
    """
    if device is None:
        device = next(model.parameters()).device
    y, sr = librosa.load(audio_path, sr=audio_sr)
    # Zero-pad to at least one second, then truncate to exactly one second,
    # mirroring the preprocessing done in SpeechDataset.__getitem__.
    y = np.pad(y, (0, max(0, audio_sr - len(y))), 'constant')[:audio_sr]
    # Use the shared audio_MFCC_num constant (was hard-coded 13) so inference
    # features stay consistent with training.
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=audio_MFCC_num)
    # [num_mfcc, time] -> [1, 1, num_mfcc, time]: add batch and channel dims.
    mfcc = torch.tensor(mfcc, dtype=torch.float32).unsqueeze(0).unsqueeze(0).to(device)

    with torch.no_grad():
        output = model(mfcc)
        _, predicted = output.max(1)
        # Reverse-lookup the keyword for the predicted class index.
        label = list(label_map.keys())[list(label_map.values()).index(predicted.item())]
        return label


if __name__ == '__main__':
    # Data preparation: download/index the ten digit keywords and build loaders.
    dataset_dir = '../dataset'
    keywords = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    train_loader, val_loader, label_map = prepare_dataloaders(data_dir=dataset_dir, keywords=keywords)

    # Model hyperparameters. input_dim matches the MFCC coefficient count
    # used by SpeechDataset (audio_MFCC_num == 13).
    epoch_num = 20
    input_dim = 13
    hidden_dim = 256
    num_layers = 2
    num_classes = len(keywords)
    learning_rate = 0.001
    # NOTE: this module-level `device` binding is also read as a global by
    # train_model and predict, so it must exist before they are called.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    best_model_path = 'best_model.pth'

    # Model, loss, and optimizer definition.
    model = LSTM_Model_SpeechRecognizer(input_dim, hidden_dim, num_layers, num_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Train; the lowest-validation-loss weights are saved to best_model_path.
    train_model(train_loader, val_loader, model, criterion, optimizer, epoch_num, best_model_path)

    # Reload the best checkpoint saved during training.
    print("Loading Best Model...")
    model.load_state_dict(torch.load(best_model_path))
    model.eval()

    # Run inference on a single sample audio file from the dataset.
    test_audio_path = '../dataset/SpeechCommands/speech_commands_v0.02/zero/0a7c2a8f_nohash_0.wav'
    predicted_result = predict(model, test_audio_path, label_map)
    print(f"Predicted Result: {predicted_result}")
