import os
import numpy as np
import torch, torchaudio
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
import librosa


# Custom dataset class for keyword-spotting audio clips.
class SpeechDataset(Dataset):
    """Dataset yielding (MFCC tensor, label) pairs from audio file paths.

    Each clip is loaded at 16 kHz, zero-padded or cropped to ``max_len``
    samples, and converted to an MFCC feature map of shape
    ``[1, num_mfcc, time]`` (channel dim first, as the model expects).

    Args:
        file_paths: list of audio file paths.
        labels: list of integer class labels, parallel to ``file_paths``.
        num_mfcc: number of MFCC coefficients to extract per frame.
        max_len: fixed clip length in samples (16000 == 1 s at 16 kHz).
    """

    def __init__(self, file_paths, labels, num_mfcc=13, max_len=16000):
        # NOTE(review): the original constructor also triggered a
        # torchaudio SPEECHCOMMANDS(download=True) call whose result was
        # discarded, re-ran for every dataset instance (train AND val),
        # and downloaded into './dataset' while the files actually read
        # come from the caller-supplied paths ('../dataset/...').
        # Dataset acquisition belongs in a one-time setup step, not here.
        self.file_paths = file_paths
        self.labels = labels
        self.num_mfcc = num_mfcc
        self.max_len = max_len

    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        file_path = self.file_paths[idx]
        label = self.labels[idx]

        # Load audio resampled to a fixed 16 kHz rate.
        y, sr = librosa.load(file_path, sr=16000)

        # Zero-pad short clips / crop long ones to exactly max_len samples.
        if len(y) < self.max_len:
            y = np.pad(y, (0, self.max_len - len(y)), 'constant')
        else:
            y = y[:self.max_len]

        # Extract MFCCs -> [num_mfcc, time], then add a channel dimension.
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=self.num_mfcc)
        mfcc = np.expand_dims(mfcc, axis=0)  # [1, num_mfcc, time]

        return torch.tensor(mfcc, dtype=torch.float32), label


# LSTM-based keyword classifier.
class LSTMKeywordModel(nn.Module):
    """Classify a keyword from an MFCC feature map with a stacked LSTM.

    Input shape: ``[batch, 1, num_mfcc, time]``; output: class logits of
    shape ``[batch, num_classes]`` taken from the final LSTM time step.
    """

    def __init__(self, input_dim=13, hidden_dim=128, num_layers=2, num_classes=2):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        # Drop the channel dim and swap axes so the LSTM sees
        # [batch, time, num_mfcc] (batch_first layout).
        feats = x.squeeze(1).transpose(1, 2)
        sequence_out, _ = self.lstm(feats)   # [batch, time, hidden_dim]
        final_step = sequence_out[:, -1, :]  # last time step summarizes the clip
        return self.fc(final_step)           # [batch, num_classes]


# Dataset preparation
def prepare_data(data_dir, keywords, test_size=0.2, batch_size=32):
    """Build train/validation DataLoaders for the given keyword folders.

    Args:
        data_dir: root directory with one sub-directory per keyword.
        keywords: keyword folder names; their order defines the class indices.
        test_size: fraction of samples held out for validation.
        batch_size: DataLoader batch size (previously hard-coded to 32).

    Returns:
        (train_loader, val_loader, label_map) where ``label_map`` maps
        keyword -> integer class index.
    """
    label_map = {word: i for i, word in enumerate(keywords)}
    file_paths = []
    labels = []

    for keyword in keywords:
        keyword_dir = os.path.join(data_dir, keyword)
        # sorted() makes the train/val split reproducible across
        # filesystems (os.listdir order is arbitrary), and the .wav filter
        # skips stray files such as .DS_Store.
        for file_name in sorted(os.listdir(keyword_dir)):
            if not file_name.endswith(".wav"):
                continue
            file_paths.append(os.path.join(keyword_dir, file_name))
            labels.append(label_map[keyword])

    train_files, val_files, train_labels, val_labels = train_test_split(
        file_paths, labels, test_size=test_size, random_state=42
    )

    train_dataset = SpeechDataset(train_files, train_labels)
    val_dataset = SpeechDataset(val_files, val_labels)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, val_loader, label_map


# Model training
def train_model(train_loader, val_loader, model, criterion, optimizer, device, num_epochs=10,
                best_model_path="best_model.pth"):
    """Train ``model`` and checkpoint the lowest-validation-loss weights.

    For each epoch: one optimization pass over ``train_loader``, then a
    gradient-free evaluation pass over ``val_loader``. Whenever the mean
    validation loss improves, ``model.state_dict()`` is written to
    ``best_model_path``.
    """
    lowest_val_loss = float("inf")

    for epoch in range(num_epochs):
        # --- training pass ---
        model.train()
        running_train = 0.0
        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            optimizer.zero_grad()
            batch_loss = criterion(model(batch_x), batch_y)
            batch_loss.backward()
            optimizer.step()
            running_train += batch_loss.item()

        # --- validation pass (no gradients) ---
        model.eval()
        running_val = 0.0
        n_correct = 0
        n_seen = 0
        with torch.no_grad():
            for batch_x, batch_y in val_loader:
                batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                logits = model(batch_x)
                running_val += criterion(logits, batch_y).item()
                n_correct += (logits.max(1)[1] == batch_y).sum().item()
                n_seen += batch_y.size(0)

        avg_train = running_train / len(train_loader)
        avg_val = running_val / len(val_loader)
        acc = n_correct / n_seen

        print(f"Epoch {epoch + 1}, Train Loss: {avg_train:.4f}, Val Loss: {avg_val:.4f}, Accuracy: {acc:.4f}")

        # Checkpoint only when validation loss improves.
        if avg_val < lowest_val_loss:
            lowest_val_loss = avg_val
            torch.save(model.state_dict(), best_model_path)
            print("Saved Best Model!")


# Model inference on a single clip
def predict(file_path, model, device, label_map):
    """Return the keyword label predicted for the audio file at ``file_path``.

    Mirrors SpeechDataset preprocessing: load at 16 kHz, pad/crop to
    16000 samples, extract 13 MFCCs, add batch + channel dims.

    Args:
        file_path: path to the audio file to classify.
        model: trained model (caller is expected to have set eval mode).
        device: torch device the model lives on.
        label_map: dict mapping keyword -> class index (as from prepare_data).
    """
    y, sr = librosa.load(file_path, sr=16000)
    # Zero-pad short clips, then crop to exactly one second (16000 samples).
    y = np.pad(y, (0, max(0, 16000 - len(y))), 'constant')[:16000]
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    # [num_mfcc, time] -> [1, 1, num_mfcc, time] to match the model input.
    mfcc = torch.tensor(mfcc, dtype=torch.float32).unsqueeze(0).unsqueeze(0).to(device)

    with torch.no_grad():
        output = model(mfcc)
        _, predicted = output.max(1)
        # Invert the word -> index map once instead of scanning two
        # parallel lists (the old list(keys)[list(values).index(...)] idiom).
        index_to_word = {idx: word for word, idx in label_map.items()}
        return index_to_word[predicted.item()]


# Main program
if __name__ == "__main__":
    # Hyper-parameters and paths
    data_dir = "../dataset/speech_commands_v0.02"  # dataset root directory
    # keywords = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]  # target keywords
    keywords = ["zero", "one", "two"]  # target keywords
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    num_epochs = 50
    best_model_path = "best_model.pth"

    # Data preparation
    train_loader, val_loader, label_map = prepare_data(data_dir, keywords)

    # Model definition
    model = LSTMKeywordModel(num_classes=len(keywords)).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train the model
    train_model(train_loader, val_loader, model, criterion, optimizer, device, num_epochs, best_model_path)

    # Reload the best checkpoint. map_location keeps the load working even
    # when the checkpoint was written on a different device (e.g. saved on
    # CUDA, reloaded on a CPU-only machine).
    model.load_state_dict(torch.load(best_model_path, map_location=device))
    model.eval()

    # Classify a single audio clip
    test_file = "../dataset/speech_commands_v0.02/six/bab36420_nohash_3.wav"  # replace with a test audio path
    predicted_label = predict(test_file, model, device, label_map)
    print(f"Predicted label: {predicted_label}")
