import torch
import torch.nn as nn
import torch.optim as optim
import src  # importing src runs src/__init__.py, which initializes the shared hyperparameter variables
from src.data_loader import prepare_dataloaders
from src.model import LSTM_Model_SpeechRecognizer
from src.train import train_model, plotting_results
from src.predict import predict

if __name__ == '__main__':
    # --- Dataset preparation ---
    dataset_dir = '../dataset'
    keywords = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    train_loader, val_loader, label_map = prepare_dataloaders(data_dir=dataset_dir, keywords=keywords)

    # --- Model ---
    # Hyperparameters (input_dim, hidden_dim, num_layers, ...) come from src/__init__.py.
    model = LSTM_Model_SpeechRecognizer(input_dim=src.input_dim, hidden_dim=src.hidden_dim,
                                        num_layers=src.num_layers, num_classes=len(keywords))

    # Cross-entropy loss for multi-class keyword classification.
    criterion = nn.CrossEntropyLoss()
    # Adam optimizer: updates the model parameters during the backward pass.
    optimizer = optim.Adam(model.parameters(), lr=src.learning_rate)

    # --- Training ---
    num_epochs, train_losses, val_losses, val_accuracies = train_model(train_loader, val_loader, model, criterion,
                                                                       optimizer, num_epochs=src.num_epochs)
    # Plot the loss and accuracy curves.
    plotting_results(num_epochs, train_losses, val_losses, val_accuracies)

    # --- Inference: load the best checkpoint and predict on one test clip ---
    # map_location makes the checkpoint loadable even when it was saved on a
    # different device (e.g. trained on GPU, evaluated on a CPU-only machine).
    model.load_state_dict(torch.load('best_model.pth', map_location=src.device))
    # Ensure the model is on the same device predict() will place the input on
    # (no-op if train_model already moved it there).
    model.to(src.device)
    test_audio_path = '../dataset/SpeechCommands/speech_commands_v0.02/zero/0b7ee1a0_nohash_3.wav'
    predicted_label = predict(model, test_audio_path, label_map, device=src.device)
    print(f"Predicted Label: {predicted_label}")
