import torch
import torch.optim as optim
from model.model_lstm import EmotionLSTM
from data.loader import dataloader
import time
import os
from config import DataDir, N_MFCC, INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, NUM_LAYERS, LEARNING_RATE, BATCH_SIZE, \
    NUM_EPOCHS, MAX_PAD_LEN, Model_Path, Patience, Factor, Val_Acc_Start, device
from matplotlib import pyplot as plt



def draw_figure(train_losses, train_accuracies, val_losses, val_accuracies):
    """Plot per-epoch loss and accuracy curves and save them to disk.

    Args:
        train_losses: list of per-epoch average training losses.
        train_accuracies: list of per-epoch training accuracies.
        val_losses: list of per-epoch average validation losses.
        val_accuracies: list of per-epoch validation accuracies.

    Side effects:
        Writes 'training_metrics.png' to the working directory, then
        opens an interactive window via plt.show().
    """
    epochs = range(1, len(train_losses) + 1)

    # Loss curves (top subplot)
    plt.figure(figsize=(10, 6))
    plt.subplot(2, 1, 1)
    plt.plot(epochs, train_losses, label='Train Loss')
    plt.plot(epochs, val_losses, label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    # Accuracy curves (bottom subplot)
    plt.subplot(2, 1, 2)
    plt.plot(epochs, train_accuracies, label='Train Accuracy')
    plt.plot(epochs, val_accuracies, label='Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.tight_layout()
    # Save BEFORE show(): show() blocks and the figure is cleared when the
    # window closes, so the previous order produced an empty PNG.
    plt.savefig('training_metrics.png')
    plt.show()
def train(dataloader, epochs):
    """Train an EmotionLSTM with per-epoch validation and checkpointing.

    Args:
        dataloader: the dataloader *class/factory* (from data.loader); it is
            instantiated here with the config constants and must expose
            ``load() -> (train_loader, test_loader, val_loader)``.
        epochs: number of training epochs to run.

    Returns:
        (train_losses, train_accuracies, val_losses, val_accuracies):
        four lists with one entry per epoch, suitable for draw_figure().

    Side effects:
        Saves the model's state_dict whenever the validation loss improves,
        to a path derived from Model_Path; prints a progress line per epoch.
    """
    # Per-epoch history, returned for plotting.
    train_losses = []
    train_accuracies = []
    val_losses = []
    val_accuracies = []

    # Best validation loss seen so far; start at +inf so the first epoch
    # always produces a checkpoint.
    best_val_loss = float('inf')

    # Model, loss function, data loaders, optimizer.
    model = EmotionLSTM(INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM, NUM_LAYERS)
    lossfunc = torch.nn.CrossEntropyLoss()
    # Instantiate the factory into a new local instead of rebinding the
    # `dataloader` parameter (which shadowed the module-level import).
    data = dataloader(DataDir, N_MFCC, MAX_PAD_LEN, BATCH_SIZE)
    train_loader, test_loader, val_loader = data.load()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    # NOTE(review): training runs on CPU; `device` is imported from config
    # but the model/batches are never moved to it — confirm this is intended.

    # LR decay: only engaged once validation accuracy reaches Val_Acc_Start.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=Patience, factor=Factor)
    scheduler_start = False

    for epoch in range(epochs):
        """
        Training phase
        """
        model.train()
        train_loss = 0       # summed batch losses
        train_correct = 0    # correctly classified samples
        train_samples = 0    # total samples seen

        for batch_idx, (batch_features, batch_labels) in enumerate(train_loader):
            optimizer.zero_grad()
            outputs = model(batch_features)                 # forward pass
            loss = lossfunc(outputs, batch_labels)
            train_loss += loss.item()

            _, predicted = torch.max(outputs, 1)            # argmax over classes
            train_correct += (predicted == batch_labels).sum().item()
            train_samples += batch_labels.size(0)

            loss.backward()
            optimizer.step()

        # Epoch averages; len(train_loader) is the number of batches.
        train_loss_avg = train_loss / len(train_loader)
        train_accuracy_avg = train_correct / train_samples
        train_losses.append(train_loss_avg)
        train_accuracies.append(train_accuracy_avg)

        """
        Validation phase
        """
        model.eval()
        val_loss = 0
        val_correct = 0
        val_samples = 0

        with torch.no_grad():
            for batch_idx, (batch_features, batch_labels) in enumerate(val_loader):
                outputs = model(batch_features)
                loss = lossfunc(outputs, batch_labels)
                val_loss += loss.item()
                _, predicted = torch.max(outputs, 1)
                val_correct += (predicted == batch_labels).sum().item()
                val_samples += batch_labels.size(0)

        # Validation averages; len(val_loader) is the number of batches.
        val_loss_avg = val_loss / len(val_loader)
        val_accuracy_avg = val_correct / val_samples
        val_losses.append(val_loss_avg)
        val_accuracies.append(val_accuracy_avg)

        """
        Learning-rate update
        """
        if val_accuracy_avg >= Val_Acc_Start and not scheduler_start:
            scheduler_start = True
        if scheduler_start:
            scheduler.step(val_loss_avg)
        # Read the LR from the optimizer: ReduceLROnPlateau only gained
        # get_last_lr() in recent PyTorch, so this is version-portable.
        current_lr = optimizer.param_groups[0]['lr']
        print(f'Epoch {(epoch + 1):5d}/{NUM_EPOCHS}:, Train_Loss: {train_loss_avg:.4f}, \tVal_Loss: {val_loss_avg:.4f}, \tTrain_Acc: {train_accuracy_avg:.4f}, \tVal_Acc: {val_accuracy_avg:.4f}, \tlr: {round(current_lr, 6)}, \t{scheduler_start}')

        """
        Checkpointing
        """
        if val_loss_avg < best_val_loss:
            best_val_loss = val_loss_avg
            model_info = f"Epoch={epoch:3d}, Val_Loss={val_loss_avg:.4f}, Train_Loss={train_loss_avg:.4f}, Val_Acc={val_accuracy_avg:.4f}, Train_Acc={train_accuracy_avg:.4f}"
            model_path = Model_Path.replace('TIMESTAMP', str(int(time.time())) + "_" + model_info)
            torch.save(model.state_dict(), model_path)

    return train_losses, train_accuracies, val_losses, val_accuracies


if __name__ == '__main__':
    # Run training, then render the collected per-epoch metrics.
    metrics = train(dataloader, NUM_EPOCHS)
    draw_figure(*metrics)
