from resnet import ResNet1D

import torch
import torch.nn as nn
import torch.optim as optim
from dataloader import MyDataset
from torch.utils.data import DataLoader, random_split

import os
import matplotlib.pyplot as plt


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 0. Load the dataset and split it 80/20 into train/test subsets.
dataset = MyDataset("./data/trainset")
train_size = int(0.8 * len(dataset))
train_dataset, test_dataset = random_split(dataset, [train_size, len(dataset) - train_size])

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32)

# 1. Build the model.
model = ResNet1D(num_class=2).to(device)

# 1.5 Resume from a saved checkpoint if one exists.
# map_location keeps loading working when the checkpoint was saved on a
# different device (e.g. a GPU checkpoint loaded on a CPU-only machine).
if os.path.exists("./model/save/resnet_wav.pth"):
    model.load_state_dict(torch.load("./model/save/resnet_wav.pth", map_location=device))

# 2. Loss function.
criterion = nn.CrossEntropyLoss()
# 3. Optimizer.
optimizer = optim.Adam(model.parameters(), lr=0.001)

def train(epoch):
    """Run one training epoch over ``train_loader``.

    Every 10th epoch the model weights are checkpointed to
    ``./model/save/resnet_wav.pth`` (once per epoch, after all batches).

    Args:
        epoch: 0-based epoch index, used only to decide whether to checkpoint.

    Returns:
        (mean_batch_loss, train_accuracy) for this epoch.
    """
    model.train()
    total_loss, correct = 0, 0
    for (txt_x, embed_y), label in train_loader:
        # Move the batch to the training device with the dtypes the model
        # and CrossEntropyLoss expect (float inputs, long class targets).
        txt_x, embed_y, label = txt_x.to(device).float(), embed_y.to(device).float(), label.to(device).long()
        optimizer.zero_grad()
        output = model(txt_x, embed_y)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()

        # Accumulate loss and the number of correct predictions.
        total_loss += loss.item()
        correct += (output.argmax(1) == label).sum().item()

    # Checkpoint once per epoch, NOT once per batch: the original saved
    # inside the batch loop, rewriting the file on every batch of every
    # 10th epoch. Ensure the save directory exists first.
    if epoch % 10 == 0:
        os.makedirs("./model/save", exist_ok=True)
        torch.save(model.state_dict(), "./model/save/resnet_wav.pth")

    return total_loss / len(train_loader), correct / len(train_loader.dataset)

def evaluate():
    """Return classification accuracy of ``model`` on ``test_loader``.

    Runs in eval mode with gradients disabled; accuracy is the fraction
    of test samples whose argmax prediction matches the label.
    """
    model.eval()
    n_correct = 0
    with torch.no_grad():
        for (txt_x, embed_y), label in test_loader:
            # Same device/dtype conversions as in training.
            txt_x = txt_x.to(device).float()
            embed_y = embed_y.to(device).float()
            label = label.to(device).long()
            preds = model(txt_x, embed_y).argmax(1)
            n_correct += (preds == label).sum().item()
    return n_correct / len(test_loader.dataset)


if __name__ == "__main__":
    train_losses = []        # per-epoch training loss (for plotting)
    train_accuracies = []    # per-epoch training accuracy
    test_accuracies = []     # per-epoch test accuracy

    # 4. Training loop.
    num_epochs = 60
    for epoch in range(num_epochs):
        train_loss, train_acc = train(epoch)
        train_losses.append(train_loss)
        train_accuracies.append(train_acc)
        test_acc = evaluate()
        test_accuracies.append(test_acc)
        print(f"Epoch{epoch+1}/{num_epochs}, Loss: {train_loss:.4f},",
                f"Train Acc: {train_acc:.4f}, Test Acc: {test_acc: .4f}")

    # Make sure the image output directory exists before saving figures.
    os.makedirs("./model/img", exist_ok=True)

    # 1. Plot the training-loss curve.
    plt.figure(figsize=(12, 5))
    plt.plot(train_losses, label="Train Loss", color='blue')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Training Loss Curve")
    plt.legend()
    # tight_layout must run BEFORE savefig to affect the saved image
    # (the original called it after saving, so it had no effect).
    plt.tight_layout()
    plt.savefig('./model/img/training_loss_curve.png')
    plt.close()

    # 2. Plot train/test accuracy curves.
    plt.figure(figsize=(12, 5))
    plt.plot(train_accuracies, label="Train Accuracies", color='red')
    plt.plot(test_accuracies, label="Test Accuracies", color='green')
    plt.xlabel("Epoch")
    plt.ylabel("Accuracies")
    plt.title("Accuracies Curve")
    plt.legend()
    plt.tight_layout()
    plt.savefig('./model/img/training_and_test_accuracies_curve.png')
    plt.close()

    # Report the mean accuracy over the LAST TEN epochs. The original used
    # [49:], which (a) only works for num_epochs == 60 and (b) actually
    # selects 11 elements (indices 49..59); [-10:] is correct for any length.
    last_train = train_accuracies[-10:]
    last_test = test_accuracies[-10:]
    print("训练阶段---最后十个epoch的平均准确率:", sum(last_train) / len(last_train))
    # Original printed "训练阶段" (training stage) here too, even though this
    # line reports TEST accuracy — label corrected to 测试阶段.
    print("测试阶段---最后十个epoch的平均准确率:", sum(last_test) / len(last_test))