import torch
import torch.nn as nn
import torch.optim as optim
from models.simple_cnn import SimpleCNN
from utils.data_loader import get_dataloaders
import matplotlib.pyplot as plt
from tqdm import tqdm

# Select the compute device: prefer CUDA when available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the train/validation DataLoaders from the prepared dataset directory.
data_dir = 'prepared_dataset'
train_loader, val_loader = get_dataloaders(data_dir)

# Build the model; the number of output classes is taken from the dataset.
# NOTE(review): assumes the underlying dataset exposes a `.classes` list
# (ImageFolder-style) — confirm against utils.data_loader.
num_classes = len(train_loader.dataset.classes)
model = SimpleCNN(num_classes).to(device)

# Loss and optimizer: cross-entropy classification loss, Adam with lr=1e-3.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


def save_checkpoint(state, filename="checkpoint.pth.tar"):
    """Serialize *state* (a dict of model/optimizer state and metadata)
    to *filename* via ``torch.save``."""
    torch.save(state, filename)


def load_checkpoint(filename="checkpoint.pth.tar"):
    """Restore training state from a checkpoint file.

    Loads the saved ``state_dict`` into the module-level ``model`` and the
    saved optimizer state into the module-level ``optimizer`` (both mutated
    in place), and returns the stored epoch number so training can resume.
    """
    # map_location=device makes a checkpoint written on a GPU machine
    # loadable on a CPU-only machine (and vice versa). Without it,
    # torch.load tries to restore tensors to their original device and
    # raises when CUDA is unavailable.
    model_checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(model_checkpoint['state_dict'])
    optimizer.load_state_dict(model_checkpoint['optimizer'])
    return model_checkpoint['epoch']


def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=25):
    """Run the train/validate loop for *num_epochs* epochs.

    Each epoch performs one optimization pass over *train_loader*, one
    no-grad evaluation pass over *val_loader*, and writes a resumable
    checkpoint. Returns ``(train_loss_history, val_loss_history)`` — the
    per-epoch average (per-sample) losses.
    """
    train_loss_history, val_loss_history = [], []

    for epoch in range(num_epochs):
        print(f"Epoch {epoch + 1}/{num_epochs}")

        # ---- optimization pass over the training set ----
        model.train()
        accumulated = 0.0
        for batch_inputs, batch_labels in tqdm(train_loader, desc="Training", unit="batch"):
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)
            optimizer.zero_grad()
            batch_loss = criterion(model(batch_inputs), batch_labels)
            batch_loss.backward()
            optimizer.step()
            # weight by batch size so the epoch average is per-sample
            accumulated += batch_loss.item() * batch_inputs.size(0)

        train_epoch_loss = accumulated / len(train_loader.dataset)
        train_loss_history.append(train_epoch_loss)
        print(f'Train Loss: {train_epoch_loss:.4f}')

        # ---- no-grad evaluation pass over the validation set ----
        model.eval()
        accumulated = 0.0
        with torch.no_grad():
            for batch_inputs, batch_labels in tqdm(val_loader, desc="Validation", unit="batch"):
                batch_inputs = batch_inputs.to(device)
                batch_labels = batch_labels.to(device)
                batch_loss = criterion(model(batch_inputs), batch_labels)
                accumulated += batch_loss.item() * batch_inputs.size(0)

        val_epoch_loss = accumulated / len(val_loader.dataset)
        val_loss_history.append(val_epoch_loss)
        print(f'Val Loss: {val_epoch_loss:.4f}')

        # Persist a resumable snapshot after every epoch.
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, filename=f'checkpoint_epoch_{epoch + 1}.pth.tar')

    return train_loss_history, val_loss_history


train_loss, val_loss = train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=25)

# Plot the per-epoch training and validation loss curves.
plt.plot(train_loss, label='Train Loss')
plt.plot(val_loss, label='Val Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
