import torch
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader, ConcatDataset
from torchvision import datasets, transforms
import torch.nn as nn
import os

# Image preprocessing: resize every image to 160x160 and convert it to a
# float tensor in [0, 1]. No normalization is applied here.
transform = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
])

# Paths to the original dataset and the pre-generated augmented dataset.
# NOTE(review): hard-coded Windows-specific paths — portability is limited.
original_data_dir = r'E:\Desktop\实验六\Monkeys'
augmented_data_dir = r'E:\Desktop\实验六\DA_Monkeys'

# Load both directory trees as ImageFolder datasets (class = subfolder name).
# Both directories must contain the same class subfolders for the labels to
# line up after concatenation.
original_dataset = datasets.ImageFolder(root=original_data_dir, transform=transform)
augmented_dataset = datasets.ImageFolder(root=augmented_data_dir, transform=transform)

# Merge original and augmented samples into one dataset.
combined_dataset = ConcatDataset([original_dataset, augmented_dataset])

# Random 80/20 train/test split over the combined dataset.
# NOTE(review): no generator seed is passed, so the split differs between
# runs — resumed training sees a different train/test partition.
train_size = int(0.8 * len(combined_dataset))
test_size = len(combined_dataset) - train_size
train_data, test_data = torch.utils.data.random_split(combined_dataset, [train_size, test_size])

# Build DataLoaders. Shuffling the test loader is harmless but unnecessary.
batch_size = 32
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)

class MonNet(nn.Module):
    """Small LeNet-style CNN classifying 160x160 RGB images into 5 classes.

    forward() returns raw logits of shape (N, 5). The original version
    applied Softmax before nn.CrossEntropyLoss; since that loss applies
    log-softmax internally, the extra softmax squashed gradients and
    hindered training, so it has been removed. argmax-based accuracy is
    unaffected because softmax is monotonic.
    """

    def __init__(self):
        super(MonNet, self).__init__()
        # 3x160x160 -> 6x156x156 (5x5 conv, no padding)
        self.conv1 = nn.Conv2d(3, 6, kernel_size=(5, 5), padding=0)
        self.bn1 = nn.BatchNorm2d(6)
        # Shared 6x6 max-pool: 156 -> 26 after conv1, 32 -> 5 after conv2.
        self.maxpool1 = nn.MaxPool2d(6)
        self.relu = nn.ReLU()
        # 6x26x26 -> 16x32x32 (5x5 conv, padding 5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5), padding=5)
        # BUG FIX: this layer was also named `bn1`, silently overwriting the
        # first BatchNorm; renamed to bn2, and both are now applied in
        # forward() instead of sitting unused as dead parameters.
        self.bn2 = nn.BatchNorm2d(16)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 16x5x5 = 400 flattened features
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 5)

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.maxpool1(x)
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.maxpool1(x)
        # Flatten each sample; keeping x.size(0) preserves the batch
        # dimension regardless of batch size (safer than -1 on the left).
        x = x.view(x.size(0), -1)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)  # raw logits — pair with nn.CrossEntropyLoss

# Initialize model, loss function, and optimizer.
# nn.CrossEntropyLoss expects raw logits and integer class labels.
model = MonNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training configuration. Move the model to the device before loading any
# checkpoint so restored optimizer state lands on the right device.
num_epochs = 50
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Resume from a saved checkpoint if one exists.
checkpoint_path = 'checkpoint.pth'
start_epoch = 0

train_losses = []      # per-epoch average training loss
train_accuracies = []  # per-epoch training accuracy

if os.path.exists(checkpoint_path):
    print("Loading checkpoint...")
    # map_location keeps the load working when the checkpoint was written on
    # a different device (e.g. saved on GPU, resumed on CPU).
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch'] + 1
    train_losses = checkpoint.get('train_losses', [])
    train_accuracies = checkpoint.get('train_accuracies', [])
    print(f"Resuming training from epoch {start_epoch}")
# BUG FIX: the original re-assigned train_losses / train_accuracies to empty
# lists *after* the checkpoint restored them, discarding the resumed history
# and breaking the final plots. That reset has been removed.

# Training loop (resumes at start_epoch when a checkpoint was loaded).
for epoch in range(start_epoch, num_epochs):
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        # Accuracy bookkeeping: predicted class = argmax over the outputs.
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)

    # Mean loss per batch and accuracy over the whole epoch.
    epoch_loss = running_loss / len(train_loader)
    epoch_accuracy = correct / total
    train_losses.append(epoch_loss)
    train_accuracies.append(epoch_accuracy)

    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_accuracy:.4f}")

    # Persist a checkpoint every epoch so training can be resumed.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        # BUG FIX: store the scalar value of the last batch loss instead of
        # the live tensor — serializing the tensor drags its autograd
        # history into the file and bloats the checkpoint.
        'loss': loss.item(),
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
    }, checkpoint_path)

# Plot training loss and accuracy side by side. BUG FIX: the x-axis now uses
# the actual history length instead of range(num_epochs) — on a resumed run
# the recorded history and num_epochs can differ, and mismatched x/y lengths
# make plt.plot raise a ValueError. Epochs are numbered from 1 for display.
epochs_seen = range(1, len(train_losses) + 1)

plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(epochs_seen, train_losses, label="Training Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training Loss Curve")
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(range(1, len(train_accuracies) + 1), train_accuracies, label="Training Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Training Accuracy Curve")
plt.legend()

plt.show()
