# 导入所需的库
import os
import random

# 导入数据处理和可视化库
import matplotlib.pyplot as plt
import numpy as np

# 导入深度学习框架 PyTorch 相关库
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from AlexNet import AlexNet


# 设置随机种子以保证结果的可重复性
def setup_seed(seed):
    """Seed every RNG source (Python, NumPy, PyTorch CPU/CUDA) for reproducible runs."""
    random.seed(seed)                         # Python's built-in RNG
    np.random.seed(seed)                      # NumPy RNG
    os.environ['PYTHONHASHSEED'] = str(seed)  # hash randomization
    torch.manual_seed(seed)                   # PyTorch CPU RNG
    if not torch.cuda.is_available():
        return
    torch.cuda.manual_seed(seed)      # current CUDA device
    torch.cuda.manual_seed_all(seed)  # all CUDA devices
    # Trade speed for determinism: disable cudnn autotuning and
    # force deterministic convolution algorithms.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True


# Seed all RNGs before any model/dataloader construction for reproducibility
setup_seed(0)
# Select compute device: GPU when CUDA is available, otherwise CPU
if torch.cuda.is_available():
    device = torch.device("cuda")  # use GPU
    print("CUDA is available. Using GPU.")
else:
    device = torch.device("cpu")  # use CPU
    print("CUDA is not available. Using CPU.")

# Preprocessing pipelines; train uses random crops for augmentation,
# test uses a deterministic resize. Both normalize each channel to [-1, 1].
transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "test": transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
}

# Datasets are read from class-per-subfolder directories (ImageFolder layout);
# paths are relative to the working directory — TODO confirm they exist at run time.
train_dataset = datasets.ImageFolder("dataset/train", transform=transform["train"])
test_dataset = datasets.ImageFolder("dataset/test", transform=transform["test"])

# Shuffle only the training data; evaluation order is fixed.
train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Binary classifier (num_classes=2) trained with cross-entropy and Adam.
model = AlexNet(num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

# ---------------------------------------------------------------------------
# Training / evaluation loop: train one epoch, evaluate on the test set,
# checkpoint the best model and a periodic snapshot.
# ---------------------------------------------------------------------------
epochs = 20
most_acc = 0.0  # best test accuracy so far (was read before assignment -> NameError on epoch 1)
os.makedirs("./model", exist_ok=True)  # checkpoint dir; torch.save fails if it is missing

for epoch in range(epochs):
    # ---- training phase ----
    model.train()
    total_train_loss = 0.0  # fixed typo: was `tatol_loss`
    for i, (images, labels) in enumerate(train_dataloader):
        images = images.to(device)
        labels = labels.to(device)
        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # .item() detaches the scalar; accumulating the tensor itself would
        # keep every batch's autograd graph alive and grow memory usage
        total_train_loss += loss.item()
        print(f"Epoch [{epoch + 1}/{epochs}],Iter[{i}/{len(train_dataloader)}], Loss: {loss}")
    avg_loss = total_train_loss / len(train_dataloader)
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss}")

    # ---- evaluation phase ----
    model.eval()
    total = 0
    correct = 0
    total_test_loss = 0.0
    with torch.no_grad():  # no gradients needed during evaluation
        for images, labels in test_dataloader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            total_test_loss += criterion(outputs, labels).item()
            # predicted class = argmax over the logits of each sample
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    avg_test_loss = total_test_loss / len(test_dataloader)
    acc = correct / total
    print(f"Test Data: Epoch [{epoch + 1}/{epochs}], Loss {avg_test_loss:.4f}, Accuracy {acc * 100}%")

    # ---- checkpointing ----
    # Keep the weights with the best test accuracy seen so far.
    if acc > most_acc:
        torch.save(model.state_dict(), f"./model/model_best.pth")
        most_acc = acc
    # Periodic snapshot every 10 epochs, named by its accuracy.
    if (epoch + 1) % 10 == 0:
        torch.save(model.state_dict(), f"./model/model_{acc * 100}.pth")


