import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, models
from vgg数据集 import *
# Select the compute device: prefer a CUDA GPU, fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Build a VGG16 transfer-learning model with a replacement output layer.
def initialize_model(num_classes, feature_extract=True):
    """Load ImageNet-pretrained VGG16 and swap in a new final classifier layer.

    When ``feature_extract`` is True, every pretrained parameter is frozen,
    so only the newly created output layer (created afterwards, and therefore
    still requires_grad=True) will be trained.
    Returns the model moved to the module-level ``device``.
    """
    net = models.vgg16(weights=models.VGG16_Weights.DEFAULT)

    if feature_extract:
        # Freeze the pretrained backbone and original classifier so that
        # only the replacement layer below receives gradient updates.
        for p in net.parameters():
            p.requires_grad = False

    # Replace the last classifier layer (index 6) with a fresh Linear head
    # sized for our dataset's class count.
    in_features = net.classifier[6].in_features
    net.classifier[6] = nn.Linear(in_features, num_classes)
    return net.to(device)

# Initialize the transfer-learning model (45 target classes; backbone frozen).
model = initialize_model(num_classes=45, feature_extract=True)

Epochs = 100              # maximum number of training epochs
lr = 0.001                # initial learning rate for Adam
best_loss = float('inf')  # lowest validation loss observed so far
patience = 10             # early-stopping patience, in epochs
counter = 0               # epochs elapsed since validation loss last improved
loss_fn = nn.CrossEntropyLoss()
# Optimize only the parameters of the replacement output layer; everything
# else was frozen inside initialize_model(), so passing just classifier[6]
# keeps the optimizer state small.
optimizer = torch.optim.Adam(model.classifier[6].parameters(), lr=lr)

# Learning-rate scheduler: halve the LR when the validation loss has not
# improved for 5 consecutive epochs (stepped with scheduler.step(val_loss)
# in the training loop below).
# NOTE: the `verbose` flag accepted by older PyTorch versions is deprecated
# (and removed in recent releases), so it is intentionally not passed here.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode='min',      # a *decrease* of the monitored value counts as improvement
    factor=0.5,      # multiply the LR by this factor on each plateau
    patience=5,      # epochs with no improvement before reducing the LR
)

# Model evaluation helper.
def evaluate_model(model, val_loader, loss_fn, device):
    """Evaluate ``model`` on ``val_loader`` without updating weights.

    Returns a ``(avg_loss, accuracy_percent)`` pair: the loss is averaged per
    sample over the entire dataset, and the accuracy is the percentage of
    correct top-1 predictions.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    # Gradients are never needed for evaluation.
    with torch.no_grad():
        for batch_images, batch_labels in val_loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_images)
            batch_size = batch_labels.size(0)
            # Weight each batch's mean loss by its size so the final average
            # is per-sample even when the last batch is smaller.
            loss_sum += loss_fn(logits, batch_labels).item() * batch_size

            preds = torch.argmax(logits, dim=1)
            n_correct += (preds == batch_labels).sum().item()
            n_seen += batch_size

    avg_loss = loss_sum / len(val_loader.dataset)
    accuracy = 100.0 * n_correct / n_seen
    return avg_loss, accuracy

# Main training loop: train one epoch, validate, step the LR scheduler,
# checkpoint on improvement, and stop early after `patience` stale epochs.
for epoch in range(Epochs):
    total_loss = 0
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # NOTE(review): this averages per *batch*, while evaluate_model averages
    # per *sample* — the two differ slightly if the last batch is smaller.
    avg_loss = total_loss / len(train_loader)
    print(f"Epoch {epoch + 1} 训练损失: {avg_loss:.4f}")

    # Evaluate on the validation set.
    val_loss, val_acc = evaluate_model(model, val_loader, loss_fn, device)
    print(f"Epoch {epoch + 1} 验证损失: {val_loss:.4f}, 验证准确率: {val_acc:.2f}%")

    # Reduce the learning rate if the validation loss has plateaued.
    scheduler.step(val_loss)

    if val_loss < best_loss:
        best_loss = val_loss
        counter = 0
        # Checkpoint the best-so-far weights (reloaded after training below).
        torch.save(model.state_dict(), '../static/vgg16_mammals_best_model.pth')
        print(f"已保存最佳模型 (验证损失: {val_loss:.4f})")
    else:
        counter += 1
        if counter >= patience:
            print(f"Early stopping at epoch {epoch + 1}")
            break

# Reload the best checkpoint (lowest validation loss) and evaluate on the
# held-out test set.
# map_location keeps the load working even if the checkpoint was written on a
# GPU machine and is later loaded where only the CPU is available;
# weights_only=True is safe for a plain state_dict and avoids executing
# arbitrary pickled code (requires PyTorch >= 1.13).
model.load_state_dict(
    torch.load(
        '../static/vgg16_mammals_best_model.pth',
        map_location=device,
        weights_only=True,
    )
)
test_loss, test_acc = evaluate_model(model, test_loader, loss_fn, device)
print(f"测试集损失: {test_loss:.4f}, 测试集准确率: {test_acc:.2f}%")