import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, models
from sklearn.metrics import classification_report, accuracy_score
from alex45数据集 import *

# Pick the compute device once, up front: use the GPU when one is present,
# otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# 加载预训练AlexNet模型并修改分类器
def initialize_model(num_classes, feature_extract=True):
    """Build an ImageNet-pretrained AlexNet with a replacement classifier head.

    Args:
        num_classes: Number of output classes for the new final layer.
        feature_extract: When True, freeze every pretrained weight so that
            only the newly created head is trainable.

    Returns:
        The modified model, moved to the module-level ``device``.
    """
    net = models.alexnet(weights=models.AlexNet_Weights.DEFAULT)

    # Feature extraction: freeze the pretrained backbone. The Linear layer
    # created below is new, so it keeps requires_grad=True by default.
    if feature_extract:
        for p in net.parameters():
            p.requires_grad = False

    # Swap the last classifier layer for one sized to our class count.
    in_features = net.classifier[6].in_features
    net.classifier[6] = nn.Linear(in_features, num_classes)
    return net.to(device)


# 评估模型函数
def evaluate_model(model, dataloader, device):
    """Run inference over *dataloader* and score the model's predictions.

    Args:
        model: The network to evaluate (switched to eval mode here).
        dataloader: Iterable yielding (images, labels) batches.
        device: Device to run inference on.

    Returns:
        Tuple of (accuracy as a percentage, sklearn classification report string).
    """
    model.eval()
    predictions, targets = [], []
    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            logits = model(batch_images)
            # argmax over the class dimension == torch.max(..., 1)[1]
            predictions.extend(logits.argmax(dim=1).cpu().numpy())
            targets.extend(batch_labels.cpu().numpy())

    accuracy = accuracy_score(targets, predictions) * 100
    report = classification_report(targets, predictions)
    return accuracy, report


# --- Model, loss, optimizer, and LR-schedule setup ---
model = initialize_model(num_classes=45, feature_extract=True)
Epochs = 100
lr = 0.001
loss_fn = nn.CrossEntropyLoss()
# Hand the optimizer ONLY the trainable parameters: the backbone is frozen
# inside initialize_model, so including frozen params would just allocate
# useless optimizer state. (Uses the `optim` alias consistently with the
# scheduler below instead of mixing in `torch.optim`.)
optimizer = optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=lr
)

# Halve the learning rate when validation loss plateaus for 5 epochs.
# The deprecated `verbose=True` flag was dropped — it has been removed in
# recent PyTorch releases; inspect optimizer.param_groups to observe LR changes.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.5, patience=5
)

# Early-stopping bookkeeping: best validation loss seen so far and the
# number of consecutive epochs without improvement.
best_loss = float('inf')
counter = 0

for epoch in range(Epochs):
    # --- Training pass ---
    model.train()
    total_loss = 0.0
    train_preds = []
    train_labels = []
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

        _, preds = torch.max(outputs, 1)
        train_preds.extend(preds.cpu().numpy())
        train_labels.extend(labels.cpu().numpy())

    train_accuracy = accuracy_score(train_labels, train_preds) * 100
    avg_loss = total_loss / len(train_loader)

    # --- Validation pass ---
    # NOTE(review): the test set doubles as the validation set here, so the
    # early-stopping / model-selection estimate is optimistically biased —
    # consider a held-out validation split.
    # Loss and predictions are computed in a SINGLE sweep; the original code
    # ran two full inference passes over test_loader per epoch (one inside
    # evaluate_model and a second one just for the loss).
    model.eval()
    val_loss = 0.0
    val_preds = []
    val_labels = []
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            val_loss += loss_fn(outputs, labels).item()
            _, preds = torch.max(outputs, 1)
            val_preds.extend(preds.cpu().numpy())
            val_labels.extend(labels.cpu().numpy())
    val_loss /= len(test_loader)
    val_accuracy = accuracy_score(val_labels, val_preds) * 100

    # Let the plateau scheduler see the validation loss.
    scheduler.step(val_loss)

    print(f"Epoch {epoch + 1}/{Epochs}")
    print(f"训练损失: {avg_loss:.4f}, 训练准确率: {train_accuracy:.2f}%")
    print(f"验证损失: {val_loss:.4f}, 验证准确率: {val_accuracy:.2f}%")

    # Early stopping: checkpoint on every improvement, give up after 15
    # consecutive epochs without one.
    if val_loss < best_loss:
        best_loss = val_loss
        counter = 0
        torch.save(model.state_dict(), '../static/alex_mammals_best_model.pth')
        print(f"已保存最佳模型 (验证损失: {val_loss:.4f})")
    else:
        counter += 1
        if counter >= 15:
            print(f"早停在第 {epoch + 1} 轮")
            break

# Reload the best checkpoint and report final test-set performance.
# map_location keeps the load working whatever device we are currently on
# (a GPU-saved state_dict would otherwise fail on a CPU-only reload), and
# weights_only=True avoids unpickling arbitrary objects from the file.
state_dict = torch.load(
    '../static/alex_mammals_best_model.pth',
    map_location=device,
    weights_only=True,
)
model.load_state_dict(state_dict)
test_accuracy, test_class_report = evaluate_model(model, test_loader, device)

print("\n最终模型在测试集上的性能:")
print(f"准确率: {test_accuracy:.2f}%")
# The per-class report was previously computed but never displayed.
print(test_class_report)