# animal_train.py
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from animal_model import get_model, get_class_names
import os

def main():
    """Train an animal-image classifier on an ImageFolder-style dataset.

    Loads images from ``../../动物数据集`` (one subdirectory per class),
    trains the model returned by ``get_model`` for 20 epochs with Adam +
    StepLR, and checkpoints the epoch with the lowest average loss to
    ``animal_model.pth`` (state dicts plus the class-name list).
    """
    # Preprocessing / augmentation pipeline.
    # NOTE(review): RandomRotation after CenterCrop introduces black corner
    # borders; tolerable at ±10° but could be moved before the crop — confirm.
    transform = transforms.Compose([
        transforms.Resize(256),             # shorter side -> 256
        transforms.CenterCrop(224),         # 224x224 center crop
        transforms.RandomHorizontalFlip(),  # augmentation: random horizontal flip
        transforms.RandomRotation(10),      # augmentation: rotate within ±10°
        transforms.ToTensor(),
        # ImageNet statistics — matches torchvision pretrained backbones.
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Dataset root: one subdirectory per class (ImageFolder layout).
    data_path = '../../动物数据集'

    # Bail out early with a readable message if the dataset is missing.
    if not os.path.exists(data_path):
        print(f"错误：数据集路径 {data_path} 不存在！")
        print("请确保动物数据集文件夹存在，并且包含各个动物类别的子文件夹。")
        return

    train_dataset = datasets.ImageFolder(root=data_path, transform=transform)

    # FIX: guard against an empty dataset — the loop below would otherwise
    # divide by zero (len(train_loader) == 0 and total == 0).
    if len(train_dataset) == 0:
        print(f"错误：数据集路径 {data_path} 中没有找到任何图像！")
        return

    # Number of classes is inferred from the subdirectory names.
    num_classes = len(train_dataset.classes)
    print(f"检测到 {num_classes} 个类别: {train_dataset.classes}")

    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

    model = get_model(num_classes=num_classes)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    # Decay the learning rate by 10x every 7 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")
    model.to(device)

    def train(model, device, train_loader, optimizer, criterion, scheduler, epochs):
        """Run the training loop; checkpoint the lowest-loss epoch.

        Returns the path of the saved checkpoint file.
        """
        model.train()
        best_loss = float('inf')          # lowest epoch-average loss seen so far
        model_path = 'animal_model.pth'   # checkpoint destination

        for epoch in range(epochs):
            epoch_loss = 0.0  # running sum of batch losses this epoch
            correct = 0
            total = 0

            for batch_idx, (data, target) in enumerate(train_loader):
                data, target = data.to(device), target.to(device)
                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

                # Running accuracy.  FIX: use argmax instead of the deprecated
                # `.data` attribute (which silently detaches from autograd).
                predicted = output.argmax(dim=1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

                if batch_idx % 10 == 0:
                    print(f'Epoch {epoch + 1}, Batch {batch_idx}/{len(train_loader)}, '
                          f'Loss: {loss.item():.4f}, '
                          f'Accuracy: {100. * correct / total:.2f}%')

            scheduler.step()  # advance the LR schedule once per epoch

            avg_epoch_loss = epoch_loss / len(train_loader)
            accuracy = 100. * correct / total

            print(f"Epoch {epoch + 1} 结束，平均损失: {avg_epoch_loss:.6f}, 准确率: {accuracy:.2f}%")

            # Checkpoint whenever this epoch improves on the best average loss.
            if avg_epoch_loss < best_loss:
                best_loss = avg_epoch_loss
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': best_loss,
                    'classes': train_dataset.classes,  # label order for inference
                }, model_path)
                print(f"模型已保存到 {model_path}，当前最低损失: {best_loss:.6f}")

        print("训练结束，模型文件保存成功")
        return model_path

    # Kick off training; the nested helper returns the checkpoint path.
    model_path = train(model, device, train_loader, optimizer, criterion, scheduler, epochs=20)
    print(f"训练完成！模型已保存到: {model_path}")

# Standard entry guard: run training only when the script is executed
# directly, not when imported as a module.
if __name__ == "__main__":
    main()