# import torch
# import torch.nn as nn
# from 数据集处理 import train_dl
# from torchvision.models import ResNet18_Weights,resnet18
#
# '''
# 模型训练的一般步骤：
# 超参数
# 数据
# 创建模型
# 损失函数
# 优化器
# 学习率调度器
# 开始循环训练
#     清空梯度
#     前向传播
#     计算损失
#     反向传播
#     更新参数
# 保存模型
#
# 基本的提速方式：
# 1、使用GPU
# 2、利用多线程或者多进程
# 3、调整批次大小充分利用显卡
# 4、学习率调度器
# '''
#
# # ——创建设备————————————————————————————————————————————————————————————————————————————————
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
# def load_resnet18(num_classes=50):
#     model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
#     # 冻结底层参数
#     for p in model.parameters():
#         p.requires_grad = False
#     # 修改最后一层全连接 为50分类
#     in_features = model.fc.in_features
#     out_features = num_classes
#     model.fc = nn.Linear(in_features, out_features)
#     # 只有最后一层需要训练
#     for p in model.fc.parameters():
#         p.requires_grad = True
#     return model.to(device)
#
# model = load_resnet18()
# # ——超参数——————————————————————————————————————————————————————————————————————————————————
# lr = 0.01
# Epochs = 50
#
#
#
# # ——损失函数、优化器、学习率调度器——————————————————————————————————————————————————————————————
# '''
# 1.nn.CrossEntropyLoss()      内部已经带有Logsofmax激活函数了，输出层可以不用激活了
# 2.nn.NLLLoss()               内部不带，输出层需要进行softmax激活
# '''
# loss_fn = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(model.fc.parameters(), lr=lr)
# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, Epochs)
#
# # ——开始循环训练——————————————————————————————————————————————————————————————————————————————
# for epoch in range(Epochs):
#     total_loss = 0
#     for i, (imgs, labels) in enumerate(train_dl):
#         # 将数据放到设备上
#         imgs, labels = imgs.to(device), labels.to(device)
#         optimizer.zero_grad()
#         y_pre = model(imgs)
#         loss = loss_fn(y_pre, labels)
#         total_loss += loss.item()
#         loss.backward()
#         optimizer.step()
#         if i==0 or (i+1)%50==0:
#             print(f"Batch:{i+1} Train_Loss:{loss.item():.4f}")
#     # 输出一轮训练的信息
#     avg_loss = (total_loss / len(train_dl))
#     print(f"[{epoch+1}/{Epochs}] Loss:{avg_loss:.4f}")
#     # 更新学习率调度器
#     scheduler.step()
#
#
# # ——保存模型————————————————————————————————————————————————————————————————————————————————
# torch.save(model.state_dict(), "./BEST_MODEL.pth")
#
#
import torch
import torch.nn as nn
from 数据集处理 import train_dl
from torchvision.models import resnet18, ResNet18_Weights
import os

'''
模型训练的一般步骤：
超参数
数据
创建模型
损失函数
优化器
学习率调度器
开始循环训练
    清空梯度
    前向传播
    计算损失
    反向传播
    更新参数
保存模型

基本的提速方式：
1、使用GPU
2、利用多线程或者多进程
3、调整批次大小充分利用显卡
4、学习率调度器
'''

# ——Device setup——————————————————————————————————————————————————————————————
cuda_ok = torch.cuda.is_available()
device = torch.device('cuda' if cuda_ok else 'cpu')
print(f"Using device: {device}")


def load_resnet(num_classes=50, weights=ResNet18_Weights.IMAGENET1K_V1):
    """Build a frozen-backbone ResNet-18 for transfer learning.

    The pretrained backbone is frozen; only the freshly created final
    fully connected layer is trainable.

    Args:
        num_classes: output size of the new classification head (default 50).
        weights: pretrained weights to start from. Defaults to ImageNet-1k
            (unchanged behavior); pass ``None`` to skip the download and
            start from randomly initialised weights (useful offline/in tests).

    Returns:
        The model, moved to the module-level ``device``.
    """
    model = resnet18(weights=weights)

    # Freeze the backbone so gradient updates touch only the new head.
    for p in model.parameters():
        p.requires_grad = False

    # Replace the 1000-way ImageNet head with a num_classes-way one.
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_classes)

    # A fresh nn.Linear already has requires_grad=True, but be explicit:
    # only the new head is trained.
    for p in model.fc.parameters():
        p.requires_grad = True

    return model.to(device)


# Instantiate the transfer-learning model (downloads ImageNet weights on
# first run; everything except the final fc layer is frozen).
model = load_resnet(num_classes = 50)

# ——Hyperparameters———————————————————————————————————————————————————————————
lr = 0.01       # NOTE(review): 0.01 is high for Adam (its default is 1e-3) — confirm intended
Epochs = 50     # the cosine schedule below anneals over exactly this many epochs

# ——Loss function, optimizer, LR scheduler————————————————————————————————————
# CrossEntropyLoss applies log-softmax internally, so the model emits raw logits.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.fc.parameters(), lr=lr)  # optimize only the new final layer
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=Epochs)

# ——训练函数——————————————————————————————————————————————————————————————————————————————


def train_model(model, train_loader, loss_fn, optimizer, scheduler, epochs):
    """Train ``model`` in place for ``epochs`` passes and return it.

    Args:
        model: network to train; its parameters' device decides where
            batches are moved.
        train_loader: iterable of ``(imgs, labels)`` batches supporting ``len()``.
        loss_fn: criterion taking raw logits, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer over the trainable parameters.
        scheduler: learning-rate scheduler, stepped once per epoch.
        epochs: number of training epochs.

    Returns:
        The same ``model`` object, trained in place.
    """
    model.train()

    # Move batches to wherever the model's parameters live. In this file the
    # model was already placed on the module-level `device`, so this matches
    # the previous behavior while keeping the function self-contained.
    device = next(model.parameters()).device

    for epoch in range(epochs):
        total_loss = 0.0
        correct = 0
        total = 0

        for i, (imgs, labels) in enumerate(train_loader):
            imgs, labels = imgs.to(device), labels.to(device)

            optimizer.zero_grad()
            y_pred = model(imgs)
            loss = loss_fn(y_pred, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            # Accuracy bookkeeping: argmax under no_grad instead of the
            # deprecated `.data` attribute, so it never tracks gradients.
            with torch.no_grad():
                predicted = y_pred.argmax(dim=1)
                batch_correct = (predicted == labels).sum().item()
            total += labels.size(0)
            correct += batch_correct

            if i == 0 or (i + 1) % 50 == 0:
                batch_acc = 100 * batch_correct / labels.size(0)
                print(f"Epoch: {epoch + 1}/{epochs}, Batch: {i + 1}/{len(train_loader)}, "
                      f"Loss: {loss.item():.4f}, Batch Acc: {batch_acc:.2f}%")

        # Per-epoch summary; guard the divisions so an empty loader reports
        # instead of raising ZeroDivisionError.
        n_batches = len(train_loader)
        avg_loss = total_loss / n_batches if n_batches else float("nan")
        epoch_acc = 100 * correct / total if total else 0.0
        current_lr = scheduler.get_last_lr()[0]

        print(f"[{epoch + 1}/{epochs}] Avg Loss: {avg_loss:.4f}, "
              f"Accuracy: {epoch_acc:.2f}%, LR: {current_lr:.6f}")
        print("-" * 60)

        # Anneal the learning rate once per epoch.
        scheduler.step()

    return model


# ——Run training——————————————————————————————————————————————————————————————
print("开始训练...")
trained_model = train_model(model, train_dl, loss_fn, optimizer, scheduler, Epochs)

# ——Save the model————————————————————————————————————————————————————————————
# Create the save directory (no-op if it already exists).
os.makedirs("./models", exist_ok=True)
model_save_path = "./models/BEST_MODEL.pth"

# Save a checkpoint dict: weights, optimizer state, and run metadata.
# NOTE(review): storing `loss_fn` pickles the whole nn.Module — a checkpoint
# saved this way cannot be loaded with torch.load(weights_only=True); confirm
# this is intentional (saving just its class name would be safer).
torch.save({
    'model_state_dict': trained_model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'epoch': Epochs,
    'loss': loss_fn
}, model_save_path)

print(f"模型已保存到: {model_save_path}")


# ——验证模型加载——————————————————————————————————————————————————————————————————————————————
def load_trained_model(num_classes=50):
    """Rebuild the frozen-backbone ResNet-18 and restore its trained weights.

    NOTE(review): torch.load without weights_only unpickles arbitrary objects
    (this checkpoint also stores loss_fn) — only load checkpoints you trust.
    """
    restored = load_resnet(num_classes=num_classes)
    state = torch.load(model_save_path, map_location=device)['model_state_dict']
    restored.load_state_dict(state)
    return restored

# 示例：如何加载训练好的模型
# loaded_model = load_trained_model(num_classes=50)
# loaded_model.eval()