# https://blog.csdn.net/bananapai/article/details/145736300

import os

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Resize, ToTensor

from Alexnet2MNIST_model import AlexNet

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters (EPOCH deliberately kept tiny — the author's machine is slow).
EPOCH = 2
lr = 1e-4
batch_size = 1000
weight_decay = 0.00001

# Input transform: AlexNet expects 224x224 inputs, so upscale MNIST's 28x28 digits.
transformer = Compose([
    Resize((224, 224), antialias=True),
    ToTensor()
])


# Label transform defined as a named function; a lambda would work equally well.
def label_transformer(label):
    """Convert an integer class label into a torch tensor."""
    return torch.as_tensor(label)


# Training and validation splits of MNIST (expects ./data to already hold the dataset).
ds = MNIST('./data', train=True, download=False, transform=transformer, target_transform=label_transformer)
dl = DataLoader(ds, batch_size=batch_size, shuffle=True)
val_ds = MNIST('./data', train=False, download=False,
               transform=transformer, target_transform=label_transformer)
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=False)

model = AlexNet()
model.to(device)

try:
    # Resume from a previous checkpoint when one exists.
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine;
    # weights_only=True silences the untrusted-pickle warning for trusted weights.
    model.load_state_dict(torch.load('weights/model_detect_numbers.pt',
                                     map_location=device, weights_only=True))
    print("model loaded successfully")
except (FileNotFoundError, RuntimeError) as e:
    # Narrow catch: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit and hid the failure reason.
    print('model loads failed', e)

# Cross-entropy over the 10 digit classes (takes raw logits + integer labels).
loss_fn = torch.nn.CrossEntropyLoss()

optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
# LR scheduler: halve the learning rate when the validation loss plateaus.
# (Cosine annealing would be an alternative.)
scheduler = ReduceLROnPlateau(
    optimizer,
    # mode='min': a metric that has stopped DECREASING triggers a reduction.
    mode='min', factor=0.5,
    # patience=1: tolerate one epoch without improvement (beyond `threshold`)
    # before reducing the learning rate.
    patience=1, threshold=1e-3,
    cooldown=0,
    # Lower bound for the learning rate.
    # BUG FIX: the original used min_lr=2e-3, which is ABOVE the initial
    # lr of 1e-4 — new_lr = max(lr * factor, min_lr) would never drop below
    # the current lr, so the scheduler could never change anything.
    min_lr=1e-6
)

# Running totals for the training loop (accumulated across all epochs).
total_loss = 0
count = 0
epoch = 0


def train():
    """Run one training epoch over `dl`, updating the global running loss stats."""
    global total_loss, count
    model.train()
    num_batches = len(dl)
    for i, (inputs, labels) in enumerate(dl):
        # BUG FIX: move the batch to the model's device — the original left the
        # data on CPU, which fails whenever the model lives on CUDA.
        inputs, labels = inputs.to(device), labels.to(device)
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass.
        y = model(inputs)
        # Loss between logits and integer labels.
        loss = loss_fn(y, labels)
        # .item() extracts a Python float so the autograd graph can be freed.
        total_loss += loss.item()
        count += 1
        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        if (i + 1) % 2 == 0:
            # `epoch` is the outer training loop's variable.
            # BUG FIX: the original did `epoch += 1` per batch here, corrupting
            # that counter, and printed 0-based `i` against the float
            # len(ds) / batch_size instead of the real batch count.
            print(f'训练损失第【{i + 1}/{num_batches}】次（当前epoch【{epoch + 1}/{EPOCH}】）训练损失；{total_loss / count}')


# Running totals for the validation loop.
val_total_loss = 0
val_count = 0


# Evaluation pass: mean cross-entropy loss over the validation set.
def valid():
    """Evaluate the model on `val_dl` and return this epoch's mean validation loss."""
    global val_total_loss, val_count
    # BUG FIX: reset per call — the original accumulated across epochs, so the
    # value fed to the LR scheduler was a stale running average, not the
    # current epoch's loss.
    val_total_loss = 0
    val_count = 0
    model.eval()
    # No gradients needed during evaluation.
    with torch.no_grad():
        for inputs, labels in val_dl:
            # Keep the batch on the same device as the model.
            inputs, labels = inputs.to(device), labels.to(device)
            y = model(inputs)
            loss = loss_fn(y, labels)
            val_total_loss += loss.item()
            val_count += 1

    # BUG FIX: the original printed val_total_loss / count — dividing by the
    # TRAINING batch counter — so the reported number was wrong.
    print(f'评估损失；{val_total_loss / val_count}')
    return val_total_loss / val_count


# Main loop: one training pass + one validation pass per epoch.
for epoch in range(EPOCH):
    print(f'Epoch进度[{epoch + 1}/{EPOCH}]')
    # Train for one epoch.
    train()
    # Validate and record the loss.
    loss = valid()
    # The validation loss drives the ReduceLROnPlateau scheduler.
    scheduler.step(loss)

    # Show the (possibly reduced) current learning rate.
    print(optimizer.param_groups[0]['lr'])

# BUG FIX: torch.save does not create directories — if 'weights/' is missing
# (the load try/except above suggests it may be), the save raised
# FileNotFoundError and the whole training run was lost.
os.makedirs('weights', exist_ok=True)
torch.save(model.state_dict(), 'weights/model_detect_numbers.pt')

print("Alexnet2MNIST_train.py", 444)