import torch
from torch import nn
from torch.optim import lr_scheduler
from Func import train_model, validate_model, data_preproc_load, save_model_and_log, plot_and_save_fig
from timm import create_model  # Pytorch image models

# --- Tunable hyperparameters / configuration ---
model_name = 'resnetrs50'  # supported values: 'resnetrs50', 'tf_efficientnetv2_b3'
input_channels = 3  # RGB images have 3 input channels (not referenced below in this chunk)
num_classes = 4  # number of target classes; must match len(classes)
# classes = ['impressionism', 'realism', 'surrealism']  # 3
classes = ['expressionism', 'impressionism', 'realism', 'surrealism']  # 4
# classes = ['baroque', 'expressionism', 'impressionism', 'realism', 'surrealism']  # 5
data_improve = False  # enable data augmentation (rotation, scaling, cropping, etc.)
train_path = 'Data/train'
test_path = 'Data/test'
num_epoch = 20  # maximum number of training epochs (early stopping may end sooner)
b_s: int = 48  # batch size
lr = 0.0015  # initial learning rate
w_d = 0.0001  # weight decay
# The optimizer and LR scheduler themselves are defined further below.
scheduler_mode = 'max'  # depends on the metric passed to scheduler.step(): 'min' for a loss (lower is better), 'max' for an accuracy (higher is better) — validation accuracy is used here
scheduler_factor = 0.5  # multiplicative factor applied to the LR on plateau
min_lr = 0.0001  # lower bound for the learning rate

# --- Data preprocessing and loading ---
train_loader, test_loader = data_preproc_load(data_improve, b_s, train_path, test_path)

# --- Model construction ---
# Pretrained weights are downloaded/cached locally by timm (huggingface hub cache).
model = create_model(model_name, pretrained=True)

# Replace the final classification head so its output size matches num_classes.
if model_name == 'resnetrs50':
    model.fc = nn.Linear(model.fc.in_features, num_classes)
elif model_name == 'tf_efficientnetv2_b3':
    model.classifier = nn.Linear(model.classifier.in_features, num_classes)

# Prefer the Apple-silicon GPU (MPS) when available, otherwise fall back to CPU.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model = model.to(device)

# --- Loss function, optimizer, and learning-rate scheduler ---
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=lr, weight_decay=w_d)
# Shrink the LR by scheduler_factor whenever the monitored metric plateaus.
scheduler = lr_scheduler.ReduceLROnPlateau(
    optimizer, mode=scheduler_mode, factor=scheduler_factor, min_lr=min_lr)

# --- Training loop ---
model.train()  # enable training mode (dropout active, batch-norm updating)

best_val_acc = 0.0      # best validation accuracy seen so far
best_train_acc = 0.0    # best training accuracy seen so far
model_val_acc = 0.0     # validation accuracy of the captured (best) model
model_train_acc = 0.0   # training accuracy of the captured (best) model
model_epoch = 0         # epoch at which the best model was captured
no_improve = 0          # consecutive epochs without a meaningful improvement
best_model_params = None  # snapshot of the best model's weights
train_losses = []       # per-epoch histories, used for plotting at the end
train_accuracies = []
val_losses = []
val_accuracies = []

for epoch in range(num_epoch):
    curr_loss, curr_train_acc = train_model(model, criterion, optimizer, train_loader, device, epoch)
    curr_val_acc, avg_loss = validate_model(model, criterion, test_loader, device, num_classes, classes)

    scheduler.step(curr_val_acc)  # LR adjusts on validation accuracy ('max' mode)

    train_losses.append(curr_loss)
    train_accuracies.append(curr_train_acc)
    val_losses.append(avg_loss)
    val_accuracies.append(curr_val_acc)

    # Capture the model whenever either accuracy improves.
    if (curr_val_acc > best_val_acc) or (curr_train_acc > best_train_acc):
        # BUGFIX: model.state_dict() returns references to the live parameter
        # tensors, so a plain assignment would silently track the *latest*
        # weights as training continues, not the best ones. Clone each tensor
        # to take a true point-in-time snapshot.
        best_model_params = {k: v.detach().clone() for k, v in model.state_dict().items()}
        model_val_acc = curr_val_acc
        model_train_acc = curr_train_acc
        model_epoch = epoch
    # Count the epoch as "improved" only when the gain is large enough:
    # either train acc rose by >=2.5 while val acc did not drop more than 5,
    # or val acc rose by >2.5 outright. Otherwise increment no_improve.
    if ((curr_val_acc >= best_val_acc - 5) and (curr_train_acc >= best_train_acc + 2.5)) or \
            (curr_val_acc > best_val_acc + 2.5):
        no_improve = 0
    else:
        no_improve += 1
    # Track the running best accuracies.
    if curr_val_acc > best_val_acc:
        best_val_acc = curr_val_acc
    if curr_train_acc > best_train_acc:
        best_train_acc = curr_train_acc
    # Stop early after three epochs without a meaningful improvement.
    if no_improve >= 3:
        print('Early stopping')
        break

    # BUGFIX: torch.mps.empty_cache() raises on machines without an MPS
    # backend (the device falls back to CPU above) — only call it on MPS.
    if device.type == "mps":
        torch.mps.empty_cache()
    model.train()  # validate_model presumably switches to eval mode; re-enable training

# Persist the best model weights together with a log of this run's hyperparameters.
save_model_and_log(best_model_params, model_name, num_classes, model_val_acc, model_train_acc, b_s, num_epoch,
                   model_epoch,
                   data_improve, optimizer, lr, w_d, scheduler, scheduler_mode, scheduler_factor, min_lr)

# Plot and save the training/validation loss and accuracy curves.
plot_and_save_fig(train_losses, val_losses, train_accuracies, val_accuracies)
