# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: main
# > Author: 04000387
# > Created Time: 2024/12/23 16:34
# *******************************************************************
import torch
from torch import nn, optim
from transformers import BertTokenizer
from datas import ReadWithBoxData, WithBoxCollectFn
from model import VggTransformer, TrainTransformer
from torchvision.transforms import v2
# --- Configuration -------------------------------------------------------
# Local path to a pretrained ERNIE checkpoint (BERT-compatible tokenizer);
# adjust per machine.
model_path = "D:\\models\\ernie"
tokenizer = BertTokenizer.from_pretrained(model_path)

# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# --- Data ----------------------------------------------------------------
# Dataset of samples with box annotations (see datas.ReadWithBoxData).
dataset = ReadWithBoxData("../generate/pure")

# Image preprocessing pipeline: convert to tensor image, scale to float32
# in [0, 1], resize to 224x224.
# NOTE: renamed from `transformers` to avoid shadowing the HuggingFace
# `transformers` package name imported above.
image_transforms = v2.Compose([
    v2.ToImage(),
    v2.ToDtype(torch.float32, scale=True),
    v2.Resize(size=(224, 224)),
])

# Batch collation — presumably tokenizes text with `tokenizer` and applies
# the image pipeline; see datas.WithBoxCollectFn for details.
collect_fn = WithBoxCollectFn(tokenizer, image_transforms)

# --- Model ---------------------------------------------------------------
# vocab_size + 1 reserves index 0 as the padding token (padding_idx=0).
model = VggTransformer(
    out_channel=256,
    vocab_size=tokenizer.vocab_size + 1,
    padding_idx=0,
    num_layer=1,
    dim_feedforward=128,
).to(device)

# To resume training from a saved checkpoint, load weights here, e.g.:
#   model.load_state_dict(torch.load("../checkpoints/best_model.pth",
#                                    weights_only=True, map_location=device))

# --- Training ------------------------------------------------------------
# Two-headed loss: cross-entropy on the "output" head, MSE on the "box" head.
loss_fn = {"output": nn.CrossEntropyLoss(), "box": nn.MSELoss()}
# Optimize only trainable parameters (some may be frozen inside the model).
optimizer = optim.AdamW(
    [p for p in model.parameters() if p.requires_grad],
    lr=1e-3,
)
trainer = TrainTransformer(
    model,
    loss_fn,
    optimizer,
    batch_size=8,
    collect_fn=collect_fn,
    device=device,
    show_size=4,
    save_model_iter=64,
    warmup_step=1500,
)
trainer.run(dataset, epoch=1500)

