# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: inference
# > Author: 04000387
# > Created Time: 2024/12/26 15:43
# *******************************************************************
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from datas import ReadTxt, CollectFn
from model import VggTransformer
from torchvision.transforms import v2
import matplotlib.pyplot as plt

# --- Tokenizer, device, data & model setup --------------------------------
# NOTE(review): all paths below are machine-specific (Windows drive E:) —
# adjust before running on another machine.
model_path = "E:\\models\\ernie"
tokenizer = BertTokenizer.from_pretrained(model_path)

# Fall back to CPU when CUDA is unavailable instead of crashing at startup.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

sets = ReadTxt()

# Image preprocessing: HWC image -> CHW float32 tensor resized to 224x224.
# (ToDtype(scale=True) rescales integer inputs to [0, 1]; float inputs pass
# through unchanged.)
transformers = v2.Compose([
    v2.ToImage(),
    v2.ToDtype(torch.float32, scale=True),
    v2.Resize(size=(224, 224)),
])

collect_fn = CollectFn(tokenizer, transformers=transformers)

# NOTE(review): vocab_size + 1 presumably reserves one extra id alongside
# padding_idx=0 — confirm this matches the training configuration.
model = VggTransformer(out_channel=512, vocab_size=tokenizer.vocab_size + 1, padding_idx=0, num_layer=3).to(device)
model.load_state_dict(torch.load("../checkpoints/best_model - 最新.pth", weights_only=True, map_location=device))


# Batch size 1: decode one sample at a time during inference.
loader = DataLoader(sets, 1, collate_fn=collect_fn)


def generate(_model, _img, max_length=32):
    """Greedily decode token ids for a batch of images.

    Args:
        _model: captioning model invoked as ``_model(img, tokens, attention_mask)``
            and returning per-position vocabulary scores.
        _img: batch of image tensors; moved onto the module-level ``device`` here.
        max_length: maximum number of tokens to generate per image.

    Returns:
        ``torch.LongTensor`` of shape ``(batch, max_length + 1)`` on the CPU:
        the start token id 1 followed by the greedily selected token ids.
    """
    _model.eval()
    _img = _img.to(device)
    # Seed every sequence in the batch with start token id 1.
    buffer = torch.ones((_img.size(0), 1), dtype=torch.long, device=device)
    attention_mask = torch.ones((_img.size(0), max_length), dtype=torch.long, device=device)
    with torch.no_grad():
        for i in range(max_length):
            out = _model(_img, buffer, attention_mask[:, :(i + 1)])
            # Greedy step: pick the highest-scoring vocabulary id at the last
            # position. (The original bound this to a variable named `logits`,
            # but it holds argmax *indices*, i.e. token ids.)
            next_token = torch.argmax(out[:, -1, :], dim=-1)
            buffer = torch.cat([buffer, next_token.reshape(-1, 1)], dim=-1)

    return buffer.detach().cpu()




# i = 0
# for img, con, _ in loader:
#     if i < 23000:
#         i += 1
#         continue
#
#     plt.imshow(img.squeeze().permute(1, 2, 0).detach().numpy())
#     print(tokenizer.batch_decode(con["input_ids"]))
#     out = generate(model, img, 64)
#     print(tokenizer.batch_decode(out))
#     plt.show()
#     break



import cv2


# --- Single-image inference ------------------------------------------------
img_path = "E:\\01_git\\generate_dir\\generate\\img\\000e6b2f-ef9a-411a-a39f-d2da7be3abbd.png"
imgv = cv2.imread(img_path)
if imgv is None:
    # cv2.imread silently returns None on a bad/unreadable path; fail loudly
    # here instead of crashing cryptically inside cvtColor below.
    raise FileNotFoundError(f"could not read image: {img_path}")

# OpenCV loads BGR uint8; convert to RGB and scale to [0, 1] floats so the
# tensor matches what the preprocessing pipeline expects.
img_s = cv2.cvtColor(imgv, cv2.COLOR_BGR2RGB) / 255.0

# Preprocess, add a batch dimension, decode, and print the caption.
imgv = transformers(img_s).unsqueeze(0)
out = generate(model, imgv)
print(tokenizer.batch_decode(out))

plt.imshow(img_s)
plt.show()