import pickle

import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
from PIL import Image

from models import *
from config import *

# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the id -> word vocabulary mapping produced during training.
with open('./dict/id2word.pkl', 'rb') as vocab_file:
    vocab = pickle.load(vocab_file)

# Standard ImageNet preprocessing: resize to the network's input size,
# convert to a tensor, then normalize with ImageNet channel statistics.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

image_name = "./images/testset.jpg"
image = Image.open(image_name).convert('RGB')
x = preprocess(image)
x = x.unsqueeze(0).to(device)  # add a batch dimension of 1 (required by the encoder)


# Build the encoder/decoder pair and move them to the target device.
encoder = EncoderCNN(embedding_dim).to(device)
decoder = DecoderRNN(embedding_dim, hidden_state_size, len(vocab), 1).to(device)

# map_location=device ensures checkpoints saved on a GPU machine still load
# on a CPU-only host (plain torch.load would raise a CUDA deserialization error).
encoder.load_state_dict(torch.load("./models/encoder22.pth", map_location=device))
decoder.load_state_dict(torch.load("./models/decoder22.pth", map_location=device))
encoder.eval()  # inference mode: freeze dropout / batch-norm behavior
decoder.eval()


# Generate a caption: encode the image, then greedily sample token ids.
# No gradients are needed at inference time.
with torch.no_grad():
    feature = encoder(x)
    sampled_ids = decoder.sample(feature, max_len)
    # Drop the batch dimension and move the id sequence to host memory.
    sampled_ids = sampled_ids[0].cpu().numpy()

print(sampled_ids)


# Map sampled ids back to words, skipping token id 2
# (NOTE(review): looks like the <end>/pad marker — confirm against the vocab).
# Using a distinct loop variable avoids shadowing the image tensor `x`.
sentence = ' '.join(vocab[token_id] for token_id in sampled_ids if token_id != 2)
print(sentence)


# Display the input image with the generated caption drawn above it in red.
fig, ax = plt.subplots(figsize=(16, 8))
ax.imshow(image)
ax.text(0, -10, sentence, fontsize=15, color='red', wrap=True)
plt.show()



