from PIL import Image
import torch
from transformers import CLIPModel, CLIPProcessor

# Zero-shot image classification with CLIP:
# score one image against a list of candidate text labels.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open("truck.jpg")
cls_list = ["dog", "woman", "man", "car", "truck", "a black truck", "bird", "a white truck", "black cat"]

# `inputs` rather than `input` — avoid shadowing the builtin.
inputs = processor(text=cls_list, images=image, return_tensors="pt", padding=True)

# Inference only: disable autograd bookkeeping to save memory/time.
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.keys())

# The CLIP model output contains six fields:
# - logits_per_image: similarity of the image to every label in cls_list; shape [1 x 9]
# - logits_per_text: transpose of logits_per_image (logits_per_image.t())
# - text_embeds: projected feature matrix for each text label
# - image_embeds: projected feature matrix for the image
# - text_model_output: text-encoder output before the projection head
# - vision_model_output: vision-encoder output before the projection head

logits_per_image = outputs.logits_per_image
probs = logits_per_image.softmax(dim=1)

# Print each label with its probability as a plain Python float
# (.item() avoids the `tensor(...)` repr of a 0-dim tensor).
for label, prob in zip(cls_list, probs[0]):
    print(f"{label}: {prob.item()}")
