# import os
# print(os.getcwd())

# Demo: test a pretrained multimodal (CLIP) model on one image/text pair

# 1. Import packages
from transformers import CLIPProcessor, CLIPModel
from PIL import Image
import torch
import time


# 2. Load the CLIP model and its processor.
# NOTE(fix): the original reused a single t1 for every timing print, so the
# later "耗时" lines reported *cumulative* time since before the model load,
# not the per-step elapsed time their labels imply. The timer is now reset
# before each timed step.
t1 = time.time()
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
print("加载模型耗时：", time.time() - t1)

# 3. Load the image. Use a context manager so the file handle is closed;
# the processor consumes the pixel data inside the `with` block, so the
# handle is not needed afterwards.
with Image.open("birds.jpg") as image:
    # Text description of the image (CLIP prompt).
    text = "a birds on the tree"

    # 4. Preprocess image and text into model-ready tensors.
    t1 = time.time()
    inputs = processor(text=[text], images=image, return_tensors="pt", padding=True)
    print("处理文本和图像耗时：", time.time() - t1)

# 5. Obtain image and text embeddings (no gradients needed for inference).
t1 = time.time()
with torch.no_grad():
    outputs = model(**inputs)
    image_embeds = outputs.image_embeds  # image embedding
    text_embeds = outputs.text_embeds    # text embedding
    print("获取编码耗时：", time.time() - t1)


print(image_embeds.shape)   # [batch_size, 512] for clip-vit-base-patch32
