import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor, ViTModel

# Module-level cache for the ViT model and its image processor.
# Populated on the first call to vit_encode() and reused afterwards,
# so the (expensive) checkpoint load happens only once per process.
model_bak = None
processor_bak = None

def vit_encode(device="cuda:0", image_files=None,
            pretrained_model_name_or_path='/home/zry/datasets/utils/vit/vit-base-patch16-224'):
    """Encode a batch of images into ViT [CLS]-token embeddings.

    The ViT model and its processor are loaded lazily on the first call and
    cached in module globals (``model_bak`` / ``processor_bak``), so repeated
    calls reuse the same frozen encoder.

    Args:
        device: torch device string to place the model and inputs on.
        image_files: batch of already-loaded PIL images. Despite the name,
            these are image objects, not file paths (see the commented-out
            loading line below). Defaults to an empty list.
        pretrained_model_name_or_path: local path or hub id of the ViT
            checkpoint to load on first use.

    Returns:
        Tensor of shape [batch_size, hidden_dim] — the [CLS]-token embedding
        of each image (768-dim for vit-base).
    """
    # NOTE(review): callers apparently pass PIL images directly; the original
    # path-loading variant was left commented out:
    # images = [Image.open(image_file) for image_file in image_files]
    # Avoid the mutable-default-argument pitfall by defaulting to None.
    images = [] if image_files is None else image_files

    global model_bak, processor_bak
    if model_bak is None:
        # First call: load and cache processor + model.
        processor_bak = processor = ViTImageProcessor.from_pretrained(pretrained_model_name_or_path)
        model_bak = model = ViTModel.from_pretrained(pretrained_model_name_or_path).to(device)
        # Freeze all weights: the model is used purely as a feature extractor.
        for parameter in model.parameters():
            parameter.requires_grad = False
    else:
        model = model_bak
        processor = processor_bak

    # The processor accepts a whole batch of images at once.
    inputs = processor(images, return_tensors="pt").to(device)
    # Inference only — skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        outputs = model(**inputs)
    # [CLS] token of the last hidden state: one embedding vector per image.
    enc = outputs.last_hidden_state[:, 0, :]  # shape = [batch_size, dim]

    return enc