from pathlib import Path

from PIL import Image
import torch
# NOTE(review): AutoAugment appears unused in this script — confirm before removing.
from torchvision.transforms import AutoAugment
from transformers import CLIPProcessor, CLIPModel

# Hugging Face hub id of the pretrained CLIP checkpoint (LAION ViT-H/14).
_MODEL_ID = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"

# Load the full CLIP model and its matching text/image preprocessor.
model: CLIPModel = CLIPModel.from_pretrained(_MODEL_ID)
processor = CLIPProcessor.from_pretrained(_MODEL_ID)

# Probe image used only to drive the processor call below.
img_path = "imgs/t3.jpg"
# Convert to RGB so grayscale/CMYK/palette images don't yield an unexpected
# channel count — the CLIP image processor expects 3-channel RGB input.
image = Image.open(img_path).convert("RGB")

# Tokenize the candidate class prompts ("text" vs "pattern") and preprocess
# the image in a single call; returns PyTorch tensors.
inputs = processor(text=["text", "pattern"], images=image, return_tensors="pt", padding=True)

# Assemble a checkpoint holding the CLIP vision tower, its projection, and a
# text-derived classification head for downstream text/pattern detection.
state = {f'vision_model.{k}': v for k, v in model.vision_model.state_dict().items()}

state_proj = {f'visual_projection.{k}': v for k, v in model.visual_projection.state_dict().items()}
state.update(state_proj)

# Precompute the text embeddings that will serve as the head's weights.
# no_grad() keeps the autograd graph out of the saved tensor (inference only).
# NOTE(review): these features are NOT L2-normalized here — confirm the
# consumer normalizes before computing cosine-similarity logits.
with torch.no_grad():
    state['head.weight'] = model.get_text_features(inputs.input_ids, inputs.attention_mask)
state['head.scale'] = model.logit_scale.data
print(state['head.weight'].shape)

# Ensure the output directory exists; torch.save does not create it.
Path('ckpts').mkdir(parents=True, exist_ok=True)
torch.save(state, 'ckpts/clip_text_det.ckpt')