import torch
import clip
import argparse
from PIL import Image
import yaml
from tqdm import tqdm
import os
import pickle

import torch.utils.data as data

class TextInput(data.Dataset):
    """Dataset over a YAML mapping of caption keys to caption strings.

    Each item is a ``(key, caption)`` pair taken from the mapping in
    its iteration order.
    """

    def __init__(self, in_file):
        # NOTE(review): yaml.Loader can construct arbitrary Python objects;
        # if the input file is not fully trusted, yaml.safe_load would be
        # the safer choice — confirm the provenance of in_file.
        with open(in_file, 'r') as f:
            self.caps = yaml.load(f, Loader=yaml.Loader)
        self.data = list(self.caps.items())

    def __getitem__(self, idx):
        key, caption = self.data[idx]
        return key, caption

    def __len__(self):
        return len(self.data)

# Command-line interface for the caption-feature extraction script.
parser = argparse.ArgumentParser(
    description='Encode captions with CLIP and save the text features.')
parser.add_argument('--output_dir', default='output/x-vlm_cap',
                    help='directory where caption_features.pth is written')
parser.add_argument('--bs', default=1, type=int,
                    help='batch size for the caption DataLoader')
parser.add_argument('--input_file', type=str, default='./input',
                    help='YAML file mapping caption keys to caption strings')

args = parser.parse_args()

# Use the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)


dataset = TextInput(args.input_file)
data_loader = data.DataLoader(dataset, args.bs, num_workers=8,
                              shuffle=False, pin_memory=True)

# Maps each caption key to its CLIP text embedding (a CPU tensor).
text_features_map = {}

# Inference only: torch.no_grad() prevents autograd graphs from being
# built for every batch, keeping memory flat across the whole run.
with torch.no_grad():
    for keys, caps in tqdm(data_loader):
        tokens = clip.tokenize(caps).to(device)
        # .cpu() moves the features off the GPU; no .detach() needed
        # inside no_grad.
        batch_features = model.encode_text(tokens).cpu()
        for i, key in enumerate(keys):
            text_features_map[key] = batch_features[i]

# Fix: torch.save does not create missing parent directories, so the
# original crashed when output_dir did not already exist.
os.makedirs(args.output_dir, exist_ok=True)
torch.save(text_features_map, os.path.join(args.output_dir, 'caption_features.pth'))