# filtered-wit/filter_wit.py
import gc
from glob import glob
from io import BytesIO
from pathlib import Path
import clip
import pandas as pd
import torch
import ujson
import webdataset as wds
from PIL import Image
from sentence_transformers import SentenceTransformer
from torchvision.transforms import (CenterCrop, Compose, InterpolationMode,
Normalize, Resize, ToTensor)
from tqdm import tqdm
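
# Share tensors between DataLoader workers via the filesystem to avoid exhausting file descriptors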
torch.multiprocessing.set_sharing_strategy('file_system')
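
# Decode the JPEG while keeping the original bytes so they can be re-written to the output shard unchanged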
def load_image(jpg):
return jpg, Image.open(BytesIO(jpg))
def load_json(json):
return ujson.loads(json)
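
# Field -> decoder mapping for the first map_dict pass over each sample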
load_preprocess_map = {
'jpg': load_image,
'json': load_json,
}
def convert_image_to_rgb(im):
return im.convert("RGB")
# taken from https://github.com/openai/CLIP
image_transforms = Compose([
Resize(224, interpolation=InterpolationMode.BICUBIC),
CenterCrop(224),
convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
])
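
# Apply the CLIP transform to the decoded image while passing the original JPEG bytes through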
def image_preprocess(jpgs):
jpg_orig, im = jpgs
im = image_transforms(im)
return jpg_orig, im
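
# WIT metadata fields that may contain caption-like text for a sample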
texts_to_check = [
'page_title',
'section_title',
'hierarchical_section_title',
'caption',
'caption_attribution_description',
'caption_alt_text_description',
'context_page_description',
'context_section_description'
]
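
# Gather the candidate captions and keep the full original metadata for the output shard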
def meta_preprocess(meta: dict):
return {
'captions': [meta[text] for text in texts_to_check if text in meta and meta[text]],
'orig': meta
}
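
# Field -> preprocessing mapping applied after the initial decode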
mclip_preprocess_map = {
'jpg': image_preprocess,
'json': meta_preprocess
}
def log(msg):
print(msg, end='\n\n\n\n')
return msg
def func(wds_dataset_str, device=None, batch_size=4, **kwargs):
    """Filter WIT samples by CLIP image-caption similarity and write the survivors
    to new webdataset shards; extra keyword arguments are passed to wds.WebLoader."""
    nocap = 0  # samples dropped because no caption passed the similarity threshold
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Loading models:')
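    # ViT-B/32 CLIP encodes the images; the multilingual sentence-transformers model
    # encodes captions into the same embedding space, so the similarities are comparable.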
model, _ = clip.load('ViT-B/32', device=device, jit=False)
mclip = SentenceTransformer(
'sentence-transformers/clip-ViT-B-32-multilingual-v1', device=device)
cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
print('Finished loading models')
    # Pipeline: decode fields -> preprocess for CLIP -> (jpg, json) tuples -> batches of batch_size
    ds = (wds.WebDataset(wds_dataset_str, shardshuffle=False)
          .map_dict(**load_preprocess_map)
          .map_dict(**mclip_preprocess_map)
          .to_tuple('jpg', 'json')
          .batched(batch_size))
dl = wds.WebLoader(ds, batch_size=None, shuffle=False, **kwargs)
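    # Write accepted samples into numbered output shards (00000.tar, ...), up to 10000 samples each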
writer = wds.ShardWriter('%05d.tar', 10000)
for i, batch in enumerate(tqdm(dl)):
try:
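            # batch is ([(jpg_bytes, image_tensor), ...], [meta_dict, ...])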
imss, metas = batch
orig_jpgs, ims = zip(*imss)
ims = torch.stack(ims)
captionss = [meta['captions'] for meta in metas]
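            # Embed the image batch with CLIP and each sample's captions with multilingual CLIP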
with torch.no_grad():
                image_features = torch.unbind(model.encode_image(ims.to(device)).float())
                text_featuress = [
                    mclip.encode(captions, convert_to_tensor=True).to(device).float()
                    for captions in captionss
                ]
            # Cosine similarity between each image embedding and its candidate caption embeddings
            similarities = [
                cosine_similarity(image_feature.repeat(len(text_features), 1), text_features).tolist()
                for image_feature, text_features in zip(image_features, text_featuress)
            ]
            # Keep only captions whose image-text similarity clears the 0.26 threshold
            captionss = [
                [cap for cap, sim in zip(captions, similarity) if sim > 0.26]
                for captions, similarity in zip(captionss, similarities)
            ]
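            # Write each sample that still has at least one caption; count and skip the rest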
for orig_jpg, captions, meta in zip(orig_jpgs, captionss, metas):
if len(captions) == 0:
nocap += 1
tqdm.write(f'No captions: {nocap}')
continue
sample = {
'__key__': f'{writer.count:08}',
'jpg': orig_jpg,
'txt': ''.join(captions),
'json': ujson.dumps(meta['orig'])
}
writer.write(sample)
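            # Periodically release Python and (if present) CUDA memory during long runs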
if i % 25 == 0:
gc.collect()
torch.cuda.empty_cache()
        except Exception as e:
            print(f'Error: {e}')
            # Re-raise so a failing batch stops the run instead of being skipped silently
            raise
writer.close()
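

# Example invocation (hypothetical shard pattern and loader settings; adjust to the actual WIT shards):
# func('wit/{00000..00330}.tar', batch_size=4, num_workers=8)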