import gc
from io import BytesIO

import clip
import torch
import ujson
import webdataset as wds
from PIL import Image
from sentence_transformers import SentenceTransformer
from torchvision.transforms import (CenterCrop, Compose, InterpolationMode,
                                    Normalize, Resize, ToTensor)
from tqdm import tqdm

# Avoid "too many open files" errors when DataLoader workers share tensors.
torch.multiprocessing.set_sharing_strategy('file_system')


def load_image(jpg):
    # Keep the raw JPEG bytes alongside the decoded PIL image so the untouched
    # bytes can be written back out later.
    return jpg, Image.open(BytesIO(jpg))


def load_json(json):
    return ujson.loads(json)


# First map_dict stage: decode raw tar entries.
load_preprocess_map = {
    'jpg': load_image,
    'json': load_json,
}


def convert_image_to_rgb(im):
    return im.convert("RGB")


# taken from https://github.com/openai/CLIP
image_transforms = Compose([
    Resize(224, interpolation=InterpolationMode.BICUBIC),
    CenterCrop(224),
    convert_image_to_rgb,
    ToTensor(),
    Normalize((0.48145466, 0.4578275, 0.40821073),
              (0.26862954, 0.26130258, 0.27577711)),
])


def image_preprocess(jpgs):
    jpg_orig, im = jpgs
    im = image_transforms(im)
    return jpg_orig, im


# Metadata fields that may contain usable caption text for an image.
texts_to_check = [
    'page_title',
    'section_title',
    'hierarchical_section_title',
    'caption',
    'caption_attribution_description',
    'caption_alt_text_description',
    'context_page_description',
    'context_section_description'
]


def meta_preprocess(meta: dict):
    # Collect every non-empty candidate caption and keep the original metadata.
    return {
        'captions': [meta[text] for text in texts_to_check if text in meta and meta[text]],
        'orig': meta
    }


mclip_preprocess_map = {
    'jpg': image_preprocess,
    'json': meta_preprocess
}
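
# The two map_dict stages above run in sequence inside func(): raw tar entries
# are decoded first (load_preprocess_map), then turned into model-ready image
# tensors and candidate caption lists (mclip_preprocess_map).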


def log(msg):
    print(msg, end='\n\n\n\n')
    return msg


def func(wds_dataset_str, device=None, batch_size=4, **kwargs):
    """Filter a WebDataset by CLIP image-caption similarity and re-write the survivors as shards."""
    nocap = 0
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('Loading models:')
    # CLIP ViT-B/32 encodes the images; the multilingual sentence-transformers
    # model encodes the captions into the same embedding space.
    model, _ = clip.load('ViT-B/32', device=device, jit=False)
    mclip = SentenceTransformer(
        'sentence-transformers/clip-ViT-B-32-multilingual-v1', device=device)
    cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    print('Finished loading models')

    ds = (wds.WebDataset(wds_dataset_str, shardshuffle=False)
          .map_dict(**load_preprocess_map)
          .map_dict(**mclip_preprocess_map)
          .to_tuple('jpg', 'json')
          .batched(batch_size))
    dl = wds.WebLoader(ds, batch_size=None, shuffle=False, **kwargs)

    # Write surviving samples back out as tar shards of up to 10,000 samples each.
    writer = wds.ShardWriter('%05d.tar', maxcount=10000)
    for i, batch in enumerate(tqdm(dl)):
        try:
            # Each batch element is ((original_jpg_bytes, image_tensor), metadata).
            imss, metas = batch
            orig_jpgs, ims = zip(*imss)
            ims = torch.stack(ims)

            captionss = [meta['captions'] for meta in metas]

            with torch.no_grad():
                # Image embeddings from CLIP; caption embeddings from the
                # multilingual model, aligned to the same ViT-B/32 space.
                image_features = torch.unbind(
                    model.encode_image(ims.to(device)).float())
                text_featuress = [
                    mclip.encode(captions, convert_to_tensor=True).to(device).float()
                    for captions in captionss
                ]

                # Cosine similarity between each image and each of its candidate captions.
                similarities = [
                    cosine_similarity(
                        image_feature.repeat(len(text_features), 1),
                        text_features).tolist()
                    for image_feature, text_features in zip(image_features, text_featuress)
                ]

            # Keep only captions whose image-text similarity clears the 0.26 threshold.
            captionss = [
                [cap for cap, sim in zip(captions, similarity) if sim > 0.26]
                for captions, similarity in zip(captionss, similarities)
            ]

            for orig_jpg, captions, meta in zip(orig_jpgs, captionss, metas):
                # Skip samples for which no caption survived the filter.
                if len(captions) == 0:
                    nocap += 1
                    tqdm.write(f'No captions: {nocap}')
                    continue

                sample = {
                    '__key__': f'{writer.count:08}',
                    'jpg': orig_jpg,
                    'txt': ''.join(captions),
                    'json': ujson.dumps(meta['orig'])
                }
                writer.write(sample)

            # Periodically release Python and CUDA memory to keep long runs stable.
            if i % 25 == 0:
                gc.collect()
                torch.cuda.empty_cache()
        except Exception as e:
            print(f'Error: {e}')
            raise
    writer.close()
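

if __name__ == '__main__':
    # Minimal usage sketch: the shard pattern below is a placeholder, not a
    # path from the original project; point it at your own WebDataset shards.
    func('data/{00000..00099}.tar', batch_size=4, num_workers=4)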