from io import BytesIO
import ujson
import webdataset as wds
from PIL import Image
from tqdm import tqdm
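
# Re-shards an image/text WebDataset (the Conceptual Captions 12M dump referenced in
# __main__): each sample's image is decoded, filtered, resized, and re-encoded as JPEG,
# then written back out into new 10,000-sample shards alongside the caption and a small
# JSON record of the image dimensions.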
def load_text(txt: bytes):
    return txt.decode()


def load_image(jpg):
    return Image.open(BytesIO(jpg)).convert('RGB')


load_mapping = {
    'input.jpg': load_image,
    'output.txt': load_text
}
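
# Size filtering and resizing: images under 64 px on the short side are dropped, and
# larger images are downscaled so the long side does not exceed 512 px, keeping the
# aspect ratio.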
def valid_image(img):
    return min(img.size) >= 64

def resize_img(img, max_size=512):
    width, height = img.size
    if width < max_size and height < max_size:
        return img
    if width > height:
        new_width = max_size
        new_height = int(new_width * height / width)
    elif height > width:
        new_height = max_size
        new_width = int(new_height * width / height)
    else:
        new_height = new_width = max_size
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter.
    img = img.resize((new_width, new_height), Image.LANCZOS)
    return img
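
# Per-sample metadata stored as the 'json' field (dimensions after any resizing).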
def img_to_meta(img):
    width, height = img.size
    return {
        'width': width,
        'height': height
    }
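
# Full per-image transform: validate, resize, re-encode to JPEG bytes, and serialize
# the metadata; (None, None) marks a sample that should be skipped downstream.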
def get_image(img):
    if not valid_image(img):
        return None, None
    # Bug fix: the resized image was previously discarded; keep the return value so the
    # saved JPEG and the metadata reflect the resized dimensions.
    img = resize_img(img)
    img_stream = BytesIO()
    img.save(img_stream, format='jpeg')
    img_stream.seek(0)
    return img_stream.read(), ujson.dumps(img_to_meta(img))

change_mapping = {
    'input.jpg': get_image
}
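
# Pipeline: stream the source shards in order (no shard shuffling), decode with
# load_mapping, transform with change_mapping, then iterate via a multi-worker
# WebLoader and re-write the surviving samples with ShardWriter (max 10,000 per shard).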
def func(wds_dataset_str, **kwargs):
    ds = (wds.WebDataset(wds_dataset_str, shardshuffle=False)
          .map_dict(**load_mapping)
          .map_dict(**change_mapping)
          .to_tuple('input.jpg', 'output.txt'))
    dl = wds.WebLoader(ds, batch_size=None, num_workers=48, prefetch_factor=16, **kwargs)
    writer = wds.ShardWriter('%05d.tar', 10000)
    for img, txt in tqdm(dl):
        img_str, meta = img
        if img_str is None:
            continue
        sample = {
            '__key__': f'{writer.count:08}',
            'jpg': img_str,
            'txt': txt,
            'json': meta
        }
        writer.write(sample)
    # Close the writer so the final (partial) shard is flushed to disk.
    writer.close()
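
# The input spec uses WebDataset brace notation to enumerate the source shards.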
if __name__ == '__main__':
    func('../conceptual-captions-12m-webdataset/{0..127}/{00000..4}.tar')