Pedro Cuenca committed on
Commit 3b508e3
1 Parent(s): 6047b49

Replace notebooks with the correct versions.

dev/encoding/vqgan-jax-encoding-streaming.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
dev/encoding/vqgan-jax-encoding-webdataset.ipynb CHANGED
@@ -5,7 +5,7 @@
  "id": "d0b72877",
  "metadata": {},
  "source": [
- "# vqgan-jax-encoding-alamy"
+ "# VQGAN JAX Encoding for `webdataset`"
  ]
  },
  {
@@ -13,12 +13,14 @@
  "id": "ba7b31e6",
  "metadata": {},
  "source": [
- "Encoding notebook for Alamy dataset."
+ "This notebook shows how to pre-encode images to token sequences using JAX, VQGAN and a dataset in the [`webdataset` format](https://webdataset.github.io/webdataset/).\n",
+ "\n",
+ "This example uses a small subset of YFCC100M we created for testing, but it should be easy to adapt to any other image/caption dataset in the `webdataset` format."
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
  "id": "3b59489e",
  "metadata": {},
  "outputs": [],
@@ -46,44 +48,83 @@
  "## Dataset and Parameters"
  ]
  },
+ {
+ "cell_type": "markdown",
+ "id": "9822850f",
+ "metadata": {},
+ "source": [
+ "The following is the list of shards we'll process. We hardcode the length of data so that we can see nice progress bars using `tqdm`."
+ ]
+ },
  {
  "cell_type": "code",
  "execution_count": null,
- "id": "13c6631b",
+ "id": "1265dbfe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "shards = 'https://huggingface.co/datasets/dalle-mini/YFCC100M_OpenAI_subset/resolve/main/data/shard-{0000..0008}.tar'\n",
+ "length = 8320"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7e38fa14",
+ "metadata": {},
+ "source": [
+ "If we are extra cautious or our server is unreliable, we can enable retries by providing a custom `curl` retrieval command:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4c8c5960",
  "metadata": {},
  "outputs": [],
  "source": [
- "shards = 'https://s3.us-west-1.wasabisys.com/doodlebot-wasabi/datasets/alamy/webdataset/alamy-{000..895}.tar'\n",
- "\n",
  "# Enable curl retries to try to work around temporary network / server errors.\n",
  "# This shouldn't be necessary when using reliable servers.\n",
- "shards = f'pipe:curl -s --retry 5 --retry-delay 5 -L {shards} || true'\n",
- "\n",
- "length = 44710810 # estimate\n",
- "\n",
+ "# shards = f'pipe:curl -s --retry 5 --retry-delay 5 -L {shards} || true'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "13c6631b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
  "from pathlib import Path\n",
  "\n",
  "# Output directory for encoded files\n",
- "encoded_output = Path.home()/'data'/'alamy'/'encoded'\n",
+ "encoded_output = Path.home()/'data'/'wds'/'encoded'\n",
  "\n",
  "batch_size = 128 # Per device\n",
- "num_workers = 8 # Using larger numbers seemed to be less reliable in this case."
+ "num_workers = 8 # For parallel processing"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
  "id": "3435fb85",
  "metadata": {},
  "outputs": [],
  "source": [
- "bs = batch_size * jax.device_count() # Use a smaller size for testing\n",
+ "bs = batch_size * jax.device_count() # You can use a smaller size while testing\n",
  "batches = math.ceil(length / bs)"
  ]
  },
+ {
+ "cell_type": "markdown",
+ "id": "88598e4b",
+ "metadata": {},
+ "source": [
+ "Image processing"
+ ]
+ },
  {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
  "id": "669b35df",
  "metadata": {},
  "outputs": [],
@@ -98,97 +139,125 @@
  " center_crop,\n",
  " T.ToTensor(),\n",
  " lambda t: t.permute(1, 2, 0) # Reorder, we need dimensions last\n",
- "])\n",
+ "])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a185e90c",
+ "metadata": {},
+ "source": [
+ "Caption preparation.\n",
  "\n",
- "# Is there a shortcut for this?\n",
- "def extract_from_json(item):\n",
- " item['caption'] = item['json']['caption']\n",
- " item['url'] = item['json']['url']\n",
- " return item"
+ "Note that we receive the contents of the `json` structure, which will be replaced by the string we return.\n",
+ "If we want to keep other fields inside `json`, we can add `caption` as a new field."
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
+ "id": "423ee10e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_caption(item):\n",
+ " title = item['title_clean'].strip()\n",
+ " description = item['description_clean'].strip()\n",
+ " if len(title) > 0 and title[-1] not in '.!?': title += '.'\n",
+ " return f'{title} {description}'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8d3a95db",
+ "metadata": {},
+ "source": [
+ "When an error occurs (a download is disconnected, an image cannot be decoded, etc) the process stops with an exception. We can use one of the exception handlers provided by the `webdataset` library, such as `wds.warn_and_continue` or `wds.ignore_and_continue` to ignore the offending entry and keep iterating.\n",
+ "\n",
+ "**IMPORTANT WARNING:** Do not use error handlers to ignore exceptions until you have tested that your processing pipeline works fine. Otherwise, the process will continue trying to find a valid entry, and it will consume your whole dataset without doing any work.\n",
+ "\n",
+ "We can also create our custom exception handler as demonstrated here:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
  "id": "369d9719",
  "metadata": {},
  "outputs": [],
  "source": [
- "# Log exceptions to a hardcoded file\n",
+ "# UNUSED - Log exceptions to a file\n",
  "def ignore_and_log(exn):\n",
  " with open('errors.txt', 'a') as f:\n",
- " f.write(f'{exn}\\n')\n",
- " return True\n",
- "\n",
+ " f.write(f'{repr(exn)}\\n')\n",
+ " return True"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "27de1414",
+ "metadata": {},
+ "outputs": [],
+ "source": [
  "# Or simply use `wds.ignore_and_continue`\n",
- "exception_handler = ignore_and_log\n",
  "exception_handler = wds.warn_and_continue"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "id": "5149b6d5",
  "metadata": {},
  "outputs": [],
  "source": [
  "dataset = wds.WebDataset(shards,\n",
- " length=batches, # Hint so `len` is implemented\n",
- " shardshuffle=False, # Keep same order for encoded files for easier bookkeeping\n",
- " handler=exception_handler, # Ignore read errors instead of failing. See also: `warn_and_continue`\n",
+ " length=batches, # Hint so `len` is implemented\n",
+ " shardshuffle=False, # Keep same order for encoded files for easier bookkeeping. Set to `True` for training.\n",
+ " handler=exception_handler, # Ignore read errors instead of failing.\n",
  ")\n",
  "\n",
  "dataset = (dataset \n",
  " .decode('pil') # decode image with PIL\n",
- " .map(extract_from_json)\n",
- " .map_dict(jpg=preprocess_image, handler=exception_handler)\n",
- " .to_tuple('url', 'jpg', 'caption') # filter to keep only url (for reference), image, caption.\n",
+ "# .map_dict(jpg=preprocess_image, json=create_caption, handler=exception_handler) # Process fields with functions defined above\n",
+ " .map_dict(jpg=preprocess_image, json=create_caption) # Process fields with functions defined above\n",
+ " .to_tuple('__key__', 'jpg', 'json') # filter to keep only key (for reference), image, caption.\n",
  " .batched(bs)) # better to batch in the dataset (but we could also do it in the dataloader) - this arg does not affect speed and we could remove it"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
  "id": "8cac98cb",
  "metadata": {
  "scrolled": true
  },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "CPU times: user 8min 26s, sys: 12.5 s, total: 8min 38s\n",
- "Wall time: 14.4 s\n"
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "%%time\n",
- "urls, images, captions = next(iter(dataset))"
+ "keys, images, captions = next(iter(dataset))"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
  "id": "cd268fbf",
  "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "torch.Size([1024, 256, 256, 3])"
- ]
- },
- "execution_count": 7,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
  "source": [
  "images.shape"
  ]
  },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c24693c0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "T.ToPILImage()(images[0].permute(2, 0, 1))"
+ ]
+ },
  {
  "cell_type": "markdown",
  "id": "44d50a51",
@@ -199,7 +268,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
  "id": "e2df5e13",
  "metadata": {},
  "outputs": [],
@@ -217,7 +286,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
  "id": "2fcf01d7",
  "metadata": {},
  "outputs": [],
@@ -235,20 +304,12 @@
  },
  {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": null,
  "id": "47a8b818",
  "metadata": {
  "scrolled": true
  },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Working with z of shape (1, 256, 16, 16) = 65536 dimensions.\n"
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "model = VQModel.from_pretrained(\"flax-community/vqgan_f16_16384\")"
  ]
@@ -271,7 +332,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
  "id": "6686b004",
  "metadata": {},
  "outputs": [],
@@ -282,7 +343,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
  "id": "322a4619",
  "metadata": {},
  "outputs": [],
@@ -304,7 +365,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 13,
+ "execution_count": null,
  "id": "ff6c10d4",
  "metadata": {},
  "outputs": [],
@@ -320,7 +381,7 @@
  " # - Save each batch after processing.\n",
  " # - Keep the file open until we are done with it.\n",
  " file = None \n",
- " for n, (urls, images, captions) in enumerate(tqdm(dataloader)):\n",
+ " for n, (keys, images, captions) in enumerate(tqdm(dataloader)):\n",
  " if (n % save_every == 0):\n",
  " if file is not None:\n",
  " file.close()\n",
@@ -332,7 +393,7 @@
  " encoded = encoded.reshape(-1, encoded.shape[-1])\n",
  "\n",
  " encoded_as_string = list(map(lambda item: np.array2string(item, separator=',', max_line_width=50000, formatter={'int':lambda x: str(x)}), encoded))\n",
- " batch_df = pd.DataFrame.from_dict({\"url\": urls, \"caption\": captions, \"encoding\": encoded_as_string})\n",
+ " batch_df = pd.DataFrame.from_dict({\"key\": keys, \"caption\": captions, \"encoding\": encoded_as_string})\n",
  " batch_df.to_json(file, orient='records', lines=True)"
  ]
  },
@@ -346,7 +407,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 14,
+ "execution_count": null,
  "id": "96222bb4",
  "metadata": {},
  "outputs": [],
@@ -359,15 +420,7 @@
  "execution_count": null,
  "id": "7704863d",
  "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 2%|█▌ | 1085/43663 [31:58<20:43:42, 1.75s/it]"
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "encode_captioned_dataset(dl, encoded_output, save_every=save_every)"
  ]
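For reference, the pieces added in this diff assemble into roughly the following pipeline. This is a sketch, not part of the commit: the imports and the `Resize`/`CenterCrop` steps of `preprocess_image` are assumptions (the diff only shows the tail of that transform; 256×256 is consistent with the f16 VQGAN used below), and the `length=` keyword assumes the older `webdataset` API the notebook was written against.

```python
# Sketch assembled from the added cells above; assumes the webdataset
# version the notebook targets (WebDataset still accepts `length=`).
import math

import jax
import torchvision.transforms as T
import webdataset as wds

shards = 'https://huggingface.co/datasets/dalle-mini/YFCC100M_OpenAI_subset/resolve/main/data/shard-{0000..0008}.tar'
length = 8320

batch_size = 128                      # Per device
bs = batch_size * jax.device_count()  # Global batch size
batches = math.ceil(length / bs)

# ASSUMPTION: the resize/crop steps are not shown in the diff; 256x256
# matches the VQGAN f16 input resolution.
preprocess_image = T.Compose([
    T.Resize(256),
    T.CenterCrop(256),
    T.ToTensor(),
    lambda t: t.permute(1, 2, 0),     # Reorder, we need dimensions last
])

def create_caption(item):
    # `item` is the decoded `json` field; the returned string replaces it.
    title = item['title_clean'].strip()
    description = item['description_clean'].strip()
    if len(title) > 0 and title[-1] not in '.!?':
        title += '.'
    return f'{title} {description}'

dataset = wds.WebDataset(shards,
                         length=batches,      # Hint so `len` is implemented
                         shardshuffle=False,  # Keep shard order for bookkeeping
                         handler=wds.warn_and_continue)
dataset = (dataset
           .decode('pil')                                       # decode image with PIL
           .map_dict(jpg=preprocess_image, json=create_caption) # process fields
           .to_tuple('__key__', 'jpg', 'json')                  # key, image, caption
           .batched(bs))

keys, images, captions = next(iter(dataset))
```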
 
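A minimal sketch of reading the encoded output back, again not part of the commit: the filename is hypothetical (the naming scheme is set in a cell this diff does not show), and the round trip relies on `np.array2string(separator=',')` producing JSON-parseable strings.

```python
import json
from pathlib import Path

import numpy as np
import pandas as pd

# Matches the `encoded_output` default in the notebook.
encoded_output = Path.home()/'data'/'wds'/'encoded'

# HYPOTHETICAL filename, for illustration only.
df = pd.read_json(encoded_output/'encoded_0.json', orient='records', lines=True)

# Each `encoding` is a string such as '[310,835,...]', so it parses as JSON.
df['encoding'] = df['encoding'].map(lambda s: np.array(json.loads(s)))
```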