iulia-elisa committed on
Commit
33871a9
1 Parent(s): 998792a

Upload 8 files

Datasets-Structure.md ADDED
@@ -0,0 +1,67 @@
+ # Datasets-Structure
+
+ **(YOLOv8 format)**
+
+ The YOLOv8 dataset for segmentation is usually structured as follows:
+ ```
+ yolo_dataset/
+
+ ├── train/
+ │   ├── images/
+ │   │   └── 🖼️ img_n               # Example image
+ │   │
+ │   └── labels/
+ │       └── 📄 img_n_labels.txt    # Example labels file
+
+ ├── valid/
+ │   ... (similar)
+
+ └── 📄 data.yaml
+ ```
+
+ Each `img_n_labels.txt` file contains multiple annotations (one per line), each with its class ID and segmentation coordinates:
+
+ `<class-index> <x1> <y1> <x2> <y2> ... <xn> <yn>`
+
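As a rough illustration, a label file in this format can be parsed with a few lines of Python. This is only a sketch (the path is hypothetical); `utils.py` in this commit implements the same idea as `read_yolo_annotations`.

```python
from pathlib import Path

def read_yolo_labels(label_path):
    """Parse one YOLOv8 segmentation label file into (class_id, polygon) pairs."""
    annotations = []
    for line in Path(label_path).read_text().splitlines():
        parts = line.split()
        if not parts:
            continue
        class_id = int(parts[0])
        coords = list(map(float, parts[1:]))
        # Coordinates are normalized (x, y) pairs describing the polygon.
        polygon = list(zip(coords[0::2], coords[1::2]))
        annotations.append((class_id, polygon))
    return annotations

# Hypothetical usage:
# annotations = read_yolo_labels("yolo_dataset/train/labels/img_0_labels.txt")
```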
+ The file `data.yaml` contains keys such as the ones below (a minimal loading sketch follows the list):
+ - names (the class names)
+ - nc (number of classes)
+ - train (path/to/train/images/)
+ - val (path/to/val/images/)
+
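For reference, a minimal sketch of reading these keys, assuming PyYAML is installed and using a hypothetical path:

```python
import yaml  # PyYAML, assumed to be installed

with open("yolo_dataset/data.yaml") as f:  # hypothetical path
    data_cfg = yaml.safe_load(f)

print(data_cfg["nc"], data_cfg["names"])   # number of classes and their names
print(data_cfg["train"], data_cfg["val"])  # image directories for each split
```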
+
+ **(COCO Instance Segmentation format)**
+ ```
+ coco_dataset/
+
+ ├── train/
+ │   ├── 🖼️ img_n             # Example image
+ │   └── 📄 annotations.json  # The annotations JSON file
+
+ └── valid/
+     └── ... (similar)
+
+ ```
+
+ The annotations JSON file contains a dictionary of lists (a minimal loading sketch follows the field list):
+
+ - images (a list of dictionaries)
+   - id - image ID
+   - file_name
+   - height
+   - width
+
+ - annotations (a list of dictionaries)
+   - id
+   - image_id
+   - category_id
+   - bbox
+   - area
+   - segmentation (a segmentation polygon)
+   - iscrowd
+
+ - categories (a list of dictionaries)
+   - id
+   - name
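A minimal sketch of loading such a file with the standard library; note that in this commit the per-split file is actually named `_annotations.coco.json` (see `utils.split_to_df`), and the path below is hypothetical.

```python
import json
from collections import defaultdict

with open("coco_dataset/train/annotations.json") as f:  # hypothetical path
    coco = json.load(f)

# Map category ids to names and group annotations by the image they belong to.
id2label = {cat["id"]: cat["name"] for cat in coco["categories"]}
anns_per_image = defaultdict(list)
for ann in coco["annotations"]:
    anns_per_image[ann["image_id"]].append(ann)

first = coco["images"][0]
print(first["file_name"], len(anns_per_image[first["id"]]), "annotations")
```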
coco_format_to_parquet.ipynb ADDED
@@ -0,0 +1,988 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "a333665e-77ee-43af-b539-8b2bc87c008c",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import json\n",
11
+ "import pandas as pd\n",
12
+ "from pathlib import Path\n",
13
+ "from PIL import Image, ImageDraw, ImageFile\n",
14
+ "import cv2\n",
15
+ "import io \n",
16
+ "import base64\n",
17
+ "import numpy as np\n",
18
+ "from pycocotools.coco import COCO\n",
19
+ "import os\n",
20
+ "import matplotlib.pyplot as plt\n",
21
+ "import matplotlib.patches as patches\n",
22
+ "from PIL import Image\n",
23
+ "ImageFile.LOAD_TRUNCATED_IMAGES = True\n",
24
+ "\n",
25
+ "import utils\n",
26
+ "\n",
27
+ "dataset_dir = './mskf_0/'\n",
28
+ "train_df, train_coco_data = utils.split_to_df(dataset_dir, 'train/')\n",
29
+ "valid_df, valid_coco_data = utils.split_to_df(dataset_dir, 'valid')"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": 2,
35
+ "id": "0e0d5a99-4eba-49d4-931b-0128691631b1",
36
+ "metadata": {
37
+ "scrolled": true
38
+ },
39
+ "outputs": [
40
+ {
41
+ "data": {
42
+ "text/plain": [
43
+ "{'id': 1,\n",
44
+ " 'license': 1,\n",
45
+ " 'file_name': 'S0505210301_M_png.rf.e47187e88c167fad1db290b0214e2175.jpg',\n",
46
+ " 'height': 512,\n",
47
+ " 'width': 512,\n",
48
+ " 'date_captured': '2024-05-08T06:13:06+00:00'}"
49
+ ]
50
+ },
51
+ "execution_count": 2,
52
+ "metadata": {},
53
+ "output_type": "execute_result"
54
+ }
55
+ ],
56
+ "source": [
57
+ "train_coco_data['images'][0]"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": 3,
63
+ "id": "df660d83-5afb-4b7c-83e4-f1f9d68577cd",
64
+ "metadata": {},
65
+ "outputs": [
66
+ {
67
+ "data": {
68
+ "text/plain": [
69
+ "array([1, 2, 3, 4, 5])"
70
+ ]
71
+ },
72
+ "execution_count": 3,
73
+ "metadata": {},
74
+ "output_type": "execute_result"
75
+ }
76
+ ],
77
+ "source": [
78
+ "np.unique(train_df['category_id'])"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 4,
84
+ "id": "51f26d8d-0116-48fe-8cea-d070350df333",
85
+ "metadata": {
86
+ "scrolled": true
87
+ },
88
+ "outputs": [
89
+ {
90
+ "data": {
91
+ "text/plain": [
92
+ "{'image_id': array([<class 'int'>], dtype=object),\n",
93
+ " 'category_id': array([<class 'int'>], dtype=object),\n",
94
+ " 'bbox': array([<class 'list'>], dtype=object),\n",
95
+ " 'area': array([<class 'float'>], dtype=object),\n",
96
+ " 'segmentation': array([<class 'numpy.ndarray'>], dtype=object),\n",
97
+ " 'iscrowd': array([<class 'int'>], dtype=object),\n",
98
+ " 'width': array([<class 'int'>], dtype=object),\n",
99
+ " 'height': array([<class 'int'>], dtype=object),\n",
100
+ " 'observation': array([<class 'str'>], dtype=object),\n",
101
+ " 'image': array([<class 'numpy.ndarray'>], dtype=object),\n",
102
+ " 'annot_id': array([<class 'int'>], dtype=object)}"
103
+ ]
104
+ },
105
+ "execution_count": 4,
106
+ "metadata": {},
107
+ "output_type": "execute_result"
108
+ }
109
+ ],
110
+ "source": [
111
+ "unique_types = {col: train_df[col].apply(type).unique() for col in train_df.columns}\n",
112
+ "unique_types"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "code",
117
+ "execution_count": 5,
118
+ "id": "6f464df1-9e86-47ed-a8c6-d5116a71085e",
119
+ "metadata": {
120
+ "scrolled": true
121
+ },
122
+ "outputs": [],
123
+ "source": [
124
+ "# train_df.head(20)"
125
+ ]
126
+ },
127
+ {
128
+ "cell_type": "code",
129
+ "execution_count": 6,
130
+ "id": "3f4afc2a-2ee4-458d-95c0-19e8497653da",
131
+ "metadata": {
132
+ "scrolled": true
133
+ },
134
+ "outputs": [],
135
+ "source": [
136
+ "from collections import defaultdict\n",
137
+ "from datasets import Dataset, Features, Sequence\n",
138
+ "import datasets\n",
139
+ "from io import BytesIO\n",
140
+ "\n",
141
+ "cats_to_colours = { 1:('central-ring', (1,252,214)), \n",
142
+ " 2:('other', (255,128,1)),\n",
143
+ " 3:('read-out-streak', (20, 77, 158)), \n",
144
+ " 4:('smoke-ring', (159,21,100)),\n",
145
+ " 5:('star-loop', (255, 188, 248))}\n",
146
+ "\n",
147
+ "train_dataset=utils.df_to_dataset_dict(train_df, train_coco_data, cats_to_colours)\n",
148
+ "valid_dataset=utils.df_to_dataset_dict(valid_df, valid_coco_data, cats_to_colours)"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": 7,
154
+ "id": "e3ed2ab1-8632-4e49-a661-c306554bd010",
155
+ "metadata": {
156
+ "scrolled": true
157
+ },
158
+ "outputs": [
159
+ {
160
+ "data": {
161
+ "application/vnd.jupyter.widget-view+json": {
162
+ "model_id": "6ebf9675899e4ffcbcb3ebebbe7d6147",
163
+ "version_major": 2,
164
+ "version_minor": 0
165
+ },
166
+ "text/plain": [
167
+ "Uploading the dataset shards: 0%| | 0/1 [00:00<?, ?it/s]"
168
+ ]
169
+ },
170
+ "metadata": {},
171
+ "output_type": "display_data"
172
+ },
173
+ {
174
+ "data": {
175
+ "application/vnd.jupyter.widget-view+json": {
176
+ "model_id": "2bf3a5fcfc914785b99bc6e6892531be",
177
+ "version_major": 2,
178
+ "version_minor": 0
179
+ },
180
+ "text/plain": [
181
+ "Map: 0%| | 0/87 [00:00<?, ? examples/s]"
182
+ ]
183
+ },
184
+ "metadata": {},
185
+ "output_type": "display_data"
186
+ },
187
+ {
188
+ "data": {
189
+ "application/vnd.jupyter.widget-view+json": {
190
+ "model_id": "871d1f3b21d242cf86e1d1f20295fa7d",
191
+ "version_major": 2,
192
+ "version_minor": 0
193
+ },
194
+ "text/plain": [
195
+ "Creating parquet from Arrow format: 0%| | 0/1 [00:00<?, ?ba/s]"
196
+ ]
197
+ },
198
+ "metadata": {},
199
+ "output_type": "display_data"
200
+ },
201
+ {
202
+ "data": {
203
+ "application/vnd.jupyter.widget-view+json": {
204
+ "model_id": "9376ea5e4c174f31827a0dd50a0af9b2",
205
+ "version_major": 2,
206
+ "version_minor": 0
207
+ },
208
+ "text/plain": [
209
+ "Uploading the dataset shards: 0%| | 0/1 [00:00<?, ?it/s]"
210
+ ]
211
+ },
212
+ "metadata": {},
213
+ "output_type": "display_data"
214
+ },
215
+ {
216
+ "data": {
217
+ "application/vnd.jupyter.widget-view+json": {
218
+ "model_id": "f5a6d9c4be554f4f937b1afd82f934b9",
219
+ "version_major": 2,
220
+ "version_minor": 0
221
+ },
222
+ "text/plain": [
223
+ "Map: 0%| | 0/88 [00:00<?, ? examples/s]"
224
+ ]
225
+ },
226
+ "metadata": {},
227
+ "output_type": "display_data"
228
+ },
229
+ {
230
+ "data": {
231
+ "application/vnd.jupyter.widget-view+json": {
232
+ "model_id": "c2a692042a7748f9adea02a38012bb4c",
233
+ "version_major": 2,
234
+ "version_minor": 0
235
+ },
236
+ "text/plain": [
237
+ "Creating parquet from Arrow format: 0%| | 0/1 [00:00<?, ?ba/s]"
238
+ ]
239
+ },
240
+ "metadata": {},
241
+ "output_type": "display_data"
242
+ },
243
+ {
244
+ "data": {
245
+ "application/vnd.jupyter.widget-view+json": {
246
+ "model_id": "e2d0aec5b6f14da889ebb4876d6a6cc3",
247
+ "version_major": 2,
248
+ "version_minor": 0
249
+ },
250
+ "text/plain": [
251
+ "README.md: 0%| | 0.00/5.71k [00:00<?, ?B/s]"
252
+ ]
253
+ },
254
+ "metadata": {},
255
+ "output_type": "display_data"
256
+ },
257
+ {
258
+ "data": {
259
+ "text/plain": [
260
+ "CommitInfo(commit_url='https://huggingface.co/datasets/iulia-elisa/XMM_OM_AI_dataset/commit/e4f969ebf7e6b84ece6a91380cbe2dd2fc669261', commit_message='Upload dataset', commit_description='', oid='e4f969ebf7e6b84ece6a91380cbe2dd2fc669261', pr_url=None, pr_revision=None, pr_num=None)"
261
+ ]
262
+ },
263
+ "execution_count": 7,
264
+ "metadata": {},
265
+ "output_type": "execute_result"
266
+ }
267
+ ],
268
+ "source": [
269
+ "from datasets import DatasetDict\n",
270
+ "\n",
271
+ "dataset_dict = DatasetDict({\n",
272
+ " 'train': train_dataset,\n",
273
+ " 'valid': valid_dataset\n",
274
+ "})\n",
275
+ "\n",
276
+ "dataset_dict.push_to_hub('iulia-elisa/XMM_OM_AI_dataset', private=False)"
277
+ ]
278
+ }
279
+ ],
280
+ "metadata": {
281
+ "kernelspec": {
282
+ "display_name": "env_py311",
283
+ "language": "python",
284
+ "name": "python3"
285
+ },
286
+ "language_info": {
287
+ "codemirror_mode": {
288
+ "name": "ipython",
289
+ "version": 3
290
+ },
291
+ "file_extension": ".py",
292
+ "mimetype": "text/x-python",
293
+ "name": "python",
294
+ "nbconvert_exporter": "python",
295
+ "pygments_lexer": "ipython3",
296
+ "version": "3.11.7"
297
+ },
298
+ "widgets": {
299
+ "application/vnd.jupyter.widget-state+json": {
300
+ "state": {
301
+ "048f277744ff4c8f9fe447563d554654": {
302
+ "model_module": "@jupyter-widgets/base",
303
+ "model_module_version": "2.0.0",
304
+ "model_name": "LayoutModel",
305
+ "state": {}
306
+ },
307
+ "08cf7cea944a4322ae9b6ef361862bb4": {
308
+ "model_module": "@jupyter-widgets/controls",
309
+ "model_module_version": "2.0.0",
310
+ "model_name": "HTMLModel",
311
+ "state": {
312
+ "layout": "IPY_MODEL_99548805c93d4e2bbccaf691aba88980",
313
+ "style": "IPY_MODEL_23e0985cd92e4db1a9006cab93b41a35",
314
+ "value": "Creating parquet from Arrow format: 100%"
315
+ }
316
+ },
317
+ "0e36fda6f39745ef85e3846289e165eb": {
318
+ "model_module": "@jupyter-widgets/base",
319
+ "model_module_version": "2.0.0",
320
+ "model_name": "LayoutModel",
321
+ "state": {}
322
+ },
323
+ "0f6e7836683d40efa731e77aa6746fa5": {
324
+ "model_module": "@jupyter-widgets/base",
325
+ "model_module_version": "2.0.0",
326
+ "model_name": "LayoutModel",
327
+ "state": {}
328
+ },
329
+ "11970b4573344669adcfd5d9df924aea": {
330
+ "model_module": "@jupyter-widgets/controls",
331
+ "model_module_version": "2.0.0",
332
+ "model_name": "FloatProgressModel",
333
+ "state": {
334
+ "bar_style": "success",
335
+ "layout": "IPY_MODEL_e6ec6e37a6304a2590e81ec2b4a01e15",
336
+ "max": 1,
337
+ "style": "IPY_MODEL_a2c1ef897ce849d9a1f773724f617d40",
338
+ "value": 1
339
+ }
340
+ },
341
+ "1c7cfbd83be64f8eafa6d8ad98b08bc8": {
342
+ "model_module": "@jupyter-widgets/controls",
343
+ "model_module_version": "2.0.0",
344
+ "model_name": "HTMLStyleModel",
345
+ "state": {
346
+ "description_width": "",
347
+ "font_size": null,
348
+ "text_color": null
349
+ }
350
+ },
351
+ "1fa82ba064f548319607a027721117d8": {
352
+ "model_module": "@jupyter-widgets/controls",
353
+ "model_module_version": "2.0.0",
354
+ "model_name": "HTMLModel",
355
+ "state": {
356
+ "layout": "IPY_MODEL_048f277744ff4c8f9fe447563d554654",
357
+ "style": "IPY_MODEL_5874491ff76a470e9db86621e2e5d1a7",
358
+ "value": " 1/1 [00:00&lt;00:00, 9.50ba/s]"
359
+ }
360
+ },
361
+ "23640986547d47b3900f002fe5522f39": {
362
+ "model_module": "@jupyter-widgets/base",
363
+ "model_module_version": "2.0.0",
364
+ "model_name": "LayoutModel",
365
+ "state": {}
366
+ },
367
+ "23e0985cd92e4db1a9006cab93b41a35": {
368
+ "model_module": "@jupyter-widgets/controls",
369
+ "model_module_version": "2.0.0",
370
+ "model_name": "HTMLStyleModel",
371
+ "state": {
372
+ "description_width": "",
373
+ "font_size": null,
374
+ "text_color": null
375
+ }
376
+ },
377
+ "2bf3a5fcfc914785b99bc6e6892531be": {
378
+ "model_module": "@jupyter-widgets/controls",
379
+ "model_module_version": "2.0.0",
380
+ "model_name": "HBoxModel",
381
+ "state": {
382
+ "children": [
383
+ "IPY_MODEL_47be27d426034da9a97c49bae4626ddc",
384
+ "IPY_MODEL_71f517a070b643b3a5b6ad86c3710f95",
385
+ "IPY_MODEL_fb1c614683cf4531b6ac405a9373d74d"
386
+ ],
387
+ "layout": "IPY_MODEL_db5e0e8d61d742ef823112ebe06cd33b"
388
+ }
389
+ },
390
+ "2d56d23ee7804546ab50c8ceaddd2d05": {
391
+ "model_module": "@jupyter-widgets/controls",
392
+ "model_module_version": "2.0.0",
393
+ "model_name": "HTMLModel",
394
+ "state": {
395
+ "layout": "IPY_MODEL_0e36fda6f39745ef85e3846289e165eb",
396
+ "style": "IPY_MODEL_a591c55a93f84703b8070bcf65e957e6",
397
+ "value": "Map: 100%"
398
+ }
399
+ },
400
+ "2f221cdc0bb44e23ab02f00a658801bf": {
401
+ "model_module": "@jupyter-widgets/controls",
402
+ "model_module_version": "2.0.0",
403
+ "model_name": "FloatProgressModel",
404
+ "state": {
405
+ "bar_style": "success",
406
+ "layout": "IPY_MODEL_e5f9516196b0461daf95817bb9408d7b",
407
+ "max": 5714,
408
+ "style": "IPY_MODEL_c38b2aa4bbd04b248b23de98a3fb3f04",
409
+ "value": 5714
410
+ }
411
+ },
412
+ "2f4e9a17492b430eb38dbf580a8f6112": {
413
+ "model_module": "@jupyter-widgets/controls",
414
+ "model_module_version": "2.0.0",
415
+ "model_name": "HTMLModel",
416
+ "state": {
417
+ "layout": "IPY_MODEL_692de2a4a32e4c009f021c3c604ee245",
418
+ "style": "IPY_MODEL_1c7cfbd83be64f8eafa6d8ad98b08bc8",
419
+ "value": " 1/1 [00:02&lt;00:00, 2.68s/it]"
420
+ }
421
+ },
422
+ "326fe11988d74d66b99b448cf2001714": {
423
+ "model_module": "@jupyter-widgets/base",
424
+ "model_module_version": "2.0.0",
425
+ "model_name": "LayoutModel",
426
+ "state": {}
427
+ },
428
+ "3723e0afbc944b679032254d2b9d543c": {
429
+ "model_module": "@jupyter-widgets/base",
430
+ "model_module_version": "2.0.0",
431
+ "model_name": "LayoutModel",
432
+ "state": {}
433
+ },
434
+ "38f0c8d251154e15ba2a621db92bbf89": {
435
+ "model_module": "@jupyter-widgets/controls",
436
+ "model_module_version": "2.0.0",
437
+ "model_name": "HTMLStyleModel",
438
+ "state": {
439
+ "description_width": "",
440
+ "font_size": null,
441
+ "text_color": null
442
+ }
443
+ },
444
+ "391d57117eb649beae65ed7a6a79a18b": {
445
+ "model_module": "@jupyter-widgets/base",
446
+ "model_module_version": "2.0.0",
447
+ "model_name": "LayoutModel",
448
+ "state": {}
449
+ },
450
+ "401d5cb498aa42ccbf4e7bc4aafacb11": {
451
+ "model_module": "@jupyter-widgets/base",
452
+ "model_module_version": "2.0.0",
453
+ "model_name": "LayoutModel",
454
+ "state": {}
455
+ },
456
+ "46cba72f63ef469b82a4b1a96fb796ee": {
457
+ "model_module": "@jupyter-widgets/controls",
458
+ "model_module_version": "2.0.0",
459
+ "model_name": "HTMLStyleModel",
460
+ "state": {
461
+ "description_width": "",
462
+ "font_size": null,
463
+ "text_color": null
464
+ }
465
+ },
466
+ "47be27d426034da9a97c49bae4626ddc": {
467
+ "model_module": "@jupyter-widgets/controls",
468
+ "model_module_version": "2.0.0",
469
+ "model_name": "HTMLModel",
470
+ "state": {
471
+ "layout": "IPY_MODEL_911d271807e8414da96305fdd80796a7",
472
+ "style": "IPY_MODEL_73726b2d576c4644acaf676b9b9ec34f",
473
+ "value": "Map: 100%"
474
+ }
475
+ },
476
+ "488243dfb6574985bcbf7e790134e744": {
477
+ "model_module": "@jupyter-widgets/controls",
478
+ "model_module_version": "2.0.0",
479
+ "model_name": "ProgressStyleModel",
480
+ "state": {
481
+ "description_width": ""
482
+ }
483
+ },
484
+ "4b35ce2957784c83a9444926b0a7e976": {
485
+ "model_module": "@jupyter-widgets/controls",
486
+ "model_module_version": "2.0.0",
487
+ "model_name": "HTMLModel",
488
+ "state": {
489
+ "layout": "IPY_MODEL_b90c8d0425ac4bcca937ea6b545e48cd",
490
+ "style": "IPY_MODEL_46cba72f63ef469b82a4b1a96fb796ee",
491
+ "value": " 88/88 [00:00&lt;00:00, 819.47 examples/s]"
492
+ }
493
+ },
494
+ "4f42920d671545ed8e0e7c5ceaedf8ed": {
495
+ "model_module": "@jupyter-widgets/controls",
496
+ "model_module_version": "2.0.0",
497
+ "model_name": "HTMLStyleModel",
498
+ "state": {
499
+ "description_width": "",
500
+ "font_size": null,
501
+ "text_color": null
502
+ }
503
+ },
504
+ "52dfe57cd5b44d62889af8a4df9467e1": {
505
+ "model_module": "@jupyter-widgets/controls",
506
+ "model_module_version": "2.0.0",
507
+ "model_name": "ProgressStyleModel",
508
+ "state": {
509
+ "description_width": ""
510
+ }
511
+ },
512
+ "5874491ff76a470e9db86621e2e5d1a7": {
513
+ "model_module": "@jupyter-widgets/controls",
514
+ "model_module_version": "2.0.0",
515
+ "model_name": "HTMLStyleModel",
516
+ "state": {
517
+ "description_width": "",
518
+ "font_size": null,
519
+ "text_color": null
520
+ }
521
+ },
522
+ "59ca0f5d7fff4b52a7051b923253c66f": {
523
+ "model_module": "@jupyter-widgets/base",
524
+ "model_module_version": "2.0.0",
525
+ "model_name": "LayoutModel",
526
+ "state": {}
527
+ },
528
+ "5d9126ed84e347379beddb410cda4381": {
529
+ "model_module": "@jupyter-widgets/base",
530
+ "model_module_version": "2.0.0",
531
+ "model_name": "LayoutModel",
532
+ "state": {}
533
+ },
534
+ "657e2016f007499cafa8ea389a461201": {
535
+ "model_module": "@jupyter-widgets/controls",
536
+ "model_module_version": "2.0.0",
537
+ "model_name": "HTMLStyleModel",
538
+ "state": {
539
+ "description_width": "",
540
+ "font_size": null,
541
+ "text_color": null
542
+ }
543
+ },
544
+ "68409dd4c6a042899d8f32b8d426bd6c": {
545
+ "model_module": "@jupyter-widgets/controls",
546
+ "model_module_version": "2.0.0",
547
+ "model_name": "FloatProgressModel",
548
+ "state": {
549
+ "bar_style": "success",
550
+ "layout": "IPY_MODEL_5d9126ed84e347379beddb410cda4381",
551
+ "max": 1,
552
+ "style": "IPY_MODEL_e7c643f02df04f39a6232d4ef37dbe53",
553
+ "value": 1
554
+ }
555
+ },
556
+ "692de2a4a32e4c009f021c3c604ee245": {
557
+ "model_module": "@jupyter-widgets/base",
558
+ "model_module_version": "2.0.0",
559
+ "model_name": "LayoutModel",
560
+ "state": {}
561
+ },
562
+ "6ebf9675899e4ffcbcb3ebebbe7d6147": {
563
+ "model_module": "@jupyter-widgets/controls",
564
+ "model_module_version": "2.0.0",
565
+ "model_name": "HBoxModel",
566
+ "state": {
567
+ "children": [
568
+ "IPY_MODEL_d753a4db07294b83a86784bfc211dcaf",
569
+ "IPY_MODEL_68409dd4c6a042899d8f32b8d426bd6c",
570
+ "IPY_MODEL_a056610c2b23491aba3f37c38f0ad8c5"
571
+ ],
572
+ "layout": "IPY_MODEL_920bced53c3d42099f1a87eed19ce05e"
573
+ }
574
+ },
575
+ "71f517a070b643b3a5b6ad86c3710f95": {
576
+ "model_module": "@jupyter-widgets/controls",
577
+ "model_module_version": "2.0.0",
578
+ "model_name": "FloatProgressModel",
579
+ "state": {
580
+ "bar_style": "success",
581
+ "layout": "IPY_MODEL_dc80b5c47d4c4f9faab022f05b691228",
582
+ "max": 87,
583
+ "style": "IPY_MODEL_b9fb72eff10047289a9597d08009af95",
584
+ "value": 87
585
+ }
586
+ },
587
+ "73391c6e41d4483e93582af7934d1e54": {
588
+ "model_module": "@jupyter-widgets/controls",
589
+ "model_module_version": "2.0.0",
590
+ "model_name": "HTMLModel",
591
+ "state": {
592
+ "layout": "IPY_MODEL_e813397704674d81b3fe6dd3ce444e95",
593
+ "style": "IPY_MODEL_eaf483365e224158aad8fd8f604ac64b",
594
+ "value": "Uploading the dataset shards: 100%"
595
+ }
596
+ },
597
+ "73726b2d576c4644acaf676b9b9ec34f": {
598
+ "model_module": "@jupyter-widgets/controls",
599
+ "model_module_version": "2.0.0",
600
+ "model_name": "HTMLStyleModel",
601
+ "state": {
602
+ "description_width": "",
603
+ "font_size": null,
604
+ "text_color": null
605
+ }
606
+ },
607
+ "76f47b25344e4f1db6549e268af826ba": {
608
+ "model_module": "@jupyter-widgets/controls",
609
+ "model_module_version": "2.0.0",
610
+ "model_name": "HTMLModel",
611
+ "state": {
612
+ "layout": "IPY_MODEL_d45797a1fbc54c7cb7b5a863972fb4b1",
613
+ "style": "IPY_MODEL_cbad9576bda8439d8fa5ae8391b31ff9",
614
+ "value": " 5.71k/5.71k [00:00&lt;00:00, 1.12MB/s]"
615
+ }
616
+ },
617
+ "7d6080218e2a4888bdfd0c184b7c5ffc": {
618
+ "model_module": "@jupyter-widgets/base",
619
+ "model_module_version": "2.0.0",
620
+ "model_name": "LayoutModel",
621
+ "state": {}
622
+ },
623
+ "842b6cc23bcc485ba3d034f5f955f314": {
624
+ "model_module": "@jupyter-widgets/base",
625
+ "model_module_version": "2.0.0",
626
+ "model_name": "LayoutModel",
627
+ "state": {}
628
+ },
629
+ "871d1f3b21d242cf86e1d1f20295fa7d": {
630
+ "model_module": "@jupyter-widgets/controls",
631
+ "model_module_version": "2.0.0",
632
+ "model_name": "HBoxModel",
633
+ "state": {
634
+ "children": [
635
+ "IPY_MODEL_afbc23b673314807bd81dcc9bcf3e3df",
636
+ "IPY_MODEL_e5d66c90b5a842de84296f97abbdaae9",
637
+ "IPY_MODEL_1fa82ba064f548319607a027721117d8"
638
+ ],
639
+ "layout": "IPY_MODEL_0f6e7836683d40efa731e77aa6746fa5"
640
+ }
641
+ },
642
+ "911d271807e8414da96305fdd80796a7": {
643
+ "model_module": "@jupyter-widgets/base",
644
+ "model_module_version": "2.0.0",
645
+ "model_name": "LayoutModel",
646
+ "state": {}
647
+ },
648
+ "920bced53c3d42099f1a87eed19ce05e": {
649
+ "model_module": "@jupyter-widgets/base",
650
+ "model_module_version": "2.0.0",
651
+ "model_name": "LayoutModel",
652
+ "state": {}
653
+ },
654
+ "9376ea5e4c174f31827a0dd50a0af9b2": {
655
+ "model_module": "@jupyter-widgets/controls",
656
+ "model_module_version": "2.0.0",
657
+ "model_name": "HBoxModel",
658
+ "state": {
659
+ "children": [
660
+ "IPY_MODEL_73391c6e41d4483e93582af7934d1e54",
661
+ "IPY_MODEL_acf8c82fec8f4360877e2f1ad3d41e74",
662
+ "IPY_MODEL_2f4e9a17492b430eb38dbf580a8f6112"
663
+ ],
664
+ "layout": "IPY_MODEL_ca406b13430643bdab8a048bdf35dd75"
665
+ }
666
+ },
667
+ "93b9ea63c9ee44cbbe4a266b83a86083": {
668
+ "model_module": "@jupyter-widgets/controls",
669
+ "model_module_version": "2.0.0",
670
+ "model_name": "HTMLModel",
671
+ "state": {
672
+ "layout": "IPY_MODEL_391d57117eb649beae65ed7a6a79a18b",
673
+ "style": "IPY_MODEL_657e2016f007499cafa8ea389a461201",
674
+ "value": "README.md: 100%"
675
+ }
676
+ },
677
+ "99133f4e57fe4309b610cdb4ce2a90bd": {
678
+ "model_module": "@jupyter-widgets/base",
679
+ "model_module_version": "2.0.0",
680
+ "model_name": "LayoutModel",
681
+ "state": {}
682
+ },
683
+ "99548805c93d4e2bbccaf691aba88980": {
684
+ "model_module": "@jupyter-widgets/base",
685
+ "model_module_version": "2.0.0",
686
+ "model_name": "LayoutModel",
687
+ "state": {}
688
+ },
689
+ "9b8ccb3e67354701a09c2a18ace98ad6": {
690
+ "model_module": "@jupyter-widgets/controls",
691
+ "model_module_version": "2.0.0",
692
+ "model_name": "HTMLStyleModel",
693
+ "state": {
694
+ "description_width": "",
695
+ "font_size": null,
696
+ "text_color": null
697
+ }
698
+ },
699
+ "a056610c2b23491aba3f37c38f0ad8c5": {
700
+ "model_module": "@jupyter-widgets/controls",
701
+ "model_module_version": "2.0.0",
702
+ "model_name": "HTMLModel",
703
+ "state": {
704
+ "layout": "IPY_MODEL_3723e0afbc944b679032254d2b9d543c",
705
+ "style": "IPY_MODEL_9b8ccb3e67354701a09c2a18ace98ad6",
706
+ "value": " 1/1 [00:04&lt;00:00, 4.22s/it]"
707
+ }
708
+ },
709
+ "a2c1ef897ce849d9a1f773724f617d40": {
710
+ "model_module": "@jupyter-widgets/controls",
711
+ "model_module_version": "2.0.0",
712
+ "model_name": "ProgressStyleModel",
713
+ "state": {
714
+ "description_width": ""
715
+ }
716
+ },
717
+ "a33667021736405b82666b555a5493a8": {
718
+ "model_module": "@jupyter-widgets/controls",
719
+ "model_module_version": "2.0.0",
720
+ "model_name": "HTMLStyleModel",
721
+ "state": {
722
+ "description_width": "",
723
+ "font_size": null,
724
+ "text_color": null
725
+ }
726
+ },
727
+ "a39e3942259747fd9c7759cbbba73d3d": {
728
+ "model_module": "@jupyter-widgets/controls",
729
+ "model_module_version": "2.0.0",
730
+ "model_name": "FloatProgressModel",
731
+ "state": {
732
+ "bar_style": "success",
733
+ "layout": "IPY_MODEL_401d5cb498aa42ccbf4e7bc4aafacb11",
734
+ "max": 88,
735
+ "style": "IPY_MODEL_488243dfb6574985bcbf7e790134e744",
736
+ "value": 88
737
+ }
738
+ },
739
+ "a591c55a93f84703b8070bcf65e957e6": {
740
+ "model_module": "@jupyter-widgets/controls",
741
+ "model_module_version": "2.0.0",
742
+ "model_name": "HTMLStyleModel",
743
+ "state": {
744
+ "description_width": "",
745
+ "font_size": null,
746
+ "text_color": null
747
+ }
748
+ },
749
+ "abc65776f8504948b9e91ea432ebe8ef": {
750
+ "model_module": "@jupyter-widgets/base",
751
+ "model_module_version": "2.0.0",
752
+ "model_name": "LayoutModel",
753
+ "state": {}
754
+ },
755
+ "acf8c82fec8f4360877e2f1ad3d41e74": {
756
+ "model_module": "@jupyter-widgets/controls",
757
+ "model_module_version": "2.0.0",
758
+ "model_name": "FloatProgressModel",
759
+ "state": {
760
+ "bar_style": "success",
761
+ "layout": "IPY_MODEL_59ca0f5d7fff4b52a7051b923253c66f",
762
+ "max": 1,
763
+ "style": "IPY_MODEL_52dfe57cd5b44d62889af8a4df9467e1",
764
+ "value": 1
765
+ }
766
+ },
767
+ "afbc23b673314807bd81dcc9bcf3e3df": {
768
+ "model_module": "@jupyter-widgets/controls",
769
+ "model_module_version": "2.0.0",
770
+ "model_name": "HTMLModel",
771
+ "state": {
772
+ "layout": "IPY_MODEL_f8cd90e4cb13496780c19d3a482fb43f",
773
+ "style": "IPY_MODEL_4f42920d671545ed8e0e7c5ceaedf8ed",
774
+ "value": "Creating parquet from Arrow format: 100%"
775
+ }
776
+ },
777
+ "b90c8d0425ac4bcca937ea6b545e48cd": {
778
+ "model_module": "@jupyter-widgets/base",
779
+ "model_module_version": "2.0.0",
780
+ "model_name": "LayoutModel",
781
+ "state": {}
782
+ },
783
+ "b9fb72eff10047289a9597d08009af95": {
784
+ "model_module": "@jupyter-widgets/controls",
785
+ "model_module_version": "2.0.0",
786
+ "model_name": "ProgressStyleModel",
787
+ "state": {
788
+ "description_width": ""
789
+ }
790
+ },
791
+ "c2a692042a7748f9adea02a38012bb4c": {
792
+ "model_module": "@jupyter-widgets/controls",
793
+ "model_module_version": "2.0.0",
794
+ "model_name": "HBoxModel",
795
+ "state": {
796
+ "children": [
797
+ "IPY_MODEL_08cf7cea944a4322ae9b6ef361862bb4",
798
+ "IPY_MODEL_11970b4573344669adcfd5d9df924aea",
799
+ "IPY_MODEL_fe70cc023aa049e0ba921c8eb1c65646"
800
+ ],
801
+ "layout": "IPY_MODEL_23640986547d47b3900f002fe5522f39"
802
+ }
803
+ },
804
+ "c38b2aa4bbd04b248b23de98a3fb3f04": {
805
+ "model_module": "@jupyter-widgets/controls",
806
+ "model_module_version": "2.0.0",
807
+ "model_name": "ProgressStyleModel",
808
+ "state": {
809
+ "description_width": ""
810
+ }
811
+ },
812
+ "c827c4d520c342a1bcb54966869a6edc": {
813
+ "model_module": "@jupyter-widgets/controls",
814
+ "model_module_version": "2.0.0",
815
+ "model_name": "HTMLStyleModel",
816
+ "state": {
817
+ "description_width": "",
818
+ "font_size": null,
819
+ "text_color": null
820
+ }
821
+ },
822
+ "ca406b13430643bdab8a048bdf35dd75": {
823
+ "model_module": "@jupyter-widgets/base",
824
+ "model_module_version": "2.0.0",
825
+ "model_name": "LayoutModel",
826
+ "state": {}
827
+ },
828
+ "cbad9576bda8439d8fa5ae8391b31ff9": {
829
+ "model_module": "@jupyter-widgets/controls",
830
+ "model_module_version": "2.0.0",
831
+ "model_name": "HTMLStyleModel",
832
+ "state": {
833
+ "description_width": "",
834
+ "font_size": null,
835
+ "text_color": null
836
+ }
837
+ },
838
+ "d45797a1fbc54c7cb7b5a863972fb4b1": {
839
+ "model_module": "@jupyter-widgets/base",
840
+ "model_module_version": "2.0.0",
841
+ "model_name": "LayoutModel",
842
+ "state": {}
843
+ },
844
+ "d753a4db07294b83a86784bfc211dcaf": {
845
+ "model_module": "@jupyter-widgets/controls",
846
+ "model_module_version": "2.0.0",
847
+ "model_name": "HTMLModel",
848
+ "state": {
849
+ "layout": "IPY_MODEL_326fe11988d74d66b99b448cf2001714",
850
+ "style": "IPY_MODEL_a33667021736405b82666b555a5493a8",
851
+ "value": "Uploading the dataset shards: 100%"
852
+ }
853
+ },
854
+ "db5e0e8d61d742ef823112ebe06cd33b": {
855
+ "model_module": "@jupyter-widgets/base",
856
+ "model_module_version": "2.0.0",
857
+ "model_name": "LayoutModel",
858
+ "state": {}
859
+ },
860
+ "dc80b5c47d4c4f9faab022f05b691228": {
861
+ "model_module": "@jupyter-widgets/base",
862
+ "model_module_version": "2.0.0",
863
+ "model_name": "LayoutModel",
864
+ "state": {}
865
+ },
866
+ "e2d0aec5b6f14da889ebb4876d6a6cc3": {
867
+ "model_module": "@jupyter-widgets/controls",
868
+ "model_module_version": "2.0.0",
869
+ "model_name": "HBoxModel",
870
+ "state": {
871
+ "children": [
872
+ "IPY_MODEL_93b9ea63c9ee44cbbe4a266b83a86083",
873
+ "IPY_MODEL_2f221cdc0bb44e23ab02f00a658801bf",
874
+ "IPY_MODEL_76f47b25344e4f1db6549e268af826ba"
875
+ ],
876
+ "layout": "IPY_MODEL_f91d0f2d7018430fb1f6f9fef6d4bcb8"
877
+ }
878
+ },
879
+ "e5d66c90b5a842de84296f97abbdaae9": {
880
+ "model_module": "@jupyter-widgets/controls",
881
+ "model_module_version": "2.0.0",
882
+ "model_name": "FloatProgressModel",
883
+ "state": {
884
+ "bar_style": "success",
885
+ "layout": "IPY_MODEL_7d6080218e2a4888bdfd0c184b7c5ffc",
886
+ "max": 1,
887
+ "style": "IPY_MODEL_ea8839476ee147bcbdbd484c04e1e829",
888
+ "value": 1
889
+ }
890
+ },
891
+ "e5f9516196b0461daf95817bb9408d7b": {
892
+ "model_module": "@jupyter-widgets/base",
893
+ "model_module_version": "2.0.0",
894
+ "model_name": "LayoutModel",
895
+ "state": {}
896
+ },
897
+ "e6ec6e37a6304a2590e81ec2b4a01e15": {
898
+ "model_module": "@jupyter-widgets/base",
899
+ "model_module_version": "2.0.0",
900
+ "model_name": "LayoutModel",
901
+ "state": {}
902
+ },
903
+ "e7c643f02df04f39a6232d4ef37dbe53": {
904
+ "model_module": "@jupyter-widgets/controls",
905
+ "model_module_version": "2.0.0",
906
+ "model_name": "ProgressStyleModel",
907
+ "state": {
908
+ "description_width": ""
909
+ }
910
+ },
911
+ "e813397704674d81b3fe6dd3ce444e95": {
912
+ "model_module": "@jupyter-widgets/base",
913
+ "model_module_version": "2.0.0",
914
+ "model_name": "LayoutModel",
915
+ "state": {}
916
+ },
917
+ "ea8839476ee147bcbdbd484c04e1e829": {
918
+ "model_module": "@jupyter-widgets/controls",
919
+ "model_module_version": "2.0.0",
920
+ "model_name": "ProgressStyleModel",
921
+ "state": {
922
+ "description_width": ""
923
+ }
924
+ },
925
+ "eaf483365e224158aad8fd8f604ac64b": {
926
+ "model_module": "@jupyter-widgets/controls",
927
+ "model_module_version": "2.0.0",
928
+ "model_name": "HTMLStyleModel",
929
+ "state": {
930
+ "description_width": "",
931
+ "font_size": null,
932
+ "text_color": null
933
+ }
934
+ },
935
+ "f5a6d9c4be554f4f937b1afd82f934b9": {
936
+ "model_module": "@jupyter-widgets/controls",
937
+ "model_module_version": "2.0.0",
938
+ "model_name": "HBoxModel",
939
+ "state": {
940
+ "children": [
941
+ "IPY_MODEL_2d56d23ee7804546ab50c8ceaddd2d05",
942
+ "IPY_MODEL_a39e3942259747fd9c7759cbbba73d3d",
943
+ "IPY_MODEL_4b35ce2957784c83a9444926b0a7e976"
944
+ ],
945
+ "layout": "IPY_MODEL_abc65776f8504948b9e91ea432ebe8ef"
946
+ }
947
+ },
948
+ "f8cd90e4cb13496780c19d3a482fb43f": {
949
+ "model_module": "@jupyter-widgets/base",
950
+ "model_module_version": "2.0.0",
951
+ "model_name": "LayoutModel",
952
+ "state": {}
953
+ },
954
+ "f91d0f2d7018430fb1f6f9fef6d4bcb8": {
955
+ "model_module": "@jupyter-widgets/base",
956
+ "model_module_version": "2.0.0",
957
+ "model_name": "LayoutModel",
958
+ "state": {}
959
+ },
960
+ "fb1c614683cf4531b6ac405a9373d74d": {
961
+ "model_module": "@jupyter-widgets/controls",
962
+ "model_module_version": "2.0.0",
963
+ "model_name": "HTMLModel",
964
+ "state": {
965
+ "layout": "IPY_MODEL_842b6cc23bcc485ba3d034f5f955f314",
966
+ "style": "IPY_MODEL_c827c4d520c342a1bcb54966869a6edc",
967
+ "value": " 87/87 [00:00&lt;00:00, 1110.37 examples/s]"
968
+ }
969
+ },
970
+ "fe70cc023aa049e0ba921c8eb1c65646": {
971
+ "model_module": "@jupyter-widgets/controls",
972
+ "model_module_version": "2.0.0",
973
+ "model_name": "HTMLModel",
974
+ "state": {
975
+ "layout": "IPY_MODEL_99133f4e57fe4309b610cdb4ce2a90bd",
976
+ "style": "IPY_MODEL_38f0c8d251154e15ba2a621db92bbf89",
977
+ "value": " 1/1 [00:00&lt;00:00, 6.98ba/s]"
978
+ }
979
+ }
980
+ },
981
+ "version_major": 2,
982
+ "version_minor": 0
983
+ }
984
+ }
985
+ },
986
+ "nbformat": 4,
987
+ "nbformat_minor": 5
988
+ }
dataset_archive.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64f01c9672784cc26cc266287f3276757727bf4ad618b6b207fc05b3b90d3136
3
+ size 133
id2label.json ADDED
@@ -0,0 +1 @@
1
+ {"1": "central-ring", "2": "other", "3": "read-out-streak", "4": "smoke-ring", "5":"star-loop"}
load_and_visualise_dataset.ipynb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfd90e91aa9df0f7e65a33b6ec24b5c3ca30a7fca2f62dbe4ca958a80efb5d91
3
+ size 15765
obs_info_1024_all.json ADDED
The diff for this file is too large to render. See raw diff
 
stratified_kfold.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
utils.py ADDED
@@ -0,0 +1,322 @@
1
+ import os
2
+ import json
3
+ from shutil import copy
4
+ import pandas as pd
5
+ from pathlib import Path
6
+ from PIL import Image, ImageDraw
7
+ import cv2
8
+ import numpy as np
9
+ import re
10
+ import datasets
11
+ from datasets import Value
12
+ from io import BytesIO
13
+ from PIL import Image, ImageDraw, ImageFont
14
+ import matplotlib.pyplot as plt
15
+ import matplotlib.patches as patches
16
+
17
+ def create_directories_and_copy_files(images_dir, coco_data, image_data, k):
18
+ base_dir = os.path.join(images_dir, f'mskf_{k}')
19
+ os.makedirs(base_dir, exist_ok=True)
20
+
21
+ for split in np.unique(image_data['SPLIT']):
22
+ split_dir = os.path.join(base_dir, split)
23
+ os.makedirs(split_dir, exist_ok=True)
24
+
25
+ # Filter the annotations
26
+ split_ids = image_data[image_data['SPLIT'] == split]['IMADE_ID'].tolist()
27
+ annotations = {
28
+ 'images': [img for img in coco_data['images'] if img['id'] in split_ids],
29
+ 'annotations': [ann for ann in coco_data['annotations'] if ann['image_id'] in split_ids],
30
+ 'categories': coco_data['categories']
31
+ }
32
+
33
+ # Write the filtered annotations to a file
34
+ with open(os.path.join(split_dir, '_annotations.coco.json'), 'w') as f:
35
+ json.dump(annotations, f, indent=4)
36
+
37
+ # Copy the images
38
+ split_data = image_data[image_data['SPLIT'] == split]
39
+ for _, row in split_data.iterrows():
40
+ source = row['IMAGE_PATH']
41
+ destination = os.path.join(split_dir, os.path.basename(source))
42
+ copy(source, destination)
43
+
44
+ print(f'Dataset split for mskf_{k} was successful.')
45
+
46
+ def split_to_df(dataset_dir, split):
47
+ annotations_path = Path(dataset_dir+split+'/_annotations.coco.json')
48
+
49
+ with annotations_path.open('r') as f:
50
+ coco_data = json.load(f)
51
+
52
+ def image_from_path(file_path):
53
+ image = cv2.imread(file_path)
54
+ return image
55
+
56
+ def gen_segmentation(segmentation, width, height):
57
+ mask_img = np.zeros((height, width, 3), dtype=np.uint8)
58
+ for segment in segmentation:
59
+ pts = np.array(segment, np.int32).reshape((-1, 1, 2))
60
+ cv2.fillPoly(mask_img, [pts], (255, 255, 255)) # Fill color in BGR
61
+
62
+ return mask_img
63
+
64
+ images_df = pd.DataFrame(coco_data['images'][5:25], columns=['id', 'file_name', 'width', 'height'])
65
+ annotations_df = pd.DataFrame(coco_data['annotations'])
66
+ df = pd.merge(annotations_df, images_df, left_on='image_id', right_on='id')
67
+ image_folder = annotations_path.parent
68
+ df['file_path'] = df['file_name'].apply(lambda x: str(image_folder / x))
69
+ df['observation'] = df['file_name'].apply(lambda x: x.split('.')[0].replace('_png', ''))
70
+ df['image'] = df['file_path'].apply(image_from_path)
71
+ df['segmentation'] = df.apply(lambda row: gen_segmentation(row['segmentation'], row['width'], row['height']), axis=1)
72
+ df = df.drop('file_path', axis=1)
73
+ df = df.drop('file_name', axis=1)
74
+ df['annot_id'] = df['id_x']
75
+ df = df.drop('id_x', axis=1)
76
+ df = df.drop('id_y', axis=1)
77
+
78
+ # take an image from df and the corresponding annotations, and plot them on the image
79
+ # for i in range(5):
80
+ # img = df['image'][i]
81
+ # annot_id = df['annot_id'][i]
82
+ # # plot the image with the annotation using plt
83
+ # if img.dtype != np.uint8:
84
+ # img = img.astype(np.uint8)
85
+ # # plot
86
+ # segm_polygon = df['segmentation'][i]
87
+ # plt.imshow(segm_polygon)
88
+ # plt.axis('off')
89
+ # plt.show()
90
+ # plt.close()
91
+
92
+ return df, coco_data
93
+
94
+ def df_to_dataset_dict(df, coco_data, cats_to_colours):
95
+
96
+ def annot_on_image(annot_id, img_array, cat_id, annot_type='segm'):
97
+ if img_array.dtype != np.uint8:
98
+ img_array = img_array.astype(np.uint8)
99
+
100
+ pil_image = Image.fromarray(img_array)
101
+ draw = ImageDraw.Draw(pil_image)
102
+ if annot_type=='bbox':
103
+ bbox = [annot for annot in coco_data['annotations'] if annot['id'] == annot_id][0]['bbox']
104
+ x_min, y_min, width, height = bbox
105
+ top_left = (x_min, y_min)
106
+ bottom_right = (x_min + width, y_min + height)
107
+
108
+ draw.rectangle([top_left, bottom_right], outline=cats_to_colours[cat_id][1], width=2)
109
+ else:
110
+ # look for the annotation in coco_data that corresponds to the annot_id
111
+ segm_polygon = [annot for annot in coco_data['annotations'] if annot['id'] == annot_id][0]['segmentation'][0]
112
+ polygon = [(segm_polygon[i], segm_polygon[i+1]) for i in range(0, len(segm_polygon), 2)]
113
+ draw.polygon(polygon, outline=cats_to_colours[cat_id][1], width=2)
114
+
115
+ # plt.imshow(pil_image)
116
+ # plt.axis('off')
117
+ # plt.show()
118
+ # plt.close()
119
+
120
+ byte_io = BytesIO()
121
+ pil_image.save(byte_io, 'PNG')
122
+ byte_io.seek(0)
123
+ png_image = Image.open(byte_io)
124
+
125
+ return png_image
126
+
127
+ dictionary = df.to_dict(orient='list')
128
+ feats=datasets.Features({"observation id":Value(dtype='string'), \
129
+ 'segmentation': datasets.Image(), \
130
+ 'bbox':datasets.Image() , \
131
+ 'label': Value(dtype='string'),\
132
+ 'area':Value(dtype='string'),
133
+ 'image shape':Value(dtype='string')})
134
+
135
+ dataset_data = {"observation id":dictionary['observation'], \
136
+ 'segmentation': [annot_on_image(dictionary['annot_id'][i], dictionary['image'][i], dictionary['category_id'][i]) \
137
+ for i in range(len(dictionary['segmentation']))], \
138
+ 'bbox': [annot_on_image(dictionary['annot_id'][i], dictionary['image'][i], dictionary['category_id'][i], annot_type='bbox') \
139
+ for i in range(len(dictionary['bbox']))], \
140
+ 'label': [cats_to_colours[cat][0] for cat in dictionary['category_id']],\
141
+ 'area':['%.3f'%(value) for value in dictionary['area']], \
142
+ 'image shape':[f"({dictionary['width'][i]}, {dictionary['height'][i]})" for i in range(len(dictionary['width']))]}
143
+ the_dataset=datasets.Dataset.from_dict(dataset_data,features=feats)
144
+
145
+ return the_dataset
146
+
147
+ def merge_coco_jsons(first_json, second_json, output_path):
148
+
149
+ # Load the first JSON file
150
+ with open(first_json) as f:
151
+ coco1 = json.load(f)
152
+
153
+ # Load the second JSON file
154
+ with open(second_json) as f:
155
+ coco2 = json.load(f)
156
+
157
+ # Update IDs in coco2 to ensure they are unique and do not overlap with coco1
158
+ max_image_id = max(image['id'] for image in coco1['images'])
159
+ max_annotation_id = max(annotation['id'] for annotation in coco1['annotations'])
160
+ max_category_id = max(category['id'] for category in coco1['categories'])
161
+
162
+ # Add an offset to the second coco IDs
163
+ image_id_offset = max_image_id + 1
164
+ annotation_id_offset = max_annotation_id + 1
165
+ # category_id_offset = max_category_id + 1
166
+
167
+ # Apply offset to images, annotations, and categories in the second JSON
168
+ for image in coco2['images']:
169
+ image['id'] += image_id_offset
170
+
171
+ for annotation in coco2['annotations']:
172
+ annotation['id'] += annotation_id_offset
173
+ annotation['image_id'] += image_id_offset # Update the image_id reference
174
+
175
+ # Merge the two datasets
176
+ merged_coco = {
177
+ 'images': coco1['images'] + coco2['images'],
178
+ 'annotations': coco1['annotations'] + coco2['annotations'],
179
+ 'categories': coco1['categories'] # If categories are the same; otherwise, merge as needed
180
+ }
181
+
182
+ # Save the merged annotations to a new JSON file
183
+ with open(output_path, 'w') as f:
184
+ json.dump(merged_coco, f)
185
+
186
+ def percentages(n_splits, image_ids, labels):
187
+ labels_percentages = {}
188
+ for i in range(n_splits):
189
+ train_k, valid_k = 0, 0
190
+ train_labels_counts = {'0':0, '1':0, '2':0, '3':0, '4':0, '5':0}
191
+ valid_labels_counts = {'0':0, '1':0, '2':0, '3':0, '4':0, '5':0}
192
+ for j in range(len(image_ids[i]['train'])):
193
+ for cat in list(labels[i]['train'][j]):
194
+ train_labels_counts[cat] += 1
195
+ train_k+=1
196
+
197
+ for j in range(len(image_ids[i]['valid'])):
198
+ for cat in list(labels[i]['valid'][j]):
199
+ valid_labels_counts[cat] += 1
200
+ valid_k+=1
201
+
202
+ train_labels_counts = {cat:counts * 1.0/train_k for cat, counts in train_labels_counts.items()}
203
+ valid_labels_counts = {cat:counts * 1.0/valid_k for cat, counts in valid_labels_counts.items()}
204
+
205
+ labels_percentages[i] = {'train':train_labels_counts, 'valid': valid_labels_counts}
206
+
207
+ return labels_percentages
208
+
209
+ def make_split(data_in, train_index, valid_index):
210
+
211
+ data_in_train = data_in.copy()
212
+ data_in_valid = data_in.copy()
213
+
214
+ data_in_train['images'] = [data_in['images'][train_index[i][0]] for i in range(len(train_index))]
215
+ data_in_valid['images'] = [data_in['images'][valid_index[i][0]] for i in range(len(valid_index))]
216
+ train_annot_ids, valid_annot_ids = [], []
217
+
218
+ for img_i in data_in_train['images']:
219
+ annotation_ids = [annot['id'] for annot in data_in_train['annotations'] if annot['image_id'] == img_i['id']]
220
+ train_annot_ids +=annotation_ids
221
+
222
+ for img_i in data_in_valid['images']:
223
+ annotation_ids = [annot['id'] for annot in data_in_valid['annotations'] if annot['image_id'] == img_i['id']]
224
+ valid_annot_ids +=annotation_ids
225
+
226
+ data_in_train['annotations'] = [data_in_train['annotations'][id] for id in train_annot_ids]
227
+ data_in_valid['annotations'] = [data_in_valid['annotations'][id] for id in valid_annot_ids]
228
+
229
+ print(len(data_in_train['images']), len(data_in_valid['images']))
230
+ return data_in_train, data_in_valid
231
+
232
+ def correct_bboxes(annotations):
233
+ for ann in annotations:
234
+ # If the segmentation is in polygon format (COCO polygon)
235
+ if isinstance(ann['segmentation'], list):
236
+
237
+ points = np.array(ann['segmentation']).reshape(-1, 2)
238
+ x_min, y_min = np.inf, np.inf
239
+ x_max, y_max = -np.inf, -np.inf
240
+ x_min = min(x_min, points[:, 0].min())
241
+ y_min = min(y_min, points[:, 1].min())
242
+ x_max = max(x_max, points[:, 0].max())
243
+ y_max = max(y_max, points[:, 1].max())
244
+
245
+ width = x_max - x_min
246
+ height = y_max - y_min
247
+
248
+ # The bbox in COCO format [x_min, y_min, width, height]
249
+ bbox = [x_min, y_min, width, height]
250
+ x, y, w, h = map(int, bbox)
251
+ ann['bbox'] = [x, y, w, h]
252
+
253
+ return annotations
254
+
255
+ def highlight_max(s):
256
+ is_max = s == s.max()
257
+ return ['background-color: yellow' if v else '' for v in is_max]
258
+
259
+ def highlight_max_str(s):
260
+
261
+ cats = []
262
+ for cat in s:
263
+ cats.append([float(match) for match in re.findall(r"[-+]?[0-9]*\.?[0-9]+", cat)][0])
264
+
265
+ is_max = cats == np.max(cats)
266
+ return ['background-color: yellow' if v else '' for v in is_max]
267
+
268
+ def read_yolo_annotations(annotation_file):
269
+ with open(annotation_file, 'r') as file:
270
+ lines = file.readlines()
271
+
272
+ annotations = []
273
+ for line in lines:
274
+ parts = line.strip().split()
275
+ class_id = int(parts[0])
276
+ points = list(map(float, parts[1:]))
277
+ annotations.append((class_id, points))
278
+
279
+ return annotations
280
+
281
+ def display_image_with_annotations(coco, cat_names, image_id):
282
+ img = coco.loadImgs(image_id)[0]
283
+ image_path = os.path.join('./mskf_0/train/', img['file_name'])
284
+ I = Image.open(image_path)
285
+ plt.imshow(I); plt.axis('off')
286
+ ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
287
+ anns = coco.loadAnns(ann_ids)
288
+ ax = plt.gca()
289
+
290
+ for ann in anns:
291
+ bbox = ann['bbox']
292
+ rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
293
+ linewidth=2, edgecolor='b', facecolor='none')
294
+ ax.add_patch(rect)
295
+ ax.text(bbox[0], bbox[1] - 5, cat_names[ann['category_id']],
296
+ color='blue', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
297
+
298
+ plt.show()
299
+
300
+ def plot_segmentations(image_path, annotations, category_mapping):
301
+ image = Image.open(image_path)
302
+ width, height = image.size
303
+ draw = ImageDraw.Draw(image)
304
+
305
+ try:
306
+ font = ImageFont.truetype("DejaVuSans.ttf", 16) # Load a font
307
+ except IOError:
308
+ font = ImageFont.load_default()
309
+
310
+ for class_id, points in annotations:
311
+ # Scale points from normalized coordinates to image dimensions
312
+ scaled_points = [(p[0] * width, p[1] * height) for p in zip(points[0::2], points[1::2])]
313
+ draw.polygon(scaled_points, outline='green', fill=None)
314
+
315
+ category_name = category_mapping[class_id][0]
316
+ centroid_x = sum([p[0] for p in scaled_points]) / len(scaled_points)
317
+ centroid_y = sum([p[1] for p in scaled_points]) / len(scaled_points)
318
+ draw.text((centroid_x, centroid_y), category_name, fill='red', font=font, anchor='ms')
319
+
320
+ plt.imshow(image)
321
+ plt.axis('off')
322
+ plt.show()