donb-hf committed on
Commit
8fd3d2c
·
1 Parent(s): b1f2ae2

update changes

notebooks/finetune_florence_2_large_on_blood_cell_dataset_40_epochs copy.ipynb ADDED
@@ -0,0 +1,1602 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "KVmYbqqUSlCH"
7
+ },
8
+ "source": [
9
+ "# Fine-tuning Florence-2 on Blood Cell Object Detection Dataset"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "z16cfHRE8yi8"
16
+ },
17
+ "source": [
18
+ "## Setup"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "metadata": {
25
+ "colab": {
26
+ "base_uri": "https://localhost:8080/"
27
+ },
28
+ "id": "_vp9cS2-gXbn",
29
+ "outputId": "85a70762-1fe8-4482-dc7f-552d817b27c7"
30
+ },
31
+ "outputs": [],
32
+ "source": [
33
+ "from google.colab import drive\n",
34
+ "drive.mount('/content/drive')"
35
+ ]
36
+ },
37
+ {
38
+ "cell_type": "markdown",
39
+ "metadata": {
40
+ "id": "mqd30Ndg9dbt"
41
+ },
42
+ "source": [
43
+ "### Configure your API keys\n",
44
+ "\n",
45
+ "To fine-tune Florence-2, you need to provide your HuggingFace Token and Roboflow API key."
46
+ ]
47
+ },
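+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of wiring up both keys from Colab secrets. The `HF_TOKEN` secret name is an assumption; the Roboflow key is read the same way later in this notebook via `userdata.get('ROBOFLOW_API_KEY')`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from google.colab import userdata\n",
+ "from huggingface_hub import login\n",
+ "\n",
+ "# Assumes Colab secrets named HF_TOKEN and ROBOFLOW_API_KEY exist\n",
+ "login(token=userdata.get('HF_TOKEN'))  # needed for push_to_hub at the end\n",
+ "ROBOFLOW_API_KEY = userdata.get('ROBOFLOW_API_KEY')"
+ ]
+ },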
48
+ {
49
+ "cell_type": "markdown",
50
+ "metadata": {
51
+ "id": "n32nrwCeAEYP"
52
+ },
53
+ "source": [
54
+ "### Select the runtime\n",
55
+ "\n",
56
+ "Let's make sure that we have access to GPU. We can use `nvidia-smi` command to do that. In case of any problems navigate to `Edit` -> `Notebook settings` -> `Hardware accelerator`, set it to `L4 GPU`, and then click `Save`."
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "metadata": {
63
+ "colab": {
64
+ "base_uri": "https://localhost:8080/"
65
+ },
66
+ "id": "rMmBuhiiC2mX",
67
+ "outputId": "e0c91cc2-104c-4826-a4e8-3ddff36488f5"
68
+ },
69
+ "outputs": [],
70
+ "source": [
71
+ "!nvidia-smi"
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "markdown",
76
+ "metadata": {
77
+ "id": "dOshHQM3Ebq5"
78
+ },
79
+ "source": [
80
+ "### Download example data\n",
81
+ "\n",
82
+ "**NOTE:** Feel free to replace our example image with your own photo."
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": null,
88
+ "metadata": {
89
+ "colab": {
90
+ "base_uri": "https://localhost:8080/"
91
+ },
92
+ "id": "u3ZhBCTPEnEO",
93
+ "outputId": "99bd166b-dda7-4b85-bf22-4824ab643a5a"
94
+ },
95
+ "outputs": [],
96
+ "source": [
97
+ "image_url=\"https://huggingface.co/spaces/dwb2023/omniscience/resolve/main/examples/BloodImage_00038_jpg.rf.1b0ce1635e11b3b49302de527c86bb02.jpg\"\n",
98
+ "\n",
99
+ "# get image_url and write it to /content/source_img.jpg\n",
100
+ "!wget -O /content/source_img.jpg $image_url"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": null,
106
+ "metadata": {
107
+ "id": "PbglpBOOFCHm"
108
+ },
109
+ "outputs": [],
110
+ "source": [
111
+ "EXAMPLE_IMAGE_PATH = \"/content/source_img.jpg\""
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "markdown",
116
+ "metadata": {
117
+ "id": "GM4QlaUfCFsv"
118
+ },
119
+ "source": [
120
+ "## Download and configure the model\n",
121
+ "\n",
122
+ " Let's download the model checkpoint and configure it so that you can fine-tune it later on."
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": null,
128
+ "metadata": {
129
+ "colab": {
130
+ "base_uri": "https://localhost:8080/"
131
+ },
132
+ "id": "Y6b1dvjgYXOD",
133
+ "outputId": "e62ff6f2-4820-443b-88ab-9d0f9041ab48"
134
+ },
135
+ "outputs": [],
136
+ "source": [
137
+ "!pip install -q transformers flash_attn timm einops peft\n",
138
+ "!pip install -q roboflow git+https://github.com/roboflow/supervision.git"
139
+ ]
140
+ },
141
+ {
142
+ "cell_type": "code",
143
+ "execution_count": null,
144
+ "metadata": {
145
+ "id": "HMd6tb4sSh9G"
146
+ },
147
+ "outputs": [],
148
+ "source": [
149
+ "# @title Imports\n",
150
+ "\n",
151
+ "import io\n",
152
+ "import os\n",
153
+ "import re\n",
154
+ "import json\n",
155
+ "import torch\n",
156
+ "import html\n",
157
+ "import base64\n",
158
+ "import itertools\n",
159
+ "\n",
160
+ "import numpy as np\n",
161
+ "import supervision as sv\n",
162
+ "\n",
163
+ "from google.colab import userdata\n",
164
+ "from IPython.core.display import display, HTML\n",
165
+ "from torch.utils.data import Dataset, DataLoader\n",
166
+ "from transformers import (\n",
167
+ " AdamW,\n",
168
+ " AutoModelForCausalLM,\n",
169
+ " AutoProcessor,\n",
170
+ " get_scheduler\n",
171
+ ")\n",
172
+ "from tqdm import tqdm\n",
173
+ "from typing import List, Dict, Any, Tuple, Generator\n",
174
+ "from peft import LoraConfig, get_peft_model\n",
175
+ "from PIL import Image\n",
176
+ "from roboflow import Roboflow"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "markdown",
181
+ "metadata": {
182
+ "id": "flp13B-8Myjf"
183
+ },
184
+ "source": [
185
+ "Load the model using `AutoModelForCausalLM` and the processor using `AutoProcessor` classes from the transformers library. Note that you need to pass `trust_remote_code` as `True` since this model is not a standard transformers model."
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "code",
190
+ "execution_count": null,
191
+ "metadata": {
192
+ "id": "zqDWEWDcaSxN"
193
+ },
194
+ "outputs": [],
195
+ "source": [
196
+ "CHECKPOINT = \"microsoft/Florence-2-large-ft\"\n",
197
+ "# REVISION = 'refs/pr/6'\n",
198
+ "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
199
+ "\n",
200
+ "model = AutoModelForCausalLM.from_pretrained(CHECKPOINT, trust_remote_code=True).to(DEVICE)\n",
201
+ "processor = AutoProcessor.from_pretrained(CHECKPOINT, trust_remote_code=True)"
202
+ ]
203
+ },
204
+ {
205
+ "cell_type": "markdown",
206
+ "metadata": {
207
+ "id": "rf1GlvvQFec-"
208
+ },
209
+ "source": [
210
+ "## Run inference with pre-trained Florence-2 model"
211
+ ]
212
+ },
213
+ {
214
+ "cell_type": "code",
215
+ "execution_count": null,
216
+ "metadata": {
217
+ "colab": {
218
+ "base_uri": "https://localhost:8080/",
219
+ "height": 485
220
+ },
221
+ "id": "ReAKWNxAFmv1",
222
+ "outputId": "5df8fd92-c2b2-4549-a45b-6d83dd8c3835"
223
+ },
224
+ "outputs": [],
225
+ "source": [
226
+ "# @title Example object detection inference\n",
227
+ "\n",
228
+ "image = Image.open(EXAMPLE_IMAGE_PATH)\n",
229
+ "task = \"<OD>\"\n",
230
+ "text = \"<OD>\"\n",
231
+ "\n",
232
+ "inputs = processor(text=text, images=image, return_tensors=\"pt\").to(DEVICE)\n",
233
+ "generated_ids = model.generate(\n",
234
+ " input_ids=inputs[\"input_ids\"],\n",
235
+ " pixel_values=inputs[\"pixel_values\"],\n",
236
+ " max_new_tokens=256,\n",
237
+ " num_beams=3\n",
238
+ ")\n",
239
+ "generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]\n",
240
+ "response = processor.post_process_generation(generated_text, task=task, image_size=(image.width, image.height))\n",
241
+ "detections = sv.Detections.from_lmm(sv.LMM.FLORENCE_2, response, resolution_wh=image.size)\n",
242
+ "\n",
243
+ "bounding_box_annotator = sv.BoundingBoxAnnotator(color_lookup=sv.ColorLookup.INDEX)\n",
244
+ "label_annotator = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX)\n",
245
+ "\n",
246
+ "image = bounding_box_annotator.annotate(image, detections)\n",
247
+ "image = label_annotator.annotate(image, detections)\n",
248
+ "image.thumbnail((600, 600))\n",
249
+ "image"
250
+ ]
251
+ },
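+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For reference, a quick look at the parsed response structure: `post_process_generation` returns a dict keyed by the task token, holding parallel `bboxes` and `labels` lists (a sketch; run right after the cell above)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Expected shape: {'<OD>': {'bboxes': [[x1, y1, x2, y2], ...], 'labels': [...]}}\n",
+ "od = response[task]\n",
+ "print(f\"{len(od['bboxes'])} boxes, labels: {sorted(set(od['labels']))}\")"
+ ]
+ },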
252
+ {
253
+ "cell_type": "markdown",
254
+ "metadata": {
255
+ "id": "eQetrQM7Jziy"
256
+ },
257
+ "source": [
258
+ "## Fine-tune Florence-2 on custom dataset"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "markdown",
263
+ "metadata": {
264
+ "id": "Sw7D6ZYzAs9a"
265
+ },
266
+ "source": [
267
+ "### Download dataset from Roboflow Universe"
268
+ ]
269
+ },
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": null,
273
+ "metadata": {
274
+ "colab": {
275
+ "base_uri": "https://localhost:8080/"
276
+ },
277
+ "id": "K1IlyjYmBCxX",
278
+ "outputId": "a0aefa84-f828-49b0-b5e5-463edbb22ec9"
279
+ },
280
+ "outputs": [],
281
+ "source": [
282
+ "ROBOFLOW_API_KEY = userdata.get('ROBOFLOW_API_KEY')\n",
283
+ "rf = Roboflow(api_key=ROBOFLOW_API_KEY)\n",
284
+ "\n",
285
+ "project = rf.workspace(\"roboflow-100\").project(\"bccd-ouzjz\")\n",
286
+ "version = project.version(2)\n",
287
+ "dataset = version.download(\"florence2-od\")"
288
+ ]
289
+ },
290
+ {
291
+ "cell_type": "code",
292
+ "execution_count": null,
293
+ "metadata": {
294
+ "colab": {
295
+ "base_uri": "https://localhost:8080/"
296
+ },
297
+ "id": "iiLclUnKTrLE",
298
+ "outputId": "e8655a6a-aedd-409c-9a80-c2aec8f438dc"
299
+ },
300
+ "outputs": [],
301
+ "source": [
302
+ "!head -n 5 {dataset.location}/train/annotations.jsonl"
303
+ ]
304
+ },
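+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Each line of the `florence2-od` export is a JSON object with `image`, `prefix`, and `suffix` keys, which are exactly the keys the `DetectionDataset` below relies on; a quick parse of the first entry (sketch):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "# Parse the first annotation line to confirm the expected keys\n",
+ "with open(f\"{dataset.location}/train/annotations.jsonl\") as f:\n",
+ "    first = json.loads(f.readline())\n",
+ "print(list(first.keys()))\n",
+ "print(first['prefix'], '->', first['suffix'][:120])"
+ ]
+ },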
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": null,
308
+ "metadata": {
309
+ "id": "dExvJNFkxymc"
310
+ },
311
+ "outputs": [],
312
+ "source": [
313
+ "# @title Define `DetectionsDataset` class\n",
314
+ "\n",
315
+ "class JSONLDataset:\n",
316
+ " def __init__(self, jsonl_file_path: str, image_directory_path: str):\n",
317
+ " self.jsonl_file_path = jsonl_file_path\n",
318
+ " self.image_directory_path = image_directory_path\n",
319
+ " self.entries = self._load_entries()\n",
320
+ "\n",
321
+ " def _load_entries(self) -> List[Dict[str, Any]]:\n",
322
+ " entries = []\n",
323
+ " with open(self.jsonl_file_path, 'r') as file:\n",
324
+ " for line in file:\n",
325
+ " data = json.loads(line)\n",
326
+ " entries.append(data)\n",
327
+ " return entries\n",
328
+ "\n",
329
+ " def __len__(self) -> int:\n",
330
+ " return len(self.entries)\n",
331
+ "\n",
332
+ " def __getitem__(self, idx: int) -> Tuple[Image.Image, Dict[str, Any]]:\n",
333
+ " if idx < 0 or idx >= len(self.entries):\n",
334
+ " raise IndexError(\"Index out of range\")\n",
335
+ "\n",
336
+ " entry = self.entries[idx]\n",
337
+ " image_path = os.path.join(self.image_directory_path, entry['image'])\n",
338
+ " try:\n",
339
+ " image = Image.open(image_path)\n",
340
+ " return (image, entry)\n",
341
+ " except FileNotFoundError:\n",
342
+ " raise FileNotFoundError(f\"Image file {image_path} not found.\")\n",
343
+ "\n",
344
+ "\n",
345
+ "class DetectionDataset(Dataset):\n",
346
+ " def __init__(self, jsonl_file_path: str, image_directory_path: str):\n",
347
+ " self.dataset = JSONLDataset(jsonl_file_path, image_directory_path)\n",
348
+ "\n",
349
+ " def __len__(self):\n",
350
+ " return len(self.dataset)\n",
351
+ "\n",
352
+ " def __getitem__(self, idx):\n",
353
+ " image, data = self.dataset[idx]\n",
354
+ " prefix = data['prefix']\n",
355
+ " suffix = data['suffix']\n",
356
+ " return prefix, suffix, image"
357
+ ]
358
+ },
359
+ {
360
+ "cell_type": "code",
361
+ "execution_count": null,
362
+ "metadata": {
363
+ "id": "ilMb0ivGdt9l"
364
+ },
365
+ "outputs": [],
366
+ "source": [
367
+ "# @title Initiate `DetectionsDataset` and `DataLoader` for train and validation subsets\n",
368
+ "\n",
369
+ "BATCH_SIZE = 6\n",
370
+ "NUM_WORKERS = 0\n",
371
+ "\n",
372
+ "def collate_fn(batch):\n",
373
+ " questions, answers, images = zip(*batch)\n",
374
+ " inputs = processor(text=list(questions), images=list(images), return_tensors=\"pt\", padding=True).to(DEVICE)\n",
375
+ " return inputs, answers\n",
376
+ "\n",
377
+ "train_dataset = DetectionDataset(\n",
378
+ " jsonl_file_path = f\"{dataset.location}/train/annotations.jsonl\",\n",
379
+ " image_directory_path = f\"{dataset.location}/train/\"\n",
380
+ ")\n",
381
+ "val_dataset = DetectionDataset(\n",
382
+ " jsonl_file_path = f\"{dataset.location}/valid/annotations.jsonl\",\n",
383
+ " image_directory_path = f\"{dataset.location}/valid/\"\n",
384
+ ")\n",
385
+ "\n",
386
+ "train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn, num_workers=NUM_WORKERS, shuffle=True)\n",
387
+ "val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn, num_workers=NUM_WORKERS)"
388
+ ]
389
+ },
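+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check on one batch (sketch): the processor side of `collate_fn` yields `input_ids` and `pixel_values`, while the suffixes stay untokenized so the train loop can tokenize them as labels."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Pull a single batch to verify shapes before training\n",
+ "inputs, answers = next(iter(train_loader))\n",
+ "print(inputs[\"input_ids\"].shape, inputs[\"pixel_values\"].shape)\n",
+ "print(len(answers), \"suffix strings\")"
+ ]
+ },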
390
+ {
391
+ "cell_type": "code",
392
+ "execution_count": null,
393
+ "metadata": {
394
+ "colab": {
395
+ "base_uri": "https://localhost:8080/"
396
+ },
397
+ "id": "FZYoV_EjOo5A",
398
+ "outputId": "7ab4f8a8-e9be-4ac0-b370-003fc32a6332"
399
+ },
400
+ "outputs": [],
401
+ "source": [
402
+ "def analyze_suffix_length(dataset, processor, num_samples=100):\n",
403
+ " max_suffix_length = 0\n",
404
+ " max_suffix_token_length = 0\n",
405
+ "\n",
406
+ " for i in range(min(num_samples, len(dataset))):\n",
407
+ " _, suffix, _ = dataset[i]\n",
408
+ "\n",
409
+ " # Get token length using the processor\n",
410
+ " tokens = processor.tokenizer(suffix, return_tensors=\"pt\").input_ids[0]\n",
411
+ " token_length = len(tokens)\n",
412
+ "\n",
413
+ " # Update max lengths\n",
414
+ " max_suffix_length = max(max_suffix_length, len(suffix))\n",
415
+ " max_suffix_token_length = max(max_suffix_token_length, token_length)\n",
416
+ "\n",
417
+ " print(f\"Max suffix length (characters): {max_suffix_length}\")\n",
418
+ " print(f\"Max suffix length (tokens): {max_suffix_token_length}\")\n",
419
+ " print(f\"Current max_new_tokens: 1024\")\n",
420
+ "\n",
421
+ " if max_suffix_token_length > 1024:\n",
422
+ " print(\"Warning: max_new_tokens may be too small for some suffixes\")\n",
423
+ " else:\n",
424
+ " print(\"Current max_new_tokens should be sufficient\")\n",
425
+ "\n",
426
+ "# Use the function\n",
427
+ "analyze_suffix_length(train_dataset, processor)"
428
+ ]
429
+ },
430
+ {
431
+ "cell_type": "code",
432
+ "execution_count": null,
433
+ "metadata": {
434
+ "colab": {
435
+ "base_uri": "https://localhost:8080/"
436
+ },
437
+ "id": "FmPJOXCzB-29",
438
+ "outputId": "cbee50a7-4e06-402c-f2d0-92e0c3a4eac8"
439
+ },
440
+ "outputs": [],
441
+ "source": [
442
+ "# @title Setup LoRA Florence-2 model\n",
443
+ "\n",
444
+ "config = LoraConfig(\n",
445
+ " r=8,\n",
446
+ " lora_alpha=8,\n",
447
+ " target_modules=[\"q_proj\", \"o_proj\", \"k_proj\", \"v_proj\", \"linear\", \"Conv2d\", \"lm_head\", \"fc2\"],\n",
448
+ " task_type=\"CAUSAL_LM\",\n",
449
+ " lora_dropout=0.05,\n",
450
+ " bias=\"none\",\n",
451
+ " inference_mode=False,\n",
452
+ " use_rslora=True,\n",
453
+ " init_lora_weights=\"gaussian\",\n",
454
+ ")\n",
455
+ "\n",
456
+ "peft_model = get_peft_model(model, config)\n",
457
+ "peft_model.print_trainable_parameters()"
458
+ ]
459
+ },
460
+ {
461
+ "cell_type": "code",
462
+ "execution_count": null,
463
+ "metadata": {
464
+ "id": "1V9BcVQMycgq"
465
+ },
466
+ "outputs": [],
467
+ "source": [
468
+ "torch.cuda.empty_cache()"
469
+ ]
470
+ },
471
+ {
472
+ "cell_type": "code",
473
+ "execution_count": null,
474
+ "metadata": {
475
+ "colab": {
476
+ "base_uri": "https://localhost:8080/",
477
+ "height": 1000
478
+ },
479
+ "id": "i9LEEXRwN9cX",
480
+ "outputId": "6b9e4cd9-7852-4826-ca4c-4f2c79aa470e"
481
+ },
482
+ "outputs": [],
483
+ "source": [
484
+ "# @title Run inference with pre-trained Florence-2 model on validation dataset\n",
485
+ "\n",
486
+ "def render_inline(image: Image.Image, resize=(128, 128)):\n",
487
+ " \"\"\"Convert image into inline html.\"\"\"\n",
488
+ " image.resize(resize)\n",
489
+ " with io.BytesIO() as buffer:\n",
490
+ " image.save(buffer, format='jpeg')\n",
491
+ " image_b64 = str(base64.b64encode(buffer.getvalue()), \"utf-8\")\n",
492
+ " return f\"data:image/jpeg;base64,{image_b64}\"\n",
493
+ "\n",
494
+ "\n",
495
+ "def render_example(image: Image.Image, response):\n",
496
+ " try:\n",
497
+ " detections = sv.Detections.from_lmm(sv.LMM.FLORENCE_2, response, resolution_wh=image.size)\n",
498
+ " image = sv.BoundingBoxAnnotator(color_lookup=sv.ColorLookup.INDEX).annotate(image.copy(), detections)\n",
499
+ " image = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX).annotate(image, detections)\n",
500
+ " except:\n",
501
+ " print('failed to redner model response')\n",
502
+ " return f\"\"\"\n",
503
+ "<div style=\"display: inline-flex; align-items: center; justify-content: center;\">\n",
504
+ " <img style=\"width:256px; height:256px;\" src=\"{render_inline(image, resize=(128, 128))}\" />\n",
505
+ " <p style=\"width:512px; margin:10px; font-size:small;\">{html.escape(json.dumps(response))}</p>\n",
506
+ "</div>\n",
507
+ "\"\"\"\n",
508
+ "\n",
509
+ "\n",
510
+ "def render_inference_results(model, dataset: DetectionDataset, count: int):\n",
511
+ " html_out = \"\"\n",
512
+ " count = min(count, len(dataset))\n",
513
+ " for i in range(count):\n",
514
+ " image, data = dataset.dataset[i]\n",
515
+ " prefix = data['prefix']\n",
516
+ " suffix = data['suffix']\n",
517
+ " inputs = processor(text=prefix, images=image, return_tensors=\"pt\").to(DEVICE)\n",
518
+ " generated_ids = model.generate(\n",
519
+ " input_ids=inputs[\"input_ids\"],\n",
520
+ " pixel_values=inputs[\"pixel_values\"],\n",
521
+ " max_new_tokens=256,\n",
522
+ " num_beams=3\n",
523
+ " )\n",
524
+ " generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]\n",
525
+ " answer = processor.post_process_generation(generated_text, task='<OD>', image_size=image.size)\n",
526
+ " html_out += render_example(image, answer)\n",
527
+ "\n",
528
+ " display(HTML(html_out))\n",
529
+ "\n",
530
+ "render_inference_results(peft_model, val_dataset, 4)"
531
+ ]
532
+ },
533
+ {
534
+ "cell_type": "markdown",
535
+ "metadata": {
536
+ "id": "RH9JTq_RytE2"
537
+ },
538
+ "source": [
539
+ "## Fine-tune Florence-2 on custom object detection dataset"
540
+ ]
541
+ },
542
+ {
543
+ "cell_type": "code",
544
+ "execution_count": null,
545
+ "metadata": {
546
+ "id": "bC06Mc7jOdpY"
547
+ },
548
+ "outputs": [],
549
+ "source": [
550
+ "# @title Define train loop\n",
551
+ "\n",
552
+ "def train_model(train_loader, val_loader, model, processor, epochs=10, lr=1e-6):\n",
553
+ " optimizer = AdamW(model.parameters(), lr=lr)\n",
554
+ " num_training_steps = epochs * len(train_loader)\n",
555
+ " lr_scheduler = get_scheduler(\n",
556
+ " name=\"linear\",\n",
557
+ " optimizer=optimizer,\n",
558
+ " num_warmup_steps=0,\n",
559
+ " num_training_steps=num_training_steps,\n",
560
+ " )\n",
561
+ "\n",
562
+ " render_inference_results(peft_model, val_loader.dataset, 6)\n",
563
+ "\n",
564
+ " for epoch in range(epochs):\n",
565
+ " model.train()\n",
566
+ " train_loss = 0\n",
567
+ " for inputs, answers in tqdm(train_loader, desc=f\"Training Epoch {epoch + 1}/{epochs}\"):\n",
568
+ "\n",
569
+ " input_ids = inputs[\"input_ids\"]\n",
570
+ " pixel_values = inputs[\"pixel_values\"]\n",
571
+ " labels = processor.tokenizer(\n",
572
+ " text=answers,\n",
573
+ " return_tensors=\"pt\",\n",
574
+ " padding=True,\n",
575
+ " return_token_type_ids=False\n",
576
+ " ).input_ids.to(DEVICE)\n",
577
+ "\n",
578
+ " outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=labels)\n",
579
+ " loss = outputs.loss\n",
580
+ "\n",
581
+ " loss.backward(), optimizer.step(), lr_scheduler.step(), optimizer.zero_grad()\n",
582
+ " train_loss += loss.item()\n",
583
+ "\n",
584
+ " avg_train_loss = train_loss / len(train_loader)\n",
585
+ " print(f\"Average Training Loss: {avg_train_loss}\")\n",
586
+ "\n",
587
+ " model.eval()\n",
588
+ " val_loss = 0\n",
589
+ " with torch.no_grad():\n",
590
+ " for inputs, answers in tqdm(val_loader, desc=f\"Validation Epoch {epoch + 1}/{epochs}\"):\n",
591
+ "\n",
592
+ " input_ids = inputs[\"input_ids\"]\n",
593
+ " pixel_values = inputs[\"pixel_values\"]\n",
594
+ " labels = processor.tokenizer(\n",
595
+ " text=answers,\n",
596
+ " return_tensors=\"pt\",\n",
597
+ " padding=True,\n",
598
+ " return_token_type_ids=False\n",
599
+ " ).input_ids.to(DEVICE)\n",
600
+ "\n",
601
+ " outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=labels)\n",
602
+ " loss = outputs.loss\n",
603
+ "\n",
604
+ " val_loss += loss.item()\n",
605
+ "\n",
606
+ " avg_val_loss = val_loss / len(val_loader)\n",
607
+ " print(f\"Average Validation Loss: {avg_val_loss}\")\n",
608
+ "\n",
609
+ " render_inference_results(peft_model, val_loader.dataset, 6)\n",
610
+ "\n",
611
+ " output_dir = f\"./model_checkpoints/epoch_{epoch+1}\"\n",
612
+ " os.makedirs(output_dir, exist_ok=True)\n",
613
+ " model.save_pretrained(output_dir)\n",
614
+ " processor.save_pretrained(output_dir)"
615
+ ]
616
+ },
617
+ {
618
+ "cell_type": "code",
619
+ "execution_count": null,
620
+ "metadata": {
621
+ "colab": {
622
+ "base_uri": "https://localhost:8080/",
623
+ "height": 1000
624
+ },
625
+ "id": "LZybGHd3fNJ1",
626
+ "outputId": "c1c7be61-c4c5-4994-f3ac-a040f9f22c31"
627
+ },
628
+ "outputs": [],
629
+ "source": [
630
+ "# @title Run train loop\n",
631
+ "\n",
632
+ "%%time\n",
633
+ "\n",
634
+ "EPOCHS = 40\n",
635
+ "LR = 5e-6\n",
636
+ "\n",
637
+ "train_model(train_loader, val_loader, peft_model, processor, epochs=EPOCHS, lr=LR)"
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "markdown",
642
+ "metadata": {
643
+ "id": "MBHMu7WGWpeu"
644
+ },
645
+ "source": [
646
+ "## Fine-tuned model evaluation"
647
+ ]
648
+ },
649
+ {
650
+ "cell_type": "code",
651
+ "execution_count": null,
652
+ "metadata": {
653
+ "id": "8f1BYeQw3xhl"
654
+ },
655
+ "outputs": [],
656
+ "source": [
657
+ "# @title Collect predictions\n",
658
+ "\n",
659
+ "# Corrected pattern to capture class names correctly\n",
660
+ "PATTERN = r'(RBC|WBC|Platelets)'\n",
661
+ "\n",
662
+ "def extract_classes(dataset: DetectionDataset):\n",
663
+ " class_set = set()\n",
664
+ " for i in range(len(dataset.dataset)):\n",
665
+ " image, data = dataset.dataset[i]\n",
666
+ " suffix = data[\"suffix\"]\n",
667
+ " classes = re.findall(PATTERN, suffix)\n",
668
+ " class_set.update(classes)\n",
669
+ " return sorted(class_set)\n",
670
+ "\n",
671
+ "CLASSES = extract_classes(train_dataset)\n",
672
+ "\n",
673
+ "targets = []\n",
674
+ "predictions = []\n",
675
+ "\n",
676
+ "for i in range(len(val_dataset.dataset)):\n",
677
+ " image, data = val_dataset.dataset[i]\n",
678
+ " prefix = data['prefix']\n",
679
+ " suffix = data['suffix']\n",
680
+ "\n",
681
+ " inputs = processor(text=prefix, images=image, return_tensors=\"pt\").to(DEVICE)\n",
682
+ " generated_ids = model.generate(\n",
683
+ " input_ids=inputs[\"input_ids\"],\n",
684
+ " pixel_values=inputs[\"pixel_values\"],\n",
685
+ " max_new_tokens=256,\n",
686
+ " num_beams=3\n",
687
+ " )\n",
688
+ " generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]\n",
689
+ "\n",
690
+ " prediction = processor.post_process_generation(generated_text, task='<OD>', image_size=image.size)\n",
691
+ " prediction = sv.Detections.from_lmm(sv.LMM.FLORENCE_2, prediction, resolution_wh=image.size)\n",
692
+ " prediction = prediction[np.isin(prediction['class_name'], CLASSES)]\n",
693
+ " prediction.class_id = np.array([CLASSES.index(class_name) for class_name in prediction['class_name']])\n",
694
+ " prediction.confidence = np.ones(len(prediction))\n",
695
+ "\n",
696
+ " target = processor.post_process_generation(suffix, task='<OD>', image_size=image.size)\n",
697
+ " target = sv.Detections.from_lmm(sv.LMM.FLORENCE_2, target, resolution_wh=image.size)\n",
698
+ " target.class_id = np.array([CLASSES.index(class_name) for class_name in target['class_name']])\n",
699
+ "\n",
700
+ " targets.append(target)\n",
701
+ " predictions.append(prediction)"
702
+ ]
703
+ },
704
+ {
705
+ "cell_type": "code",
706
+ "execution_count": null,
707
+ "metadata": {
708
+ "colab": {
709
+ "base_uri": "https://localhost:8080/"
710
+ },
711
+ "id": "nKECYHh-z95f",
712
+ "outputId": "690ab21b-f5d3-4608-f291-bcf1c941990c"
713
+ },
714
+ "outputs": [],
715
+ "source": [
716
+ "CLASSES"
717
+ ]
718
+ },
719
+ {
720
+ "cell_type": "code",
721
+ "execution_count": null,
722
+ "metadata": {
723
+ "colab": {
724
+ "base_uri": "https://localhost:8080/"
725
+ },
726
+ "id": "88VnIZ_feHPo",
727
+ "outputId": "9fc48273-24ae-4b3a-a71b-57fdfee2f0c6"
728
+ },
729
+ "outputs": [],
730
+ "source": [
731
+ "# @title Calculate mAP\n",
732
+ "mean_average_precision = sv.MeanAveragePrecision.from_detections(\n",
733
+ " predictions=predictions,\n",
734
+ " targets=targets,\n",
735
+ ")\n",
736
+ "\n",
737
+ "print(f\"map50_95: {mean_average_precision.map50_95:.2f}\")\n",
738
+ "print(f\"map50: {mean_average_precision.map50:.2f}\")\n",
739
+ "print(f\"map75: {mean_average_precision.map75:.2f}\")"
740
+ ]
741
+ },
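+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Aggregate mAP can mask class imbalance (RBC dominates BCCD), so a per-class breakdown is worth a look; a sketch assuming this supervision version exposes `per_class_ap50_95`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Per-class AP averaged over IoU 0.50:0.95; rows follow CLASSES order\n",
+ "for name, ap in zip(CLASSES, mean_average_precision.per_class_ap50_95):\n",
+ "    print(f\"{name}: {ap.mean():.2f}\")"
+ ]
+ },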
742
+ {
743
+ "cell_type": "code",
744
+ "execution_count": null,
745
+ "metadata": {
746
+ "colab": {
747
+ "base_uri": "https://localhost:8080/",
748
+ "height": 1000
749
+ },
750
+ "id": "85APzNRfe8xp",
751
+ "outputId": "260eb915-49e3-49b1-e215-d7cc6b504526"
752
+ },
753
+ "outputs": [],
754
+ "source": [
755
+ "import numpy as np\n",
756
+ "import supervision as sv # Ensure this is the correct library\n",
757
+ "import json\n",
758
+ "\n",
759
+ "# @title Calculate Confusion Matrix\n",
760
+ "confusion_matrix = sv.ConfusionMatrix.from_detections(\n",
761
+ " predictions=predictions,\n",
762
+ " targets=targets,\n",
763
+ " classes=CLASSES\n",
764
+ ")\n",
765
+ "\n",
766
+ "_ = confusion_matrix.plot()"
767
+ ]
768
+ },
769
+ {
770
+ "cell_type": "code",
771
+ "execution_count": null,
772
+ "metadata": {
773
+ "colab": {
774
+ "base_uri": "https://localhost:8080/"
775
+ },
776
+ "id": "nfTi6NmpmiuU",
777
+ "outputId": "8af15af7-da64-40d1-c577-944a7d1b8be6"
778
+ },
779
+ "outputs": [],
780
+ "source": [
781
+ "# Correctly access the matrix attribute\n",
782
+ "conf_matrix_values = confusion_matrix.matrix\n",
783
+ "\n",
784
+ "# Print to check the values are extracted correctly\n",
785
+ "print(\"Confusion Matrix Values:\", conf_matrix_values)"
786
+ ]
787
+ },
788
+ {
789
+ "cell_type": "code",
790
+ "execution_count": null,
791
+ "metadata": {
792
+ "colab": {
793
+ "base_uri": "https://localhost:8080/",
794
+ "height": 217
795
+ },
796
+ "id": "w4jbxLsmlO7j",
797
+ "outputId": "1c022a10-ca1b-4f82-e74d-4ae72fa6ce17"
798
+ },
799
+ "outputs": [],
800
+ "source": [
801
+ "import json\n",
802
+ "from sklearn.metrics import confusion_matrix\n",
803
+ "\n",
804
+ "# Assuming y_true and y_pred are your ground truth and predicted labels\n",
805
+ "conf_matrix = confusion_matrix(y_true, y_pred, labels=range(len(CLASSES)))\n",
806
+ "\n",
807
+ "# Convert confusion matrix to JSON format\n",
808
+ "def confusion_matrix_to_json(conf_matrix, classes):\n",
809
+ " conf_matrix_dict = {\n",
810
+ " \"classes\": classes,\n",
811
+ " \"matrix\": conf_matrix.tolist()\n",
812
+ " }\n",
813
+ " return json.dumps(conf_matrix_dict, indent=4)\n",
814
+ "\n",
815
+ "json_output = confusion_matrix_to_json(conf_matrix, CLASSES)\n",
816
+ "print(json_output)\n"
817
+ ]
818
+ },
819
+ {
820
+ "cell_type": "markdown",
821
+ "metadata": {
822
+ "id": "8rR2naNXzEB0"
823
+ },
824
+ "source": [
825
+ "## Save fine-tuned model on hard drive"
826
+ ]
827
+ },
828
+ {
829
+ "cell_type": "code",
830
+ "execution_count": null,
831
+ "metadata": {
832
+ "colab": {
833
+ "base_uri": "https://localhost:8080/"
834
+ },
835
+ "id": "Rdbmcv3TcIe8",
836
+ "outputId": "218d993c-414e-4682-86ab-c58db826ad0b"
837
+ },
838
+ "outputs": [],
839
+ "source": [
840
+ "peft_model.save_pretrained(\"/content/florence2-large-ft\")\n",
841
+ "processor.save_pretrained(\"/content/florence2-large-ft/\")\n",
842
+ "!ls -la /content/florence2-large/"
843
+ ]
844
+ },
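+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To reload the adapter later, attach the saved LoRA weights to a fresh base checkpoint with `PeftModel.from_pretrained` (a sketch, assuming the save path above):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from peft import PeftModel\n",
+ "\n",
+ "# Rebuild the base model, then load the saved adapter on top of it\n",
+ "base = AutoModelForCausalLM.from_pretrained(CHECKPOINT, trust_remote_code=True).to(DEVICE)\n",
+ "reloaded = PeftModel.from_pretrained(base, \"/content/florence2-large-ft\")\n",
+ "reloaded.eval()"
+ ]
+ },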
845
+ {
846
+ "cell_type": "code",
847
+ "execution_count": null,
848
+ "metadata": {
849
+ "colab": {
850
+ "base_uri": "https://localhost:8080/",
851
+ "height": 117,
852
+ "referenced_widgets": [
853
+ "a9289c461dbe4cecb9e691766eecbe37",
854
+ "bf1811c46899427d920af49b20bb8ee2",
855
+ "0e385484fed6499eaf9a563a41623acc",
856
+ "95d542586c2c4726b3ec10ea4eb06011",
857
+ "e8a9f64dc4ad458e83580fc708514e7a",
858
+ "079e0c3428ec4572bcfe845d345f7318",
859
+ "ece7b911ecda44a7ac3bad2f5229b8be",
860
+ "1c18c58c22f94d12b0f61c034ee8dd7a",
861
+ "9118b17e1e9a4949af081f13717a34db",
862
+ "e154937e4db249ca93eeb86652858455",
863
+ "8bfeb74ca14f4f2d8cbf36081f73b7a3",
864
+ "2bdadfa556454c219aa6ebb1ddcf3485",
865
+ "9710de1c4e54496fbc501a9056fd9af1",
866
+ "83742b470564441c93a8a5a88a41d7d7",
867
+ "5c2272cd6d2d417f9f0c6fee95ae3d83",
868
+ "ba460d52106641a8a0168e1610933fe4",
869
+ "f4d85a0a6d9043c1afe1cdab71b0bf75",
870
+ "e5d93b4839f7415fbbb6263f51b0e5e1",
871
+ "62c3411212544a2aadbf4af460bce439",
872
+ "37eff0dac8144865bfc319b1954b2968",
873
+ "e89e8d9b8c364a34a4f2e7981ffeed85",
874
+ "c9a571522381476c9ff997ac776992c3"
875
+ ]
876
+ },
877
+ "id": "fyP9ZW2bf1te",
878
+ "outputId": "80405e7f-6b36-4366-a00c-5d65e4d9e96a"
879
+ },
880
+ "outputs": [],
881
+ "source": [
882
+ "# Push the model to the Hub with your desired name\n",
883
+ "peft_model.push_to_hub(\"dwb2023/florence2-large-bccd-base-ft\")\n",
884
+ "processor.push_to_hub(\"dwb2023/florence2-large-bccd-base-ft\")"
885
+ ]
886
+ },
887
+ {
888
+ "cell_type": "code",
889
+ "execution_count": null,
890
+ "metadata": {
891
+ "id": "M_DdE_NOfJLG"
892
+ },
893
+ "outputs": [],
894
+ "source": []
895
+ }
896
+ ],
897
+ "metadata": {
898
+ "accelerator": "GPU",
899
+ "colab": {
900
+ "gpuType": "A100",
901
+ "machine_shape": "hm",
902
+ "provenance": []
903
+ },
904
+ "kernelspec": {
905
+ "display_name": "Python 3",
906
+ "name": "python3"
907
+ },
908
+ "language_info": {
909
+ "name": "python"
910
+ },
911
+ "widgets": {
912
+ "application/vnd.jupyter.widget-state+json": {
913
+ "079e0c3428ec4572bcfe845d345f7318": {
914
+ "model_module": "@jupyter-widgets/base",
915
+ "model_module_version": "1.2.0",
916
+ "model_name": "LayoutModel",
917
+ "state": {
918
+ "_model_module": "@jupyter-widgets/base",
919
+ "_model_module_version": "1.2.0",
920
+ "_model_name": "LayoutModel",
921
+ "_view_count": null,
922
+ "_view_module": "@jupyter-widgets/base",
923
+ "_view_module_version": "1.2.0",
924
+ "_view_name": "LayoutView",
925
+ "align_content": null,
926
+ "align_items": null,
927
+ "align_self": null,
928
+ "border": null,
929
+ "bottom": null,
930
+ "display": null,
931
+ "flex": null,
932
+ "flex_flow": null,
933
+ "grid_area": null,
934
+ "grid_auto_columns": null,
935
+ "grid_auto_flow": null,
936
+ "grid_auto_rows": null,
937
+ "grid_column": null,
938
+ "grid_gap": null,
939
+ "grid_row": null,
940
+ "grid_template_areas": null,
941
+ "grid_template_columns": null,
942
+ "grid_template_rows": null,
943
+ "height": null,
944
+ "justify_content": null,
945
+ "justify_items": null,
946
+ "left": null,
947
+ "margin": null,
948
+ "max_height": null,
949
+ "max_width": null,
950
+ "min_height": null,
951
+ "min_width": null,
952
+ "object_fit": null,
953
+ "object_position": null,
954
+ "order": null,
955
+ "overflow": null,
956
+ "overflow_x": null,
957
+ "overflow_y": null,
958
+ "padding": null,
959
+ "right": null,
960
+ "top": null,
961
+ "visibility": null,
962
+ "width": null
963
+ }
964
+ },
965
+ "0e385484fed6499eaf9a563a41623acc": {
966
+ "model_module": "@jupyter-widgets/controls",
967
+ "model_module_version": "1.5.0",
968
+ "model_name": "FloatProgressModel",
969
+ "state": {
970
+ "_dom_classes": [],
971
+ "_model_module": "@jupyter-widgets/controls",
972
+ "_model_module_version": "1.5.0",
973
+ "_model_name": "FloatProgressModel",
974
+ "_view_count": null,
975
+ "_view_module": "@jupyter-widgets/controls",
976
+ "_view_module_version": "1.5.0",
977
+ "_view_name": "ProgressView",
978
+ "bar_style": "success",
979
+ "description": "",
980
+ "description_tooltip": null,
981
+ "layout": "IPY_MODEL_1c18c58c22f94d12b0f61c034ee8dd7a",
982
+ "max": 5174,
983
+ "min": 0,
984
+ "orientation": "horizontal",
985
+ "style": "IPY_MODEL_9118b17e1e9a4949af081f13717a34db",
986
+ "value": 5174
987
+ }
988
+ },
989
+ "1c18c58c22f94d12b0f61c034ee8dd7a": {
990
+ "model_module": "@jupyter-widgets/base",
991
+ "model_module_version": "1.2.0",
992
+ "model_name": "LayoutModel",
993
+ "state": {
994
+ "_model_module": "@jupyter-widgets/base",
995
+ "_model_module_version": "1.2.0",
996
+ "_model_name": "LayoutModel",
997
+ "_view_count": null,
998
+ "_view_module": "@jupyter-widgets/base",
999
+ "_view_module_version": "1.2.0",
1000
+ "_view_name": "LayoutView",
1001
+ "align_content": null,
1002
+ "align_items": null,
1003
+ "align_self": null,
1004
+ "border": null,
1005
+ "bottom": null,
1006
+ "display": null,
1007
+ "flex": null,
1008
+ "flex_flow": null,
1009
+ "grid_area": null,
1010
+ "grid_auto_columns": null,
1011
+ "grid_auto_flow": null,
1012
+ "grid_auto_rows": null,
1013
+ "grid_column": null,
1014
+ "grid_gap": null,
1015
+ "grid_row": null,
1016
+ "grid_template_areas": null,
1017
+ "grid_template_columns": null,
1018
+ "grid_template_rows": null,
1019
+ "height": null,
1020
+ "justify_content": null,
1021
+ "justify_items": null,
1022
+ "left": null,
1023
+ "margin": null,
1024
+ "max_height": null,
1025
+ "max_width": null,
1026
+ "min_height": null,
1027
+ "min_width": null,
1028
+ "object_fit": null,
1029
+ "object_position": null,
1030
+ "order": null,
1031
+ "overflow": null,
1032
+ "overflow_x": null,
1033
+ "overflow_y": null,
1034
+ "padding": null,
1035
+ "right": null,
1036
+ "top": null,
1037
+ "visibility": null,
1038
+ "width": null
1039
+ }
1040
+ },
1041
+ "2bdadfa556454c219aa6ebb1ddcf3485": {
1042
+ "model_module": "@jupyter-widgets/controls",
1043
+ "model_module_version": "1.5.0",
1044
+ "model_name": "HBoxModel",
1045
+ "state": {
1046
+ "_dom_classes": [],
1047
+ "_model_module": "@jupyter-widgets/controls",
1048
+ "_model_module_version": "1.5.0",
1049
+ "_model_name": "HBoxModel",
1050
+ "_view_count": null,
1051
+ "_view_module": "@jupyter-widgets/controls",
1052
+ "_view_module_version": "1.5.0",
1053
+ "_view_name": "HBoxView",
1054
+ "box_style": "",
1055
+ "children": [
1056
+ "IPY_MODEL_9710de1c4e54496fbc501a9056fd9af1",
1057
+ "IPY_MODEL_83742b470564441c93a8a5a88a41d7d7",
1058
+ "IPY_MODEL_5c2272cd6d2d417f9f0c6fee95ae3d83"
1059
+ ],
1060
+ "layout": "IPY_MODEL_ba460d52106641a8a0168e1610933fe4"
1061
+ }
1062
+ },
1063
+ "37eff0dac8144865bfc319b1954b2968": {
1064
+ "model_module": "@jupyter-widgets/controls",
1065
+ "model_module_version": "1.5.0",
1066
+ "model_name": "ProgressStyleModel",
1067
+ "state": {
1068
+ "_model_module": "@jupyter-widgets/controls",
1069
+ "_model_module_version": "1.5.0",
1070
+ "_model_name": "ProgressStyleModel",
1071
+ "_view_count": null,
1072
+ "_view_module": "@jupyter-widgets/base",
1073
+ "_view_module_version": "1.2.0",
1074
+ "_view_name": "StyleView",
1075
+ "bar_color": null,
1076
+ "description_width": ""
1077
+ }
1078
+ },
1079
+ "5c2272cd6d2d417f9f0c6fee95ae3d83": {
1080
+ "model_module": "@jupyter-widgets/controls",
1081
+ "model_module_version": "1.5.0",
1082
+ "model_name": "HTMLModel",
1083
+ "state": {
1084
+ "_dom_classes": [],
1085
+ "_model_module": "@jupyter-widgets/controls",
1086
+ "_model_module_version": "1.5.0",
1087
+ "_model_name": "HTMLModel",
1088
+ "_view_count": null,
1089
+ "_view_module": "@jupyter-widgets/controls",
1090
+ "_view_module_version": "1.5.0",
1091
+ "_view_name": "HTMLView",
1092
+ "description": "",
1093
+ "description_tooltip": null,
1094
+ "layout": "IPY_MODEL_e89e8d9b8c364a34a4f2e7981ffeed85",
1095
+ "placeholder": "​",
1096
+ "style": "IPY_MODEL_c9a571522381476c9ff997ac776992c3",
1097
+ "value": " 16.6M/16.6M [00:01&lt;00:00, 18.1MB/s]"
1098
+ }
1099
+ },
1100
+ "62c3411212544a2aadbf4af460bce439": {
1101
+ "model_module": "@jupyter-widgets/base",
1102
+ "model_module_version": "1.2.0",
1103
+ "model_name": "LayoutModel",
1104
+ "state": {
1105
+ "_model_module": "@jupyter-widgets/base",
1106
+ "_model_module_version": "1.2.0",
1107
+ "_model_name": "LayoutModel",
1108
+ "_view_count": null,
1109
+ "_view_module": "@jupyter-widgets/base",
1110
+ "_view_module_version": "1.2.0",
1111
+ "_view_name": "LayoutView",
1112
+ "align_content": null,
1113
+ "align_items": null,
1114
+ "align_self": null,
1115
+ "border": null,
1116
+ "bottom": null,
1117
+ "display": null,
1118
+ "flex": null,
1119
+ "flex_flow": null,
1120
+ "grid_area": null,
1121
+ "grid_auto_columns": null,
1122
+ "grid_auto_flow": null,
1123
+ "grid_auto_rows": null,
1124
+ "grid_column": null,
1125
+ "grid_gap": null,
1126
+ "grid_row": null,
1127
+ "grid_template_areas": null,
1128
+ "grid_template_columns": null,
1129
+ "grid_template_rows": null,
1130
+ "height": null,
1131
+ "justify_content": null,
1132
+ "justify_items": null,
1133
+ "left": null,
1134
+ "margin": null,
1135
+ "max_height": null,
1136
+ "max_width": null,
1137
+ "min_height": null,
1138
+ "min_width": null,
1139
+ "object_fit": null,
1140
+ "object_position": null,
1141
+ "order": null,
1142
+ "overflow": null,
1143
+ "overflow_x": null,
1144
+ "overflow_y": null,
1145
+ "padding": null,
1146
+ "right": null,
1147
+ "top": null,
1148
+ "visibility": null,
1149
+ "width": null
1150
+ }
1151
+ },
1152
+ "83742b470564441c93a8a5a88a41d7d7": {
1153
+ "model_module": "@jupyter-widgets/controls",
1154
+ "model_module_version": "1.5.0",
1155
+ "model_name": "FloatProgressModel",
1156
+ "state": {
1157
+ "_dom_classes": [],
1158
+ "_model_module": "@jupyter-widgets/controls",
1159
+ "_model_module_version": "1.5.0",
1160
+ "_model_name": "FloatProgressModel",
1161
+ "_view_count": null,
1162
+ "_view_module": "@jupyter-widgets/controls",
1163
+ "_view_module_version": "1.5.0",
1164
+ "_view_name": "ProgressView",
1165
+ "bar_style": "success",
1166
+ "description": "",
1167
+ "description_tooltip": null,
1168
+ "layout": "IPY_MODEL_62c3411212544a2aadbf4af460bce439",
1169
+ "max": 16582384,
1170
+ "min": 0,
1171
+ "orientation": "horizontal",
1172
+ "style": "IPY_MODEL_37eff0dac8144865bfc319b1954b2968",
1173
+ "value": 16582384
1174
+ }
1175
+ },
1176
+ "8bfeb74ca14f4f2d8cbf36081f73b7a3": {
1177
+ "model_module": "@jupyter-widgets/controls",
1178
+ "model_module_version": "1.5.0",
1179
+ "model_name": "DescriptionStyleModel",
1180
+ "state": {
1181
+ "_model_module": "@jupyter-widgets/controls",
1182
+ "_model_module_version": "1.5.0",
1183
+ "_model_name": "DescriptionStyleModel",
1184
+ "_view_count": null,
1185
+ "_view_module": "@jupyter-widgets/base",
1186
+ "_view_module_version": "1.2.0",
1187
+ "_view_name": "StyleView",
1188
+ "description_width": ""
1189
+ }
1190
+ },
1191
+ "9118b17e1e9a4949af081f13717a34db": {
1192
+ "model_module": "@jupyter-widgets/controls",
1193
+ "model_module_version": "1.5.0",
1194
+ "model_name": "ProgressStyleModel",
1195
+ "state": {
1196
+ "_model_module": "@jupyter-widgets/controls",
1197
+ "_model_module_version": "1.5.0",
1198
+ "_model_name": "ProgressStyleModel",
1199
+ "_view_count": null,
1200
+ "_view_module": "@jupyter-widgets/base",
1201
+ "_view_module_version": "1.2.0",
1202
+ "_view_name": "StyleView",
1203
+ "bar_color": null,
1204
+ "description_width": ""
1205
+ }
1206
+ },
1207
+ "95d542586c2c4726b3ec10ea4eb06011": {
1208
+ "model_module": "@jupyter-widgets/controls",
1209
+ "model_module_version": "1.5.0",
1210
+ "model_name": "HTMLModel",
1211
+ "state": {
1212
+ "_dom_classes": [],
1213
+ "_model_module": "@jupyter-widgets/controls",
1214
+ "_model_module_version": "1.5.0",
1215
+ "_model_name": "HTMLModel",
1216
+ "_view_count": null,
1217
+ "_view_module": "@jupyter-widgets/controls",
1218
+ "_view_module_version": "1.5.0",
1219
+ "_view_name": "HTMLView",
1220
+ "description": "",
1221
+ "description_tooltip": null,
1222
+ "layout": "IPY_MODEL_e154937e4db249ca93eeb86652858455",
1223
+ "placeholder": "​",
1224
+ "style": "IPY_MODEL_8bfeb74ca14f4f2d8cbf36081f73b7a3",
1225
+ "value": " 5.17k/5.17k [00:00&lt;00:00, 424kB/s]"
1226
+ }
1227
+ },
1228
+ "9710de1c4e54496fbc501a9056fd9af1": {
1229
+ "model_module": "@jupyter-widgets/controls",
1230
+ "model_module_version": "1.5.0",
1231
+ "model_name": "HTMLModel",
1232
+ "state": {
1233
+ "_dom_classes": [],
1234
+ "_model_module": "@jupyter-widgets/controls",
1235
+ "_model_module_version": "1.5.0",
1236
+ "_model_name": "HTMLModel",
1237
+ "_view_count": null,
1238
+ "_view_module": "@jupyter-widgets/controls",
1239
+ "_view_module_version": "1.5.0",
1240
+ "_view_name": "HTMLView",
1241
+ "description": "",
1242
+ "description_tooltip": null,
1243
+ "layout": "IPY_MODEL_f4d85a0a6d9043c1afe1cdab71b0bf75",
1244
+ "placeholder": "​",
1245
+ "style": "IPY_MODEL_e5d93b4839f7415fbbb6263f51b0e5e1",
1246
+ "value": "adapter_model.safetensors: 100%"
1247
+ }
1248
+ },
1249
+ "a9289c461dbe4cecb9e691766eecbe37": {
1250
+ "model_module": "@jupyter-widgets/controls",
1251
+ "model_module_version": "1.5.0",
1252
+ "model_name": "HBoxModel",
1253
+ "state": {
1254
+ "_dom_classes": [],
1255
+ "_model_module": "@jupyter-widgets/controls",
1256
+ "_model_module_version": "1.5.0",
1257
+ "_model_name": "HBoxModel",
1258
+ "_view_count": null,
1259
+ "_view_module": "@jupyter-widgets/controls",
1260
+ "_view_module_version": "1.5.0",
1261
+ "_view_name": "HBoxView",
1262
+ "box_style": "",
1263
+ "children": [
1264
+ "IPY_MODEL_bf1811c46899427d920af49b20bb8ee2",
1265
+ "IPY_MODEL_0e385484fed6499eaf9a563a41623acc",
1266
+ "IPY_MODEL_95d542586c2c4726b3ec10ea4eb06011"
1267
+ ],
1268
+ "layout": "IPY_MODEL_e8a9f64dc4ad458e83580fc708514e7a"
1269
+ }
1270
+ },
1271
+ "ba460d52106641a8a0168e1610933fe4": {
1272
+ "model_module": "@jupyter-widgets/base",
1273
+ "model_module_version": "1.2.0",
1274
+ "model_name": "LayoutModel",
1275
+ "state": {
1276
+ "_model_module": "@jupyter-widgets/base",
1277
+ "_model_module_version": "1.2.0",
1278
+ "_model_name": "LayoutModel",
1279
+ "_view_count": null,
1280
+ "_view_module": "@jupyter-widgets/base",
1281
+ "_view_module_version": "1.2.0",
1282
+ "_view_name": "LayoutView",
1283
+ "align_content": null,
1284
+ "align_items": null,
1285
+ "align_self": null,
1286
+ "border": null,
1287
+ "bottom": null,
1288
+ "display": null,
1289
+ "flex": null,
1290
+ "flex_flow": null,
1291
+ "grid_area": null,
1292
+ "grid_auto_columns": null,
1293
+ "grid_auto_flow": null,
1294
+ "grid_auto_rows": null,
1295
+ "grid_column": null,
1296
+ "grid_gap": null,
1297
+ "grid_row": null,
1298
+ "grid_template_areas": null,
1299
+ "grid_template_columns": null,
1300
+ "grid_template_rows": null,
1301
+ "height": null,
1302
+ "justify_content": null,
1303
+ "justify_items": null,
1304
+ "left": null,
1305
+ "margin": null,
1306
+ "max_height": null,
1307
+ "max_width": null,
1308
+ "min_height": null,
1309
+ "min_width": null,
1310
+ "object_fit": null,
1311
+ "object_position": null,
1312
+ "order": null,
1313
+ "overflow": null,
1314
+ "overflow_x": null,
1315
+ "overflow_y": null,
1316
+ "padding": null,
1317
+ "right": null,
1318
+ "top": null,
1319
+ "visibility": null,
1320
+ "width": null
1321
+ }
1322
+ },
1323
+ "bf1811c46899427d920af49b20bb8ee2": {
1324
+ "model_module": "@jupyter-widgets/controls",
1325
+ "model_module_version": "1.5.0",
1326
+ "model_name": "HTMLModel",
1327
+ "state": {
1328
+ "_dom_classes": [],
1329
+ "_model_module": "@jupyter-widgets/controls",
1330
+ "_model_module_version": "1.5.0",
1331
+ "_model_name": "HTMLModel",
1332
+ "_view_count": null,
1333
+ "_view_module": "@jupyter-widgets/controls",
1334
+ "_view_module_version": "1.5.0",
1335
+ "_view_name": "HTMLView",
1336
+ "description": "",
1337
+ "description_tooltip": null,
1338
+ "layout": "IPY_MODEL_079e0c3428ec4572bcfe845d345f7318",
1339
+ "placeholder": "​",
1340
+ "style": "IPY_MODEL_ece7b911ecda44a7ac3bad2f5229b8be",
1341
+ "value": "README.md: 100%"
1342
+ }
1343
+ },
1344
+ "c9a571522381476c9ff997ac776992c3": {
1345
+ "model_module": "@jupyter-widgets/controls",
1346
+ "model_module_version": "1.5.0",
1347
+ "model_name": "DescriptionStyleModel",
1348
+ "state": {
1349
+ "_model_module": "@jupyter-widgets/controls",
1350
+ "_model_module_version": "1.5.0",
1351
+ "_model_name": "DescriptionStyleModel",
1352
+ "_view_count": null,
1353
+ "_view_module": "@jupyter-widgets/base",
1354
+ "_view_module_version": "1.2.0",
1355
+ "_view_name": "StyleView",
1356
+ "description_width": ""
1357
+ }
1358
+ },
1359
+ "e154937e4db249ca93eeb86652858455": {
1360
+ "model_module": "@jupyter-widgets/base",
1361
+ "model_module_version": "1.2.0",
1362
+ "model_name": "LayoutModel",
1363
+ "state": {
1364
+ "_model_module": "@jupyter-widgets/base",
1365
+ "_model_module_version": "1.2.0",
1366
+ "_model_name": "LayoutModel",
1367
+ "_view_count": null,
1368
+ "_view_module": "@jupyter-widgets/base",
1369
+ "_view_module_version": "1.2.0",
1370
+ "_view_name": "LayoutView",
1371
+ "align_content": null,
1372
+ "align_items": null,
1373
+ "align_self": null,
1374
+ "border": null,
1375
+ "bottom": null,
1376
+ "display": null,
1377
+ "flex": null,
1378
+ "flex_flow": null,
1379
+ "grid_area": null,
1380
+ "grid_auto_columns": null,
1381
+ "grid_auto_flow": null,
1382
+ "grid_auto_rows": null,
1383
+ "grid_column": null,
1384
+ "grid_gap": null,
1385
+ "grid_row": null,
1386
+ "grid_template_areas": null,
1387
+ "grid_template_columns": null,
1388
+ "grid_template_rows": null,
1389
+ "height": null,
1390
+ "justify_content": null,
1391
+ "justify_items": null,
1392
+ "left": null,
1393
+ "margin": null,
1394
+ "max_height": null,
1395
+ "max_width": null,
1396
+ "min_height": null,
1397
+ "min_width": null,
1398
+ "object_fit": null,
1399
+ "object_position": null,
1400
+ "order": null,
1401
+ "overflow": null,
1402
+ "overflow_x": null,
1403
+ "overflow_y": null,
1404
+ "padding": null,
1405
+ "right": null,
1406
+ "top": null,
1407
+ "visibility": null,
1408
+ "width": null
1409
+ }
1410
+ },
1411
+ "e5d93b4839f7415fbbb6263f51b0e5e1": {
1412
+ "model_module": "@jupyter-widgets/controls",
1413
+ "model_module_version": "1.5.0",
1414
+ "model_name": "DescriptionStyleModel",
1415
+ "state": {
1416
+ "_model_module": "@jupyter-widgets/controls",
1417
+ "_model_module_version": "1.5.0",
1418
+ "_model_name": "DescriptionStyleModel",
1419
+ "_view_count": null,
1420
+ "_view_module": "@jupyter-widgets/base",
1421
+ "_view_module_version": "1.2.0",
1422
+ "_view_name": "StyleView",
1423
+ "description_width": ""
1424
+ }
1425
+ },
1426
+ "e89e8d9b8c364a34a4f2e7981ffeed85": {
1427
+ "model_module": "@jupyter-widgets/base",
1428
+ "model_module_version": "1.2.0",
1429
+ "model_name": "LayoutModel",
1430
+ "state": {
1431
+ "_model_module": "@jupyter-widgets/base",
1432
+ "_model_module_version": "1.2.0",
1433
+ "_model_name": "LayoutModel",
1434
+ "_view_count": null,
1435
+ "_view_module": "@jupyter-widgets/base",
1436
+ "_view_module_version": "1.2.0",
1437
+ "_view_name": "LayoutView",
1438
+ "align_content": null,
1439
+ "align_items": null,
1440
+ "align_self": null,
1441
+ "border": null,
1442
+ "bottom": null,
1443
+ "display": null,
1444
+ "flex": null,
1445
+ "flex_flow": null,
1446
+ "grid_area": null,
1447
+ "grid_auto_columns": null,
1448
+ "grid_auto_flow": null,
1449
+ "grid_auto_rows": null,
1450
+ "grid_column": null,
1451
+ "grid_gap": null,
1452
+ "grid_row": null,
1453
+ "grid_template_areas": null,
1454
+ "grid_template_columns": null,
1455
+ "grid_template_rows": null,
1456
+ "height": null,
1457
+ "justify_content": null,
1458
+ "justify_items": null,
1459
+ "left": null,
1460
+ "margin": null,
1461
+ "max_height": null,
1462
+ "max_width": null,
1463
+ "min_height": null,
1464
+ "min_width": null,
1465
+ "object_fit": null,
1466
+ "object_position": null,
1467
+ "order": null,
1468
+ "overflow": null,
1469
+ "overflow_x": null,
1470
+ "overflow_y": null,
1471
+ "padding": null,
1472
+ "right": null,
1473
+ "top": null,
1474
+ "visibility": null,
1475
+ "width": null
1476
+ }
1477
+ },
1478
+ "e8a9f64dc4ad458e83580fc708514e7a": {
1479
+ "model_module": "@jupyter-widgets/base",
1480
+ "model_module_version": "1.2.0",
1481
+ "model_name": "LayoutModel",
1482
+ "state": {
1483
+ "_model_module": "@jupyter-widgets/base",
1484
+ "_model_module_version": "1.2.0",
1485
+ "_model_name": "LayoutModel",
1486
+ "_view_count": null,
1487
+ "_view_module": "@jupyter-widgets/base",
1488
+ "_view_module_version": "1.2.0",
1489
+ "_view_name": "LayoutView",
1490
+ "align_content": null,
1491
+ "align_items": null,
1492
+ "align_self": null,
1493
+ "border": null,
1494
+ "bottom": null,
1495
+ "display": null,
1496
+ "flex": null,
1497
+ "flex_flow": null,
1498
+ "grid_area": null,
1499
+ "grid_auto_columns": null,
1500
+ "grid_auto_flow": null,
1501
+ "grid_auto_rows": null,
1502
+ "grid_column": null,
1503
+ "grid_gap": null,
1504
+ "grid_row": null,
1505
+ "grid_template_areas": null,
1506
+ "grid_template_columns": null,
1507
+ "grid_template_rows": null,
1508
+ "height": null,
1509
+ "justify_content": null,
1510
+ "justify_items": null,
1511
+ "left": null,
1512
+ "margin": null,
1513
+ "max_height": null,
1514
+ "max_width": null,
1515
+ "min_height": null,
1516
+ "min_width": null,
1517
+ "object_fit": null,
1518
+ "object_position": null,
1519
+ "order": null,
1520
+ "overflow": null,
1521
+ "overflow_x": null,
1522
+ "overflow_y": null,
1523
+ "padding": null,
1524
+ "right": null,
1525
+ "top": null,
1526
+ "visibility": null,
1527
+ "width": null
1528
+ }
1529
+ },
1530
+ "ece7b911ecda44a7ac3bad2f5229b8be": {
1531
+ "model_module": "@jupyter-widgets/controls",
1532
+ "model_module_version": "1.5.0",
1533
+ "model_name": "DescriptionStyleModel",
1534
+ "state": {
1535
+ "_model_module": "@jupyter-widgets/controls",
1536
+ "_model_module_version": "1.5.0",
1537
+ "_model_name": "DescriptionStyleModel",
1538
+ "_view_count": null,
1539
+ "_view_module": "@jupyter-widgets/base",
1540
+ "_view_module_version": "1.2.0",
1541
+ "_view_name": "StyleView",
1542
+ "description_width": ""
1543
+ }
1544
+ },
1545
+ "f4d85a0a6d9043c1afe1cdab71b0bf75": {
1546
+ "model_module": "@jupyter-widgets/base",
1547
+ "model_module_version": "1.2.0",
1548
+ "model_name": "LayoutModel",
1549
+ "state": {
1550
+ "_model_module": "@jupyter-widgets/base",
1551
+ "_model_module_version": "1.2.0",
1552
+ "_model_name": "LayoutModel",
1553
+ "_view_count": null,
1554
+ "_view_module": "@jupyter-widgets/base",
1555
+ "_view_module_version": "1.2.0",
1556
+ "_view_name": "LayoutView",
1557
+ "align_content": null,
1558
+ "align_items": null,
1559
+ "align_self": null,
1560
+ "border": null,
1561
+ "bottom": null,
1562
+ "display": null,
1563
+ "flex": null,
1564
+ "flex_flow": null,
1565
+ "grid_area": null,
1566
+ "grid_auto_columns": null,
1567
+ "grid_auto_flow": null,
1568
+ "grid_auto_rows": null,
1569
+ "grid_column": null,
1570
+ "grid_gap": null,
1571
+ "grid_row": null,
1572
+ "grid_template_areas": null,
1573
+ "grid_template_columns": null,
1574
+ "grid_template_rows": null,
1575
+ "height": null,
1576
+ "justify_content": null,
1577
+ "justify_items": null,
1578
+ "left": null,
1579
+ "margin": null,
1580
+ "max_height": null,
1581
+ "max_width": null,
1582
+ "min_height": null,
1583
+ "min_width": null,
1584
+ "object_fit": null,
1585
+ "object_position": null,
1586
+ "order": null,
1587
+ "overflow": null,
1588
+ "overflow_x": null,
1589
+ "overflow_y": null,
1590
+ "padding": null,
1591
+ "right": null,
1592
+ "top": null,
1593
+ "visibility": null,
1594
+ "width": null
1595
+ }
1596
+ }
1597
+ }
1598
+ }
1599
+ },
1600
+ "nbformat": 4,
1601
+ "nbformat_minor": 0
1602
+ }