stevhliu (HF staff) committed on
Commit
8fe6c36
1 Parent(s): 0b57438

Upload peft_lora_token_cls.ipynb

Files changed (1)
  1. peft_lora_token_cls.ipynb +1369 -0
peft_lora_token_cls.ipynb ADDED
@@ -0,0 +1,1369 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "colab_type": "text",
7
+ "id": "view-in-github"
8
+ },
9
+ "source": [
10
+ "<a href=\"https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "metadata": {
16
+ "id": "ngqdEv0rP01q"
17
+ },
18
+ "source": [
19
+ "## Introduction\n",
20
+ "\n",
21
+ "In this notebook, we are going to fine-tune the LayoutLM model by Microsoft Research on the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset, which is a collection of annotated form documents. The goal of our model is to learn the annotations of a number of labels (\"question\", \"answer\", \"header\" and \"other\") on those forms, such that it can be used to annotate unseen forms in the future.\n",
22
+ "\n",
23
+ "* Original LayoutLM paper: https://arxiv.org/abs/1912.13318\n",
24
+ "\n",
25
+ "* Original FUNSD paper: https://arxiv.org/abs/1905.13538\n"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "markdown",
30
+ "metadata": {
31
+ "id": "6K4S2s33ebY0"
32
+ },
33
+ "source": [
34
+ "## Install libraries\n",
35
+ "\n",
36
+ "Currently you have to first install the `unilm` package, and then the `transformers` package (which updates the outdated `transformers` package that is included in the `unilm` package). The reason we also install the `unilm` package is because we need its preprocessing files. I've forked it, and removed some statements which introduced some issues."
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": null,
42
+ "metadata": {
43
+ "colab": {
44
+ "base_uri": "https://localhost:8080/",
45
+ "height": 1000
46
+ },
47
+ "id": "5cngOTr6SqEf",
48
+ "outputId": "6c7a2f76-682b-4f93-a3db-59ab010e5ffe"
49
+ },
50
+ "outputs": [],
51
+ "source": [
52
+ "# ! rm -r unilm\n",
53
+ "# ! pip install unilm"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "markdown",
58
+ "metadata": {
59
+ "id": "RGMkEG5aRB0D"
60
+ },
61
+ "source": [
62
+ "## Getting the data\n",
63
+ "\n",
64
+ "Here we download the data of the [FUNSD dataset](https://guillaumejaume.github.io/FUNSD/) from the web. This results in a directory called \"data\" being created, which has 2 subdirectories, one for training and one for testing. Each of those has 2 subdirectories in turn, one containing the images as png files and one containing the annotations in json format."
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": null,
70
+ "metadata": {
71
+ "colab": {
72
+ "base_uri": "https://localhost:8080/"
73
+ },
74
+ "id": "DTFnEZagQm4v",
75
+ "outputId": "97ce03ba-a6bb-4444-8eba-77eceece44e0"
76
+ },
77
+ "outputs": [],
78
+ "source": [
79
+ "# ! wget https://guillaumejaume.github.io/FUNSD/dataset.zip\n",
80
+ "# ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX"
81
+ ]
82
+ },
83
+ {
84
+ "cell_type": "markdown",
85
+ "metadata": {
86
+ "id": "UrNMR64LsJXm"
87
+ },
88
+ "source": [
89
+ "Let's take a look at a training example. For this, we are going to use PIL (Python Image Library)."
90
+ ]
91
+ },
92
+ {
93
+ "cell_type": "code",
94
+ "execution_count": null,
95
+ "metadata": {
96
+ "colab": {
97
+ "base_uri": "https://localhost:8080/",
98
+ "height": 1000
99
+ },
100
+ "id": "eG-eGcj3sNPs",
101
+ "outputId": "69ead0ea-15d6-4d5e-af61-a99a7533d31b"
102
+ },
103
+ "outputs": [],
104
+ "source": [
105
+ "from PIL import Image, ImageDraw, ImageFont\n",
106
+ "import os\n",
107
+ "\n",
108
+ "base_path = \"/home/sourab/temp/data/dataset\"\n",
109
+ "\n",
110
+ "image = Image.open(os.path.join(base_path, \"training_data/images/0000971160.png\"))\n",
111
+ "image = image.convert(\"RGB\")\n",
112
+ "image"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "markdown",
117
+ "metadata": {
118
+ "id": "uAVffmnZyUvw"
119
+ },
120
+ "source": [
121
+ "Now let's plot its corresponding annotations. Basically, if you type `data['form']`, you get a list of all general annotations. Each general annotation has a label, a bounding box, and one or more words, which in also have their own bounding box. The bounding boxes are in [xleft, ytop, xright, ybottom] format.\n",
122
+ " "
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": null,
128
+ "metadata": {
129
+ "colab": {
130
+ "base_uri": "https://localhost:8080/"
131
+ },
132
+ "id": "JPKkuJQ4sdZc",
133
+ "outputId": "c95bf306-98bb-4480-cc6b-ebb3aea548b3"
134
+ },
135
+ "outputs": [],
136
+ "source": [
137
+ "import json\n",
138
+ "\n",
139
+ "with open(os.path.join(base_path, \"training_data/annotations/0000971160.json\")) as f:\n",
140
+ " data = json.load(f)\n",
141
+ "\n",
142
+ "for annotation in data[\"form\"]:\n",
143
+ " print(annotation)"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "markdown",
148
+ "metadata": {
149
+ "id": "Hs4L3S5a2Gfb"
150
+ },
151
+ "source": [
152
+ "The PIL library has a handy ImageDraw module, which -you guessed it- allows to draw things (such as rectangles) on an image:"
153
+ ]
154
+ },
155
+ {
156
+ "cell_type": "code",
157
+ "execution_count": null,
158
+ "metadata": {
159
+ "colab": {
160
+ "base_uri": "https://localhost:8080/",
161
+ "height": 1000
162
+ },
163
+ "id": "gWaHFM_LtKPP",
164
+ "outputId": "c498e560-035f-4170-b0b9-85ba3956711c"
165
+ },
166
+ "outputs": [],
167
+ "source": [
168
+ "draw = ImageDraw.Draw(image, \"RGBA\")\n",
169
+ "\n",
170
+ "font = ImageFont.load_default()\n",
171
+ "\n",
172
+ "label2color = {\"question\": \"blue\", \"answer\": \"green\", \"header\": \"orange\", \"other\": \"violet\"}\n",
173
+ "\n",
174
+ "for annotation in data[\"form\"]:\n",
175
+ " label = annotation[\"label\"]\n",
176
+ " general_box = annotation[\"box\"]\n",
177
+ " draw.rectangle(general_box, outline=label2color[label], width=2)\n",
178
+ " draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font)\n",
179
+ " words = annotation[\"words\"]\n",
180
+ " for word in words:\n",
181
+ " box = word[\"box\"]\n",
182
+ " draw.rectangle(box, outline=label2color[label], width=1)\n",
183
+ "\n",
184
+ "image"
185
+ ]
186
+ },
187
+ {
188
+ "cell_type": "markdown",
189
+ "metadata": {
190
+ "id": "uyWQNLSCRJN7"
191
+ },
192
+ "source": [
193
+ "## Preprocessing the data\n",
194
+ "\n",
195
+ "Next, we need to turn the document images into individual tokens and corresponding labels (BIOES format, see further). We do this both for the training and test datasets. Make sure to run this from the `/content` directory:"
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": 4,
201
+ "metadata": {
202
+ "colab": {
203
+ "base_uri": "https://localhost:8080/"
204
+ },
205
+ "id": "4DWRyOR9RuY6",
206
+ "outputId": "4215a24b-8049-4b1a-a23f-5aaa48e14083"
207
+ },
208
+ "outputs": [],
209
+ "source": [
210
+ "# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/training_data/annotations \\\n",
211
+ "# --data_split train \\\n",
212
+ "# --output_dir data \\\n",
213
+ "# --model_name_or_path microsoft/layoutlm-base-uncased \\\n",
214
+ "# --max_len 510\n",
215
+ "\n",
216
+ "# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/testing_data/annotations \\\n",
217
+ "# --data_split test \\\n",
218
+ "# --output_dir data \\\n",
219
+ "# --model_name_or_path microsoft/layoutlm-base-uncased \\\n",
220
+ "# --max_len 510"
221
+ ]
222
+ },
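+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For reference, LayoutLM expects bounding boxes normalized to a 0-1000 scale relative to the page size. Below is a minimal sketch of the kind of normalization the preprocessing script applies (assuming pixel coordinates in [x0, y0, x1, y1] format):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch (not the preprocessing script itself): scale a pixel box to the 0-1000 range LayoutLM expects\n",
+ "def normalize_box(box, width, height):\n",
+ "    return [\n",
+ "        int(1000 * box[0] / width),\n",
+ "        int(1000 * box[1] / height),\n",
+ "        int(1000 * box[2] / width),\n",
+ "        int(1000 * box[3] / height),\n",
+ "    ]\n",
+ "\n",
+ "\n",
+ "# example: a word box on a 762x1000 pixel page\n",
+ "print(normalize_box([110, 170, 289, 195], 762, 1000))"
+ ]
+ },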
223
+ {
224
+ "cell_type": "markdown",
225
+ "metadata": {
226
+ "id": "gc4Cu0ZyO5M_"
227
+ },
228
+ "source": [
229
+ "Next, we create a labels.txt file that contains the unique labels of the FUNSD dataset:"
230
+ ]
231
+ },
232
+ {
233
+ "cell_type": "code",
234
+ "execution_count": 5,
235
+ "metadata": {
236
+ "id": "8iGOU0s3UR2u"
237
+ },
238
+ "outputs": [],
239
+ "source": [
240
+ "# ! cat data/train.txt | cut -d$'\\t' -f 2 | grep -v \"^$\"| sort | uniq > data/labels.txt"
241
+ ]
242
+ },
243
+ {
244
+ "cell_type": "markdown",
245
+ "metadata": {
246
+ "id": "mC9FhkG9U8yg"
247
+ },
248
+ "source": [
249
+ "## Define a PyTorch dataset\n",
250
+ "\n",
251
+ "First, we create a list containing the unique labels based on `data/labels.txt` (run this from the content directory):"
252
+ ]
253
+ },
254
+ {
255
+ "cell_type": "code",
256
+ "execution_count": 6,
257
+ "metadata": {
258
+ "id": "675rRa0QXnMp"
259
+ },
260
+ "outputs": [],
261
+ "source": [
262
+ "from torch.nn import CrossEntropyLoss\n",
263
+ "\n",
264
+ "\n",
265
+ "def get_labels(path):\n",
266
+ " with open(path, \"r\") as f:\n",
267
+ " labels = f.read().splitlines()\n",
268
+ " if \"O\" not in labels:\n",
269
+ " labels = [\"O\"] + labels\n",
270
+ " return labels\n",
271
+ "\n",
272
+ "\n",
273
+ "labels = get_labels(\"data/labels.txt\")\n",
274
+ "num_labels = len(labels)\n",
275
+ "label_map = {i: label for i, label in enumerate(labels)}\n",
276
+ "# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later\n",
277
+ "pad_token_label_id = CrossEntropyLoss().ignore_index"
278
+ ]
279
+ },
280
+ {
281
+ "cell_type": "markdown",
282
+ "metadata": {
283
+ "id": "kZ2LGEsez2u2"
284
+ },
285
+ "source": [
286
+ "We can see that the dataset uses the so-called BIOES annotation scheme to annotate the tokens. This means that a given token can be either at the beginning (B), inside (I), outside (O), at the end (E) or start (S) of a given entity. Entities include ANSWER, QUESTION, HEADER and OTHER: "
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": 7,
292
+ "metadata": {
293
+ "colab": {
294
+ "base_uri": "https://localhost:8080/"
295
+ },
296
+ "id": "_-qXLkP9Yq_L",
297
+ "outputId": "32ab46a4-4cf0-400c-816b-570f950035ec"
298
+ },
299
+ "outputs": [
300
+ {
301
+ "name": "stdout",
302
+ "output_type": "stream",
303
+ "text": [
304
+ "['B-ANSWER', 'B-HEADER', 'B-QUESTION', 'E-ANSWER', 'E-HEADER', 'E-QUESTION', 'I-ANSWER', 'I-HEADER', 'I-QUESTION', 'O', 'S-ANSWER', 'S-HEADER', 'S-QUESTION']\n"
305
+ ]
306
+ }
307
+ ],
308
+ "source": [
309
+ "print(labels)"
310
+ ]
311
+ },
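+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a purely illustrative example (not taken from the dataset), a multi-word question is tagged with B-/I-/E- tags, while a single-word answer gets an S- tag:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative only: BIOES tags for a 3-word question followed by a 1-word answer\n",
+ "example_words = [\"Date\", \"of\", \"Birth:\", \"1990\"]\n",
+ "example_tags = [\"B-QUESTION\", \"I-QUESTION\", \"E-QUESTION\", \"S-ANSWER\"]\n",
+ "for word, tag in zip(example_words, example_tags):\n",
+ "    print(f\"{word:10} -> {tag}\")"
+ ]
+ },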
312
+ {
313
+ "cell_type": "markdown",
314
+ "metadata": {
315
+ "id": "9_ck0ZFfZInR"
316
+ },
317
+ "source": [
318
+ "Next, we can create a PyTorch dataset and corresponding dataloader (both for training and evaluation):"
319
+ ]
320
+ },
321
+ {
322
+ "cell_type": "code",
323
+ "execution_count": 8,
324
+ "metadata": {},
325
+ "outputs": [],
326
+ "source": [
327
+ "import logging\n",
328
+ "import os\n",
329
+ "\n",
330
+ "import torch\n",
331
+ "from torch.utils.data import Dataset\n",
332
+ "\n",
333
+ "logger = logging.getLogger(__name__)\n",
334
+ "\n",
335
+ "\n",
336
+ "class FunsdDataset(Dataset):\n",
337
+ " def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):\n",
338
+ " if args.local_rank not in [-1, 0] and mode == \"train\":\n",
339
+ " torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n",
340
+ "\n",
341
+ " # Load data features from cache or dataset file\n",
342
+ " cached_features_file = os.path.join(\n",
343
+ " args.data_dir,\n",
344
+ " \"cached_{}_{}_{}\".format(\n",
345
+ " mode,\n",
346
+ " list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n",
347
+ " str(args.max_seq_length),\n",
348
+ " ),\n",
349
+ " )\n",
350
+ " if os.path.exists(cached_features_file) and not args.overwrite_cache:\n",
351
+ " logger.info(\"Loading features from cached file %s\", cached_features_file)\n",
352
+ " features = torch.load(cached_features_file)\n",
353
+ " else:\n",
354
+ " logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n",
355
+ " examples = read_examples_from_file(args.data_dir, mode)\n",
356
+ " features = convert_examples_to_features(\n",
357
+ " examples,\n",
358
+ " labels,\n",
359
+ " args.max_seq_length,\n",
360
+ " tokenizer,\n",
361
+ " cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n",
362
+ " # xlnet has a cls token at the end\n",
363
+ " cls_token=tokenizer.cls_token,\n",
364
+ " cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n",
365
+ " sep_token=tokenizer.sep_token,\n",
366
+ " sep_token_extra=bool(args.model_type in [\"roberta\"]),\n",
367
+ " # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n",
368
+ " pad_on_left=bool(args.model_type in [\"xlnet\"]),\n",
369
+ " # pad on the left for xlnet\n",
370
+ " pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n",
371
+ " pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n",
372
+ " pad_token_label_id=pad_token_label_id,\n",
373
+ " )\n",
374
+ " # if args.local_rank in [-1, 0]:\n",
375
+ " # logger.info(\"Saving features into cached file %s\", cached_features_file)\n",
376
+ " # torch.save(features, cached_features_file)\n",
377
+ "\n",
378
+ " if args.local_rank == 0 and mode == \"train\":\n",
379
+ " torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n",
380
+ "\n",
381
+ " self.features = features\n",
382
+ " # Convert to Tensors and build dataset\n",
383
+ " self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n",
384
+ " self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n",
385
+ " self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n",
386
+ " self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n",
387
+ " self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)\n",
388
+ "\n",
389
+ " def __len__(self):\n",
390
+ " return len(self.features)\n",
391
+ "\n",
392
+ " def __getitem__(self, index):\n",
393
+ " return (\n",
394
+ " self.all_input_ids[index],\n",
395
+ " self.all_input_mask[index],\n",
396
+ " self.all_segment_ids[index],\n",
397
+ " self.all_label_ids[index],\n",
398
+ " self.all_bboxes[index],\n",
399
+ " )\n",
400
+ "\n",
401
+ "\n",
402
+ "class InputExample(object):\n",
403
+ " \"\"\"A single training/test example for token classification.\"\"\"\n",
404
+ "\n",
405
+ " def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):\n",
406
+ " \"\"\"Constructs a InputExample.\n",
407
+ "\n",
408
+ " Args:\n",
409
+ " guid: Unique id for the example.\n",
410
+ " words: list. The words of the sequence.\n",
411
+ " labels: (Optional) list. The labels for each word of the sequence. This should be\n",
412
+ " specified for train and dev examples, but not for test examples.\n",
413
+ " \"\"\"\n",
414
+ " self.guid = guid\n",
415
+ " self.words = words\n",
416
+ " self.labels = labels\n",
417
+ " self.boxes = boxes\n",
418
+ " self.actual_bboxes = actual_bboxes\n",
419
+ " self.file_name = file_name\n",
420
+ " self.page_size = page_size\n",
421
+ "\n",
422
+ "\n",
423
+ "class InputFeatures(object):\n",
424
+ " \"\"\"A single set of features of data.\"\"\"\n",
425
+ "\n",
426
+ " def __init__(\n",
427
+ " self,\n",
428
+ " input_ids,\n",
429
+ " input_mask,\n",
430
+ " segment_ids,\n",
431
+ " label_ids,\n",
432
+ " boxes,\n",
433
+ " actual_bboxes,\n",
434
+ " file_name,\n",
435
+ " page_size,\n",
436
+ " ):\n",
437
+ " assert (\n",
438
+ " 0 <= all(boxes) <= 1000\n",
439
+ " ), \"Error with input bbox ({}): the coordinate value is not between 0 and 1000\".format(boxes)\n",
440
+ " self.input_ids = input_ids\n",
441
+ " self.input_mask = input_mask\n",
442
+ " self.segment_ids = segment_ids\n",
443
+ " self.label_ids = label_ids\n",
444
+ " self.boxes = boxes\n",
445
+ " self.actual_bboxes = actual_bboxes\n",
446
+ " self.file_name = file_name\n",
447
+ " self.page_size = page_size\n",
448
+ "\n",
449
+ "\n",
450
+ "def read_examples_from_file(data_dir, mode):\n",
451
+ " file_path = os.path.join(data_dir, \"{}.txt\".format(mode))\n",
452
+ " box_file_path = os.path.join(data_dir, \"{}_box.txt\".format(mode))\n",
453
+ " image_file_path = os.path.join(data_dir, \"{}_image.txt\".format(mode))\n",
454
+ " guid_index = 1\n",
455
+ " examples = []\n",
456
+ " with open(file_path, encoding=\"utf-8\") as f, open(box_file_path, encoding=\"utf-8\") as fb, open(\n",
457
+ " image_file_path, encoding=\"utf-8\"\n",
458
+ " ) as fi:\n",
459
+ " words = []\n",
460
+ " boxes = []\n",
461
+ " actual_bboxes = []\n",
462
+ " file_name = None\n",
463
+ " page_size = None\n",
464
+ " labels = []\n",
465
+ " for line, bline, iline in zip(f, fb, fi):\n",
466
+ " if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n",
467
+ " if words:\n",
468
+ " examples.append(\n",
469
+ " InputExample(\n",
470
+ " guid=\"{}-{}\".format(mode, guid_index),\n",
471
+ " words=words,\n",
472
+ " labels=labels,\n",
473
+ " boxes=boxes,\n",
474
+ " actual_bboxes=actual_bboxes,\n",
475
+ " file_name=file_name,\n",
476
+ " page_size=page_size,\n",
477
+ " )\n",
478
+ " )\n",
479
+ " guid_index += 1\n",
480
+ " words = []\n",
481
+ " boxes = []\n",
482
+ " actual_bboxes = []\n",
483
+ " file_name = None\n",
484
+ " page_size = None\n",
485
+ " labels = []\n",
486
+ " else:\n",
487
+ " splits = line.split(\"\\t\")\n",
488
+ " bsplits = bline.split(\"\\t\")\n",
489
+ " isplits = iline.split(\"\\t\")\n",
490
+ " assert len(splits) == 2\n",
491
+ " assert len(bsplits) == 2\n",
492
+ " assert len(isplits) == 4\n",
493
+ " assert splits[0] == bsplits[0]\n",
494
+ " words.append(splits[0])\n",
495
+ " if len(splits) > 1:\n",
496
+ " labels.append(splits[-1].replace(\"\\n\", \"\"))\n",
497
+ " box = bsplits[-1].replace(\"\\n\", \"\")\n",
498
+ " box = [int(b) for b in box.split()]\n",
499
+ " boxes.append(box)\n",
500
+ " actual_bbox = [int(b) for b in isplits[1].split()]\n",
501
+ " actual_bboxes.append(actual_bbox)\n",
502
+ " page_size = [int(i) for i in isplits[2].split()]\n",
503
+ " file_name = isplits[3].strip()\n",
504
+ " else:\n",
505
+ " # Examples could have no label for mode = \"test\"\n",
506
+ " labels.append(\"O\")\n",
507
+ " if words:\n",
508
+ " examples.append(\n",
509
+ " InputExample(\n",
510
+ " guid=\"%s-%d\".format(mode, guid_index),\n",
511
+ " words=words,\n",
512
+ " labels=labels,\n",
513
+ " boxes=boxes,\n",
514
+ " actual_bboxes=actual_bboxes,\n",
515
+ " file_name=file_name,\n",
516
+ " page_size=page_size,\n",
517
+ " )\n",
518
+ " )\n",
519
+ " return examples\n",
520
+ "\n",
521
+ "\n",
522
+ "def convert_examples_to_features(\n",
523
+ " examples,\n",
524
+ " label_list,\n",
525
+ " max_seq_length,\n",
526
+ " tokenizer,\n",
527
+ " cls_token_at_end=False,\n",
528
+ " cls_token=\"[CLS]\",\n",
529
+ " cls_token_segment_id=1,\n",
530
+ " sep_token=\"[SEP]\",\n",
531
+ " sep_token_extra=False,\n",
532
+ " pad_on_left=False,\n",
533
+ " pad_token=0,\n",
534
+ " cls_token_box=[0, 0, 0, 0],\n",
535
+ " sep_token_box=[1000, 1000, 1000, 1000],\n",
536
+ " pad_token_box=[0, 0, 0, 0],\n",
537
+ " pad_token_segment_id=0,\n",
538
+ " pad_token_label_id=-1,\n",
539
+ " sequence_a_segment_id=0,\n",
540
+ " mask_padding_with_zero=True,\n",
541
+ "):\n",
542
+ " \"\"\"Loads a data file into a list of `InputBatch`s\n",
543
+ " `cls_token_at_end` define the location of the CLS token:\n",
544
+ " - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n",
545
+ " - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n",
546
+ " `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n",
547
+ " \"\"\"\n",
548
+ "\n",
549
+ " label_map = {label: i for i, label in enumerate(label_list)}\n",
550
+ "\n",
551
+ " features = []\n",
552
+ " for ex_index, example in enumerate(examples):\n",
553
+ " file_name = example.file_name\n",
554
+ " page_size = example.page_size\n",
555
+ " width, height = page_size\n",
556
+ " if ex_index % 10000 == 0:\n",
557
+ " logger.info(\"Writing example %d of %d\", ex_index, len(examples))\n",
558
+ "\n",
559
+ " tokens = []\n",
560
+ " token_boxes = []\n",
561
+ " actual_bboxes = []\n",
562
+ " label_ids = []\n",
563
+ " for word, label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes):\n",
564
+ " word_tokens = tokenizer.tokenize(word)\n",
565
+ " tokens.extend(word_tokens)\n",
566
+ " token_boxes.extend([box] * len(word_tokens))\n",
567
+ " actual_bboxes.extend([actual_bbox] * len(word_tokens))\n",
568
+ " # Use the real label id for the first token of the word, and padding ids for the remaining tokens\n",
569
+ " label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))\n",
570
+ "\n",
571
+ " # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n",
572
+ " special_tokens_count = 3 if sep_token_extra else 2\n",
573
+ " if len(tokens) > max_seq_length - special_tokens_count:\n",
574
+ " tokens = tokens[: (max_seq_length - special_tokens_count)]\n",
575
+ " token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]\n",
576
+ " actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]\n",
577
+ " label_ids = label_ids[: (max_seq_length - special_tokens_count)]\n",
578
+ "\n",
579
+ " # The convention in BERT is:\n",
580
+ " # (a) For sequence pairs:\n",
581
+ " # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n",
582
+ " # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n",
583
+ " # (b) For single sequences:\n",
584
+ " # tokens: [CLS] the dog is hairy . [SEP]\n",
585
+ " # type_ids: 0 0 0 0 0 0 0\n",
586
+ " #\n",
587
+ " # Where \"type_ids\" are used to indicate whether this is the first\n",
588
+ " # sequence or the second sequence. The embedding vectors for `type=0` and\n",
589
+ " # `type=1` were learned during pre-training and are added to the wordpiece\n",
590
+ " # embedding vector (and position vector). This is not *strictly* necessary\n",
591
+ " # since the [SEP] token unambiguously separates the sequences, but it makes\n",
592
+ " # it easier for the model to learn the concept of sequences.\n",
593
+ " #\n",
594
+ " # For classification tasks, the first vector (corresponding to [CLS]) is\n",
595
+ " # used as as the \"sentence vector\". Note that this only makes sense because\n",
596
+ " # the entire model is fine-tuned.\n",
597
+ " tokens += [sep_token]\n",
598
+ " token_boxes += [sep_token_box]\n",
599
+ " actual_bboxes += [[0, 0, width, height]]\n",
600
+ " label_ids += [pad_token_label_id]\n",
601
+ " if sep_token_extra:\n",
602
+ " # roberta uses an extra separator b/w pairs of sentences\n",
603
+ " tokens += [sep_token]\n",
604
+ " token_boxes += [sep_token_box]\n",
605
+ " actual_bboxes += [[0, 0, width, height]]\n",
606
+ " label_ids += [pad_token_label_id]\n",
607
+ " segment_ids = [sequence_a_segment_id] * len(tokens)\n",
608
+ "\n",
609
+ " if cls_token_at_end:\n",
610
+ " tokens += [cls_token]\n",
611
+ " token_boxes += [cls_token_box]\n",
612
+ " actual_bboxes += [[0, 0, width, height]]\n",
613
+ " label_ids += [pad_token_label_id]\n",
614
+ " segment_ids += [cls_token_segment_id]\n",
615
+ " else:\n",
616
+ " tokens = [cls_token] + tokens\n",
617
+ " token_boxes = [cls_token_box] + token_boxes\n",
618
+ " actual_bboxes = [[0, 0, width, height]] + actual_bboxes\n",
619
+ " label_ids = [pad_token_label_id] + label_ids\n",
620
+ " segment_ids = [cls_token_segment_id] + segment_ids\n",
621
+ "\n",
622
+ " input_ids = tokenizer.convert_tokens_to_ids(tokens)\n",
623
+ "\n",
624
+ " # The mask has 1 for real tokens and 0 for padding tokens. Only real\n",
625
+ " # tokens are attended to.\n",
626
+ " input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n",
627
+ "\n",
628
+ " # Zero-pad up to the sequence length.\n",
629
+ " padding_length = max_seq_length - len(input_ids)\n",
630
+ " if pad_on_left:\n",
631
+ " input_ids = ([pad_token] * padding_length) + input_ids\n",
632
+ " input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n",
633
+ " segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n",
634
+ " label_ids = ([pad_token_label_id] * padding_length) + label_ids\n",
635
+ " token_boxes = ([pad_token_box] * padding_length) + token_boxes\n",
636
+ " else:\n",
637
+ " input_ids += [pad_token] * padding_length\n",
638
+ " input_mask += [0 if mask_padding_with_zero else 1] * padding_length\n",
639
+ " segment_ids += [pad_token_segment_id] * padding_length\n",
640
+ " label_ids += [pad_token_label_id] * padding_length\n",
641
+ " token_boxes += [pad_token_box] * padding_length\n",
642
+ "\n",
643
+ " assert len(input_ids) == max_seq_length\n",
644
+ " assert len(input_mask) == max_seq_length\n",
645
+ " assert len(segment_ids) == max_seq_length\n",
646
+ " assert len(label_ids) == max_seq_length\n",
647
+ " assert len(token_boxes) == max_seq_length\n",
648
+ "\n",
649
+ " if ex_index < 5:\n",
650
+ " logger.info(\"*** Example ***\")\n",
651
+ " logger.info(\"guid: %s\", example.guid)\n",
652
+ " logger.info(\"tokens: %s\", \" \".join([str(x) for x in tokens]))\n",
653
+ " logger.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n",
654
+ " logger.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n",
655
+ " logger.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n",
656
+ " logger.info(\"label_ids: %s\", \" \".join([str(x) for x in label_ids]))\n",
657
+ " logger.info(\"boxes: %s\", \" \".join([str(x) for x in token_boxes]))\n",
658
+ " logger.info(\"actual_bboxes: %s\", \" \".join([str(x) for x in actual_bboxes]))\n",
659
+ "\n",
660
+ " features.append(\n",
661
+ " InputFeatures(\n",
662
+ " input_ids=input_ids,\n",
663
+ " input_mask=input_mask,\n",
664
+ " segment_ids=segment_ids,\n",
665
+ " label_ids=label_ids,\n",
666
+ " boxes=token_boxes,\n",
667
+ " actual_bboxes=actual_bboxes,\n",
668
+ " file_name=file_name,\n",
669
+ " page_size=page_size,\n",
670
+ " )\n",
671
+ " )\n",
672
+ " return features"
673
+ ]
674
+ },
675
+ {
676
+ "cell_type": "code",
677
+ "execution_count": 9,
678
+ "metadata": {
679
+ "id": "HUJftzeBWh2S"
680
+ },
681
+ "outputs": [],
682
+ "source": [
683
+ "from transformers import LayoutLMTokenizer\n",
684
+ "\n",
685
+ "# from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures\n",
686
+ "from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n",
687
+ "\n",
688
+ "batch_size = 16\n",
689
+ "args = {\n",
690
+ " \"local_rank\": -1,\n",
691
+ " \"overwrite_cache\": True,\n",
692
+ " \"data_dir\": \"/home/sourab/temp/data/\",\n",
693
+ " \"model_name_or_path\": \"microsoft/layoutlm-base-uncased\",\n",
694
+ " \"max_seq_length\": 512,\n",
695
+ " \"model_type\": \"layoutlm\",\n",
696
+ "}\n",
697
+ "\n",
698
+ "\n",
699
+ "# class to turn the keys of a dict into attributes (thanks Stackoverflow)\n",
700
+ "class AttrDict(dict):\n",
701
+ " def __init__(self, *args, **kwargs):\n",
702
+ " super(AttrDict, self).__init__(*args, **kwargs)\n",
703
+ " self.__dict__ = self\n",
704
+ "\n",
705
+ "\n",
706
+ "args = AttrDict(args)\n",
707
+ "\n",
708
+ "tokenizer = LayoutLMTokenizer.from_pretrained(\"microsoft/layoutlm-base-uncased\")\n",
709
+ "\n",
710
+ "# the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here\n",
711
+ "train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode=\"train\")\n",
712
+ "train_sampler = RandomSampler(train_dataset)\n",
713
+ "train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)\n",
714
+ "\n",
715
+ "eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode=\"test\")\n",
716
+ "eval_sampler = SequentialSampler(eval_dataset)\n",
717
+ "eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size)"
718
+ ]
719
+ },
720
+ {
721
+ "cell_type": "code",
722
+ "execution_count": 10,
723
+ "metadata": {
724
+ "colab": {
725
+ "base_uri": "https://localhost:8080/"
726
+ },
727
+ "id": "18NMUBzgOdqu",
728
+ "outputId": "eef47b70-3a9a-4b19-be6b-95900c58337b"
729
+ },
730
+ "outputs": [
731
+ {
732
+ "data": {
733
+ "text/plain": [
734
+ "10"
735
+ ]
736
+ },
737
+ "execution_count": 10,
738
+ "metadata": {},
739
+ "output_type": "execute_result"
740
+ }
741
+ ],
742
+ "source": [
743
+ "len(train_dataloader)"
744
+ ]
745
+ },
746
+ {
747
+ "cell_type": "code",
748
+ "execution_count": 11,
749
+ "metadata": {
750
+ "colab": {
751
+ "base_uri": "https://localhost:8080/"
752
+ },
753
+ "id": "toFjxtn71B1U",
754
+ "outputId": "f4651896-cafc-449a-98b4-c81f41177e6d"
755
+ },
756
+ "outputs": [
757
+ {
758
+ "data": {
759
+ "text/plain": [
760
+ "4"
761
+ ]
762
+ },
763
+ "execution_count": 11,
764
+ "metadata": {},
765
+ "output_type": "execute_result"
766
+ }
767
+ ],
768
+ "source": [
769
+ "len(eval_dataloader)"
770
+ ]
771
+ },
772
+ {
773
+ "cell_type": "code",
774
+ "execution_count": null,
775
+ "metadata": {
776
+ "colab": {
777
+ "base_uri": "https://localhost:8080/",
778
+ "height": 137
779
+ },
780
+ "id": "RhINSBw9I24G",
781
+ "outputId": "28738ce2-617c-47d3-b8c9-f949d3066d60"
782
+ },
783
+ "outputs": [],
784
+ "source": [
785
+ "batch = next(iter(train_dataloader))\n",
786
+ "input_ids = batch[0][0]\n",
787
+ "tokenizer.decode(input_ids)"
788
+ ]
789
+ },
790
+ {
791
+ "cell_type": "markdown",
792
+ "metadata": {
793
+ "id": "66cEmLDoUFcm"
794
+ },
795
+ "source": [
796
+ "## Define and fine-tune the model\n",
797
+ "\n",
798
+ "As this is a sequence labeling task, we are going to load `LayoutLMForTokenClassification` (the base sized model) from the hub. We are going to fine-tune it on a downstream task, namely FUNSD."
799
+ ]
800
+ },
801
+ {
802
+ "cell_type": "code",
803
+ "execution_count": 13,
804
+ "metadata": {},
805
+ "outputs": [
806
+ {
807
+ "data": {
808
+ "text/plain": [
809
+ "LoRAConfig(pet_type='LORA', task_type='TOKEN_CLS', inference_mode=False, r=16, target_modules=None, lora_alpha=16, lora_dropout=0.1, fan_in_fan_out=False, enable_lora=None, bias='all')"
810
+ ]
811
+ },
812
+ "execution_count": 13,
813
+ "metadata": {},
814
+ "output_type": "execute_result"
815
+ }
816
+ ],
817
+ "source": [
818
+ "from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType\n",
819
+ "\n",
820
+ "peft_config = LoraConfig(\n",
821
+ " task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias=\"all\"\n",
822
+ ")\n",
823
+ "peft_config"
824
+ ]
825
+ },
826
+ {
827
+ "cell_type": "code",
828
+ "execution_count": null,
829
+ "metadata": {
830
+ "colab": {
831
+ "base_uri": "https://localhost:8080/",
832
+ "height": 1000,
833
+ "referenced_widgets": [
834
+ "da1094982d044ab28eb0effebbfcbb78",
835
+ "513e00b619924f5693259cd919a927ab",
836
+ "63e819a04f6e4829838c0e30e65516ed",
837
+ "d1c3e1a66db04227a74ef8d6481d6daf",
838
+ "c75f0da13a1e4dbe94800711d55390a6",
839
+ "31642aacae2a44879960da09f938ecc4",
840
+ "138a6b922e454ebbaeb315ecd5f476b8",
841
+ "a8126ba98376402888e9ba344cf1c538"
842
+ ]
843
+ },
844
+ "id": "xIdOsFBiTsuw",
845
+ "outputId": "95e8811c-025a-41a0-9d03-4285a17f2a9b"
846
+ },
847
+ "outputs": [],
848
+ "source": [
849
+ "from transformers import LayoutLMForTokenClassification\n",
850
+ "import torch\n",
851
+ "from transformers import set_seed\n",
852
+ "\n",
853
+ "seed = 100\n",
854
+ "set_seed(seed)\n",
855
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
856
+ "\n",
857
+ "model = LayoutLMForTokenClassification.from_pretrained(\"microsoft/layoutlm-base-uncased\", num_labels=num_labels)\n",
858
+ "model = get_peft_model(model, peft_config)\n",
859
+ "model.to(device)"
860
+ ]
861
+ },
862
+ {
863
+ "cell_type": "code",
864
+ "execution_count": null,
865
+ "metadata": {},
866
+ "outputs": [],
867
+ "source": [
868
+ "print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight)\n",
869
+ "print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.weight)\n",
870
+ "print(model.model.classifier.weight)"
871
+ ]
872
+ },
873
+ {
874
+ "cell_type": "markdown",
875
+ "metadata": {
876
+ "id": "3weFr_pz1mla"
877
+ },
878
+ "source": [
879
+ "Now we can start training:"
880
+ ]
881
+ },
882
+ {
883
+ "cell_type": "code",
884
+ "execution_count": null,
885
+ "metadata": {
886
+ "colab": {
887
+ "base_uri": "https://localhost:8080/"
888
+ },
889
+ "id": "Yu0qePs2cRKo",
890
+ "outputId": "cdbb9a03-eb9b-4740-bbe3-da06b9192bae"
891
+ },
892
+ "outputs": [],
893
+ "source": [
894
+ "from transformers import AdamW, get_linear_schedule_with_warmup\n",
895
+ "from tqdm import tqdm\n",
896
+ "\n",
897
+ "num_train_epochs = 100\n",
898
+ "\n",
899
+ "optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3)\n",
900
+ "lr_scheduler = get_linear_schedule_with_warmup(\n",
901
+ " optimizer=optimizer,\n",
902
+ " num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs),\n",
903
+ " num_training_steps=(len(train_dataloader) * num_train_epochs),\n",
904
+ ")\n",
905
+ "\n",
906
+ "\n",
907
+ "global_step = 0\n",
908
+ "\n",
909
+ "t_total = len(train_dataloader) * num_train_epochs # total number of training steps\n",
910
+ "\n",
911
+ "# put the model in training mode\n",
912
+ "model.train()\n",
913
+ "for epoch in range(num_train_epochs):\n",
914
+ " for batch in tqdm(train_dataloader, desc=\"Training\"):\n",
915
+ " input_ids = batch[0].to(device)\n",
916
+ " bbox = batch[4].to(device)\n",
917
+ " attention_mask = batch[1].to(device)\n",
918
+ " token_type_ids = batch[2].to(device)\n",
919
+ " labels = batch[3].to(device)\n",
920
+ "\n",
921
+ " # forward pass\n",
922
+ " outputs = model(\n",
923
+ " input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels\n",
924
+ " )\n",
925
+ " loss = outputs.loss\n",
926
+ "\n",
927
+ " # print loss every 100 steps\n",
928
+ " if global_step % 10 == 0:\n",
929
+ " print(f\"Loss after {global_step} steps: {loss.item()}\")\n",
930
+ "\n",
931
+ " # backward pass to get the gradients\n",
932
+ " loss.backward()\n",
933
+ "\n",
934
+ " # print(\"Gradients on classification head:\")\n",
935
+ " # print(model.classifier.weight.grad[6,:].sum())\n",
936
+ "\n",
937
+ " # update\n",
938
+ " optimizer.step()\n",
939
+ " lr_scheduler.step()\n",
940
+ " optimizer.zero_grad()\n",
941
+ " global_step += 1"
942
+ ]
943
+ },
944
+ {
945
+ "cell_type": "code",
946
+ "execution_count": 17,
947
+ "metadata": {
948
+ "colab": {
949
+ "base_uri": "https://localhost:8080/"
950
+ },
951
+ "id": "u1rNslap5Y3N",
952
+ "outputId": "877183d4-1d29-4d09-bd3a-0e5f88611dc8"
953
+ },
954
+ "outputs": [
955
+ {
956
+ "name": "stderr",
957
+ "output_type": "stream",
958
+ "text": [
959
+ "Evaluating: 100%|█████████████████████████████████████████████████████| 4/4 [00:00<00:00, 9.73it/s]\n"
960
+ ]
961
+ },
962
+ {
963
+ "name": "stdout",
964
+ "output_type": "stream",
965
+ "text": [
966
+ "{'loss': 1.7456395626068115, 'precision': 0.7407071622846781, 'recall': 0.817408704352176, 'f1': 0.7771700356718193}\n"
967
+ ]
968
+ }
969
+ ],
970
+ "source": [
971
+ "import numpy as np\n",
972
+ "from seqeval.metrics import (\n",
973
+ " classification_report,\n",
974
+ " f1_score,\n",
975
+ " precision_score,\n",
976
+ " recall_score,\n",
977
+ ")\n",
978
+ "\n",
979
+ "eval_loss = 0.0\n",
980
+ "nb_eval_steps = 0\n",
981
+ "preds = None\n",
982
+ "out_label_ids = None\n",
983
+ "\n",
984
+ "# put model in evaluation mode\n",
985
+ "model.eval()\n",
986
+ "for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n",
987
+ " with torch.no_grad():\n",
988
+ " input_ids = batch[0].to(device)\n",
989
+ " bbox = batch[4].to(device)\n",
990
+ " attention_mask = batch[1].to(device)\n",
991
+ " token_type_ids = batch[2].to(device)\n",
992
+ " labels = batch[3].to(device)\n",
993
+ "\n",
994
+ " # forward pass\n",
995
+ " outputs = model(\n",
996
+ " input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels\n",
997
+ " )\n",
998
+ " # get the loss and logits\n",
999
+ " tmp_eval_loss = outputs.loss\n",
1000
+ " logits = outputs.logits\n",
1001
+ "\n",
1002
+ " eval_loss += tmp_eval_loss.item()\n",
1003
+ " nb_eval_steps += 1\n",
1004
+ "\n",
1005
+ " # compute the predictions\n",
1006
+ " if preds is None:\n",
1007
+ " preds = logits.detach().cpu().numpy()\n",
1008
+ " out_label_ids = labels.detach().cpu().numpy()\n",
1009
+ " else:\n",
1010
+ " preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n",
1011
+ " out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n",
1012
+ "\n",
1013
+ "# compute average evaluation loss\n",
1014
+ "eval_loss = eval_loss / nb_eval_steps\n",
1015
+ "preds = np.argmax(preds, axis=2)\n",
1016
+ "\n",
1017
+ "out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n",
1018
+ "preds_list = [[] for _ in range(out_label_ids.shape[0])]\n",
1019
+ "\n",
1020
+ "for i in range(out_label_ids.shape[0]):\n",
1021
+ " for j in range(out_label_ids.shape[1]):\n",
1022
+ " if out_label_ids[i, j] != pad_token_label_id:\n",
1023
+ " out_label_list[i].append(label_map[out_label_ids[i][j]])\n",
1024
+ " preds_list[i].append(label_map[preds[i][j]])\n",
1025
+ "\n",
1026
+ "results = {\n",
1027
+ " \"loss\": eval_loss,\n",
1028
+ " \"precision\": precision_score(out_label_list, preds_list),\n",
1029
+ " \"recall\": recall_score(out_label_list, preds_list),\n",
1030
+ " \"f1\": f1_score(out_label_list, preds_list),\n",
1031
+ "}\n",
1032
+ "print(results)"
1033
+ ]
1034
+ },
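+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Optionally, we can also print a per-entity breakdown using the `classification_report` imported above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Per-entity precision/recall/F1 from seqeval (imported in the evaluation cell above)\n",
+ "print(classification_report(out_label_list, preds_list))"
+ ]
+ },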
1035
+ {
1036
+ "cell_type": "code",
1037
+ "execution_count": 18,
1038
+ "metadata": {},
1039
+ "outputs": [
1040
+ {
1041
+ "name": "stdout",
1042
+ "output_type": "stream",
1043
+ "text": [
1044
+ "trainable params: 702733 || all params: 113227789 || trainable%: 0.6206365117665593\n"
1045
+ ]
1046
+ }
1047
+ ],
1048
+ "source": [
1049
+ "model.print_trainable_parameters()"
1050
+ ]
1051
+ },
1052
+ {
1053
+ "cell_type": "code",
1054
+ "execution_count": 19,
1055
+ "metadata": {},
1056
+ "outputs": [],
1057
+ "source": [
1058
+ "model.save_pretrained(\"peft_layoutlm\")"
1059
+ ]
1060
+ },
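+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To reuse the adapter later, a minimal sketch (assuming the `peft_layoutlm` directory saved above) is to reload the base model and attach the saved LoRA weights with `PeftModel.from_pretrained`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch: reload the base model and attach the saved LoRA adapter for inference\n",
+ "from peft import PeftModel\n",
+ "from transformers import LayoutLMForTokenClassification\n",
+ "\n",
+ "base_model = LayoutLMForTokenClassification.from_pretrained(\n",
+ "    \"microsoft/layoutlm-base-uncased\", num_labels=num_labels\n",
+ ")\n",
+ "inference_model = PeftModel.from_pretrained(base_model, \"peft_layoutlm\")\n",
+ "inference_model.eval()"
+ ]
+ },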
1061
+ {
1062
+ "cell_type": "code",
1063
+ "execution_count": 21,
1064
+ "metadata": {},
1065
+ "outputs": [
1066
+ {
1067
+ "name": "stdout",
1068
+ "output_type": "stream",
1069
+ "text": [
1070
+ "2,8M\tlayoutlm_funsd.pt\r\n"
1071
+ ]
1072
+ }
1073
+ ],
1074
+ "source": [
1075
+ "!du -h \"peft_layoutlm/adapter_model.bin\""
1076
+ ]
1077
+ },
1078
+ {
1079
+ "cell_type": "code",
1080
+ "execution_count": null,
1081
+ "metadata": {},
1082
+ "outputs": [],
1083
+ "source": []
1084
+ }
1085
+ ],
1086
+ "metadata": {
1087
+ "accelerator": "GPU",
1088
+ "colab": {
1089
+ "collapsed_sections": [],
1090
+ "include_colab_link": true,
1091
+ "name": "Fine-tuning LayoutLMForTokenClassification on FUNSD.ipynb",
1092
+ "provenance": [],
1093
+ "toc_visible": true
1094
+ },
1095
+ "kernelspec": {
1096
+ "display_name": "Python 3",
1097
+ "language": "python",
1098
+ "name": "python3"
1099
+ },
1100
+ "language_info": {
1101
+ "codemirror_mode": {
1102
+ "name": "ipython",
1103
+ "version": 3
1104
+ },
1105
+ "file_extension": ".py",
1106
+ "mimetype": "text/x-python",
1107
+ "name": "python",
1108
+ "nbconvert_exporter": "python",
1109
+ "pygments_lexer": "ipython3",
1110
+ "version": "3.10.5 (v3.10.5:f377153967, Jun 6 2022, 12:36:10) [Clang 13.0.0 (clang-1300.0.29.30)]"
1111
+ },
1112
+ "vscode": {
1113
+ "interpreter": {
1114
+ "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
1115
+ }
1116
+ },
1117
+ "widgets": {
1118
+ "application/vnd.jupyter.widget-state+json": {
1119
+ "138a6b922e454ebbaeb315ecd5f476b8": {
1120
+ "model_module": "@jupyter-widgets/controls",
1121
+ "model_name": "DescriptionStyleModel",
1122
+ "state": {
1123
+ "_model_module": "@jupyter-widgets/controls",
1124
+ "_model_module_version": "1.5.0",
1125
+ "_model_name": "DescriptionStyleModel",
1126
+ "_view_count": null,
1127
+ "_view_module": "@jupyter-widgets/base",
1128
+ "_view_module_version": "1.2.0",
1129
+ "_view_name": "StyleView",
1130
+ "description_width": ""
1131
+ }
1132
+ },
1133
+ "31642aacae2a44879960da09f938ecc4": {
1134
+ "model_module": "@jupyter-widgets/base",
1135
+ "model_name": "LayoutModel",
1136
+ "state": {
1137
+ "_model_module": "@jupyter-widgets/base",
1138
+ "_model_module_version": "1.2.0",
1139
+ "_model_name": "LayoutModel",
1140
+ "_view_count": null,
1141
+ "_view_module": "@jupyter-widgets/base",
1142
+ "_view_module_version": "1.2.0",
1143
+ "_view_name": "LayoutView",
1144
+ "align_content": null,
1145
+ "align_items": null,
1146
+ "align_self": null,
1147
+ "border": null,
1148
+ "bottom": null,
1149
+ "display": null,
1150
+ "flex": null,
1151
+ "flex_flow": null,
1152
+ "grid_area": null,
1153
+ "grid_auto_columns": null,
1154
+ "grid_auto_flow": null,
1155
+ "grid_auto_rows": null,
1156
+ "grid_column": null,
1157
+ "grid_gap": null,
1158
+ "grid_row": null,
1159
+ "grid_template_areas": null,
1160
+ "grid_template_columns": null,
1161
+ "grid_template_rows": null,
1162
+ "height": null,
1163
+ "justify_content": null,
1164
+ "justify_items": null,
1165
+ "left": null,
1166
+ "margin": null,
1167
+ "max_height": null,
1168
+ "max_width": null,
1169
+ "min_height": null,
1170
+ "min_width": null,
1171
+ "object_fit": null,
1172
+ "object_position": null,
1173
+ "order": null,
1174
+ "overflow": null,
1175
+ "overflow_x": null,
1176
+ "overflow_y": null,
1177
+ "padding": null,
1178
+ "right": null,
1179
+ "top": null,
1180
+ "visibility": null,
1181
+ "width": null
1182
+ }
1183
+ },
1184
+ "513e00b619924f5693259cd919a927ab": {
1185
+ "model_module": "@jupyter-widgets/base",
1186
+ "model_name": "LayoutModel",
1187
+ "state": {
1188
+ "_model_module": "@jupyter-widgets/base",
1189
+ "_model_module_version": "1.2.0",
1190
+ "_model_name": "LayoutModel",
1191
+ "_view_count": null,
1192
+ "_view_module": "@jupyter-widgets/base",
1193
+ "_view_module_version": "1.2.0",
1194
+ "_view_name": "LayoutView",
1195
+ "align_content": null,
1196
+ "align_items": null,
1197
+ "align_self": null,
1198
+ "border": null,
1199
+ "bottom": null,
1200
+ "display": null,
1201
+ "flex": null,
1202
+ "flex_flow": null,
1203
+ "grid_area": null,
1204
+ "grid_auto_columns": null,
1205
+ "grid_auto_flow": null,
1206
+ "grid_auto_rows": null,
1207
+ "grid_column": null,
1208
+ "grid_gap": null,
1209
+ "grid_row": null,
1210
+ "grid_template_areas": null,
1211
+ "grid_template_columns": null,
1212
+ "grid_template_rows": null,
1213
+ "height": null,
1214
+ "justify_content": null,
1215
+ "justify_items": null,
1216
+ "left": null,
1217
+ "margin": null,
1218
+ "max_height": null,
1219
+ "max_width": null,
1220
+ "min_height": null,
1221
+ "min_width": null,
1222
+ "object_fit": null,
1223
+ "object_position": null,
1224
+ "order": null,
1225
+ "overflow": null,
1226
+ "overflow_x": null,
1227
+ "overflow_y": null,
1228
+ "padding": null,
1229
+ "right": null,
1230
+ "top": null,
1231
+ "visibility": null,
1232
+ "width": null
1233
+ }
1234
+ },
1235
+ "63e819a04f6e4829838c0e30e65516ed": {
1236
+ "model_module": "@jupyter-widgets/controls",
1237
+ "model_name": "FloatProgressModel",
1238
+ "state": {
1239
+ "_dom_classes": [],
1240
+ "_model_module": "@jupyter-widgets/controls",
1241
+ "_model_module_version": "1.5.0",
1242
+ "_model_name": "FloatProgressModel",
1243
+ "_view_count": null,
1244
+ "_view_module": "@jupyter-widgets/controls",
1245
+ "_view_module_version": "1.5.0",
1246
+ "_view_name": "ProgressView",
1247
+ "bar_style": "success",
1248
+ "description": "Downloading: 100%",
1249
+ "description_tooltip": null,
1250
+ "layout": "IPY_MODEL_31642aacae2a44879960da09f938ecc4",
1251
+ "max": 453093832,
1252
+ "min": 0,
1253
+ "orientation": "horizontal",
1254
+ "style": "IPY_MODEL_c75f0da13a1e4dbe94800711d55390a6",
1255
+ "value": 453093832
1256
+ }
1257
+ },
1258
+ "a8126ba98376402888e9ba344cf1c538": {
1259
+ "model_module": "@jupyter-widgets/base",
1260
+ "model_name": "LayoutModel",
1261
+ "state": {
1262
+ "_model_module": "@jupyter-widgets/base",
1263
+ "_model_module_version": "1.2.0",
1264
+ "_model_name": "LayoutModel",
1265
+ "_view_count": null,
1266
+ "_view_module": "@jupyter-widgets/base",
1267
+ "_view_module_version": "1.2.0",
1268
+ "_view_name": "LayoutView",
1269
+ "align_content": null,
1270
+ "align_items": null,
1271
+ "align_self": null,
1272
+ "border": null,
1273
+ "bottom": null,
1274
+ "display": null,
1275
+ "flex": null,
1276
+ "flex_flow": null,
1277
+ "grid_area": null,
1278
+ "grid_auto_columns": null,
1279
+ "grid_auto_flow": null,
1280
+ "grid_auto_rows": null,
1281
+ "grid_column": null,
1282
+ "grid_gap": null,
1283
+ "grid_row": null,
1284
+ "grid_template_areas": null,
1285
+ "grid_template_columns": null,
1286
+ "grid_template_rows": null,
1287
+ "height": null,
1288
+ "justify_content": null,
1289
+ "justify_items": null,
1290
+ "left": null,
1291
+ "margin": null,
1292
+ "max_height": null,
1293
+ "max_width": null,
1294
+ "min_height": null,
1295
+ "min_width": null,
1296
+ "object_fit": null,
1297
+ "object_position": null,
1298
+ "order": null,
1299
+ "overflow": null,
1300
+ "overflow_x": null,
1301
+ "overflow_y": null,
1302
+ "padding": null,
1303
+ "right": null,
1304
+ "top": null,
1305
+ "visibility": null,
1306
+ "width": null
1307
+ }
1308
+ },
1309
+ "c75f0da13a1e4dbe94800711d55390a6": {
1310
+ "model_module": "@jupyter-widgets/controls",
1311
+ "model_name": "ProgressStyleModel",
1312
+ "state": {
1313
+ "_model_module": "@jupyter-widgets/controls",
1314
+ "_model_module_version": "1.5.0",
1315
+ "_model_name": "ProgressStyleModel",
1316
+ "_view_count": null,
1317
+ "_view_module": "@jupyter-widgets/base",
1318
+ "_view_module_version": "1.2.0",
1319
+ "_view_name": "StyleView",
1320
+ "bar_color": null,
1321
+ "description_width": "initial"
1322
+ }
1323
+ },
1324
+ "d1c3e1a66db04227a74ef8d6481d6daf": {
1325
+ "model_module": "@jupyter-widgets/controls",
1326
+ "model_name": "HTMLModel",
1327
+ "state": {
1328
+ "_dom_classes": [],
1329
+ "_model_module": "@jupyter-widgets/controls",
1330
+ "_model_module_version": "1.5.0",
1331
+ "_model_name": "HTMLModel",
1332
+ "_view_count": null,
1333
+ "_view_module": "@jupyter-widgets/controls",
1334
+ "_view_module_version": "1.5.0",
1335
+ "_view_name": "HTMLView",
1336
+ "description": "",
1337
+ "description_tooltip": null,
1338
+ "layout": "IPY_MODEL_a8126ba98376402888e9ba344cf1c538",
1339
+ "placeholder": "​",
1340
+ "style": "IPY_MODEL_138a6b922e454ebbaeb315ecd5f476b8",
1341
+ "value": " 453M/453M [00:15&lt;00:00, 30.0MB/s]"
1342
+ }
1343
+ },
1344
+ "da1094982d044ab28eb0effebbfcbb78": {
1345
+ "model_module": "@jupyter-widgets/controls",
1346
+ "model_name": "HBoxModel",
1347
+ "state": {
1348
+ "_dom_classes": [],
1349
+ "_model_module": "@jupyter-widgets/controls",
1350
+ "_model_module_version": "1.5.0",
1351
+ "_model_name": "HBoxModel",
1352
+ "_view_count": null,
1353
+ "_view_module": "@jupyter-widgets/controls",
1354
+ "_view_module_version": "1.5.0",
1355
+ "_view_name": "HBoxView",
1356
+ "box_style": "",
1357
+ "children": [
1358
+ "IPY_MODEL_63e819a04f6e4829838c0e30e65516ed",
1359
+ "IPY_MODEL_d1c3e1a66db04227a74ef8d6481d6daf"
1360
+ ],
1361
+ "layout": "IPY_MODEL_513e00b619924f5693259cd919a927ab"
1362
+ }
1363
+ }
1364
+ }
1365
+ }
1366
+ },
1367
+ "nbformat": 4,
1368
+ "nbformat_minor": 1
1369
+ }