seal345 committed
Commit 6acdb52
1 Parent(s): 6d256fa

Upload Collab.ipynb

Files changed (1)
  1. Collab.ipynb +427 -0
Collab.ipynb ADDED
@@ -0,0 +1,427 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Model training notebook"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "from transformers import AutoTokenizer\n",
+ "from datasets import load_dataset\n",
+ "from transformers import DataCollatorWithPadding, Trainer, TrainingArguments, AutoModelForSequenceClassification"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tokenizer = AutoTokenizer.from_pretrained('roberta-base')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using custom data configuration default-bd943fc5bb724360\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Downloading and preparing dataset csv/default to /Users/seal/.cache/huggingface/datasets/csv/default-bd943fc5bb724360/0.0.0/51cce309a08df9c4d82ffd9363bbe090bf173197fc01a71b034e8594995a1a58...\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "548860682d5749cd979a64b49275ea32",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Downloading data files: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "97bf74e33b0d4607ba6dbed03fc625ae",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Extracting data files: 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "0 tables [00:00, ? tables/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "0 tables [00:00, ? tables/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Dataset csv downloaded and prepared to /Users/seal/.cache/huggingface/datasets/csv/default-bd943fc5bb724360/0.0.0/51cce309a08df9c4d82ffd9363bbe090bf173197fc01a71b034e8594995a1a58. Subsequent calls will reuse this data.\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "7ffe361486c64883b25e6ab953c1cd16",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Parameter 'function'=<function <lambda> at 0x1641221f0> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "484e702a879448a3b09e2c7a2d672407",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/11883 [00:00<?, ?ex/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "2cb11d60bff946fa86c386e1d03ba9fb",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/2097 [00:00<?, ?ex/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "data_hf = load_dataset('csv', data_files={'train': 'data_train.csv',\n",
+ " 'test': 'data_test.csv'})\n",
+ "data_hf_tokenized = data_hf.map(lambda x: tokenizer(x['full_info'], max_length=512, truncation=True))\n",
+ "data_hf_tokenized = data_hf_tokenized.rename_column('tags', 'label')\n",
+ "data_hf_tokenized = data_hf_tokenized.remove_columns(['full_info'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "args_dict = {'output_dir': 'logs', 'per_device_train_batch_size': 8, 'do_train': True}\n",
+ "args = TrainingArguments(**args_dict)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Some weights of the model checkpoint at roberta-base were not used when initializing RobertaForSequenceClassification: ['lm_head.decoder.weight', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias', 'roberta.pooler.dense.weight', 'lm_head.layer_norm.weight', 'lm_head.bias', 'roberta.pooler.dense.bias']\n",
+ "- This IS expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+ "- This IS NOT expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
+ "Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.out_proj.bias', 'classifier.out_proj.weight', 'classifier.dense.bias', 'classifier.dense.weight']\n",
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = AutoModelForSequenceClassification.from_pretrained('roberta-base', num_labels=19)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
+ " warnings.warn(\n",
+ "***** Running training *****\n",
+ " Num examples = 11883\n",
+ " Num Epochs = 3\n",
+ " Instantaneous batch size per device = 8\n",
+ " Total train batch size (w. parallel, distributed & accumulation) = 8\n",
+ " Gradient Accumulation steps = 1\n",
+ " Total optimization steps = 4458\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ " <div>\n",
+ " \n",
+ " <progress value='4458' max='4458' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+ " [4458/4458 20:39:50, Epoch 3/3]\n",
+ " </div>\n",
+ " <table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: left;\">\n",
+ " <th>Step</th>\n",
+ " <th>Training Loss</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td>500</td>\n",
+ " <td>1.356300</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>1000</td>\n",
+ " <td>1.088700</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>1500</td>\n",
+ " <td>1.026400</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>2000</td>\n",
+ " <td>0.897900</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>2500</td>\n",
+ " <td>0.835500</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>3000</td>\n",
+ " <td>0.795100</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>3500</td>\n",
+ " <td>0.684000</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>4000</td>\n",
+ " <td>0.641000</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table><p>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Saving model checkpoint to logs/checkpoint-500\n",
+ "Configuration saved in logs/checkpoint-500/config.json\n",
+ "Model weights saved in logs/checkpoint-500/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-1000\n",
+ "Configuration saved in logs/checkpoint-1000/config.json\n",
+ "Model weights saved in logs/checkpoint-1000/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-1500\n",
+ "Configuration saved in logs/checkpoint-1500/config.json\n",
+ "Model weights saved in logs/checkpoint-1500/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-2000\n",
+ "Configuration saved in logs/checkpoint-2000/config.json\n",
+ "Model weights saved in logs/checkpoint-2000/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-2500\n",
+ "Configuration saved in logs/checkpoint-2500/config.json\n",
+ "Model weights saved in logs/checkpoint-2500/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-3000\n",
+ "Configuration saved in logs/checkpoint-3000/config.json\n",
+ "Model weights saved in logs/checkpoint-3000/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-3500\n",
+ "Configuration saved in logs/checkpoint-3500/config.json\n",
+ "Model weights saved in logs/checkpoint-3500/pytorch_model.bin\n",
+ "Saving model checkpoint to logs/checkpoint-4000\n",
+ "Configuration saved in logs/checkpoint-4000/config.json\n",
+ "Model weights saved in logs/checkpoint-4000/pytorch_model.bin\n",
+ "\n",
+ "\n",
+ "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "TrainOutput(global_step=4458, training_loss=0.8849190480509057, metrics={'train_runtime': 74409.2364, 'train_samples_per_second': 0.479, 'train_steps_per_second': 0.06, 'total_flos': 4731926386857384.0, 'train_loss': 0.8849190480509057, 'epoch': 3.0})"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "trainer = Trainer(\n",
+ " model, \n",
+ " args,\n",
+ " train_dataset=data_hf_tokenized[\"train\"],\n",
+ " eval_dataset=data_hf_tokenized[\"test\"],\n",
+ " data_collator=DataCollatorWithPadding(tokenizer),\n",
+ ")\n",
+ "trainer.train()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "***** Running Prediction *****\n",
+ " Num examples = 2097\n",
+ " Batch size = 8\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ " <div>\n",
+ " \n",
+ " <progress value='263' max='263' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+ " [263/263 21:44]\n",
+ " </div>\n",
+ " "
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "predictions = trainer.predict(data_hf_tokenized[\"test\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Saving model checkpoint to model_roberta_trained\n",
+ "Configuration saved in model_roberta_trained/config.json\n",
+ "Model weights saved in model_roberta_trained/pytorch_model.bin\n"
+ ]
+ }
+ ],
+ "source": [
+ "trainer.save_model('model_roberta_trained')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "I also trained distilbert in Colab, but the result there was slightly worse.\n",
+ "I used the following resources:\n",
+ "https://github.com/ThilinaRajapakse/pytorch-transformers-classification\n",
+ "https://github.com/huggingface/transformers/tree/main/src/transformers"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
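
The closing markdown cell mentions a distilbert run in Colab with slightly worse results, but that notebook is not part of this commit. Below is a minimal sketch of how the same pipeline could be pointed at distilbert, assuming the 'distilbert-base-uncased' checkpoint, the same data_train.csv / data_test.csv files, and the same hyperparameters as the roberta cells above; the output paths logs_distilbert and model_distilbert_trained are illustrative names, not taken from the commit.

# Sketch of the distilbert variant (assumed checkpoint and paths, mirroring the roberta cells)
from datasets import load_dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          DataCollatorWithPadding, Trainer, TrainingArguments)

checkpoint = 'distilbert-base-uncased'  # assumption: exact checkpoint not stated in the notebook
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Same CSV files and preprocessing as the roberta notebook
data_hf = load_dataset('csv', data_files={'train': 'data_train.csv',
                                          'test': 'data_test.csv'})
data_hf_tokenized = data_hf.map(lambda x: tokenizer(x['full_info'], max_length=512, truncation=True))
data_hf_tokenized = data_hf_tokenized.rename_column('tags', 'label')
data_hf_tokenized = data_hf_tokenized.remove_columns(['full_info'])

# Same classification head size (19 labels) and training arguments
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=19)
args = TrainingArguments(output_dir='logs_distilbert', per_device_train_batch_size=8, do_train=True)

trainer = Trainer(
    model,
    args,
    train_dataset=data_hf_tokenized['train'],
    eval_dataset=data_hf_tokenized['test'],
    data_collator=DataCollatorWithPadding(tokenizer),
)
trainer.train()
trainer.save_model('model_distilbert_trained')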