florentianayuwono committed on
Commit
082fe19
1 Parent(s): b12fa7d

Add transformer use cases

Files changed (1)
  1. training.ipynb +945 -0
training.ipynb ADDED
@@ -0,0 +1,945 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "attachments": {},
5
+ "cell_type": "markdown",
6
+ "metadata": {},
7
+ "source": [
8
+ "# 1. Transformer Models"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 20,
14
+ "metadata": {},
15
+ "outputs": [],
16
+ "source": [
17
+ "import transformers"
18
+ ]
19
+ },
20
+ {
21
+ "attachments": {},
22
+ "cell_type": "markdown",
23
+ "metadata": {},
24
+ "source": [
25
+ "## Transformers, what can they do?"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": 7,
31
+ "metadata": {},
32
+ "outputs": [
33
+ {
34
+ "name": "stderr",
35
+ "output_type": "stream",
36
+ "text": [
37
+ "No model was supplied, defaulted to distilbert-base-uncased-finetuned-sst-2-english and revision af0f99b (https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english).\n",
38
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
39
+ ]
40
+ },
41
+ {
42
+ "data": {
43
+ "text/plain": [
44
+ "[{'label': 'POSITIVE', 'score': 0.6012226343154907}]"
45
+ ]
46
+ },
47
+ "execution_count": 7,
48
+ "metadata": {},
49
+ "output_type": "execute_result"
50
+ }
51
+ ],
52
+ "source": [
53
+ "from transformers import pipeline\n",
54
+ "\n",
55
+ "classifier = pipeline(\"sentiment-analysis\")\n",
56
+ "classifier(\"OMG this is my first time trying this!\")"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": 6,
62
+ "metadata": {},
63
+ "outputs": [
64
+ {
65
+ "data": {
66
+ "text/plain": [
67
+ "[{'label': 'POSITIVE', 'score': 0.9998352527618408},\n",
68
+ " {'label': 'NEGATIVE', 'score': 0.9995977282524109}]"
69
+ ]
70
+ },
71
+ "execution_count": 6,
72
+ "metadata": {},
73
+ "output_type": "execute_result"
74
+ }
75
+ ],
76
+ "source": [
77
+ "classifier(\n",
78
+ " [\"I really like this a lot!\", \"I hate it like this.\"]\n",
79
+ ")"
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": 12,
85
+ "metadata": {},
86
+ "outputs": [
87
+ {
88
+ "name": "stderr",
89
+ "output_type": "stream",
90
+ "text": [
91
+ "No model was supplied, defaulted to facebook/bart-large-mnli and revision c626438 (https://huggingface.co/facebook/bart-large-mnli).\n",
92
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
93
+ ]
94
+ },
95
+ {
96
+ "data": {
97
+ "text/plain": [
98
+ "{'sequence': 'How to differentiate sun and cloud?',\n",
99
+ " 'labels': ['education', 'business', 'politics'],\n",
100
+ " 'scores': [0.7144545316696167, 0.19746531546115875, 0.08808010816574097]}"
101
+ ]
102
+ },
103
+ "execution_count": 12,
104
+ "metadata": {},
105
+ "output_type": "execute_result"
106
+ }
107
+ ],
108
+ "source": [
109
+ "classifier = pipeline(\"zero-shot-classification\")\n",
110
+ "classifier(\n",
111
+ " \"How to differentiate sun and cloud?\",\n",
112
+ " candidate_labels = [\"education\", \"politics\", \"business\"]\n",
113
+ ")"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": 13,
119
+ "metadata": {},
120
+ "outputs": [
121
+ {
122
+ "name": "stderr",
123
+ "output_type": "stream",
124
+ "text": [
125
+ "No model was supplied, defaulted to gpt2 and revision 6c0e608 (https://huggingface.co/gpt2).\n",
126
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
127
+ ]
128
+ },
129
+ {
130
+ "data": {
131
+ "application/vnd.jupyter.widget-view+json": {
132
+ "model_id": "f065452c7f924df7a5666b71186fd6d5",
133
+ "version_major": 2,
134
+ "version_minor": 0
135
+ },
136
+ "text/plain": [
137
+ "Downloading: 0%| | 0.00/665 [00:00<?, ?B/s]"
138
+ ]
139
+ },
140
+ "metadata": {},
141
+ "output_type": "display_data"
142
+ },
143
+ {
144
+ "data": {
145
+ "application/vnd.jupyter.widget-view+json": {
146
+ "model_id": "f261021fbdc5427abc2c54914de96ed2",
147
+ "version_major": 2,
148
+ "version_minor": 0
149
+ },
150
+ "text/plain": [
151
+ "Downloading: 0%| | 0.00/548M [00:00<?, ?B/s]"
152
+ ]
153
+ },
154
+ "metadata": {},
155
+ "output_type": "display_data"
156
+ },
157
+ {
158
+ "data": {
159
+ "application/vnd.jupyter.widget-view+json": {
160
+ "model_id": "b3dbab5273b64fb09e75ced6c5380c1c",
161
+ "version_major": 2,
162
+ "version_minor": 0
163
+ },
164
+ "text/plain": [
165
+ "Downloading: 0%| | 0.00/124 [00:00<?, ?B/s]"
166
+ ]
167
+ },
168
+ "metadata": {},
169
+ "output_type": "display_data"
170
+ },
171
+ {
172
+ "data": {
173
+ "application/vnd.jupyter.widget-view+json": {
174
+ "model_id": "85df61ca152e4e9dabccb090df5b195e",
175
+ "version_major": 2,
176
+ "version_minor": 0
177
+ },
178
+ "text/plain": [
179
+ "Downloading: 0%| | 0.00/1.04M [00:00<?, ?B/s]"
180
+ ]
181
+ },
182
+ "metadata": {},
183
+ "output_type": "display_data"
184
+ },
185
+ {
186
+ "data": {
187
+ "application/vnd.jupyter.widget-view+json": {
188
+ "model_id": "3a371f13013f4e90890b17b0d56fa7d6",
189
+ "version_major": 2,
190
+ "version_minor": 0
191
+ },
192
+ "text/plain": [
193
+ "Downloading: 0%| | 0.00/456k [00:00<?, ?B/s]"
194
+ ]
195
+ },
196
+ "metadata": {},
197
+ "output_type": "display_data"
198
+ },
199
+ {
200
+ "data": {
201
+ "application/vnd.jupyter.widget-view+json": {
202
+ "model_id": "389ab29d60d64fd7bc9c7745599cf713",
203
+ "version_major": 2,
204
+ "version_minor": 0
205
+ },
206
+ "text/plain": [
207
+ "Downloading: 0%| | 0.00/1.36M [00:00<?, ?B/s]"
208
+ ]
209
+ },
210
+ "metadata": {},
211
+ "output_type": "display_data"
212
+ },
213
+ {
214
+ "name": "stderr",
215
+ "output_type": "stream",
216
+ "text": [
217
+ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
218
+ "/Users/florentiana.yuwono/anaconda3/lib/python3.10/site-packages/transformers/generation/utils.py:1353: UserWarning: Using `max_length`'s default (50) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n",
219
+ " warnings.warn(\n"
220
+ ]
221
+ },
222
+ {
223
+ "data": {
224
+ "text/plain": [
225
+ "[{'generated_text': \"In this class, I will speak about something I've been thinking about for quite some time and it won't even come up for a while.\\n\\nLet's be honest and tell you; it has to be so simple. You do not need\"}]"
226
+ ]
227
+ },
228
+ "execution_count": 13,
229
+ "metadata": {},
230
+ "output_type": "execute_result"
231
+ }
232
+ ],
233
+ "source": [
234
+ "generator = pipeline(\"text-generation\")\n",
235
+ "generator(\"In this class, I will speak\")"
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "code",
240
+ "execution_count": 14,
241
+ "metadata": {},
242
+ "outputs": [
243
+ {
244
+ "data": {
245
+ "application/vnd.jupyter.widget-view+json": {
246
+ "model_id": "d893dfa6d45e4dce8f8f548ce903c330",
247
+ "version_major": 2,
248
+ "version_minor": 0
249
+ },
250
+ "text/plain": [
251
+ "Downloading: 0%| | 0.00/762 [00:00<?, ?B/s]"
252
+ ]
253
+ },
254
+ "metadata": {},
255
+ "output_type": "display_data"
256
+ },
257
+ {
258
+ "data": {
259
+ "application/vnd.jupyter.widget-view+json": {
260
+ "model_id": "5e0cafc878e94597872168c19bcfbe00",
261
+ "version_major": 2,
262
+ "version_minor": 0
263
+ },
264
+ "text/plain": [
265
+ "Downloading: 0%| | 0.00/353M [00:00<?, ?B/s]"
266
+ ]
267
+ },
268
+ "metadata": {},
269
+ "output_type": "display_data"
270
+ },
271
+ {
272
+ "data": {
273
+ "application/vnd.jupyter.widget-view+json": {
274
+ "model_id": "a374402e1a4748f4b3b8b964589f6868",
275
+ "version_major": 2,
276
+ "version_minor": 0
277
+ },
278
+ "text/plain": [
279
+ "Downloading: 0%| | 0.00/124 [00:00<?, ?B/s]"
280
+ ]
281
+ },
282
+ "metadata": {},
283
+ "output_type": "display_data"
284
+ },
285
+ {
286
+ "data": {
287
+ "application/vnd.jupyter.widget-view+json": {
288
+ "model_id": "8476884d2c114598904427a1891a6beb",
289
+ "version_major": 2,
290
+ "version_minor": 0
291
+ },
292
+ "text/plain": [
293
+ "Downloading: 0%| | 0.00/1.04M [00:00<?, ?B/s]"
294
+ ]
295
+ },
296
+ "metadata": {},
297
+ "output_type": "display_data"
298
+ },
299
+ {
300
+ "data": {
301
+ "application/vnd.jupyter.widget-view+json": {
302
+ "model_id": "4730d96e505a4cf0b9f2761a1201822b",
303
+ "version_major": 2,
304
+ "version_minor": 0
305
+ },
306
+ "text/plain": [
307
+ "Downloading: 0%| | 0.00/456k [00:00<?, ?B/s]"
308
+ ]
309
+ },
310
+ "metadata": {},
311
+ "output_type": "display_data"
312
+ },
313
+ {
314
+ "data": {
315
+ "application/vnd.jupyter.widget-view+json": {
316
+ "model_id": "ca8e61625c024428963339dfca8ecb67",
317
+ "version_major": 2,
318
+ "version_minor": 0
319
+ },
320
+ "text/plain": [
321
+ "Downloading: 0%| | 0.00/1.36M [00:00<?, ?B/s]"
322
+ ]
323
+ },
324
+ "metadata": {},
325
+ "output_type": "display_data"
326
+ },
327
+ {
328
+ "name": "stderr",
329
+ "output_type": "stream",
330
+ "text": [
331
+ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
332
+ ]
333
+ },
334
+ {
335
+ "data": {
336
+ "text/plain": [
337
+ "[{'generated_text': 'In this class, I will speak as a lecturer to the students on social media (for those of you who are interested).\\nThere are many classes'},\n",
338
+ " {'generated_text': 'In this class, I will speak for a particular type of group of writers that I want to talk about, like us writers like us writers, people'}]"
339
+ ]
340
+ },
341
+ "execution_count": 14,
342
+ "metadata": {},
343
+ "output_type": "execute_result"
344
+ }
345
+ ],
346
+ "source": [
347
+ "generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n",
348
+ "generator(\n",
349
+ " \"In this class, I will speak\",\n",
350
+ " max_length=30,\n",
351
+ " num_return_sequences=2\n",
352
+ ")"
353
+ ]
354
+ },
355
+ {
356
+ "cell_type": "code",
357
+ "execution_count": 15,
358
+ "metadata": {},
359
+ "outputs": [
360
+ {
361
+ "name": "stderr",
362
+ "output_type": "stream",
363
+ "text": [
364
+ "No model was supplied, defaulted to distilroberta-base and revision ec58a5b (https://huggingface.co/distilroberta-base).\n",
365
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
366
+ ]
367
+ },
368
+ {
369
+ "data": {
370
+ "application/vnd.jupyter.widget-view+json": {
371
+ "model_id": "cf08df8aa5a444649e21d7c0d7e64039",
372
+ "version_major": 2,
373
+ "version_minor": 0
374
+ },
375
+ "text/plain": [
376
+ "Downloading: 0%| | 0.00/480 [00:00<?, ?B/s]"
377
+ ]
378
+ },
379
+ "metadata": {},
380
+ "output_type": "display_data"
381
+ },
382
+ {
383
+ "data": {
384
+ "application/vnd.jupyter.widget-view+json": {
385
+ "model_id": "8b5e4348c7904d0890da6b768461c5af",
386
+ "version_major": 2,
387
+ "version_minor": 0
388
+ },
389
+ "text/plain": [
390
+ "Downloading: 0%| | 0.00/331M [00:00<?, ?B/s]"
391
+ ]
392
+ },
393
+ "metadata": {},
394
+ "output_type": "display_data"
395
+ },
396
+ {
397
+ "data": {
398
+ "application/vnd.jupyter.widget-view+json": {
399
+ "model_id": "100d8fbb75b14bea92bb635b116c1d08",
400
+ "version_major": 2,
401
+ "version_minor": 0
402
+ },
403
+ "text/plain": [
404
+ "Downloading: 0%| | 0.00/899k [00:00<?, ?B/s]"
405
+ ]
406
+ },
407
+ "metadata": {},
408
+ "output_type": "display_data"
409
+ },
410
+ {
411
+ "data": {
412
+ "application/vnd.jupyter.widget-view+json": {
413
+ "model_id": "a4cdc5db6d7541918624369df29642ed",
414
+ "version_major": 2,
415
+ "version_minor": 0
416
+ },
417
+ "text/plain": [
418
+ "Downloading: 0%| | 0.00/456k [00:00<?, ?B/s]"
419
+ ]
420
+ },
421
+ "metadata": {},
422
+ "output_type": "display_data"
423
+ },
424
+ {
425
+ "data": {
426
+ "application/vnd.jupyter.widget-view+json": {
427
+ "model_id": "d808f036abef4c62bea2dd62601502e8",
428
+ "version_major": 2,
429
+ "version_minor": 0
430
+ },
431
+ "text/plain": [
432
+ "Downloading: 0%| | 0.00/1.36M [00:00<?, ?B/s]"
433
+ ]
434
+ },
435
+ "metadata": {},
436
+ "output_type": "display_data"
437
+ },
438
+ {
439
+ "data": {
440
+ "text/plain": [
441
+ "[{'score': 0.06216174364089966,\n",
442
+ " 'token': 42,\n",
443
+ " 'token_str': ' this',\n",
444
+ " 'sequence': 'The sky is blue and bright, I wonder what this is about.'},\n",
445
+ " {'score': 0.040428631007671356,\n",
446
+ " 'token': 24,\n",
447
+ " 'token_str': ' it',\n",
448
+ " 'sequence': 'The sky is blue and bright, I wonder what it is about.'},\n",
449
+ " {'score': 0.023530298843979836,\n",
450
+ " 'token': 14,\n",
451
+ " 'token_str': ' that',\n",
452
+ " 'sequence': 'The sky is blue and bright, I wonder what that is about.'}]"
453
+ ]
454
+ },
455
+ "execution_count": 15,
456
+ "metadata": {},
457
+ "output_type": "execute_result"
458
+ }
459
+ ],
460
+ "source": [
461
+ "unmasker = pipeline(\"fill-mask\")\n",
462
+ "unmasker(\"The sky is blue and bright, I wonder what <mask> is about.\", top_k=3)"
463
+ ]
464
+ },
465
+ {
466
+ "cell_type": "code",
467
+ "execution_count": 16,
468
+ "metadata": {},
469
+ "outputs": [
470
+ {
471
+ "name": "stderr",
472
+ "output_type": "stream",
473
+ "text": [
474
+ "No model was supplied, defaulted to dbmdz/bert-large-cased-finetuned-conll03-english and revision f2482bf (https://huggingface.co/dbmdz/bert-large-cased-finetuned-conll03-english).\n",
475
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
476
+ ]
477
+ },
478
+ {
479
+ "data": {
480
+ "application/vnd.jupyter.widget-view+json": {
481
+ "model_id": "d1a918d701ee46ccb54ab2989305585f",
482
+ "version_major": 2,
483
+ "version_minor": 0
484
+ },
485
+ "text/plain": [
486
+ "Downloading: 0%| | 0.00/998 [00:00<?, ?B/s]"
487
+ ]
488
+ },
489
+ "metadata": {},
490
+ "output_type": "display_data"
491
+ },
492
+ {
493
+ "data": {
494
+ "application/vnd.jupyter.widget-view+json": {
495
+ "model_id": "a95eff2616b64dcfba172bc43dfe23be",
496
+ "version_major": 2,
497
+ "version_minor": 0
498
+ },
499
+ "text/plain": [
500
+ "Downloading: 0%| | 0.00/1.33G [00:00<?, ?B/s]"
501
+ ]
502
+ },
503
+ "metadata": {},
504
+ "output_type": "display_data"
505
+ },
506
+ {
507
+ "data": {
508
+ "application/vnd.jupyter.widget-view+json": {
509
+ "model_id": "63c9f30a9542417aabc57971b9d9b44f",
510
+ "version_major": 2,
511
+ "version_minor": 0
512
+ },
513
+ "text/plain": [
514
+ "Downloading: 0%| | 0.00/60.0 [00:00<?, ?B/s]"
515
+ ]
516
+ },
517
+ "metadata": {},
518
+ "output_type": "display_data"
519
+ },
520
+ {
521
+ "data": {
522
+ "application/vnd.jupyter.widget-view+json": {
523
+ "model_id": "e61894113e164c1eb5a66ad300971bfe",
524
+ "version_major": 2,
525
+ "version_minor": 0
526
+ },
527
+ "text/plain": [
528
+ "Downloading: 0%| | 0.00/213k [00:00<?, ?B/s]"
529
+ ]
530
+ },
531
+ "metadata": {},
532
+ "output_type": "display_data"
533
+ },
534
+ {
535
+ "name": "stderr",
536
+ "output_type": "stream",
537
+ "text": [
538
+ "/Users/florentiana.yuwono/anaconda3/lib/python3.10/site-packages/transformers/pipelines/token_classification.py:169: UserWarning: `grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"simple\"` instead.\n",
539
+ " warnings.warn(\n"
540
+ ]
541
+ },
542
+ {
543
+ "data": {
544
+ "text/plain": [
545
+ "[{'entity_group': 'LOC',\n",
546
+ " 'score': 0.86960346,\n",
547
+ " 'word': 'Owl City',\n",
548
+ " 'start': 56,\n",
549
+ " 'end': 64}]"
550
+ ]
551
+ },
552
+ "execution_count": 16,
553
+ "metadata": {},
554
+ "output_type": "execute_result"
555
+ }
556
+ ],
557
+ "source": [
558
+ "ner = pipeline(\"ner\", grouped_entities=True)\n",
559
+ "\n",
560
+ "ner(\"Mine is Hilarious, usually spotted at united nations in Owl City.\")"
561
+ ]
562
+ },
563
+ {
564
+ "cell_type": "code",
565
+ "execution_count": 17,
566
+ "metadata": {},
567
+ "outputs": [
568
+ {
569
+ "name": "stderr",
570
+ "output_type": "stream",
571
+ "text": [
572
+ "No model was supplied, defaulted to distilbert-base-cased-distilled-squad and revision 626af31 (https://huggingface.co/distilbert-base-cased-distilled-squad).\n",
573
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
574
+ ]
575
+ },
576
+ {
577
+ "data": {
578
+ "application/vnd.jupyter.widget-view+json": {
579
+ "model_id": "95136a50322745ea8715e36e9e2a4b50",
580
+ "version_major": 2,
581
+ "version_minor": 0
582
+ },
583
+ "text/plain": [
584
+ "Downloading: 0%| | 0.00/473 [00:00<?, ?B/s]"
585
+ ]
586
+ },
587
+ "metadata": {},
588
+ "output_type": "display_data"
589
+ },
590
+ {
591
+ "data": {
592
+ "application/vnd.jupyter.widget-view+json": {
593
+ "model_id": "a1440abd06884310b97a38e669641a21",
594
+ "version_major": 2,
595
+ "version_minor": 0
596
+ },
597
+ "text/plain": [
598
+ "Downloading: 0%| | 0.00/261M [00:00<?, ?B/s]"
599
+ ]
600
+ },
601
+ "metadata": {},
602
+ "output_type": "display_data"
603
+ },
604
+ {
605
+ "data": {
606
+ "application/vnd.jupyter.widget-view+json": {
607
+ "model_id": "3359a992135d47a48b29f5e0f939eb44",
608
+ "version_major": 2,
609
+ "version_minor": 0
610
+ },
611
+ "text/plain": [
612
+ "Downloading: 0%| | 0.00/29.0 [00:00<?, ?B/s]"
613
+ ]
614
+ },
615
+ "metadata": {},
616
+ "output_type": "display_data"
617
+ },
618
+ {
619
+ "data": {
620
+ "application/vnd.jupyter.widget-view+json": {
621
+ "model_id": "bd5f68624b5c4b62b4c87d8e046b69f4",
622
+ "version_major": 2,
623
+ "version_minor": 0
624
+ },
625
+ "text/plain": [
626
+ "Downloading: 0%| | 0.00/213k [00:00<?, ?B/s]"
627
+ ]
628
+ },
629
+ "metadata": {},
630
+ "output_type": "display_data"
631
+ },
632
+ {
633
+ "data": {
634
+ "application/vnd.jupyter.widget-view+json": {
635
+ "model_id": "7160d2a6f8a942d7b298453684e9ee49",
636
+ "version_major": 2,
637
+ "version_minor": 0
638
+ },
639
+ "text/plain": [
640
+ "Downloading: 0%| | 0.00/436k [00:00<?, ?B/s]"
641
+ ]
642
+ },
643
+ "metadata": {},
644
+ "output_type": "display_data"
645
+ },
646
+ {
647
+ "data": {
648
+ "text/plain": [
649
+ "{'score': 0.4208132028579712,\n",
650
+ " 'start': 0,\n",
651
+ " 'end': 44,\n",
652
+ " 'answer': 'Mine is Hilarious, usually grab from library'}"
653
+ ]
654
+ },
655
+ "execution_count": 17,
656
+ "metadata": {},
657
+ "output_type": "execute_result"
658
+ }
659
+ ],
660
+ "source": [
661
+ "qa = pipeline(\"question-answering\")\n",
662
+ "qa(\n",
663
+ " question=\"Where is it?\",\n",
664
+ " context=\"Mine is Hilarious, usually grab from library though.\"\n",
665
+ ")"
666
+ ]
667
+ },
668
+ {
669
+ "cell_type": "code",
670
+ "execution_count": 18,
671
+ "metadata": {},
672
+ "outputs": [
673
+ {
674
+ "name": "stderr",
675
+ "output_type": "stream",
676
+ "text": [
677
+ "No model was supplied, defaulted to sshleifer/distilbart-cnn-12-6 and revision a4f8f3e (https://huggingface.co/sshleifer/distilbart-cnn-12-6).\n",
678
+ "Using a pipeline without specifying a model name and revision in production is not recommended.\n"
679
+ ]
680
+ },
681
+ {
682
+ "data": {
683
+ "application/vnd.jupyter.widget-view+json": {
684
+ "model_id": "201842ea28eb45b9838d9ad77e90b528",
685
+ "version_major": 2,
686
+ "version_minor": 0
687
+ },
688
+ "text/plain": [
689
+ "Downloading: 0%| | 0.00/1.80k [00:00<?, ?B/s]"
690
+ ]
691
+ },
692
+ "metadata": {},
693
+ "output_type": "display_data"
694
+ },
695
+ {
696
+ "data": {
697
+ "application/vnd.jupyter.widget-view+json": {
698
+ "model_id": "848fa320643340798fc4e3c6119b567e",
699
+ "version_major": 2,
700
+ "version_minor": 0
701
+ },
702
+ "text/plain": [
703
+ "Downloading: 0%| | 0.00/1.22G [00:00<?, ?B/s]"
704
+ ]
705
+ },
706
+ "metadata": {},
707
+ "output_type": "display_data"
708
+ },
709
+ {
710
+ "data": {
711
+ "application/vnd.jupyter.widget-view+json": {
712
+ "model_id": "bf8110edeb18471fa048308bb782b089",
713
+ "version_major": 2,
714
+ "version_minor": 0
715
+ },
716
+ "text/plain": [
717
+ "Downloading: 0%| | 0.00/26.0 [00:00<?, ?B/s]"
718
+ ]
719
+ },
720
+ "metadata": {},
721
+ "output_type": "display_data"
722
+ },
723
+ {
724
+ "data": {
725
+ "application/vnd.jupyter.widget-view+json": {
726
+ "model_id": "b05eb6e6ada44e1ea88342739fc0f215",
727
+ "version_major": 2,
728
+ "version_minor": 0
729
+ },
730
+ "text/plain": [
731
+ "Downloading: 0%| | 0.00/899k [00:00<?, ?B/s]"
732
+ ]
733
+ },
734
+ "metadata": {},
735
+ "output_type": "display_data"
736
+ },
737
+ {
738
+ "data": {
739
+ "application/vnd.jupyter.widget-view+json": {
740
+ "model_id": "992d67bdfdb64d2387f4029d4edb8d4f",
741
+ "version_major": 2,
742
+ "version_minor": 0
743
+ },
744
+ "text/plain": [
745
+ "Downloading: 0%| | 0.00/456k [00:00<?, ?B/s]"
746
+ ]
747
+ },
748
+ "metadata": {},
749
+ "output_type": "display_data"
750
+ },
751
+ {
752
+ "data": {
753
+ "text/plain": [
754
+ "[{'summary_text': ' America has changed dramatically during recent years . The number of engineering graduates in the U.S. has declined in traditional engineering disciplines such as mechanical, civil, electrical, chemical, and aeronautical engineering . Rapidly developing economies such as China and India, as well as other industrial countries in Europe and Asia, continue to encourage and advance engineering .'}]"
755
+ ]
756
+ },
757
+ "execution_count": 18,
758
+ "metadata": {},
759
+ "output_type": "execute_result"
760
+ }
761
+ ],
762
+ "source": [
763
+ "summarizer = pipeline(\"summarization\")\n",
764
+ "summarizer(\n",
765
+ " \"\"\"\n",
766
+ " America has changed dramatically during recent years. Not only has the number of \n",
767
+ " graduates in traditional engineering disciplines such as mechanical, civil, \n",
768
+ " electrical, chemical, and aeronautical engineering declined, but in most of \n",
769
+ " the premier American universities engineering curricula now concentrate on \n",
770
+ " and encourage largely the study of engineering science. As a result, there \n",
771
+ " are declining offerings in engineering subjects dealing with infrastructure, \n",
772
+ " the environment, and related issues, and greater concentration on high \n",
773
+ " technology subjects, largely supporting increasingly complex scientific \n",
774
+ " developments. While the latter is important, it should not be at the expense \n",
775
+ " of more traditional engineering.\n",
776
+ "\n",
777
+ " Rapidly developing economies such as China and India, as well as other \n",
778
+ " industrial countries in Europe and Asia, continue to encourage and advance \n",
779
+ " the teaching of engineering. Both China and India, respectively, graduate \n",
780
+ " six and eight times as many traditional engineers as does the United States. \n",
781
+ " Other industrial countries at minimum maintain their output, while America \n",
782
+ " suffers an increasingly serious decline in the number of engineering graduates \n",
783
+ " and a lack of well-educated engineers.\n",
784
+ " \"\"\"\n",
785
+ ")"
786
+ ]
787
+ },
788
+ {
789
+ "cell_type": "code",
790
+ "execution_count": 24,
791
+ "metadata": {},
792
+ "outputs": [
793
+ {
794
+ "ename": "ValueError",
795
+ "evalue": "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.",
796
+ "output_type": "error",
797
+ "traceback": [
798
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
799
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
800
+ "Cell \u001b[0;32mIn[24], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m translator \u001b[39m=\u001b[39m pipeline(\u001b[39m\"\u001b[39;49m\u001b[39mtranslation\u001b[39;49m\u001b[39m\"\u001b[39;49m, model\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mHelsinki-NLP/opus-mt-fr-en\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n\u001b[1;32m 2\u001b[0m translator(\u001b[39m\"\u001b[39m\u001b[39mCe cours est produit par.\u001b[39m\u001b[39m\"\u001b[39m)\n",
801
+ "File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/transformers/pipelines/__init__.py:885\u001b[0m, in \u001b[0;36mpipeline\u001b[0;34m(task, model, config, tokenizer, feature_extractor, image_processor, framework, revision, use_fast, use_auth_token, device, device_map, torch_dtype, trust_remote_code, model_kwargs, pipeline_class, **kwargs)\u001b[0m\n\u001b[1;32m 882\u001b[0m tokenizer_kwargs \u001b[39m=\u001b[39m model_kwargs\u001b[39m.\u001b[39mcopy()\n\u001b[1;32m 883\u001b[0m tokenizer_kwargs\u001b[39m.\u001b[39mpop(\u001b[39m\"\u001b[39m\u001b[39mtorch_dtype\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m)\n\u001b[0;32m--> 885\u001b[0m tokenizer \u001b[39m=\u001b[39m AutoTokenizer\u001b[39m.\u001b[39;49mfrom_pretrained(\n\u001b[1;32m 886\u001b[0m tokenizer_identifier, use_fast\u001b[39m=\u001b[39;49muse_fast, _from_pipeline\u001b[39m=\u001b[39;49mtask, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mhub_kwargs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mtokenizer_kwargs\n\u001b[1;32m 887\u001b[0m )\n\u001b[1;32m 889\u001b[0m \u001b[39mif\u001b[39;00m load_image_processor:\n\u001b[1;32m 890\u001b[0m \u001b[39m# Try to infer image processor from model or config name (if provided as str)\u001b[39;00m\n\u001b[1;32m 891\u001b[0m \u001b[39mif\u001b[39;00m image_processor \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n",
802
+ "File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py:714\u001b[0m, in \u001b[0;36mAutoTokenizer.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *inputs, **kwargs)\u001b[0m\n\u001b[1;32m 712\u001b[0m \u001b[39mreturn\u001b[39;00m tokenizer_class_py\u001b[39m.\u001b[39mfrom_pretrained(pretrained_model_name_or_path, \u001b[39m*\u001b[39minputs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[1;32m 713\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 714\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m 715\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mThis tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 716\u001b[0m \u001b[39m\"\u001b[39m\u001b[39min order to use this tokenizer.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 717\u001b[0m )\n\u001b[1;32m 719\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m 720\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mUnrecognized configuration class \u001b[39m\u001b[39m{\u001b[39;00mconfig\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m to build an AutoTokenizer.\u001b[39m\u001b[39m\\n\u001b[39;00m\u001b[39m\"\u001b[39m\n\u001b[1;32m 721\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mModel type should be one of \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39m'\u001b[39m\u001b[39m, \u001b[39m\u001b[39m'\u001b[39m\u001b[39m.\u001b[39mjoin(c\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m \u001b[39mfor\u001b[39;00m c \u001b[39min\u001b[39;00m TOKENIZER_MAPPING\u001b[39m.\u001b[39mkeys())\u001b[39m}\u001b[39;00m\u001b[39m.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 722\u001b[0m )\n",
803
+ "\u001b[0;31mValueError\u001b[0m: This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer."
804
+ ]
805
+ }
806
+ ],
807
+ "source": [
808
+ "translator = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-fr-en\")\n",
809
+ "translator(\"Ce cours est produit par.\")"
810
+ ]
811
+ },
812
+ {
813
+ "attachments": {},
814
+ "cell_type": "markdown",
815
+ "metadata": {},
816
+ "source": [
817
+ "## Bias and limitations"
818
+ ]
819
+ },
820
+ {
821
+ "cell_type": "code",
822
+ "execution_count": 26,
823
+ "metadata": {},
824
+ "outputs": [
825
+ {
826
+ "data": {
827
+ "application/vnd.jupyter.widget-view+json": {
828
+ "model_id": "e7aa993970d14ab986e0a7ea1e60087a",
829
+ "version_major": 2,
830
+ "version_minor": 0
831
+ },
832
+ "text/plain": [
833
+ "Downloading: 0%| | 0.00/570 [00:00<?, ?B/s]"
834
+ ]
835
+ },
836
+ "metadata": {},
837
+ "output_type": "display_data"
838
+ },
839
+ {
840
+ "data": {
841
+ "application/vnd.jupyter.widget-view+json": {
842
+ "model_id": "67fc2d322c01448b97219733efdaed9e",
843
+ "version_major": 2,
844
+ "version_minor": 0
845
+ },
846
+ "text/plain": [
847
+ "Downloading: 0%| | 0.00/440M [00:00<?, ?B/s]"
848
+ ]
849
+ },
850
+ "metadata": {},
851
+ "output_type": "display_data"
852
+ },
853
+ {
854
+ "name": "stderr",
855
+ "output_type": "stream",
856
+ "text": [
857
+ "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
858
+ "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
859
+ "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
860
+ ]
861
+ },
862
+ {
863
+ "data": {
864
+ "application/vnd.jupyter.widget-view+json": {
865
+ "model_id": "0621be2286144ee296194d349ae8dd5d",
866
+ "version_major": 2,
867
+ "version_minor": 0
868
+ },
869
+ "text/plain": [
870
+ "Downloading: 0%| | 0.00/28.0 [00:00<?, ?B/s]"
871
+ ]
872
+ },
873
+ "metadata": {},
874
+ "output_type": "display_data"
875
+ },
876
+ {
877
+ "data": {
878
+ "application/vnd.jupyter.widget-view+json": {
879
+ "model_id": "674c967d777243819fc3dc85915a1764",
880
+ "version_major": 2,
881
+ "version_minor": 0
882
+ },
883
+ "text/plain": [
884
+ "Downloading: 0%| | 0.00/232k [00:00<?, ?B/s]"
885
+ ]
886
+ },
887
+ "metadata": {},
888
+ "output_type": "display_data"
889
+ },
890
+ {
891
+ "data": {
892
+ "application/vnd.jupyter.widget-view+json": {
893
+ "model_id": "d27abdd37df84243b85e16dea5d6d65e",
894
+ "version_major": 2,
895
+ "version_minor": 0
896
+ },
897
+ "text/plain": [
898
+ "Downloading: 0%| | 0.00/466k [00:00<?, ?B/s]"
899
+ ]
900
+ },
901
+ "metadata": {},
902
+ "output_type": "display_data"
903
+ },
904
+ {
905
+ "name": "stdout",
906
+ "output_type": "stream",
907
+ "text": [
908
+ "['carpenter', 'lawyer', 'farmer', 'businessman', 'doctor']\n",
909
+ "['nurse', 'maid', 'teacher', 'waitress', 'prostitute']\n"
910
+ ]
911
+ }
912
+ ],
913
+ "source": [
914
+ "unmasker = pipeline(\"fill-mask\", model=\"bert-base-uncased\")\n",
915
+ "result = unmasker(\"This man works as a [MASK].\")\n",
916
+ "print([r[\"token_str\"] for r in result])\n",
917
+ "\n",
918
+ "result = unmasker(\"This woman works as a [MASK].\")\n",
919
+ "print([r[\"token_str\"] for r in result])"
920
+ ]
921
+ }
922
+ ],
923
+ "metadata": {
924
+ "kernelspec": {
925
+ "display_name": "datascience",
926
+ "language": "python",
927
+ "name": "python3"
928
+ },
929
+ "language_info": {
930
+ "codemirror_mode": {
931
+ "name": "ipython",
932
+ "version": 3
933
+ },
934
+ "file_extension": ".py",
935
+ "mimetype": "text/x-python",
936
+ "name": "python",
937
+ "nbconvert_exporter": "python",
938
+ "pygments_lexer": "ipython3",
939
+ "version": "3.10.9"
940
+ },
941
+ "orig_nbformat": 4
942
+ },
943
+ "nbformat": 4,
944
+ "nbformat_minor": 2
945
+ }
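
Note on the failing translation cell: it raises `ValueError: This tokenizer cannot be instantiated. Please make sure you have sentencepiece installed`, because the Helsinki-NLP/opus-mt-fr-en tokenizer depends on the sentencepiece package. A minimal sketch of a fix, assuming a pip-managed Jupyter environment, is to install the missing dependency and re-run the cell:

    # Assumption: pip is available in this kernel's environment.
    !pip install sentencepiece

    from transformers import pipeline

    # Same call as in the notebook; with sentencepiece installed the tokenizer can load.
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")
    translator("Ce cours est produit par.")

After installing the package (and restarting the kernel if needed), the pipeline should return the English translation instead of raising the error shown above.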