ola13 committed on
Commit a8874d3
1 Parent(s): 6312dbf
Files changed (3)
  1. README.md +577 -0
  2. requirements.txt +4 -0
  3. wikipedia_citations.py +658 -0
README.md CHANGED
@@ -1,5 +1,6 @@
  ---
  dataset_info:
+ - config_name: default
  features:
  - name: id
  dtype: string
@@ -57,6 +58,582 @@ dataset_info:
  num_examples: 45750324
  download_size: 12683322513
  dataset_size: 29536547204
+ - config_name: 20230301.aa
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ download_size: 45886
+ dataset_size: 0
+ - config_name: 20230301.ab
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 408735
+ num_examples: 857
+ download_size: 3222122
+ dataset_size: 408735
+ - config_name: 20230301.ace
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 4377671
+ num_examples: 4337
+ download_size: 3608741
+ dataset_size: 4377671
+ - config_name: 20230301.ady
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 1788
+ num_examples: 4
+ download_size: 1065537
+ dataset_size: 1788
+ - config_name: 20230301.af
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 95435587
+ num_examples: 159932
+ download_size: 133044790
+ dataset_size: 95435587
+ - config_name: 20230301.ak
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 181375
+ num_examples: 301
+ download_size: 692116
+ dataset_size: 181375
+ - config_name: 20230301.als
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 10771658
+ num_examples: 21089
+ download_size: 60679007
+ dataset_size: 10771658
+ - config_name: 20230301.alt
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 2085328
+ num_examples: 2704
+ download_size: 3845233
+ dataset_size: 2085328
+ - config_name: 20230301.am
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ num_bytes: 1068734
+ num_examples: 1562
+ download_size: 8450310
+ dataset_size: 1068734
+ - config_name: 20230301.ami
+ features:
+ - name: id
+ dtype: string
+ - name: wiki_id
+ dtype: string
+ - name: wiki_url
+ dtype: string
+ - name: wiki_title
+ dtype: string
+ - name: citation_type
+ dtype: string
+ - name: template
+ dtype: string
+ - name: title
+ dtype: string
+ - name: url
+ dtype: string
+ - name: domain
+ dtype: string
+ - name: format
+ dtype: string
+ - name: publisher
+ dtype: string
+ - name: last
+ dtype: string
+ - name: first
+ dtype: string
+ - name: archiveurl
+ dtype: string
+ - name: urlstatus
+ dtype: string
+ - name: work
+ dtype: string
+ - name: language
+ dtype: string
+ - name: author
+ dtype: string
+ - name: year
+ dtype: string
+ - name: isbn
+ dtype: string
+ - name: journal
+ dtype: string
+ - name: volume
+ dtype: string
+ - name: doi
+ dtype: string
+ - name: issue
+ dtype: string
+ - name: newspaper
+ dtype: string
+ splits:
+ - name: train
+ download_size: 1259913
+ dataset_size: 0
  ---
  # Dataset Card for "wikipedia_citations"
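Each `config_name` added above corresponds to one `<dump date>.<language>` configuration exposed by the `wikipedia_citations.py` loading script added in this commit. A minimal loading sketch (the Hub repository id is not part of this commit, so the script is loaded from a local path here, and behaviour may differ across `datasets` versions):

```python
# Hedged sketch: load one of the configurations declared in the YAML above by
# pointing datasets at the loading script from this commit. "20230301.af" is one
# of the configs listed in the diff; any other listed config name works the same way.
from datasets import load_dataset

ds = load_dataset("./wikipedia_citations.py", "20230301.af", split="train")
print(ds.features)                              # all-string fields: wiki_title, citation_type, url, doi, ...
print(ds[0]["citation_type"], ds[0]["domain"])
```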
 
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ jsonlines
+ multiprocess
+ mwparserfromhell
+ tqdm
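The loading script below leans on `mwparserfromhell` to find `<ref>` tags and `{{cite ...}}` templates. A small illustration of the calls it relies on (the wikitext snippet is invented for the example, not taken from the dataset):

```python
# Illustrative only: the parse / filter_templates / template.params calls mirror
# what wikipedia_citations.py does inside _parse_obj.
import mwparserfromhell

wikitext = '<ref>{{cite web |title=Example page |url=https://example.org/report.pdf |publisher=Example Org}}</ref>'
parsed = mwparserfromhell.parse(wikitext)
for template in parsed.filter_templates():
    print(str(template.name).strip().lower())                 # "cite web"
    for param in template.params:
        print(str(param.name).strip(), "=", str(param.value).strip())
```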
wikipedia_citations.py ADDED
@@ -0,0 +1,658 @@
+ import bz2
+ import codecs
+ import datasets
+ import mwparserfromhell
+ import json
+ import re
+ import string
+ import traceback
+ import uuid
+ import xml.etree.cElementTree as etree
+
+ from multiprocessing import Process, Manager
+ from urllib.parse import quote
+ from tqdm import tqdm
+
+ _BASE_URL_TMPL = "https://dumps.wikimedia.org/{lang}wiki/{date}/"
+ _CITATION = """\
+ @ONLINE {wikidump,
+     author = {Wikimedia Foundation},
+     title = {Wikimedia Downloads},
+     url = {https://dumps.wikimedia.org}
+ }
+ """
+ _DATE = "20230301"
+ _DESCRIPTION = None
+ _INFO_FILE = "dumpstatus.json"
+ _LICENSE = (
+     "This work is licensed under the Creative Commons Attribution-ShareAlike "
+     "3.0 Unported License. To view a copy of this license, visit "
+     "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
+     "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
+ )
+
+
+ # Source: https://en.wikipedia.org/wiki/List_of_Wikipedias#Detailed_list (accessed 13/03/2023)
+ _WIKIPEDIA_LANGUAGES = [
+     "aa",
+     "ab",
+     "ace",
+     "ady",
+     "af",
+     "ak",
+     "als",
+     "alt",
+     "am",
+     "ami",
+     "an",
+     "ang",
+     "ar",
+     "arc",
+     "ary",
+     "arz",
+     "as",
+     "ast",
+     "atj",
+     "av",
+     "avk",
+     "awa",
+     "ay",
+     "az",
+     "azb",
+     "ba",
+     "ban",
+     "bar",
+     "bat-smg",
+     "bcl",
+     "be",
+     "be-tarask",
+     "bg",
+     "bh",
+     "bi",
+     "bjn",
+     "blk",
+     "bm",
+     "bn",
+     "bo",
+     "bpy",
+     "br",
+     "bs",
+     "bug",
+     "bxr",
+     "ca",
+     "cbk-zam",
+     "cdo",
+     "ce",
+     "ceb",
+     "ch",
+     "cho",
+     "chr",
+     "chy",
+     "ckb",
+     "co",
+     "cr",
+     "crh",
+     "cs",
+     "csb",
+     "cu",
+     "cv",
+     "cy",
+     "da",
+     "dag",
+     "de",
+     "din",
+     "diq",
+     "dsb",
+     "dty",
+     "dv",
+     "dz",
+     "ee",
+     "el",
+     "eml",
+     "en",
+     "eo",
+     "es",
+     "et",
+     "eu",
+     "ext",
+     "fa",
+     "ff",
+     "fi",
+     "fiu-vro",
+     "fj",
+     "fo",
+     "fr",
+     "frp",
+     "frr",
+     "fur",
+     "fy",
+     "ga",
+     "gag",
+     "gan",
+     "gcr",
+     "gd",
+     "gl",
+     "glk",
+     "gn",
+     "gom",
+     "gor",
+     "got",
+     "gu",
+     "guc",
+     "gur",
+     "guw",
+     "gv",
+     "ha",
+     "hak",
+     "haw",
+     "he",
+     "hi",
+     "hif",
+     "ho",
+     "hr",
+     "hsb",
+     "ht",
+     "hu",
+     "hy",
+     "hyw",
+     "hz",
+     "ia",
+     "id",
+     "ie",
+     "ig",
+     "ii",
+     "ik",
+     "ilo",
+     "inh",
+     "io",
+     "is",
+     "it",
+     "iu",
+     "ja",
+     "jam",
+     "jbo",
+     "jv",
+     "ka",
+     "kaa",
+     "kab",
+     "kbd",
+     "kbp",
+     "kcg",
+     "kg",
+     "ki",
+     "kj",
+     "kk",
+     "kl",
+     "km",
+     "kn",
+     "ko",
+     "koi",
+     "kr",
+     "krc",
+     "ks",
+     "ksh",
+     "ku",
+     "kv",
+     "kw",
+     "ky",
+     "la",
+     "lad",
+     "lb",
+     "lbe",
+     "lez",
+     "lfn",
+     "lg",
+     "li",
+     "lij",
+     "lld",
+     "lmo",
+     "ln",
+     "lo",
+     "lrc",
+     "lt",
+     "ltg",
+     "lv",
+     "mad",
+     "mai",
+     "map-bms",
+     "mdf",
+     "mg",
+     "mh",
+     "mhr",
+     "mi",
+     "min",
+     "mk",
+     "ml",
+     "mn",
+     "mni",
+     "mnw",
+     "mr",
+     "mrj",
+     "ms",
+     "mt",
+     "mus",
+     "mwl",
+     "my",
+     "myv",
+     "mzn",
+     "na",
+     "nah",
+     "nap",
+     "nds",
+     "nds-nl",
+     "ne",
+     "new",
+     "ng",
+     "nia",
+     "nl",
+     "nn",
+     "no",
+     "nov",
+     "nqo",
+     "nrm",
+     "nso",
+     "nv",
+     "ny",
+     "oc",
+     "olo",
+     "om",
+     "or",
+     "os",
+     "pa",
+     "pag",
+     "pam",
+     "pap",
+     "pcd",
+     "pcm",
+     "pdc",
+     "pfl",
+     "pi",
+     "pih",
+     "pl",
+     "pms",
+     "pnb",
+     "pnt",
+     "ps",
+     "pt",
+     "pwn",
+     "qu",
+     "rm",
+     "rmy",
+     "rn",
+     "ro",
+     "roa-rup",
+     "roa-tara",
+     "ru",
+     "rue",
+     "rw",
+     "sa",
+     "sah",
+     "sat",
+     "sc",
+     "scn",
+     "sco",
+     "sd",
+     "se",
+     "sg",
+     "sh",
+     "shi",
+     "shn",
+     "si",
+     "simple",
+     "sk",
+     "skr",
+     "sl",
+     "sm",
+     "smn",
+     "sn",
+     "so",
+     "sq",
+     "sr",
+     "srn",
+     "ss",
+     "st",
+     "stq",
+     "su",
+     "sv",
+     "sw",
+     "szl",
+     "szy",
+     "ta",
+     "tay",
+     "tcy",
+     "te",
+     "tet",
+     "tg",
+     "th",
+     "ti",
+     "tk",
+     "tl",
+     "tn",
+     "to",
+     "tpi",
+     "tr",
+     "trv",
+     "ts",
+     "tt",
+     "tum",
+     "tw",
+     "ty",
+     "tyv",
+     "udm",
+     "ug",
+     "uk",
+     "ur",
+     "uz",
+     "ve",
+     "vec",
+     "vep",
+     "vi",
+     "vls",
+     "vo",
+     "wa",
+     "war",
+     "wo",
+     "wuu",
+     "xal",
+     "xh",
+     "xmf",
+     "yi",
+     "yo",
+     "za",
+     "zea",
+     "zh",
+     "zh-classical",
+     "zh-min-nan",
+     "zh-yue",
+     "zu",
+ ]
+ _VERSION = datasets.Version("2.0.0", "")
+
+ core_params = {
+     "title",
+     "url",
+     "accessdate",
+     "date",
+     "publisher",
+     "archivedate",
+     "archiveurl",
+     "website",
+     "work",
+     "pages",
+     "isbn",
+     "page",
+     "journal",
+     "volume",
+     "location",
+     "doi",
+     "issue",
+     "newspaper",
+ }
+
+
+ class WikipediaCitationsConfig(datasets.BuilderConfig):
+     """BuilderConfig for Wikipedia Citations."""
+
+     def __init__(self, language=None, date=None, version=_VERSION, **kwargs):
+         """BuilderConfig for Wikipedia Citations.
+
+         Args:
+             language: string, the language code for the Wikipedia dump to use.
+             date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
+                 available dates can be found at https://dumps.wikimedia.org/enwiki/.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             name=f"{date}.{language}",
+             description=f"Wikipedia Citations dataset for {language}, parsed from {date} dump.",
+             version=version,
+             **kwargs,
+         )
+         self.date = date
+         self.language = language
+
+
+ class WikipediaCitations(datasets.GeneratorBasedBuilder):
+     # Use mirror (your.org) to avoid download caps.
+     BUILDER_CONFIG_CLASS = WikipediaCitationsConfig
+     BUILDER_CONFIGS = [
+         WikipediaCitationsConfig(
+             language=lang,
+             date=_DATE,
+         )  # pylint:disable=g-complex-comprehension
+         for lang in _WIKIPEDIA_LANGUAGES
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "wiki_id": datasets.Value("string"),
+                     "wiki_url": datasets.Value("string"),
+                     "wiki_title": datasets.Value("string"),
+                     "citation_type": datasets.Value("string"),
+                     "template": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "domain": datasets.Value("string"),
+                     "format": datasets.Value("string"),
+                     "publisher": datasets.Value("string"),
+                     "archiveurl": datasets.Value("string"),
+                     "work": datasets.Value("string"),
+                     "isbn": datasets.Value("string"),
+                     "journal": datasets.Value("string"),
+                     "volume": datasets.Value("string"),
+                     "doi": datasets.Value("string"),
+                     "issue": datasets.Value("string"),
+                     "newspaper": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,  # No default supervised_keys.
+             homepage="https://dumps.wikimedia.org",
+             citation=_CITATION,
+         )
+
+     def _construct_url(self, title, language):
+         # See: https://meta.wikimedia.org/wiki/Help:URL
+         return f"https://{language}.wikipedia.org/wiki/{quote(title)}"
+
+     def _split_generators(self, dl_manager):
+         def _base_url(lang):
+             return _BASE_URL_TMPL.format(
+                 lang=lang.replace("-", "_"), date=self.config.date
+             )
+
+         lang = self.config.language
+
+         info_url = _base_url(lang) + _INFO_FILE
+         # Use dictionary since testing mock always returns the same result.
+         downloaded_files = dl_manager.download_and_extract({"info": info_url})
+
+         xml_urls = []
+         total_bytes = 0
+         with open(downloaded_files["info"], encoding="utf-8") as f:
+             dump_info = json.load(f)
+             multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
+             assert (
+                 multistream_dump_info["status"] == "done"
+             ), "Specified dump (%s) multistream status is not 'done': %s" % (
+                 _base_url(lang),
+                 multistream_dump_info["status"],
+             )
+
+             for fname, info in multistream_dump_info["files"].items():
+                 if ".xml" not in fname:
+                     continue
+                 total_bytes += info["size"]
+                 xml_urls.append(_base_url(lang) + fname)
+
+         # Use dictionary since testing mock always returns the same result.
+
+         print("Downloading Wikipedia dump")
+         downloaded_files = dl_manager.download({"xml": xml_urls})
+         print("Finished downloading Wikipedia dump")
+
+         return [
+             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": downloaded_files["xml"], "language": lang},
+             )
+         ]
+
+     def _extract_content(self, filepath):
+         """Extracts article content from a single WikiMedia XML file."""
+         print("generating examples from {}".format(filepath))
+         content = []
+         f = bz2.BZ2File(filename=filepath)
+         # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+         utf_f = codecs.getreader("utf-8")(f)
+         context = etree.iterparse(utf_f, events=("end",))
+         for unused_event, elem in context:
+             if not elem.tag.endswith("page"):
+                 continue
+             namespace = elem.tag[:-4]
+             title = elem.find(f"./{namespace}title").text
+             ns = elem.find(f"./{namespace}ns").text
+             id_ = elem.find(f"./{namespace}id").text
+             red_ = elem.find(f"./{namespace}redirect")
+
+             # Filter pages that are not in the "main" namespace.
+             if ns != "0":
+                 elem.clear()
+                 continue
+
+             raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+             elem.clear()
+
+             # Filter redirects.
+             if raw_content is None or red_ is not None:
+                 continue
+
+             content.append((id_, title, raw_content))
+         return content
+
+     def _is_ref_tag(self, obj):
+         return str(obj.tag) in {"ref"}
+
+     def _normalize_role(self, text, role):
+         role_regex = re.compile(r"{}[0-9]+".format(role))
+         if re.fullmatch(role_regex, text) is not None:
+             text = role
+         return text
+
+     def _normalize_obj(self, obj):
+         text = str(obj).strip().lower()
+
+         for role in ["first", "last", "author", "editor"]:
+             text = self._normalize_role(text, role)
+
+         return text.translate(str.maketrans("", "", string.punctuation))
+
+
+     def _get_domain(self, url):
+         # Strip web-archive wrappers before taking the host part; the literal
+         # timestamps below only serve as length templates for the prefix slice.
+         if url is None:
+             return None
+         url = url.strip().lower()
+         while url.startswith("https://web.archive.org/"):
+             url = url[len("https://web.archive.org/web/20140109071239/"):]
+         while url.startswith("http://web.archive.org/"):
+             url = url[len("http://web.archive.org/web/20121023020317/"):]
+         while url.startswith("https://archive.today/"):
+             url = url[len("https://archive.today/20120728203512/"):]
+         while url.startswith("http://archive.today/"):
+             url = url[len("http://archive.today/2022.01.15-193252/"):]
+
+         tokens = url.split("/")
+         if len(tokens) < 3:
+             return None
+         return tokens[2]
+
+     def _get_format(self, url):
+         if url is None:
+             return None
+         url = url.strip().lower()
+         # remove params
+         prefix = url.split("?")[0]
+         suffix = prefix.split("/")[-1]
+         f = suffix.split(".")[-1] if "." in suffix else "no format"
+         return f
+
+     def _parse_obj(self, obj, language):
+         """Cleans raw wikicode to extract citations."""
+
+         refs = []
+         id_, title, raw_content = obj
+         url = self._construct_url(title, language)
+         wikicode = mwparserfromhell.parse(raw_content, skip_style_tags=True)
+
+         for i, refobj in enumerate(
+             wikicode.ifilter_tags(matches=self._is_ref_tag, recursive=True)
+         ):
+             try:
+                 templates = mwparserfromhell.parse(refobj).filter_templates()
+                 if templates is None or len(templates) == 0:
+                     continue
+                 for template in templates:
+                     params = {}
+                     for param in template.params:
+                         split_idx = param.find("=")
+                         key = self._normalize_obj(param[:split_idx])
+                         val = param[split_idx + 1 :].strip()
+                         if key in core_params:
+                             params[key] = val
+
+                     refs.append(
+                         {
+                             "id": str(uuid.uuid4()),
+                             "wiki_id": id_,
+                             "wiki_url": url,
+                             "wiki_title": title,
+                             "citation_type": str(template.name.strip().lower()),
+                             "template": str(template),
+                             "title": params.get("title"),
+                             "url": params.get("url"),
+                             "domain": self._get_domain(params.get("url")),
+                             "format": self._get_format(params.get("url")),
+                             "publisher": params.get("publisher"),
+                             "archiveurl": params.get("archiveurl"),
+                             "work": params.get("work"),
+                             "isbn": params.get("isbn"),
+                             "journal": params.get("journal"),
+                             "volume": params.get("volume"),
+                             "doi": params.get("doi"),
+                             "issue": params.get("issue"),
+                             "newspaper": params.get("newspaper"),
+                         }
+                     )
+             except Exception:
+                 print(traceback.format_exc())
+
+         return refs
+
+     def _generate_examples(self, filepaths, language):
+         print("Parsing and cleaning Wikipedia examples")
+         with Manager() as manager:
+             examples = manager.list()
+             processes = []
+             for filepath in filepaths:
+
+                 def parse_and_clean(examples):
+                     content = self._extract_content(filepath)
+                     for obj in tqdm(content):
+                         refs = self._parse_obj(obj, language=language)
+                         if refs is not None and len(refs) > 0:
+                             examples.extend(refs)
+
+                 p = Process(target=parse_and_clean, args=(examples,))
+                 p.start()
+                 processes.append(p)
+
+             for p in processes:
+                 p.join()
+
+             print("Parsed and cleaned Wikipedia examples")
+
+             for i, example in enumerate(examples):
+                 yield example["id"], example
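For reference, `_split_generators` above discovers the dump parts through the dump's `dumpstatus.json`. A rough sketch of the subset of that file the script actually reads (the file names and sizes below are invented for illustration; the real file carries many more jobs and keys):

```python
# Assumed shape of dumpstatus.json as consumed by _split_generators: only
# jobs -> articlesmultistreamdump -> status / files are read; every file whose
# name contains ".xml" has its size summed and its name appended to the dump
# base URL for download, while index files are skipped.
dumpstatus_sketch = {
    "jobs": {
        "articlesmultistreamdump": {
            "status": "done",
            "files": {
                "afwiki-20230301-pages-articles-multistream.xml.bz2": {"size": 123456789},
                "afwiki-20230301-pages-articles-multistream-index.txt.bz2": {"size": 234567},
            },
        }
    }
}
```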