parquet-converter committed on
Commit 713f20b
1 Parent(s): dcc50d1

Update parquet files

README.md DELETED
@@ -1,645 +0,0 @@
---
annotations_creators:
- crowdsourced
language_creators:
- crowdsourced
language:
- ar
- cs
- de
- en
- es
- fr
- hi
- id
- it
- ja
- ko
- nl
- pt
- ru
- th
- tr
- vi
- zh
license:
- cc-by-3.0
multilinguality:
- multilingual
size_categories:
- 10K<n<100K
- 1K<n<10K
source_datasets:
- original
task_categories:
- summarization
task_ids: []
paperswithcode_id: wikilingua
pretty_name: WikiLingua
configs:
- arabic
- chinese
- czech
- dutch
- english
- french
- german
- hindi
- indonesian
- italian
- japanese
- korean
- portuguese
- russian
- spanish
- thai
- turkish
- vietnamese
dataset_info:
- config_name: arabic
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 119116119
    num_examples: 9995
  download_size: 119358890
  dataset_size: 119116119
- config_name: chinese
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 41170689
    num_examples: 6541
  download_size: 41345464
  dataset_size: 41170689
- config_name: czech
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 20816390
    num_examples: 2520
  download_size: 20894511
  dataset_size: 20816390
- config_name: dutch
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 87258040
    num_examples: 10862
  download_size: 87533442
  dataset_size: 87258040
- config_name: english
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
  splits:
  - name: train
    num_bytes: 333700114
    num_examples: 57945
  download_size: 338036185
  dataset_size: 333700114
- config_name: french
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 197550376
    num_examples: 21690
  download_size: 198114157
  dataset_size: 197550376
- config_name: german
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 168674340
    num_examples: 20103
  download_size: 169195050
  dataset_size: 168674340
- config_name: hindi
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 63785051
    num_examples: 3402
  download_size: 63874759
  dataset_size: 63785051
- config_name: indonesian
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 136408861
    num_examples: 16308
  download_size: 136833587
  dataset_size: 136408861
- config_name: italian
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 138119527
    num_examples: 17673
  download_size: 138578956
  dataset_size: 138119527
- config_name: japanese
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 40145031
    num_examples: 4372
  download_size: 40259570
  dataset_size: 40145031
- config_name: korean
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 38647614
    num_examples: 4111
  download_size: 38748961
  dataset_size: 38647614
- config_name: portuguese
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 204270845
    num_examples: 28143
  download_size: 204997686
  dataset_size: 204270845
- config_name: russian
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 241924032
    num_examples: 18143
  download_size: 242377242
  dataset_size: 241924032
- config_name: spanish
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 314618618
    num_examples: 38795
  download_size: 315609530
  dataset_size: 314618618
- config_name: thai
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 86982851
    num_examples: 5093
  download_size: 87104200
  dataset_size: 86982851
- config_name: turkish
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 11371821
    num_examples: 1512
  download_size: 11405793
  dataset_size: 11371821
- config_name: vietnamese
  features:
  - name: url
    dtype: string
  - name: article
    sequence:
    - name: section_name
      dtype: string
    - name: document
      dtype: string
    - name: summary
      dtype: string
    - name: english_url
      dtype: string
    - name: english_section_name
      dtype: string
  splits:
  - name: train
    num_bytes: 69868788
    num_examples: 6616
  download_size: 70024093
  dataset_size: 69868788
---

# Dataset Card for "wiki_lingua"

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Repository:** [URL](https://github.com/esdurmus/Wikilingua)
- **Paper:** [WikiLingua: A Multilingual Abstractive Summarization Dataset](https://arxiv.org/abs/2010.03093)

### Dataset Summary

We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of crosslingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high-quality, collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article.
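
Each config corresponds to one language. A minimal loading sketch with the 🤗 `datasets` library, using the config names from the YAML `configs` list above:

```python
from datasets import load_dataset

# English is the pivot language; non-English configs additionally carry
# english_url / english_section_name alignment fields per section.
english = load_dataset("wiki_lingua", "english", split="train")
arabic = load_dataset("wiki_lingua", "arabic", split="train")

print(len(english), len(arabic))  # 57945 and 9995, per the metadata above
```
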
### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

The table below shows the number of article-summary pairs with a parallel article-summary pair in English.

| Language   | Num. parallel |
| ---------- | ------------: |
| English    |       141,457 |
| Spanish    |       113,215 |
| Portuguese |        81,695 |
| French     |        63,692 |
| German     |        58,375 |
| Russian    |        52,928 |
| Italian    |        50,968 |
| Indonesian |        47,511 |
| Dutch      |        31,270 |
| Arabic     |        29,229 |
| Vietnamese |        19,600 |
| Chinese    |        18,887 |
| Thai       |        14,770 |
| Japanese   |        12,669 |
| Korean     |        12,189 |
| Hindi      |         9,929 |
| Czech      |         7,200 |
| Turkish    |         4,503 |

## Dataset Structure

### Data Instances

```
{
    'article': {
        'document': ['make sure that the area is a safe place, especially if you plan on walking home at night. It’s always a good idea to practice the buddy system. Have a friend meet up and walk with you. Research the bus, train, or streetcar routes available in your area to find safe and affordable travel to your destination. Make sure you check the schedule for your outgoing and return travel. Some public transportation will cease to run late at night. Be sure if you take public transportation to the venue that you will also be able to get home late at night. Check the routes. Even if some public transit is still running late at night, the routing may change. Some may run express past many of the stops, or not travel all the way to the ends. Be sure that your stop will still be available when you need it for your return trip. If you are taking public transit in a vulnerable state after drinking, it is always a good idea to travel in groups. Having friends available is a good way to stay safe and make sure that you reach your destination. This is more expensive option than a taxi or ride share service, but could be a fun and fancy way to stay safe and ensure that you will have a ride home. Plan this service in advance with a scheduled time to pick you up from your home and the venue. You want to be sure that the service will still be available when you need to get home. This may be easy in a large city, but taxis may be less frequent in smaller towns. This is especially true late at night, so this is a less reliable option than scheduling a ride in advance. Have a friend accompany you and help you flag a cab to make sure you are able to get one. Set up a plan to call a friend when you get home to make sure that you made it safely to your destination. If there are no taxis readily available call a local service to send a car to pick you up. You can share a ride with your friends, or other people using the app at the same moment. If you are in a vulnerable state it is best to share the ride with your friends to make sure you get home safe. You can request the car to yourself rather than sharing rides with strangers. If you travel home on your own or are the last of your group to be dropped off, make plans to call a friend when you get home so they know you made it safely to your destination. There may be a designated driver service in your area which can chauffeur your group. Make reservations with them in advance and keep their contact information handy while you are drinking.',
            "Designating a driver is a very popular tactic to avoid drinking and driving. It is important to plan in advance, because your brain function will slow down and your decision making skills will be impaired once you start drinking. Decide before you begin drinking that you will not drive. Figure out who will be getting you home before you leave. Make sure this person is responsible and keep them in your sight while you are drinking. Have their contact information handy in case you can’t find them when you are ready to leave. Choose a friend who doesn’t drink alcohol. You likely have someone in your friend group who doesn’t drink. This person is the most likely to remain sober. Decide on one person who will remain sober. You can take turns within your friend group, alternating who will be the designated driver on each occasion. Be sure that the designated driver actually remains sober. The person who has drank the least is still not sober. If you don’t have your car with you, you can guarantee that you won’t make the choice to drive it home. If you are drinking at your home. Give your keys to a responsible friend to ensure that you don't choose to drive somewhere after you have been drinking. It may be tempting to stay longer or leave with someone else. Stick to the plan you made in advance and only leave with your sober, designated driver. Keep the phone number of your driver handy in case you can't find them when you are ready to leave. If your designated driver drinks alcohol, find alternate transportation to get home.",
            'If you have been drinking at all you are at least on the spectrum of drunkenness. You could be showing signs of impairment and slower brain function including lack of motor skills and slower reaction time, leading to the inability to operate a motor vehicle. Some of these signs could be: Poor balance or stumbling. Difficulty speaking clearly and slurred words. Abnormal behavior leading to you doing things you wouldn’t normally do if you were sober. As soon as you notice that you are showing signs of impairment, give your keys to a friend, the host or the bartender to ensure that you won’t drive until you are sober. Make sure to only give them your car key. Hold onto your house keys. If your friend, the host or the bartender are advising you not to drive, you are likely too drunk. Listen to their advice and acknowledge that they are trying to help you. Bystander intervention is common when it comes to drinking and driving. Many people will be willing to step in, take your keys and help you get home safely. If no one if offering to help, you may need to ask. Take a ride from a sober friend. It is best to get in a car with someone you trust when you are in this vulnerable state. Allow the host or bartender to call a cab or car service to take you home. If you are having a difficult time finding a safe way to get home, find a place to stay which does not involve you driving. Ask the host of the party if there is a place you can sleep. Give them your keys and ask that they keep them in a safe place until the morning. Stay with a friend if they live nearby and are on their way home. Find a hotel within walking distance. Call them to book a room, or have a friend help you secure one. Ask the friend if they will walk you to the hotel and make sure you get checked in safely. There are people in your life who care about you and want to be sure that you are safe. It may seem scary or embarrassing to call your parents or your siblings if you are too drunk to drive, but they will be glad you did. Your safety is the most important. You may need your phone to call someone for a ride or get help from a friend. Be sure to charge your phone before you leave the house. It is also a good idea to bring a charger with you in case your battery dies before the end of the night or you end up staying where you are and need to get home the next morning. You may also want to invest in a portable battery charger for your phone should there not be a power outlet available. Make sure it is fully charged before you leave your house. Keep it handy in your pocket or your bag throughout the night.'
        ],
        'section_name': ['Finding Other Transportation',
            'Designating a Driver',
            'Staying Safe'
        ],
        'summary': ['Walk to the venue where you will be drinking if it is close enough. Take public transit. Show up in style by hiring a limo or black car service. Flag a taxi cab for a convenient option to get where you’re going. Request a rideshare service like Uber or Lyft using an app on your phone. Reserve a designated driver service.',
            'Plan in advance. Assign a designated driver. Leave your car at home. Leave the venue with your designated driver.',
            'Pay attention to your body. Give up your keys. Listen to other people. Accept help. Stay where you are. Have an emergency back-up plan. Make sure that your phone is charged.'
        ]
    },
    'url': 'https://www.wikihow.com/Avoid-Drinking-and-Driving'
}
```

### Data Fields

- `url`: WikiHow URL of the article
- `article`: A dictionary containing `section_name`, `document` and `summary`
  - `section_name`: List of section headings in an article
  - `document`: List of documents, one for each section in the `section_name` list
  - `summary`: List of summaries, one for each section in the `section_name` list
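
Since `article` is a sequence of named fields, 🤗 Datasets returns it as a dict of equal-length, position-aligned lists rather than a list of dicts. A small sketch of walking one example:

```python
from datasets import load_dataset

train = load_dataset("wiki_lingua", "english", split="train")
example = train[0]

# `article` comes back as a dict of aligned lists, one entry per section.
article = example["article"]
for heading, document, summary in zip(
    article["section_name"], article["document"], article["summary"]
):
    print(f"{heading}: {summary[:80]}")
```
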
### Data Splits

|            |   train |
|:-----------|--------:|
| arabic     |    9995 |
| chinese    |    6541 |
| czech      |    2520 |
| dutch      |   10862 |
| english    |   57945 |
| french     |   21690 |
| german     |   20103 |
| hindi      |    3402 |
| indonesian |   16308 |
| italian    |   17673 |
| japanese   |    4372 |
| korean     |    4111 |
| portuguese |   28143 |
| russian    |   18143 |
| spanish    |   38795 |
| thai       |    5093 |
| turkish    |    1512 |
| vietnamese |    6616 |

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

[More Information Needed]

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

[More Information Needed]

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

- Article provided by wikiHow (https://www.wikihow.com/Main-Page), a wiki building the world's largest, highest-quality how-to manual. Please edit this article and find author credits at wikiHow.com. Content on wikiHow can be shared under a [Creative Commons license](http://creativecommons.org/licenses/by-nc-sa/3.0/).
- Refer to [this webpage](https://www.wikihow.com/wikiHow:Attribution) for the specific attribution guidelines.
- See also the GEM data card: https://gem-benchmark.com/data_cards/WikiLingua

### Citation Information

```bibtex
@article{ladhak-wiki-2020,
  title   = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
  author  = {Faisal Ladhak and Esin Durmus and Claire Cardie and Kathleen McKeown},
  journal = {arXiv preprint arXiv:2010.03093},
  year    = {2020},
  url     = {https://arxiv.org/abs/2010.03093}
}
```

### Contributions

Thanks to [@katnoria](https://github.com/katnoria) for adding this dataset.

arabic/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fc262dcad4f88763f19f0b2100e001e8465e3c4f83e551f040d48c5a51367f4a
size 55808459
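
The three lines above are a Git LFS pointer (spec version, object hash, byte size); the Hub stores the actual parquet object separately and resolves the pointer on download. A sketch of pulling and reading one shard with pandas, assuming this repo's Hub id is `wiki_lingua` (an assumption; substitute the actual repo id):

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and fetches the real object.
path = hf_hub_download(
    repo_id="wiki_lingua",  # assumed repo id, not stated in this commit
    filename="arabic/wiki_lingua-train.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(df.shape)  # expect (9995, 2): url plus the nested article struct
```
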
chinese/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55e22d8ba5cf119ba5e85fb55d148e9e43b8b2807d07f7b11757d28c716b25c9
size 25187025

create_dummy.py DELETED
@@ -1,84 +0,0 @@
import itertools
import logging
import os
import pickle
import shutil
from glob import glob
from os.path import join as pjoin

# Without this, logging.info output is suppressed (default level is WARNING).
logging.basicConfig(level=logging.INFO)

_URLs = {
    "arabic": "https://drive.google.com/uc?export=download&id=1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu",
    "chinese": "https://drive.google.com/uc?export=download&id=1TuWH7uwu6V90QWmZn25qhou1rm97Egmn",
    "czech": "https://drive.google.com/uc?export=download&id=1GcUN6mytEcOMBBOvjJOQzBmEkc-LdgQg",
    "dutch": "https://drive.google.com/uc?export=download&id=1-w-0uqaC6hnRn1F_3XqJEvi09zlcTIhX",
    "english": "https://drive.google.com/uc?export=download&id=11wMGqNVSwwk6zUnDaJEgm3qT71kAHeff",
    "french": "https://drive.google.com/uc?export=download&id=1Uit4Og1pk-br_0UJIO5sdhApyhTuHzqo",
    "german": "https://drive.google.com/uc?export=download&id=1meSNZHxd_0TZLKCRCYGN-Ke3IA5c1qOE",
    "hindi": "https://drive.google.com/uc?export=download&id=1ZyFGufe4puX3vjGPbp4xg9Hca3Gwq22g",
    "indonesian": "https://drive.google.com/uc?export=download&id=1PGa8j1_IqxiGTc3SU6NMB38sAzxCPS34",
    "italian": "https://drive.google.com/uc?export=download&id=1okwGJiOZmTpNRNgJLCnjFF4Q0H1z4l6_",
    "japanese": "https://drive.google.com/uc?export=download&id=1Z2ty5hU0tIGRZRDlFQZLO7b5vijRfvo0",
    "korean": "https://drive.google.com/uc?export=download&id=1cqu_YAgvlyVSzzjcUyP1Cz7q0k8Pw7vN",
    "portuguese": "https://drive.google.com/uc?export=download&id=1GTHUJxxmjLmG2lnF9dwRgIDRFZaOY3-F",
    "russian": "https://drive.google.com/uc?export=download&id=1fUR3MqJ8jTMka6owA0S-Fe6aHmiophc_",
    "spanish": "https://drive.google.com/uc?export=download&id=1KtMDsoYNukGP89PLujQTGVgt37cOARs5",
    "thai": "https://drive.google.com/uc?export=download&id=1QsV8C5EPJrQl37mwva_5-IJOrCaOi2tH",
    "turkish": "https://drive.google.com/uc?export=download&id=1M1M5yIOyjKWGprc3LUeVVwxgKXxgpqxm",
    "vietnamese": "https://drive.google.com/uc?export=download&id=17FGi8KI9N9SuGe7elM8qU8_3fx4sfgTr",
}


def sanitize_url(url):
    """Convert the url into the URL-encoded dummy file name format."""
    url = url.replace("https://drive.google.com/", "")
    url = url.replace("?", "%3F")
    url = url.replace("=", "%3D")
    url = url.replace("&", "%26")
    return url


def create():
    """Create the dummy pickle files, each holding a 3-item subset of the data."""
    # 1. Download the google drive folder: https://drive.google.com/drive/folders/1PFvXUOsW_KSEzFm5ixB8J8BDB8zRRfHW
    #    and specify the decompressed folder location
    downloaded_data_path = "/Users/katnoria/Downloads/WikiLingua"
    files = glob(f"{downloaded_data_path}/*.pkl")
    base_path = "/Users/katnoria/dev/projects/workspaces/python/datasets"
    for key in _URLs.keys():
        logging.info(f"Finding {key}.pkl")
        filepath = [name for name in files if name.endswith(f"{key}.pkl")][0]
        with open(filepath, "rb") as f:
            data = pickle.load(f)

        data_subset = dict(itertools.islice(data.items(), 3))
        fname = sanitize_url(_URLs[key])
        dirname = pjoin(base_path, f"datasets/wiki_lingua/dummy/{key}/1.1.0/dummy_data")
        if not os.path.exists(dirname):
            logging.info(f"created folder {dirname}")
            os.makedirs(dirname)
        fname = pjoin(dirname, fname)
        logging.info(f"creating for {key}:{fname}")
        with open(fname, "wb") as f:
            pickle.dump(data_subset, f)
        logging.info("SUCCESS")


def zip():  # note: shadows the builtin zip(), which is not used below
    """Zip each dummy_data folder and remove the uncompressed copy."""
    base_path = "/Users/katnoria/dev/projects/workspaces/python/datasets"
    for key in _URLs.keys():
        dirname = pjoin(base_path, f"datasets/wiki_lingua/dummy/{key}/1.1.0")
        logging.info(f"Zipping {dirname}")
        shutil.make_archive(f"{dirname}/dummy_data", "zip", dirname, "dummy_data")
        shutil.rmtree(f"{dirname}/dummy_data")
        logging.info(f"Deleted folder {dirname}/dummy_data")


# Utility script to create the dummy data and zip the contents
# 1. Create data
create()
# 2. Zip contents
zip()
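
As a quick check of `sanitize_url` above: the dummy file name is just the URL-encoded tail of the Google Drive download link, e.g. for the arabic entry:

```python
# Assumes sanitize_url from create_dummy.py above is in scope.
url = "https://drive.google.com/uc?export=download&id=1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu"
print(sanitize_url(url))
# uc%3Fexport%3Ddownload%26id%3D1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu
```
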
czech/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:562654531bec3710a73d9f89802e4ee5fe12341a215ddd4917c1b97c228c2190
size 12480760

dataset_infos.json DELETED
@@ -1 +0,0 @@
{"arabic": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "arabic", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 119116119, "num_examples": 9995, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu": {"num_bytes": 119358890, "checksum": "25fc655eb53227acf5dbe4de09732dedee6cbd83b4c1e8c3bb018eada79555d1"}}, "download_size": 119358890, "post_processing_size": null, "dataset_size": 119116119, "size_in_bytes": 238475009}, "chinese": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "chinese", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 41170689, "num_examples": 6541, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1TuWH7uwu6V90QWmZn25qhou1rm97Egmn": {"num_bytes": 41345464, "checksum": "be54a90ec9ac9baa2fb006c11363d44b9475c1fb8ac2aa84beeea1e065c58972"}}, "download_size": 41345464, "post_processing_size": null, "dataset_size": 41170689, "size_in_bytes": 82516153}, "czech": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "czech", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 20816390, "num_examples": 2520, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1GcUN6mytEcOMBBOvjJOQzBmEkc-LdgQg": {"num_bytes": 20894511, "checksum": "bb3f9300b8631667d25b9e2b73c98ad90e0b5a3203bba21ed896f12b4a4e39a1"}}, "download_size": 20894511, "post_processing_size": null, "dataset_size": 20816390, "size_in_bytes": 41710901}, "dutch": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "dutch", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 87258040, "num_examples": 10862, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1-w-0uqaC6hnRn1F_3XqJEvi09zlcTIhX": {"num_bytes": 87533442, "checksum": "1282abaa1f70e0d46db2f199a8e0bacd5c06a97220cf874854c41e12c072f10a"}}, "download_size": 87533442, "post_processing_size": null, "dataset_size": 87258040, "size_in_bytes": 174791482}, "english": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "english", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 333700114, "num_examples": 57945, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=11wMGqNVSwwk6zUnDaJEgm3qT71kAHeff": {"num_bytes": 338036185, "checksum": "1f0b51ac4b733e06a067826d9e137ee300d751f12f240e95be4b258f7bb5191d"}}, "download_size": 338036185, "post_processing_size": null, "dataset_size": 333700114, "size_in_bytes": 671736299}, "french": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "french", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 197550376, "num_examples": 21690, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1Uit4Og1pk-br_0UJIO5sdhApyhTuHzqo": {"num_bytes": 198114157, "checksum": "e7e71d214142d06ddfd00411c2ceb3f1abee44eef9f6dbdd61ea5c5b30521230"}}, "download_size": 198114157, "post_processing_size": null, "dataset_size": 197550376, "size_in_bytes": 395664533}, "german": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "german", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 168674340, "num_examples": 20103, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1meSNZHxd_0TZLKCRCYGN-Ke3IA5c1qOE": {"num_bytes": 169195050, "checksum": "88ee4628700c0e58b529a75e3f9f27022be3e7a591a8981f503b078a7116c4eb"}}, "download_size": 169195050, "post_processing_size": null, "dataset_size": 168674340, "size_in_bytes": 337869390}, "hindi": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "hindi", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 63785051, "num_examples": 3402, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ZyFGufe4puX3vjGPbp4xg9Hca3Gwq22g": {"num_bytes": 63874759, "checksum": "a6a9b0cb313ecad82985269153e03e4c02376f0e52e53168100eacafc1c55037"}}, "download_size": 63874759, "post_processing_size": null, "dataset_size": 63785051, "size_in_bytes": 127659810}, "indonesian": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "indonesian", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 136408861, "num_examples": 16308, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1PGa8j1_IqxiGTc3SU6NMB38sAzxCPS34": {"num_bytes": 136833587, "checksum": "cfa0b6eeb590e0db212b616d455fa00ed376186638c7c4b2771986fb4bd4b7e6"}}, "download_size": 136833587, "post_processing_size": null, "dataset_size": 136408861, "size_in_bytes": 273242448}, "italian": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "italian", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 138119527, "num_examples": 17673, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1okwGJiOZmTpNRNgJLCnjFF4Q0H1z4l6_": {"num_bytes": 138578956, "checksum": "f6960f3d025f65452d3a536065925e86c425f7f559f574ed078172aa30d6a6ae"}}, "download_size": 138578956, "post_processing_size": null, "dataset_size": 138119527, "size_in_bytes": 276698483}, "japanese": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "japanese", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 40145031, "num_examples": 4372, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1Z2ty5hU0tIGRZRDlFQZLO7b5vijRfvo0": {"num_bytes": 40259570, "checksum": "dc080f6db644261e31b0d9564eec0c07f87e939cd4af535ad239ee8813c92a33"}}, "download_size": 40259570, "post_processing_size": null, "dataset_size": 40145031, "size_in_bytes": 80404601}, "korean": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "korean", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 38647614, "num_examples": 4111, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1cqu_YAgvlyVSzzjcUyP1Cz7q0k8Pw7vN": {"num_bytes": 38748961, "checksum": "b6f97c124033c99034696034a19b4e32d0573281281fe2655f7d70032dc65d01"}}, "download_size": 38748961, "post_processing_size": null, "dataset_size": 38647614, "size_in_bytes": 77396575}, "portuguese": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "portuguese", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 204270845, "num_examples": 28143, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1GTHUJxxmjLmG2lnF9dwRgIDRFZaOY3-F": {"num_bytes": 204997686, "checksum": "c5f912b3b00e11f02a9ddd2b879b605f3fd2354eb0b5f8acac13e01e49ea1e59"}}, "download_size": 204997686, "post_processing_size": null, "dataset_size": 204270845, "size_in_bytes": 409268531}, "russian": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "russian", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 241924032, "num_examples": 18143, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1fUR3MqJ8jTMka6owA0S-Fe6aHmiophc_": {"num_bytes": 242377242, "checksum": "246647637d6de8bb84e26f68546c5a5ba04e196d1769716975e52447d43e4d71"}}, "download_size": 242377242, "post_processing_size": null, "dataset_size": 241924032, "size_in_bytes": 484301274}, "spanish": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "spanish", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 314618618, "num_examples": 38795, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1KtMDsoYNukGP89PLujQTGVgt37cOARs5": {"num_bytes": 315609530, "checksum": "b6c42c313d28199c88a0696d920c08ab951820e84f6ebe9137dd7e74b6907912"}}, "download_size": 315609530, "post_processing_size": null, "dataset_size": 314618618, "size_in_bytes": 630228148}, "thai": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "thai", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 86982851, "num_examples": 5093, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1QsV8C5EPJrQl37mwva_5-IJOrCaOi2tH": {"num_bytes": 87104200, "checksum": "464a35114cb35792f0a875ebf653c60be8b83e6eb5baa458dce2629c3b798161"}}, "download_size": 87104200, "post_processing_size": null, "dataset_size": 86982851, "size_in_bytes": 174087051}, "turkish": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. 
The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "turkish", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 11371821, "num_examples": 1512, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1M1M5yIOyjKWGprc3LUeVVwxgKXxgpqxm": {"num_bytes": 11405793, "checksum": "858406c011fc2c1ef0c8bf3acb77edcf1d05c5189e61be54e1655d6e8a98076d"}}, "download_size": 11405793, "post_processing_size": null, "dataset_size": 11371821, "size_in_bytes": 22777614}, "vietnamese": {"description": "WikiLingua is a large-scale multilingual dataset for the evaluation of\ncrosslingual abstractive summarization systems. The dataset includes ~770k\narticle and summary pairs in 18 languages from WikiHow. 
The gold-standard\narticle-summary alignments across languages was done by aligning the images\nthat are used to describe each how-to step in an article.\n", "citation": "@article{ladhak-wiki-2020,\n title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},\n authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},\n journal = {arXiv preprint arXiv:2010.03093},\n year = {2020},\n url = {https://arxiv.org/abs/2010.03093}\n}\n", "homepage": "https://github.com/esdurmus/Wikilingua", "license": "CC BY-NC-SA 3.0", "features": {"url": {"dtype": "string", "id": null, "_type": "Value"}, "article": {"feature": {"section_name": {"dtype": "string", "id": null, "_type": "Value"}, "document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "english_url": {"dtype": "string", "id": null, "_type": "Value"}, "english_section_name": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wiki_lingua", "config_name": "vietnamese", "version": {"version_str": "1.1.1", "description": null, "major": 1, "minor": 1, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 69868788, "num_examples": 6616, "dataset_name": "wiki_lingua"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=17FGi8KI9N9SuGe7elM8qU8_3fx4sfgTr": {"num_bytes": 70024093, "checksum": "590e51dbef3cd17ef271088778289596d1363d72708e7f7d625d28a837e395a5"}}, "download_size": 70024093, "post_processing_size": null, "dataset_size": 69868788, "size_in_bytes": 139892881}}
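All of the configs in the metadata above share the same nested schema: a top-level `url` string plus an `article` sequence whose sections carry `section_name`, `document`, and `summary` (non-English configs add `english_url` and `english_section_name`). For orientation, a minimal sketch of the shape of one loaded example, assuming the `datasets` library's convention of returning a sequence of dicts as a dict of parallel lists; every value below is an invented placeholder, not real dataset content:

```python
# Shape of one non-English example under the features above.
# All string values are placeholders.
example = {
    "url": "https://de.wikihow.com/<article>",
    "article": {
        # parallel lists, one entry per aligned how-to section
        "section_name": ["Schritt eins"],
        "document": ["full section text ..."],
        "summary": ["short abstractive summary ..."],
        "english_url": ["https://www.wikihow.com/<article>"],
        "english_section_name": ["Step one"],
    },
}
```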
 
 
dutch/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e842f1a4aedc5893cc6f7c6499e880bc8790e5e95e656dd6d5b4f1dc7cdf6e0
+ size 47651075
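Each parquet file added in this commit is stored through Git LFS, so the three-line diff is a pointer (spec version, sha256 oid, byte size) rather than the parquet bytes themselves. A minimal sketch of reading those fields back; `parse_lfs_pointer` is a hypothetical helper, not part of this repo:

```python
# Hypothetical helper: split a Git LFS pointer file into its
# space-separated key/value fields (version, oid, size).
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Without `git lfs pull`, the checked-out .parquet path holds the pointer:
# parse_lfs_pointer(open("dutch/wiki_lingua-train.parquet").read())
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:7e842f1a...", "size": "47651075"}
```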
english/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62aa49ba3cb568900e715939154ce7f7770e5d72848091f24806d7c58a5945be
+ size 187189232
french/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecda4bb15c6dad7b71445980b9810976d790dcca9beec993ef1afca3a3981b49
+ size 105158839
german/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2942b32c5181f88846220998f30bb8a60cae7ef75894ab83814a040a3e1fdb24
+ size 93078075
hindi/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76d08268e974a3162a419a95d9f5d1b52b6ddb402181684da48151edb87f7499
+ size 22774619
indonesian/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f084c14011b95f20e02e5c16953f78db8c2eec70226a82a17226c8d8dd8bb358
+ size 67658969
italian/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed00b878217f3b38f8aea75c954fb8dc238a1ac95557e6926d0340441a7d4734
+ size 78108133
japanese/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21dfc8cf98dd3362e2ac443f886a363b2621a21fd1349d20e117d2038335ed8f
+ size 19794487
korean/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7c513360fa4c9fea984cc70bf6460ce6f996b0e439dd970df020df24ac05b5
+ size 20029485
portuguese/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:291057268c0f08e1d2e3f6e50f46752e561a1c0298e94fe1f115fd3053a5e215
+ size 114735911
russian/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be36416c44998ac5380b1eac5c5b8fa8c6428a9e2eee275c2f669c558f40fcaf
+ size 111025227
spanish/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7b8cbf6101b8f9ea417ad1e5958c7bdfd036bb15e03c7b14873bc5315e64207
+ size 170995185
thai/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:897faf4ec9750a8cef2d681a7a6cc48a84bd2ebb0b092072a5d49f0b800dbe4b
+ size 31944978
turkish/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:420c42312c7673d5296eb58cce26fc8866f0770b5be8c8cb9d800d2719773eca
+ size 5964903
vietnamese/wiki_lingua-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2ba2683e8c3569382cae15e5cc9ff065032a39318c81fac39762be026c07782
+ size 33194149
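With every train split converted to parquet above, a config can be loaded without running the deleted loading script below. A minimal sketch, assuming the canonical `wiki_lingua` dataset id on the Hub and the `datasets` library; alternatively, after a `git lfs pull`, `pandas.read_parquet("english/wiki_lingua-train.parquet")` would read a split directly:

```python
# A minimal sketch: load one converted train split by config name.
from datasets import load_dataset

ds = load_dataset("wiki_lingua", "english", split="train")
print(ds[0]["url"])                      # source WikiHow URL
print(ds[0]["article"]["section_name"])  # section titles for this article
```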
wiki_lingua.py DELETED
@@ -1,197 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: Add a description here."""
-
-
- import pickle
-
- import datasets
-
-
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @article{ladhak-wiki-2020,
-     title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
-     authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},
-     journal = {arXiv preprint arXiv:2010.03093},
-     year = {2020},
-     url = {https://arxiv.org/abs/2010.03093}
- }
- """
-
- _DESCRIPTION = """\
- WikiLingua is a large-scale multilingual dataset for the evaluation of
- crosslingual abstractive summarization systems. The dataset includes ~770k
- article and summary pairs in 18 languages from WikiHow. The gold-standard
- article-summary alignments across languages was done by aligning the images
- that are used to describe each how-to step in an article.
- """
-
- _HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
-
- _LICENSE = "CC BY-NC-SA 3.0"
-
- # Download links
- _URLs = {
-     "arabic": "https://drive.google.com/uc?export=download&id=1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu",
-     "chinese": "https://drive.google.com/uc?export=download&id=1TuWH7uwu6V90QWmZn25qhou1rm97Egmn",
-     "czech": "https://drive.google.com/uc?export=download&id=1GcUN6mytEcOMBBOvjJOQzBmEkc-LdgQg",
-     "dutch": "https://drive.google.com/uc?export=download&id=1-w-0uqaC6hnRn1F_3XqJEvi09zlcTIhX",
-     "english": "https://drive.google.com/uc?export=download&id=11wMGqNVSwwk6zUnDaJEgm3qT71kAHeff",
-     "french": "https://drive.google.com/uc?export=download&id=1Uit4Og1pk-br_0UJIO5sdhApyhTuHzqo",
-     "german": "https://drive.google.com/uc?export=download&id=1meSNZHxd_0TZLKCRCYGN-Ke3IA5c1qOE",
-     "hindi": "https://drive.google.com/uc?export=download&id=1ZyFGufe4puX3vjGPbp4xg9Hca3Gwq22g",
-     "indonesian": "https://drive.google.com/uc?export=download&id=1PGa8j1_IqxiGTc3SU6NMB38sAzxCPS34",
-     "italian": "https://drive.google.com/uc?export=download&id=1okwGJiOZmTpNRNgJLCnjFF4Q0H1z4l6_",
-     "japanese": "https://drive.google.com/uc?export=download&id=1Z2ty5hU0tIGRZRDlFQZLO7b5vijRfvo0",
-     "korean": "https://drive.google.com/uc?export=download&id=1cqu_YAgvlyVSzzjcUyP1Cz7q0k8Pw7vN",
-     "portuguese": "https://drive.google.com/uc?export=download&id=1GTHUJxxmjLmG2lnF9dwRgIDRFZaOY3-F",
-     "russian": "https://drive.google.com/uc?export=download&id=1fUR3MqJ8jTMka6owA0S-Fe6aHmiophc_",
-     "spanish": "https://drive.google.com/uc?export=download&id=1KtMDsoYNukGP89PLujQTGVgt37cOARs5",
-     "thai": "https://drive.google.com/uc?export=download&id=1QsV8C5EPJrQl37mwva_5-IJOrCaOi2tH",
-     "turkish": "https://drive.google.com/uc?export=download&id=1M1M5yIOyjKWGprc3LUeVVwxgKXxgpqxm",
-     "vietnamese": "https://drive.google.com/uc?export=download&id=17FGi8KI9N9SuGe7elM8qU8_3fx4sfgTr",
- }
-
-
- class WikiLingua(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.1")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="arabic", version=VERSION, description="A subset of article-summary in Arabic"),
-         datasets.BuilderConfig(name="chinese", version=VERSION, description="A subset of article-summary in Chinese"),
-         datasets.BuilderConfig(name="czech", version=VERSION, description="A subset of article-summary in Czech"),
-         datasets.BuilderConfig(name="dutch", version=VERSION, description="A subset of article-summary in Dutch"),
-         datasets.BuilderConfig(name="english", version=VERSION, description="A subset of article-summary in English"),
-         datasets.BuilderConfig(name="french", version=VERSION, description="A subset of article-summary in French"),
-         datasets.BuilderConfig(name="german", version=VERSION, description="A subset of article-summary in German"),
-         datasets.BuilderConfig(name="hindi", version=VERSION, description="A subset of article-summary in Hindi"),
-         datasets.BuilderConfig(
-             name="indonesian", version=VERSION, description="A subset of article-summary in Indonesian"
-         ),
-         datasets.BuilderConfig(name="italian", version=VERSION, description="A subset of article-summary in Italian"),
-         datasets.BuilderConfig(
-             name="japanese", version=VERSION, description="A subset of article-summary in Japanese"
-         ),
-         datasets.BuilderConfig(name="korean", version=VERSION, description="A subset of article-summary in Korean"),
-         datasets.BuilderConfig(
-             name="portuguese", version=VERSION, description="A subset of article-summary in Portuguese"
-         ),
-         datasets.BuilderConfig(name="russian", version=VERSION, description="A subset of article-summary in Russian"),
-         datasets.BuilderConfig(name="spanish", version=VERSION, description="A subset of article-summary in Spanish"),
-         datasets.BuilderConfig(name="thai", version=VERSION, description="A subset of article-summary in Thai"),
-         datasets.BuilderConfig(name="turkish", version=VERSION, description="A subset of article-summary in Turkish"),
-         datasets.BuilderConfig(
-             name="vietnamese", version=VERSION, description="A subset of article-summary in Vietnamese"
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "english"
-
-     def _info(self):
-         if self.config.name == "english":
-             features = datasets.Features(
-                 {
-                     "url": datasets.Value("string"),
-                     "article": datasets.Sequence(
-                         {
-                             "section_name": datasets.Value("string"),
-                             "document": datasets.Value("string"),
-                             "summary": datasets.Value("string"),
-                         }
-                     ),
-                 }
-             )
-         else:
-             features = datasets.Features(
-                 {
-                     "url": datasets.Value("string"),
-                     "article": datasets.Sequence(
-                         {
-                             "section_name": datasets.Value("string"),
-                             "document": datasets.Value("string"),
-                             "summary": datasets.Value("string"),
-                             "english_url": datasets.Value("string"),
-                             "english_section_name": datasets.Value("string"),
-                         }
-                     ),
-                 }
-             )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         my_urls = _URLs[self.config.name]
-         # See create_dummy.py to create new dummy data
-         train_fname = dl_manager.download_and_extract(my_urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": train_fname,
-                     "split": "train",
-                 },
-             ),
-         ]
-
-     def _process_article(self, article):
-         """Parse the article and convert into list of dict"""
-         processed_article = []
-         for key, value in article.items():
-             row = {"section_name": key, "document": value["document"], "summary": value["summary"]}
-
-             if self.config.name != "english":
-                 row["english_url"] = value["english_url"]
-                 row["english_section_name"] = value["english_section_name"]
-             processed_article.append(row)
-
-         return processed_article
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples."""
-         with open(filepath, "rb") as f:
-             data = pickle.load(f)
-         for id_, row in enumerate(data.items()):
-             yield id_, {"url": row[0], "article": self._process_article(row[1])}
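For reference, `_generate_examples` and `_process_article` in the deleted script imply the layout of the pickled files it used to download: a dict keyed by article URL, whose value maps each section name to that section's fields. A sketch of that inferred structure; all values are placeholders:

```python
# Inferred from _process_article/_generate_examples above; placeholder values.
data = {
    "https://es.wikihow.com/<article>": {
        "<section name>": {
            "document": "full section text ...",
            "summary": "short abstractive summary ...",
            # present only in non-English pickles:
            "english_url": "https://www.wikihow.com/<article>",
            "english_section_name": "<aligned English section name>",
        },
    },
}
```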