Dataset: wiki_lingua

Multilinguality: multilingual
Size categories: 10K<n<100K, 1K<n<10K
Language creators: crowdsourced
Annotations creators: crowdsourced
Source datasets: original
License: CC BY-NC-SA 3.0
albertvillanova (HF staff) committed
Commit 07b3108
Parent: 851acfd

Delete loading script

Files changed (1):
  1. wiki_lingua.py +0 -168
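
With the loading script deleted, the dataset is read directly from the data files hosted in the repository, so no repository code has to run at load time. A minimal sketch of loading it with the `datasets` library, assuming the Hub dataset id `wiki_lingua` and one of the per-language configurations (e.g. `english`) defined in the deleted script below:

# Minimal sketch: load WikiLingua without the script-based loader.
# The dataset id "wiki_lingua" and the config name "english" are assumptions
# taken from the file name and the configs defined in the deleted script below.
from datasets import load_dataset

ds = load_dataset("wiki_lingua", "english", split="train")
example = ds[0]
print(example["url"])
# "article" is a sequence feature: a dict of parallel lists, one entry per section.
print(example["article"]["section_name"][:3])
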
wiki_lingua.py DELETED
@@ -1,168 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""WikiLingua."""
-
-
-import json
-
-import datasets
-
-
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@inproceedings{ladhak-etal-2020-wikilingua,
-    title = "{W}iki{L}ingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization",
-    author = "Ladhak, Faisal  and
-      Durmus, Esin  and
-      Cardie, Claire  and
-      McKeown, Kathleen",
-    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
-    month = nov,
-    year = "2020",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2020.findings-emnlp.360",
-    doi = "10.18653/v1/2020.findings-emnlp.360",
-    pages = "4034--4048",
-}
-"""
-
-_DESCRIPTION = """\
-WikiLingua is a large-scale multilingual dataset for the evaluation of
-cross-lingual abstractive summarization systems. The dataset includes ~770k
-article and summary pairs in 18 languages from WikiHow. The gold-standard
-article-summary alignments across languages was done by aligning the images
-that are used to describe each how-to step in an article.
-"""
-
-_HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
-
-_LICENSE = "CC BY-NC-SA 3.0"
-
-# Download link
-_URL = "data/{language}.jsonl.gz"
-_LANGUAGES = [
-    "arabic",
-    "chinese",
-    "czech",
-    "dutch",
-    "english",
-    "french",
-    "german",
-    "hindi",
-    "indonesian",
-    "italian",
-    "japanese",
-    "korean",
-    "portuguese",
-    "russian",
-    "spanish",
-    "thai",
-    "turkish",
-    "vietnamese",
-]
-
-
-class WikiLingua(datasets.GeneratorBasedBuilder):
-    """WikiLingua dataset."""
-
-    VERSION = datasets.Version("1.1.1")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name=lang,
-            version=datasets.Version("1.1.1"),
-            description=f"A subset of article-summary in {lang.capitalize()}",
-        )
-        for lang in _LANGUAGES
-    ]
-
-    DEFAULT_CONFIG_NAME = "english"
-
-    def _info(self):
-        if self.config.name == "english":
-            features = datasets.Features(
-                {
-                    "url": datasets.Value("string"),
-                    "article": datasets.Sequence(
-                        {
-                            "section_name": datasets.Value("string"),
-                            "document": datasets.Value("string"),
-                            "summary": datasets.Value("string"),
-                        }
-                    ),
-                }
-            )
-        else:
-            features = datasets.Features(
-                {
-                    "url": datasets.Value("string"),
-                    "article": datasets.Sequence(
-                        {
-                            "section_name": datasets.Value("string"),
-                            "document": datasets.Value("string"),
-                            "summary": datasets.Value("string"),
-                            "english_url": datasets.Value("string"),
-                            "english_section_name": datasets.Value("string"),
-                        }
-                    ),
-                }
-            )
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        filepath = dl_manager.download_and_extract(_URL.format(language=self.config.name))
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": filepath,
-                },
-            ),
-        ]
-
-    def _process_article(self, article):
-        """Parse the article and convert into list of dict"""
-        processed_article = []
-        for key, value in article.items():
-            row = {"section_name": key, "document": value["document"], "summary": value["summary"]}
-
-            if self.config.name != "english":
-                row["english_url"] = value["english_url"]
-                row["english_section_name"] = value["english_section_name"]
-            processed_article.append(row)
-
-        return processed_article
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        with open(filepath, "rb") as f:
-            for id_, line in enumerate(f):
-                row = json.loads(line)
-                yield id_, {"url": row["url"], "article": self._process_article(row["article"])}
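
For reference, the deleted loader read newline-delimited JSON from `data/{language}.jsonl.gz` and flattened each article's sections into parallel fields. A minimal standalone sketch of the same processing, reading the compressed file directly; the local path in the usage example is illustrative, and the field names come from the script above:

# Standalone sketch reproducing what the deleted _generate_examples/_process_article did.
# The local file path is an assumption for illustration.
import gzip
import json

def iter_examples(path, english=True):
    """Yield one dict per article, mirroring the deleted loader's output."""
    with gzip.open(path, "rt", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            sections = []
            for section_name, value in row["article"].items():
                item = {
                    "section_name": section_name,
                    "document": value["document"],
                    "summary": value["summary"],
                }
                if not english:
                    # Non-English records also carry the aligned English article info.
                    item["english_url"] = value["english_url"]
                    item["english_section_name"] = value["english_section_name"]
                sections.append(item)
            yield {"url": row["url"], "article": sections}

# Usage: iterate over the first few articles from a local copy of data/english.jsonl.gz.
for i, ex in enumerate(iter_examples("data/english.jsonl.gz")):
    print(ex["url"], len(ex["article"]))
    if i == 2:
        break
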