Datasets: tapaco
Modalities: Text
Formats: parquet
Sub-tasks: semantic-similarity-classification
Size: 1M - 10M
Tags: paraphrase-generation
License: cc-by-2.0
Commit 218ec89 • 1 Parent(s): f7bfc3a
Delete loading script
tapaco.py (DELETED)
@@ -1,220 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TaPaCo: A Corpus of Sentential Paraphrases for 73 Languages"""


import csv
import os

import datasets


_CITATION = """\
@dataset{scherrer_yves_2020_3707949,
  author = {Scherrer, Yves},
  title = {{TaPaCo: A Corpus of Sentential Paraphrases for 73 Languages}},
  month = mar,
  year = 2020,
  publisher = {Zenodo},
  version = {1.0},
  doi = {10.5281/zenodo.3707949},
  url = {https://doi.org/10.5281/zenodo.3707949}
}
"""


_DESCRIPTION = """\
A freely available paraphrase corpus for 73 languages extracted from the Tatoeba database. \
Tatoeba is a crowdsourcing project mainly geared towards language learners. Its aim is to provide example sentences \
and translations for particular linguistic constructions and words. The paraphrase corpus is created by populating a \
graph with Tatoeba sentences and equivalence links between sentences “meaning the same thing”. This graph is then \
traversed to extract sets of paraphrases. Several language-independent filters and pruning steps are applied to \
remove uninteresting sentences. A manual evaluation performed on three languages shows that between half and three \
quarters of inferred paraphrases are correct and that most remaining ones are either correct but trivial, \
or near-paraphrases that neutralize a morphological distinction. The corpus contains a total of 1.9 million \
sentences, with 200 – 250 000 sentences per language. It covers a range of languages for which, to our knowledge,\
no other paraphrase dataset exists."""


_HOMEPAGE = "https://zenodo.org/record/3707949#.X9Dh0cYza3I"


_LICENSE = "Creative Commons Attribution 2.0 Generic"


# Original data: "https://zenodo.org/record/3707949/files/tapaco_v1.0.zip?download=1"
_URL = "data/tapaco_v1.0.zip"

_VERSION = "1.0.0"
_LANGUAGES = {
    "af": "Afrikaans",
    "ar": "Arabic",
    "az": "Azerbaijani",
    "be": "Belarusian",
    "ber": "Berber languages",
    "bg": "Bulgarian",
    "bn": "Bengali",
    "br": "Breton",
    "ca": "Catalan; Valencian",
    "cbk": "Chavacano",
    "cmn": "Mandarin",
    "cs": "Czech",
    "da": "Danish",
    "de": "German",
    "el": "Greek, Modern (1453-)",
    "en": "English",
    "eo": "Esperanto",
    "es": "Spanish; Castilian",
    "et": "Estonian",
    "eu": "Basque",
    "fi": "Finnish",
    "fr": "French",
    "gl": "Galician",
    "gos": "Gronings",
    "he": "Hebrew",
    "hi": "Hindi",
    "hr": "Croatian",
    "hu": "Hungarian",
    "hy": "Armenian",
    "ia": "Interlingua (International Auxiliary Language Association)",
    "id": "Indonesian",
    "ie": "Interlingue; Occidental",
    "io": "Ido",
    "is": "Icelandic",
    "it": "Italian",
    "ja": "Japanese",
    "jbo": "Lojban",
    "kab": "Kabyle",
    "ko": "Korean",
    "kw": "Cornish",
    "la": "Latin",
    "lfn": "Lingua Franca Nova\t",
    "lt": "Lithuanian",
    "mk": "Macedonian",
    "mr": "Marathi",
    "nb": "Bokmål, Norwegian; Norwegian Bokmål",
    "nds": "Low German; Low Saxon; German, Low; Saxon, Low",
    "nl": "Dutch; Flemish",
    "orv": "Old Russian",
    "ota": "Turkish, Ottoman (1500-1928)",
    "pes": "Iranian Persian",
    "pl": "Polish",
    "pt": "Portuguese",
    "rn": "Rundi",
    "ro": "Romanian; Moldavian; Moldovan",
    "ru": "Russian",
    "sl": "Slovenian",
    "sr": "Serbian",
    "sv": "Swedish",
    "tk": "Turkmen",
    "tl": "Tagalog",
    "tlh": "Klingon; tlhIngan-Hol",
    "toki": "Toki Pona",
    "tr": "Turkish",
    "tt": "Tatar",
    "ug": "Uighur; Uyghur",
    "uk": "Ukrainian",
    "ur": "Urdu",
    "vi": "Vietnamese",
    "vo": "Volapük",
    "war": "Waray",
    "wuu": "Wu Chinese",
    "yue": "Yue Chinese",
}
_ALL_LANGUAGES = "all_languages"


class TapacoConfig(datasets.BuilderConfig):
    """BuilderConfig for TapacoConfig."""

    def __init__(self, languages=None, **kwargs):
        super(TapacoConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs),
        self.languages = languages


class Tapaco(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        TapacoConfig(
            name=_ALL_LANGUAGES,
            languages=_LANGUAGES,
            description="A collection of paraphrase corpus for 73 languages to aid paraphrase "
            "detection and generation.",
        )
    ] + [
        TapacoConfig(
            name=lang,
            languages=[lang],
            description=f"{_LANGUAGES[lang]} A collection of paraphrase corpus for 73 languages to "
            "aid paraphrase detection and generation.",
        )
        for lang in _LANGUAGES
    ]
    BUILDER_CONFIG_CLASS = TapacoConfig
    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES

    def _info(self):
        features = datasets.Features(
            {
                "paraphrase_set_id": datasets.Value("string"),
                "sentence_id": datasets.Value("string"),
                "paraphrase": datasets.Value("string"),
                "lists": datasets.Sequence(datasets.Value("string")),
                "tags": datasets.Sequence(datasets.Value("string")),
                "language": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            ),
        ]

    def _generate_examples(self, data_dir):
        """Yields examples."""
        base_path = os.path.join(data_dir, "tapaco_v1.0")
        file_dict = {lang: os.path.join(base_path, lang + ".txt") for lang in self.config.languages}
        id_ = -1
        for language, filepath in file_dict.items():
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.reader(
                    csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
                )
                for row in csv_reader:
                    id_ += 1
                    paraphrase_set_id, sentence_id, paraphrase, lists, tags = row[: len(row)] + [""] * (5 - len(row))
                    yield id_, {
                        "paraphrase_set_id": paraphrase_set_id,
                        "sentence_id": sentence_id,
                        "paraphrase": paraphrase,
                        "lists": lists.split(";"),
                        "tags": tags.split(";"),
                        "language": language,
                    }
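
With the loading script removed, the dataset is expected to load directly from the data files hosted in the repository (the parquet format listed in the header above). Below is a minimal sketch of how a user might load it, assuming the Hub repository id "tapaco" and the config names defined in the deleted script ("all_languages" plus one config per language code):

from datasets import load_dataset

# Load the English configuration; "all_languages" combines every language.
tapaco_en = load_dataset("tapaco", "en", split="train")

# Each row mirrors the features declared by the deleted script:
# paraphrase_set_id, sentence_id, paraphrase, lists, tags, language.
print(tapaco_en[0])

Rows sharing the same paraphrase_set_id belong to one set of sentences "meaning the same thing", which is the grouping used for paraphrase detection and generation.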