gmnlp committed on
Commit
e666438
1 Parent(s): d035382

Create dialect_nli.py

Files changed (1)
  1. dialect_nli.py +217 -0
dialect_nli.py ADDED
@@ -0,0 +1,217 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Dialect NLI: an XNLI-style cross-lingual NLI corpus over dialects and language varieties."""
+
+
+ import collections
+ import csv
+ import os
+ from contextlib import ExitStack
+
+ import datasets
+
+
+ _CITATION = """\
+ @InProceedings{conneau2018xnli,
+   author = {Conneau, Alexis
+             and Rinott, Ruty
+             and Lample, Guillaume
+             and Williams, Adina
+             and Bowman, Samuel R.
+             and Schwenk, Holger
+             and Stoyanov, Veselin},
+   title = {XNLI: Evaluating Cross-lingual Sentence Representations},
+   booktitle = {Proceedings of the 2018 Conference on Empirical Methods
+                in Natural Language Processing},
+   year = {2018},
+   publisher = {Association for Computational Linguistics},
+   location = {Brussels, Belgium},
+ }"""
+
+ _DESCRIPTION = """\
+ XNLI is a subset of a few thousand examples from MNLI which has been translated
+ into 14 different languages (some low-ish resource). As with MNLI, the goal is
+ to predict textual entailment (does sentence A imply, contradict, or neither
+ imply nor contradict sentence B) as a classification task (given two sentences,
+ predict one of three labels). This loading script extends the evaluation to the
+ dialects and regional language varieties listed in _LANGUAGES.
+ """
+
+ _TRAIN_DATA_URL = "https://gmuedu-my.sharepoint.com/:u:/g/personal/ffaisal_gmu_edu/EVJ2LyvweSVJpUFvTMkKiKsB9P7DDr0T4ZL7EPFahruyow?download=1"
+
+ _TEST_DATA_URL = "https://gmuedu-my.sharepoint.com/:u:/g/personal/ffaisal_gmu_edu/ERNIHGKDoYZNi5mj5HIQbaMB7mWr4s1z3iVq35pbUeBjEg?download=1"
+
+ _VAL_DATA_URL = "https://gmuedu-my.sharepoint.com/:u:/g/personal/ffaisal_gmu_edu/EWqXGwiQwwpEup1xMmoRRvUBpj675UlDc9qj1EPNEUNM9w?download=1"
+
+
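+ # Language/dialect codes follow the FLORES-200 convention (ISO 639-3 code + "_" + script),
+ # e.g. lmo_Latn (Lombard), scn_Latn (Sicilian), arz_Arab (Egyptian Arabic).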
+ _LANGUAGES = (
+     "eng_Latn", "lmo_Latn", "ita_Latn", "fur_Latn", "scn_Latn", "srd_Latn",
+     "vec_Latn", "azb_Arab", "azj_Latn", "tur_Latn", "kmr_Latn", "ckb_Arab",
+     "nno_Latn", "nob_Latn", "lim_Latn", "ltz_Latn", "nld_Latn", "lvs_Latn",
+     "ltg_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "ajp_Arab", "apc_Arab",
+     "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "kab_Latn", "asm_Beng",
+     "ben_Beng", "lij_Latn", "oci_Latn", "yue_Hant", "zho_Hans", "zho_Hant",
+     "glg_Latn", "spa_Latn", "por_Latn", "nso_Latn", "sot_Latn",
+ )
+
+
+ class XnliConfig(datasets.BuilderConfig):
+     """BuilderConfig for dialect NLI (XNLI-style)."""
+
+     def __init__(self, language: str, languages=None, **kwargs):
+         """BuilderConfig for dialect NLI.
+
+         Args:
+             language: One of the codes in _LANGUAGES, or "all_languages".
+             languages: Optional subset of _LANGUAGES; only used with "all_languages".
+             **kwargs: Keyword arguments forwarded to the parent BuilderConfig.
+         """
+         super().__init__(**kwargs)
+         self.language = language
+         if language != "all_languages":
+             self.languages = [language]
+         else:
+             self.languages = languages if languages is not None else _LANGUAGES
+
+
+ class Xnli(datasets.GeneratorBasedBuilder):
+     """Dialect NLI: an XNLI-style cross-lingual NLI corpus. Version 1.1.0."""
+
+     VERSION = datasets.Version("1.1.0", "")
+     BUILDER_CONFIG_CLASS = XnliConfig
+     BUILDER_CONFIGS = [
+         XnliConfig(
+             name=lang,
+             language=lang,
+             version=datasets.Version("1.1.0", ""),
+             description=f"Plain text import of the dialect NLI data for the {lang} variety",
+         )
+         for lang in _LANGUAGES
+     ] + [
+         XnliConfig(
+             name="all_languages",
+             language="all_languages",
+             version=datasets.Version("1.1.0", ""),
+             description="Plain text import of the dialect NLI data for all language varieties",
+         )
+     ]
+
+     def _info(self):
+         if self.config.language == "all_languages":
+             features = datasets.Features(
+                 {
+                     "premise": datasets.Translation(
+                         languages=_LANGUAGES,
+                     ),
+                     "hypothesis": datasets.TranslationVariableLanguages(
+                         languages=_LANGUAGES,
+                     ),
+                     "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "premise": datasets.Value("string"),
+                     "hypothesis": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             # No default supervised_keys (as we have to pass both premise
+             # and hypothesis as input).
+             supervised_keys=None,
+             homepage="https://www.nyu.edu/projects/bowman/xnli/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dirs = dl_manager.download_and_extract(
+             {
+                 "train_data": _TRAIN_DATA_URL,
+                 "test_data": _TEST_DATA_URL,
+                 "val_data": _VAL_DATA_URL,
+             }
+         )
+         train_dir = dl_dirs["train_data"]
+         test_dir = dl_dirs["test_data"]
+         val_dir = dl_dirs["val_data"]
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     # Training data is taken from the English file only: the filter
+                     # below keeps just eng_Latn regardless of the configured language.
+                     "filepaths": [
+                         os.path.join(train_dir, f"train-{lang}.tsv")
+                         for lang in self.config.languages
+                         if lang == "eng_Latn"
+                     ],
+                     "data_format": "XNLI-MT",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepaths": [os.path.join(test_dir, "test.tsv")], "data_format": "XNLI"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepaths": [os.path.join(val_dir, "dev.tsv")], "data_format": "XNLI"},
+             ),
+         ]
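+
+     # Two on-disk formats are handled below (inherited from the XNLI script):
+     #   - "XNLI-MT": one TSV per language with columns premise / hypo / label,
+     #     where the label may appear as "contradictory" rather than "contradiction".
+     #   - "XNLI": a single TSV covering all languages, with columns
+     #     language / sentence1 / sentence2 / gold_label / pairID.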
+     def _generate_examples(self, data_format, filepaths):
+         """Yields the examples in raw (text) form."""
+
+         if self.config.language == "all_languages":
+             if data_format == "XNLI-MT":
+                 with ExitStack() as stack:
+                     files = [stack.enter_context(open(filepath, encoding="utf-8")) for filepath in filepaths]
+                     readers = [csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE) for file in files]
+                     for row_idx, rows in enumerate(zip(*readers)):
+                         yield row_idx, {
+                             "premise": {lang: row["premise"] for lang, row in zip(self.config.languages, rows)},
+                             "hypothesis": {lang: row["hypo"] for lang, row in zip(self.config.languages, rows)},
+                             "label": rows[0]["label"].replace("contradictory", "contradiction"),
+                         }
+             else:
+                 # Group rows from all files by pairID so that each example aggregates
+                 # every language's version of the same premise/hypothesis pair.
+                 rows_per_pair_id = collections.defaultdict(list)
+                 for filepath in filepaths:
+                     with open(filepath, encoding="utf-8") as f:
+                         reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                         for row in reader:
+                             rows_per_pair_id[row["pairID"]].append(row)
+
+                 for rows in rows_per_pair_id.values():
+                     premise = {row["language"]: row["sentence1"] for row in rows}
+                     hypothesis = {row["language"]: row["sentence2"] for row in rows}
+                     yield rows[0]["pairID"], {
+                         "premise": premise,
+                         "hypothesis": hypothesis,
+                         "label": rows[0]["gold_label"],
+                     }
+         else:
+             if data_format == "XNLI-MT":
+                 for file_idx, filepath in enumerate(filepaths):
+                     # Use a context manager so the file handle is always closed.
+                     with open(filepath, encoding="utf-8") as file:
+                         reader = csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
+                         for row_idx, row in enumerate(reader):
+                             key = f"{file_idx}_{row_idx}"
+                             yield key, {
+                                 "premise": row["premise"],
+                                 "hypothesis": row["hypo"],
+                                 "label": row["label"].replace("contradictory", "contradiction"),
+                             }
+             else:
+                 for filepath in filepaths:
+                     with open(filepath, encoding="utf-8") as f:
+                         reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                         for row in reader:
+                             # Keep only rows for the configured language/dialect.
+                             if row["language"] == self.config.language:
+                                 yield row["pairID"], {
+                                     "premise": row["sentence1"],
+                                     "hypothesis": row["sentence2"],
+                                     "label": row["gold_label"],
+                                 }
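
For reference, a minimal usage sketch (assuming the script is hosted under a repo id like "gmnlp/dialect_nli" — a guess from the committer name — and a datasets version that still executes dataset loading scripts):

    import datasets

    # Single-variety config; use "all_languages" to get aligned translation
    # dicts instead of plain strings for premise/hypothesis.
    nli = datasets.load_dataset("gmnlp/dialect_nli", "eng_Latn")  # hypothetical repo id
    print(nli["validation"][0])  # e.g. {'premise': '...', 'hypothesis': '...', 'label': 0}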