holylovenia committed
Commit 148a9a1
1 Parent(s): 3c7d06c

Upload mdia.py with huggingface_hub

Files changed (1)
  1. mdia.py +226 -0
mdia.py ADDED
@@ -0,0 +1,226 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
                                      Licenses, Tasks)

_CITATION = """\
@misc{zhang2022mdia,
    title={MDIA: A Benchmark for Multilingual Dialogue Generation in 46 Languages},
    author={Qingyu Zhang and Xiaoyu Shen and Ernie Chang and Jidong Ge and Pengke Chen},
    year={2022},
    eprint={2208.13078},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DATASETNAME = "mdia"

_DESCRIPTION = """\
This is a multilingual benchmark for dialogue generation containing real-life
Reddit conversations (parent and response comment pairs) in 46 languages,
including Indonesian, Tagalog and Vietnamese. English translations of the
comments are also provided.
"""

_HOMEPAGE = "https://github.com/DoctorDream/mDIA"

_LANGUAGES = ["ind", "tgl", "vie"]

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

_URLS = {
    "raw": "https://github.com/DoctorDream/mDIA/raw/master/datasets/raw.zip",
    "translated": "https://github.com/DoctorDream/mDIA/raw/master/datasets/translated.zip",
}

_SUPPORTED_TASKS = [Tasks.DIALOGUE_SYSTEM, Tasks.MACHINE_TRANSLATION]  # DS, MT
_SEACROWD_SCHEMA = {task.value: f"seacrowd_{str(TASK_TO_SCHEMA[task]).lower()}" for task in _SUPPORTED_TASKS}  # both map to the t2t schema
_SUBSETS = [
    "ind_dialogue",
    "ind_eng",
    "tgl_dialogue",
    "tgl_eng",
    "vie_dialogue",
    "vie_eng",
]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class MdiaDataset(datasets.GeneratorBasedBuilder):
    """Multilingual benchmark for dialogue generation containing real-life Reddit conversations."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # each subset gets a source config and a SEACrowd config; "dialogue"
    # subsets use the dialogue-system schema, "eng" subsets the MT schema
    BUILDER_CONFIGS = []
    for subset in _SUBSETS:
        if "dialogue" in subset:
            BUILDER_CONFIGS += [
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{subset}_source",
                    version=SOURCE_VERSION,
                    description=f"{_DATASETNAME} {subset} source schema",
                    schema="source",
                    subset_id=subset,
                ),
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{subset}_{_SEACROWD_SCHEMA['DS']}",
                    version=SEACROWD_VERSION,
                    description=f"{_DATASETNAME} {subset} SEACrowd schema",
                    schema=_SEACROWD_SCHEMA["DS"],
                    subset_id=subset,
                ),
            ]
        else:
            BUILDER_CONFIGS += [
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{subset}_source",
                    version=SOURCE_VERSION,
                    description=f"{_DATASETNAME} {subset} source schema",
                    schema="source",
                    subset_id=subset,
                ),
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{subset}_{_SEACROWD_SCHEMA['MT']}",
                    version=SEACROWD_VERSION,
                    description=f"{_DATASETNAME} {subset} SEACrowd schema",
                    schema=_SEACROWD_SCHEMA["MT"],
                    subset_id=subset,
                ),
            ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{_SUBSETS[0]}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "lang": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "source_body": datasets.Value("string"),
                    "target_body": datasets.Value("string"),
                    "link_id": datasets.Value("string"),
                    "source_id": datasets.Value("string"),
                    "target_id": datasets.Value("string"),
                    "translated_source_body": datasets.Value("string"),
                    "translated_target_body": datasets.Value("string"),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA["DS"]:  # same schema as _SEACROWD_SCHEMA["MT"]
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        lang_map = {"ind": "id", "tgl": "tl", "vie": "vi"}
        lang = lang_map[self.config.subset_id.split("_")[0]]

        # the translated archive contains both the original and the
        # English-translated comment bodies, so it serves every config
        data_url = _URLS["translated"]
        data_dir = Path(dl_manager.download_and_extract(data_url)) / "translated"
        data_path = "{split}_data/{lang}2en_{split}.csv"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_path": data_dir / data_path.format(split="train", lang=lang),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_path": data_dir / data_path.format(split="test", lang=lang),
                },
            ),
            datasets.SplitGenerator(
                # the upstream archive names its validation split "eval"
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_path": data_dir / data_path.format(split="eval", lang=lang),
                },
            ),
        ]

    def _generate_examples(self, data_path: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        df = pd.read_csv(data_path)

        # source schema
        if self.config.schema == "source":
            for i, row in df.iterrows():
                yield i, {
                    "lang": row["lang"],
                    "title": row["title"],
                    "source_body": row["source_body"],
                    "target_body": row["target_body"],
                    "link_id": row["link_id"],
                    "source_id": row["source_id"],
                    "target_id": row["target_id"],
                    "translated_source_body": row["translated_source_body"],
                    "translated_target_body": row["translated_target_body"],
                }

        # t2t schema for dialogue: parent comment paired with its response
        elif "dialogue" in self.config.subset_id:
            for i, row in df.iterrows():
                yield i, {
                    "id": str(i),
                    "text_1": row["source_body"],
                    "text_2": row["target_body"],
                    "text_1_name": "source_body",
                    "text_2_name": "target_body",
                }

        # t2t schema for machine translation: each row yields two pairs,
        # one for the parent comment and one for the response
        elif "eng" in self.config.subset_id:
            for i, row in df.iterrows():
                for j in range(2):
                    idx = i * 2 + j
                    if j == 0:
                        yield idx, {
                            "id": str(idx),
                            "text_1": row["source_body"],
                            "text_2": row["translated_source_body"],
                            "text_1_name": "source_body",
                            "text_2_name": "translated_source_body",
                        }
                    else:
                        yield idx, {
                            "id": str(idx),
                            "text_1": row["target_body"],
                            "text_2": row["translated_target_body"],
                            "text_1_name": "target_body",
                            "text_2_name": "translated_target_body",
                        }
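
A minimal usage sketch for the uploaded script, assuming the seacrowd package is installed so its imports resolve. The config name below follows the "{_DATASETNAME}_{subset}_{schema}" pattern built in BUILDER_CONFIGS; recent datasets releases also require trust_remote_code=True to run script-based loaders:

import datasets

# load the Indonesian dialogue subset in the source schema
dataset = datasets.load_dataset(
    "mdia.py",
    name="mdia_ind_dialogue_source",
    trust_remote_code=True,
)
print(dataset["train"][0])  # keys: lang, title, source_body, target_body, ...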