Languages: Korean
youngwookkim committed
Commit 7985525
1 Parent(s): 7ee869e

Revert "remove python scripts (#7)"


This reverts commit 7ee869e8e272c6d29f7e46e46ceed2e266442f8f.

Files changed (2):
  1. dataset_infos.json +224 -0
  2. kobest_v1.py +242 -0
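
Restoring these two files makes the dataset loadable with the datasets library again. A minimal sketch, assuming the Hub repository id is skt/kobest_v1 (the id is not stated in this commit; adjust as needed):

import datasets

# Hypothetical repo id; replace with the actual Hub id of this dataset.
boolq = datasets.load_dataset("skt/kobest_v1", "boolq")
print(boolq)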
dataset_infos.json ADDED
@@ -0,0 +1,224 @@
+{
+    "boolq": {
+        "description": " Korean Balanced Evaluation of Significant Tasks Benchmark\n",
+        "citation": " TBD\n",
+        "homepage": "https://github.com/SKT-LSL/KoBEST_datarepo",
+        "license": "",
+        "features": {
+            "paragraph": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "question": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 2,
+                "names": [
+                    "False",
+                    "True"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "kobest_v1",
+        "config_name": "boolq",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        }
+    },
+    "copa": {
+        "description": " Korean Balanced Evaluation of Significant Tasks Benchmark\n",
+        "citation": " TBD\n",
+        "homepage": "https://github.com/SKT-LSL/KoBEST_datarepo",
+        "license": "",
+        "features": {
+            "premise": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "question": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "alternative_1": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "alternative_2": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 2,
+                "names": [
+                    "alternative_1",
+                    "alternative_2"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "kobest_v1",
+        "config_name": "copa",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        }
+    },
+    "wic": {
+        "description": " Korean Balanced Evaluation of Significant Tasks Benchmark\n",
+        "citation": " TBD\n",
+        "homepage": "https://github.com/SKT-LSL/KoBEST_datarepo",
+        "license": "",
+        "features": {
+            "word": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "context_1": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "context_2": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 2,
+                "names": [
+                    "False",
+                    "True"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "kobest_v1",
+        "config_name": "wic",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        }
+    },
+    "hellaswag": {
+        "description": " Korean Balanced Evaluation of Significant Tasks Benchmark\n",
+        "citation": " TBD\n",
+        "homepage": "https://github.com/SKT-LSL/KoBEST_datarepo",
+        "license": "",
+        "features": {
+            "context": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "ending_1": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "ending_2": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "ending_3": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "ending_4": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 4,
+                "names": [
+                    "ending_1",
+                    "ending_2",
+                    "ending_3",
+                    "ending_4"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "kobest_v1",
+        "config_name": "hellaswag",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        }
+    },
+    "sentineg": {
+        "description": " Korean Balanced Evaluation of Significant Tasks Benchmark\n",
+        "citation": " TBD\n",
+        "homepage": "https://github.com/SKT-LSL/KoBEST_datarepo",
+        "license": "",
+        "features": {
+            "sentence": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 2,
+                "names": [
+                    "negative",
+                    "positive"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "kobest_v1",
+        "config_name": "sentineg",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        }
+    }
+}
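
Each top-level key in dataset_infos.json should match its own config_name and carry a label feature; a minimal sketch of that check, assuming the file sits in the current working directory:

import json

# Sanity-check dataset_infos.json: every config's "config_name" must
# equal its top-level key, and every config must define a "label" feature.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for name, info in infos.items():
    assert info["config_name"] == name, f"{name}: config_name is {info['config_name']}"
    assert "label" in info["features"], f"{name}: missing label feature"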
kobest_v1.py ADDED
@@ -0,0 +1,242 @@
+"""Korean Balanced Evaluation of Significant Tasks"""
+
+
+import csv
+import os
+import pandas as pd
+
+import datasets
+
+
+_CITATION = """\
+@misc{https://doi.org/10.48550/arxiv.2204.04541,
+    doi = {10.48550/ARXIV.2204.04541},
+    url = {https://arxiv.org/abs/2204.04541},
+    author = {Kim, Dohyeong and Jang, Myeongjun and Kwon, Deuk Sin and Davis, Eric},
+    title = {KOBEST: Korean Balanced Evaluation of Significant Tasks},
+    publisher = {arXiv},
+    year = {2022},
+}
+"""
+
+_DESCRIPTION = """\
+The dataset contains data for the KoBEST benchmark.
+"""
+
+_URL = "https://github.com/SKT-LSL/KoBEST_datarepo/raw/main"
+
+
+_DATA_URLS = {
+    "boolq": {
+        "train": _URL + "/v1.0/BoolQ/train.tsv",
+        "dev": _URL + "/v1.0/BoolQ/dev.tsv",
+        "test": _URL + "/v1.0/BoolQ/test.tsv",
+    },
+    "copa": {
+        "train": _URL + "/v1.0/COPA/train.tsv",
+        "dev": _URL + "/v1.0/COPA/dev.tsv",
+        "test": _URL + "/v1.0/COPA/test.tsv",
+    },
+    "sentineg": {
+        "train": _URL + "/v1.0/SentiNeg/train.tsv",
+        "dev": _URL + "/v1.0/SentiNeg/dev.tsv",
+        "test": _URL + "/v1.0/SentiNeg/test.tsv",
+        "test_originated": _URL + "/v1.0/SentiNeg/test.tsv",
+    },
+    "hellaswag": {
+        "train": _URL + "/v1.0/HellaSwag/train.tsv",
+        "dev": _URL + "/v1.0/HellaSwag/dev.tsv",
+        "test": _URL + "/v1.0/HellaSwag/test.tsv",
+    },
+    "wic": {
+        "train": _URL + "/v1.0/WiC/train.tsv",
+        "dev": _URL + "/v1.0/WiC/dev.tsv",
+        "test": _URL + "/v1.0/WiC/test.tsv",
+    },
+}
+
+_LICENSE = "CC-BY-SA-4.0"
+
+
+class KoBESTConfig(datasets.BuilderConfig):
+    """Config for building KoBEST"""
+
+    def __init__(self, description, data_url, citation, url, **kwargs):
+        """
+        Args:
+            description: `string`, brief description of the dataset
+            data_url: `dictionary`, dict with url for each split of data.
+            citation: `string`, citation for the dataset.
+            url: `string`, url for information about the dataset.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(KoBESTConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+        self.description = description
+        self.data_url = data_url
+        self.citation = citation
+        self.url = url
+
+
+class KoBEST(datasets.GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        KoBESTConfig(name=name, description=_DESCRIPTION, data_url=_DATA_URLS[name], citation=_CITATION, url=_URL)
+        for name in ["boolq", "copa", 'sentineg', 'hellaswag', 'wic']
+    ]
+    BUILDER_CONFIG_CLASS = KoBESTConfig
+
+    def _info(self):
+        features = {}
+        if self.config.name == "boolq":
+            labels = ["False", "True"]
+            features["paragraph"] = datasets.Value("string")
+            features["question"] = datasets.Value("string")
+            features["label"] = datasets.features.ClassLabel(names=labels)
+
+        if self.config.name == "copa":
+            labels = ["alternative_1", "alternative_2"]
+            features["premise"] = datasets.Value("string")
+            features["question"] = datasets.Value("string")
+            features["alternative_1"] = datasets.Value("string")
+            features["alternative_2"] = datasets.Value("string")
+            features["label"] = datasets.features.ClassLabel(names=labels)
+
+        if self.config.name == "wic":
+            labels = ["False", "True"]
+            features["word"] = datasets.Value("string")
+            features["context_1"] = datasets.Value("string")
+            features["context_2"] = datasets.Value("string")
+            features["label"] = datasets.features.ClassLabel(names=labels)
+
+        if self.config.name == "hellaswag":
+            labels = ["ending_1", "ending_2", "ending_3", "ending_4"]
+
+            features["context"] = datasets.Value("string")
+            features["ending_1"] = datasets.Value("string")
+            features["ending_2"] = datasets.Value("string")
+            features["ending_3"] = datasets.Value("string")
+            features["ending_4"] = datasets.Value("string")
+            features["label"] = datasets.features.ClassLabel(names=labels)
+
+        if self.config.name == "sentineg":
+            labels = ["negative", "positive"]
+            features["sentence"] = datasets.Value("string")
+            features["label"] = datasets.features.ClassLabel(names=labels)
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION, features=datasets.Features(features), homepage=_URL, citation=_CITATION
+        )
+
+    def _split_generators(self, dl_manager):
+
+        train = dl_manager.download_and_extract(self.config.data_url["train"])
+        dev = dl_manager.download_and_extract(self.config.data_url["dev"])
+        test = dl_manager.download_and_extract(self.config.data_url["test"])
+
+        # SentiNeg additionally defines a "test_originated" split.
+        if self.config.data_url.get("test_originated"):
+            test_originated = dl_manager.download_and_extract(self.config.data_url["test_originated"])
+
+            return [
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
+                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
+                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
+                datasets.SplitGenerator(name="test_originated", gen_kwargs={"filepath": test_originated, "split": "test_originated"}),
+            ]
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
+        ]
+
+    def _generate_examples(self, filepath, split):
+        if self.config.name == "boolq":
+            df = pd.read_csv(filepath, sep="\t")
+            df = df.dropna()
+            df = df[['Text', 'Question', 'Answer']]
+
+            df = df.rename(columns={
+                'Text': 'paragraph',
+                'Question': 'question',
+                'Answer': 'label',
+            })
+            # map False/True strings to class ids 0/1
+            df['label'] = [0 if str(s) == 'False' else 1 for s in df['label'].tolist()]
+
+        elif self.config.name == "copa":
+            df = pd.read_csv(filepath, sep="\t")
+            df = df.dropna()
+            df = df[['sentence', 'question', '1', '2', 'Answer']]
+
+            df = df.rename(columns={
+                'sentence': 'premise',
+                'question': 'question',
+                '1': 'alternative_1',
+                '2': 'alternative_2',
+                'Answer': 'label',
+            })
+            # answers are 1-indexed in the TSV; convert to 0-based class ids
+            df['label'] = [i - 1 for i in df['label'].tolist()]
+
+        elif self.config.name == "wic":
+            df = pd.read_csv(filepath, sep="\t")
+            df = df.dropna()
+            df = df[['Target', 'SENTENCE1', 'SENTENCE2', 'ANSWER']]
+
+            df = df.rename(columns={
+                'Target': 'word',
+                'SENTENCE1': 'context_1',
+                'SENTENCE2': 'context_2',
+                'ANSWER': 'label',
+            })
+            # map False/True strings to class ids 0/1
+            df['label'] = [0 if str(s) == 'False' else 1 for s in df['label'].tolist()]
+
+        elif self.config.name == "hellaswag":
+            df = pd.read_csv(filepath, sep="\t")
+            df = df.dropna()
+            df = df[['context', 'choice1', 'choice2', 'choice3', 'choice4', 'label']]
+
+            df = df.rename(columns={
+                'context': 'context',
+                'choice1': 'ending_1',
+                'choice2': 'ending_2',
+                'choice3': 'ending_3',
+                'choice4': 'ending_4',
+                'label': 'label',
+            })
+
+        elif self.config.name == "sentineg":
+            df = pd.read_csv(filepath, sep="\t")
+            df = df.dropna()
+
+            # the test_originated split reads the Text_origin/Label_origin columns
+            if split == "test_originated":
+                df = df[['Text_origin', 'Label_origin']]
+
+                df = df.rename(columns={
+                    'Text_origin': 'sentence',
+                    'Label_origin': 'label',
+                })
+            else:
+                df = df[['Text', 'Label']]
+
+                df = df.rename(columns={
+                    'Text': 'sentence',
+                    'Label': 'label',
+                })
+
+        else:
+            raise NotImplementedError
+
+        for id_, row in df.iterrows():
+            features = {key: row[key] for key in row.keys()}
+            yield id_, features
+
+
+if __name__ == "__main__":
+    for config_name in ["boolq", "copa", 'sentineg', 'hellaswag', 'wic']:
+        dataset = datasets.load_dataset("kobest_v1.py", config_name, ignore_verifications=True)
+        os.makedirs(config_name, exist_ok=True)
+        for split, split_dataset in dataset.items():
+            split_dataset.to_json(f"{config_name}/{split}.jsonl")
+    # for task in ['boolq', 'copa', 'wic', 'hellaswag', 'sentineg']:
+    #     dataset = datasets.load_dataset("kobest_v1.py", task, ignore_verifications=True)
+    #     print(dataset)
+    #     print(dataset['train']['label'])
+
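
For reference, a minimal usage sketch mirroring the __main__ block above (run from the directory containing kobest_v1.py; ignore_verifications matches the call in the script and skips checksum/split verification):

import datasets

# Load one KoBEST config through the local loading script.
boolq = datasets.load_dataset("kobest_v1.py", "boolq", ignore_verifications=True)
print(boolq)              # DatasetDict with train/validation/test splits
print(boolq["train"][0])  # a dict with paragraph, question and label fields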