albertvillanova (HF staff) committed on
Commit aa733cd
1 Parent(s): 750599f

Convert dataset to Parquet
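A minimal sketch (not part of this commit) of what the conversion enables: the Parquet-backed config can be loaded directly from the Hub with the `datasets` library, assuming the canonical `klue` repository id.

```python
from datasets import load_dataset

# Loads the ynat config; after this commit the data is served from the
# Parquet shards committed below rather than the original tar.gz archives.
ds = load_dataset("klue", "ynat")
print(ds)  # DatasetDict with "train" (45678 rows) and "validation" (9107 rows)
```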

README.md CHANGED
@@ -29,96 +29,76 @@ task_ids:
 - topic-classification
 paperswithcode_id: klue
 pretty_name: KLUE
+config_names:
+- dp
+- mrc
+- ner
+- nli
+- re
+- sts
+- wos
+- ynat
 tags:
 - relation-extraction
 dataset_info:
-- config_name: ynat
+- config_name: dp
   features:
-  - name: guid
-    dtype: string
-  - name: title
-    dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': IT과학
-          '1': 경제
-          '2': 사회
-          '3': 생활문화
-          '4': 세계
-          '5': 스포츠
-          '6': 정치
-  - name: url
-    dtype: string
-  - name: date
+  - name: sentence
     dtype: string
+  - name: index
+    list: int32
+  - name: word_form
+    list: string
+  - name: lemma
+    list: string
+  - name: pos
+    list: string
+  - name: head
+    list: int32
+  - name: deprel
+    list: string
   splits:
   - name: train
-    num_bytes: 10109664
-    num_examples: 45678
+    num_bytes: 7900009
+    num_examples: 10000
   - name: validation
-    num_bytes: 2039197
-    num_examples: 9107
-  download_size: 4932555
-  dataset_size: 12148861
-- config_name: sts
+    num_bytes: 1557506
+    num_examples: 2000
+  download_size: 2033461
+  dataset_size: 9457515
+- config_name: mrc
   features:
-  - name: guid
-    dtype: string
-  - name: source
-    dtype: string
-  - name: sentence1
+  - name: title
     dtype: string
-  - name: sentence2
+  - name: context
     dtype: string
-  - name: labels
-    struct:
-    - name: label
-      dtype: float64
-    - name: real-label
-      dtype: float64
-    - name: binary-label
-      dtype:
-        class_label:
-          names:
-            '0': negative
-            '1': positive
-  splits:
-  - name: train
-    num_bytes: 2832921
-    num_examples: 11668
-  - name: validation
-    num_bytes: 122657
-    num_examples: 519
-  download_size: 1349875
-  dataset_size: 2955578
-- config_name: nli
-  features:
-  - name: guid
+  - name: news_category
     dtype: string
   - name: source
     dtype: string
-  - name: premise
+  - name: guid
     dtype: string
-  - name: hypothesis
+  - name: is_impossible
+    dtype: bool
+  - name: question_type
+    dtype: int32
+  - name: question
     dtype: string
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': entailment
-          '1': neutral
-          '2': contradiction
+  - name: answers
+    sequence:
+    - name: answer_start
+      dtype: int32
+    - name: text
+      dtype: string
   splits:
   - name: train
-    num_bytes: 5719930
-    num_examples: 24998
+    num_bytes: 46505665
+    num_examples: 17554
   - name: validation
-    num_bytes: 673276
-    num_examples: 3000
-  download_size: 1257374
-  dataset_size: 6393206
+    num_bytes: 15583053
+    num_examples: 5841
+  download_size: 19218422
+  dataset_size: 62088718
 - config_name: ner
   features:
   - name: sentence
@@ -151,6 +131,32 @@ dataset_info:
     num_examples: 5000
   download_size: 4308644
   dataset_size: 24829532
+- config_name: nli
+  features:
+  - name: guid
+    dtype: string
+  - name: source
+    dtype: string
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': neutral
+          '2': contradiction
+  splits:
+  - name: train
+    num_bytes: 5719930
+    num_examples: 24998
+  - name: validation
+    num_bytes: 673276
+    num_examples: 3000
+  download_size: 1257374
+  dataset_size: 6393206
 - config_name: re
   features:
   - name: guid
@@ -222,64 +228,37 @@ dataset_info:
     num_examples: 7765
   download_size: 5669259
   dataset_size: 13704838
-- config_name: dp
-  features:
-  - name: sentence
-    dtype: string
-  - name: index
-    list: int32
-  - name: word_form
-    list: string
-  - name: lemma
-    list: string
-  - name: pos
-    list: string
-  - name: head
-    list: int32
-  - name: deprel
-    list: string
-  splits:
-  - name: train
-    num_bytes: 7900009
-    num_examples: 10000
-  - name: validation
-    num_bytes: 1557506
-    num_examples: 2000
-  download_size: 2033461
-  dataset_size: 9457515
-- config_name: mrc
+- config_name: sts
   features:
-  - name: title
-    dtype: string
-  - name: context
-    dtype: string
-  - name: news_category
+  - name: guid
     dtype: string
   - name: source
     dtype: string
-  - name: guid
+  - name: sentence1
     dtype: string
-  - name: is_impossible
-    dtype: bool
-  - name: question_type
-    dtype: int32
-  - name: question
+  - name: sentence2
     dtype: string
-  - name: answers
-    sequence:
-    - name: answer_start
-      dtype: int32
-    - name: text
-      dtype: string
+  - name: labels
+    struct:
+    - name: label
+      dtype: float64
+    - name: real-label
+      dtype: float64
+    - name: binary-label
+      dtype:
+        class_label:
+          names:
+            '0': negative
+            '1': positive
   splits:
   - name: train
-    num_bytes: 46505665
-    num_examples: 17554
+    num_bytes: 2832921
+    num_examples: 11668
   - name: validation
-    num_bytes: 15583053
-    num_examples: 5841
-  download_size: 19218422
-  dataset_size: 62088718
+    num_bytes: 122657
+    num_examples: 519
+  download_size: 1349875
+  dataset_size: 2955578
 - config_name: wos
   features:
   - name: guid
@@ -303,15 +282,43 @@ dataset_info:
     num_examples: 1000
   download_size: 4785657
   dataset_size: 30165945
-config_names:
-- dp
-- mrc
-- ner
-- nli
-- re
-- sts
-- wos
-- ynat
+- config_name: ynat
+  features:
+  - name: guid
+    dtype: string
+  - name: title
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': IT과학
+          '1': 경제
+          '2': 사회
+          '3': 생활문화
+          '4': 세계
+          '5': 스포츠
+          '6': 정치
+  - name: url
+    dtype: string
+  - name: date
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 10109584
+    num_examples: 45678
+  - name: validation
+    num_bytes: 2039181
+    num_examples: 9107
+  download_size: 5012303
+  dataset_size: 12148765
+configs:
+- config_name: ynat
+  data_files:
+  - split: train
+    path: ynat/train-*
+  - split: validation
+    path: ynat/validation-*
 ---
 
 # Dataset Card for KLUE
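For illustration, the new `configs` block above is what maps the `ynat` config to its Parquet shards on the Hub. A rough hand-rolled equivalent uses the generic `parquet` builder with the repo-relative glob patterns from the YAML (a sketch, assuming the shards are available locally under `ynat/`):

```python
from datasets import load_dataset

# Resolve the same train/validation shards the `data_files` patterns point to.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "ynat/train-*.parquet",
        "validation": "ynat/validation-*.parquet",
    },
)
```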
dataset_infos.json CHANGED
@@ -1 +1,721 @@
-{"ynat": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/66/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 7, "names": ["IT\uacfc\ud559", "\uacbd\uc81c", "\uc0ac\ud68c", "\uc0dd\ud65c\ubb38\ud654", "\uc138\uacc4", "\uc2a4\ud3ec\uce20", "\uc815\uce58"], "names_file": null, "id": null, "_type": "ClassLabel"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "ynat", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10109664, "num_examples": 45678, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 2039197, "num_examples": 9107, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000066/data/ynat-v1.tar.gz": {"num_bytes": 4932555, "checksum": "820a4d1d6d1fd83e2a421f856965d3cfc5c93627935ce8c5b27468c6113fc482"}}, "download_size": 4932555, "post_processing_size": null, "dataset_size": 12148861, "size_in_bytes": 17081416}, "sts": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/67/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "labels": {"label": {"dtype": "float64", "id": null, "_type": "Value"}, "real-label": {"dtype": "float64", "id": null, "_type": "Value"}, "binary-label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "sts", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2832921, "num_examples": 11668, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 122657, "num_examples": 519, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000067/data/klue-sts-v1.tar.gz": {"num_bytes": 1349875, "checksum": "539341ba78a3b351c686cf70a448ac7a5886ed95f0719d5e3d2378ba703213bd"}}, "download_size": 1349875, "post_processing_size": null, "dataset_size": 2955578, "size_in_bytes": 4305453}, "nli": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/68/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "nli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5719930, "num_examples": 24998, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 673276, "num_examples": 3000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000068/data/klue-nli-v1.tar.gz": {"num_bytes": 1257374, "checksum": "388be2033ef712072201903795a35b4f86826ee3ed3b62dc0c98e1721baa8850"}}, "download_size": 1257374, "post_processing_size": null, "dataset_size": 6393206, "size_in_bytes": 7650580}, "ner": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/69/overview/description", "license": "CC-BY-SA-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 13, "names": ["B-DT", "I-DT", "B-LC", "I-LC", "B-OG", "I-OG", "B-PS", "I-PS", "B-QT", "I-QT", "B-TI", "I-TI", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "ner", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 19891953, "num_examples": 21008, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 4937579, "num_examples": 5000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000069/data/klue-ner-v1.tar.gz": {"num_bytes": 4308644, "checksum": "848a89759ac6b7c149c9a00d820726fe2a140c22782201f1a40d856672e7ea8e"}}, "download_size": 4308644, "post_processing_size": null, "dataset_size": 24829532, "size_in_bytes": 29138176}, "re": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/70/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "subject_entity": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "start_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "end_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}}, "object_entity": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "start_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "end_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}}, "label": {"num_classes": 30, "names": ["no_relation", "org:dissolved", "org:founded", "org:place_of_headquarters", "org:alternate_names", "org:member_of", "org:members", "org:political/religious_affiliation", "org:product", "org:founded_by", "org:top_members/employees", "org:number_of_employees/members", "per:date_of_birth", "per:date_of_death", "per:place_of_birth", "per:place_of_death", "per:place_of_residence", "per:origin", "per:employee_of", "per:schools_attended", "per:alternate_names", "per:parents", "per:children", "per:siblings", "per:spouse", "per:other_family", "per:colleagues", "per:product", "per:religion", "per:title"], "names_file": null, "id": null, "_type": "ClassLabel"}, "source": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "re", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11145538, "num_examples": 32470, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 2559300, "num_examples": 7765, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000070/data/klue-re-v1.tar.gz": {"num_bytes": 5669259, "checksum": "b09ceac0d986cc09e42fcda9c7f2873c0eea8ec0629baf91fead36580790f8f5"}}, "download_size": 5669259, "post_processing_size": null, "dataset_size": 13704838, "size_in_bytes": 19374097}, "dp": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/71/overview/description", "license": "CC-BY-SA-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "index": [{"dtype": "int32", "id": null, "_type": "Value"}], "word_form": [{"dtype": "string", "id": null, "_type": "Value"}], "lemma": [{"dtype": "string", "id": null, "_type": "Value"}], "pos": [{"dtype": "string", "id": null, "_type": "Value"}], "head": [{"dtype": "int32", "id": null, "_type": "Value"}], "deprel": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "dp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7900009, "num_examples": 10000, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 1557506, "num_examples": 2000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000071/data/klue-dp-v1.tar.gz": {"num_bytes": 2033461, "checksum": "2c76a3543a50599ac6640ad360ba00eac36e0b5b2363f708a614d6e50844d17b"}}, "download_size": 2033461, "post_processing_size": null, "dataset_size": 9457515, "size_in_bytes": 11490976}, "mrc": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/72/overview/description", "license": "CC-BY-SA-4.0", "features": {"title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "news_category": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "guid": {"dtype": "string", "id": null, "_type": "Value"}, "is_impossible": {"dtype": "bool", "id": null, "_type": "Value"}, "question_type": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "mrc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 46505665, "num_examples": 17554, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 15583053, "num_examples": 5841, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000072/data/klue-mrc-v1.tar.gz": {"num_bytes": 19218422, "checksum": "a444af252901452380d58a6320908ce4a86759bb6f38ad95d0ca98584ad33d14"}}, "download_size": 19218422, "post_processing_size": null, "dataset_size": 62088718, "size_in_bytes": 81307140}, "wos": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/73/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "domains": [{"dtype": "string", "id": null, "_type": "Value"}], "dialogue": [{"role": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "state": [{"dtype": "string", "id": null, "_type": "Value"}]}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "wos", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 26677002, "num_examples": 8000, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 3488943, "num_examples": 1000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000073/data/wos-v1.tar.gz": {"num_bytes": 4785657, "checksum": "da17829300271560afc6e7fc330503c2ca6f7ae7721d9bb94308579542a5871f"}}, "download_size": 4785657, "post_processing_size": null, "dataset_size": 30165945, "size_in_bytes": 34951602}}
+{
+    "ynat": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/66/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "guid": {
+                "dtype": "string",
+                "_type": "Value"
+            },
+            "title": {
+                "dtype": "string",
+                "_type": "Value"
+            },
+            "label": {
+                "names": [
+                    "IT\uacfc\ud559",
+                    "\uacbd\uc81c",
+                    "\uc0ac\ud68c",
+                    "\uc0dd\ud65c\ubb38\ud654",
+                    "\uc138\uacc4",
+                    "\uc2a4\ud3ec\uce20",
+                    "\uc815\uce58"
+                ],
+                "_type": "ClassLabel"
+            },
+            "url": {
+                "dtype": "string",
+                "_type": "Value"
+            },
+            "date": {
+                "dtype": "string",
+                "_type": "Value"
+            }
+        },
+        "builder_name": "klue",
+        "dataset_name": "klue",
+        "config_name": "ynat",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 10109584,
+                "num_examples": 45678,
+                "dataset_name": null
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 2039181,
+                "num_examples": 9107,
+                "dataset_name": null
+            }
+        },
+        "download_size": 5012303,
+        "dataset_size": 12148765,
+        "size_in_bytes": 17161068
+    },
+    "sts": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/67/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "guid": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "source": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "sentence1": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "sentence2": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "labels": {
+                "label": {
+                    "dtype": "float64",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "real-label": {
+                    "dtype": "float64",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "binary-label": {
+                    "num_classes": 2,
+                    "names": [
+                        "negative",
+                        "positive"
+                    ],
+                    "names_file": null,
+                    "id": null,
+                    "_type": "ClassLabel"
+                }
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "sts",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 2832921,
+                "num_examples": 11668,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 122657,
+                "num_examples": 519,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000067/data/klue-sts-v1.tar.gz": {
+                "num_bytes": 1349875,
+                "checksum": "539341ba78a3b351c686cf70a448ac7a5886ed95f0719d5e3d2378ba703213bd"
+            }
+        },
+        "download_size": 1349875,
+        "post_processing_size": null,
+        "dataset_size": 2955578,
+        "size_in_bytes": 4305453
+    },
+    "nli": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/68/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "guid": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "source": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "premise": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "hypothesis": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "label": {
+                "num_classes": 3,
+                "names": [
+                    "entailment",
+                    "neutral",
+                    "contradiction"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "nli",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 5719930,
+                "num_examples": 24998,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 673276,
+                "num_examples": 3000,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000068/data/klue-nli-v1.tar.gz": {
+                "num_bytes": 1257374,
+                "checksum": "388be2033ef712072201903795a35b4f86826ee3ed3b62dc0c98e1721baa8850"
+            }
+        },
+        "download_size": 1257374,
+        "post_processing_size": null,
+        "dataset_size": 6393206,
+        "size_in_bytes": 7650580
+    },
+    "ner": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/69/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "sentence": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "tokens": {
+                "feature": {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "length": -1,
+                "id": null,
+                "_type": "Sequence"
+            },
+            "ner_tags": {
+                "feature": {
+                    "num_classes": 13,
+                    "names": [
+                        "B-DT",
+                        "I-DT",
+                        "B-LC",
+                        "I-LC",
+                        "B-OG",
+                        "I-OG",
+                        "B-PS",
+                        "I-PS",
+                        "B-QT",
+                        "I-QT",
+                        "B-TI",
+                        "I-TI",
+                        "O"
+                    ],
+                    "names_file": null,
+                    "id": null,
+                    "_type": "ClassLabel"
+                },
+                "length": -1,
+                "id": null,
+                "_type": "Sequence"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "ner",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 19891953,
+                "num_examples": 21008,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 4937579,
+                "num_examples": 5000,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000069/data/klue-ner-v1.tar.gz": {
+                "num_bytes": 4308644,
+                "checksum": "848a89759ac6b7c149c9a00d820726fe2a140c22782201f1a40d856672e7ea8e"
+            }
+        },
+        "download_size": 4308644,
+        "post_processing_size": null,
+        "dataset_size": 24829532,
+        "size_in_bytes": 29138176
+    },
+    "re": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/70/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "guid": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "sentence": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "subject_entity": {
+                "word": {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "start_idx": {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "end_idx": {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "type": {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            },
+            "object_entity": {
+                "word": {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "start_idx": {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "end_idx": {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                },
+                "type": {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            },
+            "label": {
+                "num_classes": 30,
+                "names": [
+                    "no_relation",
+                    "org:dissolved",
+                    "org:founded",
+                    "org:place_of_headquarters",
+                    "org:alternate_names",
+                    "org:member_of",
+                    "org:members",
+                    "org:political/religious_affiliation",
+                    "org:product",
+                    "org:founded_by",
+                    "org:top_members/employees",
+                    "org:number_of_employees/members",
+                    "per:date_of_birth",
+                    "per:date_of_death",
+                    "per:place_of_birth",
+                    "per:place_of_death",
+                    "per:place_of_residence",
+                    "per:origin",
+                    "per:employee_of",
+                    "per:schools_attended",
+                    "per:alternate_names",
+                    "per:parents",
+                    "per:children",
+                    "per:siblings",
+                    "per:spouse",
+                    "per:other_family",
+                    "per:colleagues",
+                    "per:product",
+                    "per:religion",
+                    "per:title"
+                ],
+                "names_file": null,
+                "id": null,
+                "_type": "ClassLabel"
+            },
+            "source": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "re",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 11145538,
+                "num_examples": 32470,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 2559300,
+                "num_examples": 7765,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000070/data/klue-re-v1.tar.gz": {
+                "num_bytes": 5669259,
+                "checksum": "b09ceac0d986cc09e42fcda9c7f2873c0eea8ec0629baf91fead36580790f8f5"
+            }
+        },
+        "download_size": 5669259,
+        "post_processing_size": null,
+        "dataset_size": 13704838,
+        "size_in_bytes": 19374097
+    },
+    "dp": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/71/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "sentence": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "index": [
+                {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "word_form": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "lemma": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "pos": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "head": [
+                {
+                    "dtype": "int32",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "deprel": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ]
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "dp",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 7900009,
+                "num_examples": 10000,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 1557506,
+                "num_examples": 2000,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000071/data/klue-dp-v1.tar.gz": {
+                "num_bytes": 2033461,
+                "checksum": "2c76a3543a50599ac6640ad360ba00eac36e0b5b2363f708a614d6e50844d17b"
+            }
+        },
+        "download_size": 2033461,
+        "post_processing_size": null,
+        "dataset_size": 9457515,
+        "size_in_bytes": 11490976
+    },
+    "mrc": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/72/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "title": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "context": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "news_category": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "source": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "guid": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "is_impossible": {
+                "dtype": "bool",
+                "id": null,
+                "_type": "Value"
+            },
+            "question_type": {
+                "dtype": "int32",
+                "id": null,
+                "_type": "Value"
+            },
+            "question": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "answers": {
+                "feature": {
+                    "answer_start": {
+                        "dtype": "int32",
+                        "id": null,
+                        "_type": "Value"
+                    },
+                    "text": {
+                        "dtype": "string",
+                        "id": null,
+                        "_type": "Value"
+                    }
+                },
+                "length": -1,
+                "id": null,
+                "_type": "Sequence"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "mrc",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 46505665,
+                "num_examples": 17554,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 15583053,
+                "num_examples": 5841,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000072/data/klue-mrc-v1.tar.gz": {
+                "num_bytes": 19218422,
+                "checksum": "a444af252901452380d58a6320908ce4a86759bb6f38ad95d0ca98584ad33d14"
+            }
+        },
+        "download_size": 19218422,
+        "post_processing_size": null,
+        "dataset_size": 62088718,
+        "size_in_bytes": 81307140
+    },
+    "wos": {
+        "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
+        "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
+        "homepage": "https://klue-benchmark.com/tasks/73/overview/description",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "guid": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "domains": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "dialogue": [
+                {
+                    "role": {
+                        "dtype": "string",
+                        "id": null,
+                        "_type": "Value"
+                    },
+                    "text": {
+                        "dtype": "string",
+                        "id": null,
+                        "_type": "Value"
+                    },
+                    "state": [
+                        {
+                            "dtype": "string",
+                            "id": null,
+                            "_type": "Value"
+                        }
+                    ]
+                }
+            ]
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "klue",
+        "config_name": "wos",
+        "version": {
+            "version_str": "1.0.0",
+            "description": "",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 26677002,
+                "num_examples": 8000,
+                "dataset_name": "klue"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 3488943,
+                "num_examples": 1000,
+                "dataset_name": "klue"
+            }
+        },
+        "download_checksums": {
+            "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000073/data/wos-v1.tar.gz": {
+                "num_bytes": 4785657,
+                "checksum": "da17829300271560afc6e7fc330503c2ca6f7ae7721d9bb94308579542a5871f"
+            }
+        },
+        "download_size": 4785657,
+        "post_processing_size": null,
+        "dataset_size": 30165945,
+        "size_in_bytes": 34951602
+    }
+}
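A small sketch of how this metadata surfaces downstream (assuming the canonical `klue` repository id): the builder exposes the same split sizes and byte counts recorded in `dataset_infos.json`, without downloading any data.

```python
from datasets import load_dataset_builder

builder = load_dataset_builder("klue", "ynat")
info = builder.info
print(info.splits["train"].num_examples)  # 45678
print(info.download_size)                 # 5012303 after this commit
```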
ynat/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:062c3b51c1ca34ed23c8fd19ffa5ea0ddcd914a95aeb9077d89576d3ef71d123
+size 4165783
ynat/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:521a6785a2492a9142474ad23ae038b1a6f333fa7e39aab8fd9dd19a20c9ce92
+size 846520
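The three lines in each added file above are a Git LFS pointer (version, oid, size), not the Parquet bytes themselves; the actual shards live in LFS storage and are fetched on checkout. Once fetched, a shard is a plain Parquet file and can also be read outside `datasets`, e.g. with pandas (a sketch, assuming the shard has been checked out locally):

```python
import pandas as pd

# Reads the ynat train shard directly; the row count matches the
# num_examples recorded in the metadata above.
df = pd.read_parquet("ynat/train-00000-of-00001.parquet")
print(len(df))  # 45678
```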