{"ynat": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/66/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 7, "names": ["IT\uacfc\ud559", "\uacbd\uc81c", "\uc0ac\ud68c", "\uc0dd\ud65c\ubb38\ud654", "\uc138\uacc4", "\uc2a4\ud3ec\uce20", "\uc815\uce58"], "names_file": null, "id": null, "_type": "ClassLabel"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "ynat", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10109664, "num_examples": 45678, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 2039197, "num_examples": 9107, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000066/data/ynat-v1.tar.gz": {"num_bytes": 4932555, "checksum": "820a4d1d6d1fd83e2a421f856965d3cfc5c93627935ce8c5b27468c6113fc482"}}, "download_size": 4932555, "post_processing_size": null, "dataset_size": 12148861, "size_in_bytes": 17081416}, "sts": {"description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/67/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "labels": {"label": {"dtype": "float64", "id": null, "_type": "Value"}, "real-label": {"dtype": "float64", "id": null, "_type": "Value"}, "binary-label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "sts", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2832921, "num_examples": 11668, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 122657, "num_examples": 519, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000067/data/klue-sts-v1.tar.gz": {"num_bytes": 1349875, "checksum": "539341ba78a3b351c686cf70a448ac7a5886ed95f0719d5e3d2378ba703213bd"}}, "download_size": 1349875, "post_processing_size": null, "dataset_size": 2955578, "size_in_bytes": 4305453}, "nli": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/68/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "nli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5719930, "num_examples": 24998, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 673276, "num_examples": 3000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000068/data/klue-nli-v1.tar.gz": {"num_bytes": 1257374, "checksum": "388be2033ef712072201903795a35b4f86826ee3ed3b62dc0c98e1721baa8850"}}, "download_size": 1257374, "post_processing_size": null, "dataset_size": 6393206, "size_in_bytes": 7650580}, "ner": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/69/overview/description", "license": "CC-BY-SA-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 13, "names": ["B-DT", "I-DT", "B-LC", "I-LC", "B-OG", "I-OG", "B-PS", "I-PS", "B-QT", "I-QT", "B-TI", "I-TI", "O"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "ner", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 19891953, "num_examples": 21008, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 4937579, "num_examples": 5000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000069/data/klue-ner-v1.tar.gz": {"num_bytes": 4308644, "checksum": "848a89759ac6b7c149c9a00d820726fe2a140c22782201f1a40d856672e7ea8e"}}, "download_size": 4308644, "post_processing_size": null, "dataset_size": 24829532, "size_in_bytes": 29138176}, "re": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/70/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "subject_entity": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "start_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "end_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}}, "object_entity": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "start_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "end_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}}, "label": {"num_classes": 30, "names": ["no_relation", "org:dissolved", "org:founded", "org:place_of_headquarters", "org:alternate_names", "org:member_of", "org:members", "org:political/religious_affiliation", "org:product", "org:founded_by", "org:top_members/employees", "org:number_of_employees/members", "per:date_of_birth", "per:date_of_death", "per:place_of_birth", "per:place_of_death", "per:place_of_residence", "per:origin", "per:employee_of", "per:schools_attended", "per:alternate_names", "per:parents", "per:children", "per:siblings", "per:spouse", "per:other_family", "per:colleagues", "per:product", "per:religion", "per:title"], "names_file": null, "id": null, "_type": "ClassLabel"}, "source": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "re", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11145538, "num_examples": 32470, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 2559300, "num_examples": 7765, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000070/data/klue-re-v1.tar.gz": {"num_bytes": 5669259, "checksum": "b09ceac0d986cc09e42fcda9c7f2873c0eea8ec0629baf91fead36580790f8f5"}}, "download_size": 5669259, "post_processing_size": null, "dataset_size": 13704838, "size_in_bytes": 19374097}, "dp": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. 
With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/71/overview/description", "license": "CC-BY-SA-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "index": [{"dtype": "int32", "id": null, "_type": "Value"}], "word_form": [{"dtype": "string", "id": null, "_type": "Value"}], "lemma": [{"dtype": "string", "id": null, "_type": "Value"}], "pos": [{"dtype": "string", "id": null, "_type": "Value"}], "head": [{"dtype": "int32", "id": null, "_type": "Value"}], "deprel": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "dp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7900009, "num_examples": 10000, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 1557506, "num_examples": 2000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000071/data/klue-dp-v1.tar.gz": {"num_bytes": 2033461, "checksum": "2c76a3543a50599ac6640ad360ba00eac36e0b5b2363f708a614d6e50844d17b"}}, "download_size": 2033461, "post_processing_size": null, "dataset_size": 9457515, "size_in_bytes": 11490976}, "mrc": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/72/overview/description", "license": "CC-BY-SA-4.0", "features": {"title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "news_category": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "guid": {"dtype": "string", "id": null, "_type": "Value"}, "is_impossible": {"dtype": "bool", "id": null, "_type": "Value"}, "question_type": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "mrc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 46505665, "num_examples": 17554, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 15583053, "num_examples": 5841, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000072/data/klue-mrc-v1.tar.gz": {"num_bytes": 19218422, "checksum": "a444af252901452380d58a6320908ce4a86759bb6f38ad95d0ca98584ad33d14"}}, "download_size": 19218422, "post_processing_size": null, "dataset_size": 62088718, "size_in_bytes": 81307140}, "wos": {"description": "KLUE (Korean Language Understanding Evaluation)\nThe Korean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate the natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. 
Furthermore, we build an evaluation system and carefully choose evaluation metrics\nfor every task, thus establishing a fair comparison across Korean language models.\n", "citation": "@misc{park2021klue,\n title={KLUE: Korean Language Understanding Evaluation},\n author={Sungjoon Park and Jihyung Moon and Sungdong Kim and Won Ik Cho and Jiyoon Han and Jangwon Park and Chisung Song and Junseong Kim and Yongsook Song and Taehwan Oh and Joohong Lee and Juhyun Oh and Sungwon Lyu and Younghoon Jeong and Inkwon Lee and Sangwoo Seo and Dongjun Lee and Hyunwoo Kim and Myeonghwa Lee and Seongbo Jang and Seungwon Do and Sunkyoung Kim and Kyungtae Lim and Jongwon Lee and Kyumin Park and Jamin Shin and Seonghyun Kim and Lucy Park and Alice Oh and Jungwoo Ha and Kyunghyun Cho},\n year={2021},\n eprint={2105.09680},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://klue-benchmark.com/tasks/73/overview/description", "license": "CC-BY-SA-4.0", "features": {"guid": {"dtype": "string", "id": null, "_type": "Value"}, "domains": [{"dtype": "string", "id": null, "_type": "Value"}], "dialogue": [{"role": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "state": [{"dtype": "string", "id": null, "_type": "Value"}]}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "klue", "config_name": "wos", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 26677002, "num_examples": 8000, "dataset_name": "klue"}, "validation": {"name": "validation", "num_bytes": 3488943, "num_examples": 1000, "dataset_name": "klue"}}, "download_checksums": {"http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000073/data/wos-v1.tar.gz": {"num_bytes": 4785657, "checksum": "da17829300271560afc6e7fc330503c2ca6f7ae7721d9bb94308579542a5871f"}}, "download_size": 4785657, "post_processing_size": null, "dataset_size": 30165945, "size_in_bytes": 34951602}}