"""Korean Dataset for NLI and STS"""
import csv
import pandas as pd
import datasets
_CITATION = """\
@article{ham2020kornli,
title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
journal={arXiv preprint arXiv:2004.03289},
year={2020}
}
"""
_DESCRIPTION = """\
The dataset contains data for benchmarking Korean models on NLI and STS
"""
_URL = "https://github.com/kakaobrain/KorNLUDatasets"
_DATA_URLS = {
"nli": {
# 'mnli-train': 'https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/multinli.train.ko.tsv',
"snli-train": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/snli_1.0_train.ko.tsv",
"xnli-dev": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.dev.ko.tsv",
"xnli-test": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.test.ko.tsv",
},
"sts": {
"train": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-train.tsv",
"dev": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-dev.tsv",
"test": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-test.tsv",
},
}
class KorNluConfig(datasets.BuilderConfig):
"""BuilderConfig for korNLU"""
def __init__(self, description, data_url, citation, url, **kwargs):
"""
Args:
description: `string`, brief description of the dataset
            data_url: `dictionary`, dict with the URL for each data split.
citation: `string`, citation for the dataset.
url: `string`, url for information about the dataset.
            **kwargs: keyword arguments forwarded to the superclass.
"""
super(KorNluConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.description = description
self.data_url = data_url
self.citation = citation
self.url = url
class KorNlu(datasets.GeneratorBasedBuilder):
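    """Korean NLU dataset builder covering the KorNLI ("nli") and KorSTS ("sts") configurations."""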
BUILDER_CONFIGS = [
        KorNluConfig(name=name, description=_DESCRIPTION, data_url=_DATA_URLS[name], citation=_CITATION, url=_URL)
for name in ["nli", "sts"]
]
BUILDER_CONFIG_CLASS = KorNluConfig
def _info(self):
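        # The two configurations expose different schemas: "nli" yields premise/hypothesis/label,
        # "sts" yields scored sentence pairs plus genre/filename/year metadata.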
features = {}
if self.config.name == "nli":
labels = ["entailment", "neutral", "contradiction"]
features["premise"] = datasets.Value("string")
features["hypothesis"] = datasets.Value("string")
features["label"] = datasets.features.ClassLabel(names=labels)
if self.config.name == "sts":
genre = ["main-news", "main-captions", "main-forum", "main-forums"]
filename = [
"images",
"MSRpar",
"MSRvid",
"headlines",
"deft-forum",
"deft-news",
"track5.en-en",
"answers-forums",
"answer-answer",
]
year = ["2017", "2016", "2013", "2012train", "2014", "2015", "2012test"]
features["genre"] = datasets.features.ClassLabel(names=genre)
features["filename"] = datasets.features.ClassLabel(names=filename)
features["year"] = datasets.features.ClassLabel(names=year)
features["id"] = datasets.Value("int32")
features["score"] = datasets.Value("float32")
features["sentence1"] = datasets.Value("string")
features["sentence2"] = datasets.Value("string")
return datasets.DatasetInfo(
            description=_DESCRIPTION, features=datasets.Features(features), homepage=_URL, citation=_CITATION
)
def _split_generators(self, dl_manager):
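        # Download each split's TSV and wire it into the corresponding SplitGenerator.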
if self.config.name == "nli":
# mnli_train = dl_manager.download_and_extract(self.config.data_url['mnli-train'])
snli_train = dl_manager.download_and_extract(self.config.data_url["snli-train"])
xnli_dev = dl_manager.download_and_extract(self.config.data_url["xnli-dev"])
xnli_test = dl_manager.download_and_extract(self.config.data_url["xnli-test"])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": snli_train, "split": "train"}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": xnli_dev, "split": "dev"}
),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": xnli_test, "split": "test"}),
]
if self.config.name == "sts":
train = dl_manager.download_and_extract(self.config.data_url["train"])
dev = dl_manager.download_and_extract(self.config.data_url["dev"])
test = dl_manager.download_and_extract(self.config.data_url["test"])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
]
def _generate_examples(self, filepath, split):
if self.config.name == "nli":
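            # Read the NLI TSV with pandas, drop rows with missing values, and yield
            # one example per remaining row, keyed by the original row index.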
df = pd.read_csv(filepath, sep="\t")
df = df.dropna()
for id_, row in df.iterrows():
yield id_, {
"premise": str(row["sentence1"]),
"hypothesis": str(row["sentence2"]),
"label": str(row["gold_label"]),
}
if self.config.name == "sts":
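            # Read the STS TSV with csv.DictReader and yield one example per row,
            # using the row number as the example id.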
with open(filepath, encoding="utf-8") as f:
data = csv.DictReader(f, delimiter="\t")
for id_, row in enumerate(data):
yield id_, {
"genre": row["genre"],
"filename": row["filename"],
"year": row["year"],
"id": row["id"],
"sentence1": row["sentence1"],
"sentence2": row["sentence2"],
"score": row["score"],
}
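
# Example usage: a minimal sketch, assuming this script is saved locally as
# "kor_nlu.py". Newer `datasets` releases have deprecated script-based loading,
# in which case the published Hub copy of this dataset would be loaded instead.
if __name__ == "__main__":
    nli = datasets.load_dataset("kor_nlu.py", "nli")
    print(nli["train"][0])  # {'premise': ..., 'hypothesis': ..., 'label': ...}
    sts = datasets.load_dataset("kor_nlu.py", "sts")
    print(sts["test"][0])  # includes genre/filename/year metadata and a float 'score'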