system HF staff committed on
Commit 720805b (0 parents)

Update files from the datasets library (from 1.2.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,149 @@
+ ---
+ annotations_creators:
+ - found
+ language_creators:
+ - expert-generated
+ - found
+ - machine-generated
+ languages:
+ - ko
+ licenses:
+ - cc-by-sa-4-0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - extended|snli
+ task_categories:
+ - text-classification
+ - text-scoring
+ task_ids:
+ - natural-language-inference
+ - semantic-similarity-scoring
+ ---
+
+ # Dataset Card for KorNLU
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** [GitHub](https://github.com/kakaobrain/KorNLUDatasets)
+ - **Repository:** [GitHub](https://github.com/kakaobrain/KorNLUDatasets)
+ - **Paper:** [arXiv](https://arxiv.org/abs/2004.03289)
+ - **Leaderboard:**
+ - **Point of Contact:**
+
+ ### Dataset Summary
+
+ The dataset contains data for benchmarking Korean models on NLI and STS.
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ Korean (`ko`).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ [More Information Needed]
+
+ ### Data Splits
+
+ | config | train   | validation | test  |
+ |--------|---------|------------|-------|
+ | nli    | 550,146 | 1,570      | 4,954 |
+ | sts    | 5,703   | 1,471      | 1,379 |
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ CC BY-SA 4.0 (per the `licenses` tag in the header above).
+
+ ### Citation Information
+
+ @article{ham2020kornli,
+     title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
+     author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
+     journal={arXiv preprint arXiv:2004.03289},
+     year={2020}
+ }
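
For reference, once this commit is live the two configs defined in `kor_nlu.py` below can be loaded by name. A minimal sketch, assuming the dataset is published under the canonical id `kor_nlu`:

```python
from datasets import load_dataset

# "nli" yields premise / hypothesis / label examples.
nli = load_dataset("kor_nlu", "nli")
print(nli["train"][0])

# "sts" yields sentence pairs with a float similarity score.
sts = load_dataset("kor_nlu", "sts")
print(sts["test"][0])
```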
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"nli": {"description": " The dataset contains data for benchmarking Korean models on NLI and STS\n", "citation": " @article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "kor_nlu", "config_name": "nli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 80135707, "num_examples": 550146, "dataset_name": "kor_nlu"}, "validation": {"name": "validation", "num_bytes": 318170, "num_examples": 1570, "dataset_name": "kor_nlu"}, "test": {"name": "test", "num_bytes": 1047250, "num_examples": 4954, "dataset_name": "kor_nlu"}}, "download_checksums": {"https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/snli_1.0_train.ko.tsv": {"num_bytes": 78486224, "checksum": "aaf12d8955fcda2b14319e5cabb9b31824c6f504aede460c975b518911f484c0"}, "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.dev.ko.tsv": {"num_bytes": 511383, "checksum": "821fb1c1c2e538ac2724a6132da36ffa379eb9a542ceb89243b3cf711377382c"}, "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.test.ko.tsv": {"num_bytes": 1032430, "checksum": "0db53cf8e182283a0d0404ec3fe1b15d1e29236d23d22082dc20d43067254b3e"}}, "download_size": 80030037, "post_processing_size": null, "dataset_size": 81501127, "size_in_bytes": 161531164}, "sts": {"description": " The dataset contains data for benchmarking Korean models on NLI and STS\n", "citation": " @article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"genre": {"num_classes": 4, "names": ["main-news", "main-captions", "main-forum", "main-forums"], "names_file": null, "id": null, "_type": "ClassLabel"}, "filename": {"num_classes": 9, "names": ["images", "MSRpar", "MSRvid", "headlines", "deft-forum", "deft-news", "track5.en-en", "answers-forums", "answer-answer"], "names_file": null, "id": null, "_type": "ClassLabel"}, "year": {"num_classes": 7, "names": ["2017", "2016", "2013", "2012train", "2014", "2015", "2012test"], "names_file": null, "id": null, "_type": "ClassLabel"}, "id": {"dtype": "int32", "id": null, "_type": "Value"}, "score": {"dtype": "float32", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "kor_nlu", "config_name": "sts", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1056664, "num_examples": 5703, "dataset_name": "kor_nlu"}, "validation": {"name": "validation", "num_bytes": 305009, "num_examples": 1471, "dataset_name": "kor_nlu"}, "test": {"name": "test", "num_bytes": 249671, "num_examples": 1379, "dataset_name": "kor_nlu"}}, "download_checksums": {"https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-train.tsv": {"num_bytes": 1046952, "checksum": "b5aaa7f957d6ff46f4b6834a8b0f024a9234a6eefe9289aed66746b4533da3b8"}, "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-dev.tsv": {"num_bytes": 307276, "checksum": "d835b65b424401fbcce6be28a78159df8d61f5b19977ea7a0b83b8cb6105f393"}, "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-test.tsv": {"num_bytes": 249596, "checksum": "13f83a3440d9db52d134eb40303ce021b03264aa338fbf2871c8786ce04d2105"}}, "download_size": 1603824, "post_processing_size": null, "dataset_size": 1611344, "size_in_bytes": 3215168}}
dummy/nli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfd85137ae7efd6141804a88c5eda1d0c9889f45c6b13012aa9ab9d84025f2b6
+ size 1576
dummy/sts/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ed0cbff3b8172e3ec4a6ec94e65bd4a1ecabf8081df5a4e7f8c10cc4e8017f1
+ size 1310
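
Both `dummy_data.zip` entries are stored through Git LFS, so the diff shows three-line pointer files (`version`, `oid`, `size`) rather than the archives themselves. A sketch of reading such a pointer (the helper below is illustrative, not part of the repo):

```python
# Parse a Git LFS pointer file into a dict, e.g.
# {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "1576"}.
def parse_lfs_pointer(path):
    with open(path, encoding="utf-8") as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

pointer = parse_lfs_pointer("dummy/nli/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])
```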
kor_nlu.py ADDED
@@ -0,0 +1,154 @@
+ """Korean Dataset for NLI and STS"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+
+ import pandas as pd
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{ham2020kornli,
+     title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
+     author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
+     journal={arXiv preprint arXiv:2004.03289},
+     year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The dataset contains data for benchmarking Korean models on NLI and STS
+ """
+
+ _URL = "https://github.com/kakaobrain/KorNLUDatasets"
+
+ _DATA_URLS = {
+     "nli": {
+         # 'mnli-train': 'https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/multinli.train.ko.tsv',
+         "snli-train": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/snli_1.0_train.ko.tsv",
+         "xnli-dev": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.dev.ko.tsv",
+         "xnli-test": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorNLI/xnli.test.ko.tsv",
+     },
+     "sts": {
+         "train": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-train.tsv",
+         "dev": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-dev.tsv",
+         "test": "https://raw.githubusercontent.com/kakaobrain/KorNLUDatasets/master/KorSTS/sts-test.tsv",
+     },
+ }
+
+
+ class KorNluConfig(datasets.BuilderConfig):
+     """BuilderConfig for KorNLU"""
+
+     def __init__(self, description, data_url, citation, url, **kwargs):
+         """
+         Args:
+             description: `string`, brief description of the dataset
+             data_url: `dictionary`, dict with url for each split of data.
+             citation: `string`, citation for the dataset.
+             url: `string`, url for information about the dataset.
+             **kwargs: keyword arguments forwarded to super
+         """
+         super(KorNluConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+         self.description = description
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ class KorNlu(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         KorNluConfig(name=name, description=_DESCRIPTION, data_url=_DATA_URLS[name], citation=_CITATION, url=_URL)
+         for name in ["nli", "sts"]
+     ]
+     BUILDER_CONFIG_CLASS = KorNluConfig
+
+     def _info(self):
+         features = {}
+         if self.config.name == "nli":
+             labels = ["entailment", "neutral", "contradiction"]
+             features["premise"] = datasets.Value("string")
+             features["hypothesis"] = datasets.Value("string")
+             features["label"] = datasets.features.ClassLabel(names=labels)
+
+         if self.config.name == "sts":
+             genre = ["main-news", "main-captions", "main-forum", "main-forums"]
+             filename = [
+                 "images",
+                 "MSRpar",
+                 "MSRvid",
+                 "headlines",
+                 "deft-forum",
+                 "deft-news",
+                 "track5.en-en",
+                 "answers-forums",
+                 "answer-answer",
+             ]
+             year = ["2017", "2016", "2013", "2012train", "2014", "2015", "2012test"]
+
+             features["genre"] = datasets.features.ClassLabel(names=genre)
+             features["filename"] = datasets.features.ClassLabel(names=filename)
+             features["year"] = datasets.features.ClassLabel(names=year)
+             features["id"] = datasets.Value("int32")
+             features["score"] = datasets.Value("float32")
+             features["sentence1"] = datasets.Value("string")
+             features["sentence2"] = datasets.Value("string")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION, features=datasets.Features(features), homepage=_URL, citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.name == "nli":
+             # mnli_train = dl_manager.download_and_extract(self.config.data_url['mnli-train'])
+             snli_train = dl_manager.download_and_extract(self.config.data_url["snli-train"])
+             xnli_dev = dl_manager.download_and_extract(self.config.data_url["xnli-dev"])
+             xnli_test = dl_manager.download_and_extract(self.config.data_url["xnli-test"])
+
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN, gen_kwargs={"filepath": snli_train, "split": "train"}
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION, gen_kwargs={"filepath": xnli_dev, "split": "dev"}
+                 ),
+                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": xnli_test, "split": "test"}),
+             ]
+
+         if self.config.name == "sts":
+             train = dl_manager.download_and_extract(self.config.data_url["train"])
+             dev = dl_manager.download_and_extract(self.config.data_url["dev"])
+             test = dl_manager.download_and_extract(self.config.data_url["test"])
+
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
+                 datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
+                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
+             ]
+
+     def _generate_examples(self, filepath, split):
+         if self.config.name == "nli":
+             df = pd.read_csv(filepath, sep="\t")
+             df = df.dropna()
+             for id_, row in df.iterrows():
+                 yield id_, {
+                     "premise": str(row["sentence1"]),
+                     "hypothesis": str(row["sentence2"]),
+                     "label": str(row["gold_label"]),
+                 }
+
+         if self.config.name == "sts":
+             with open(filepath, encoding="utf-8") as f:
+                 data = csv.DictReader(f, delimiter="\t")
+                 for id_, row in enumerate(data):
+                     yield id_, {
+                         "genre": row["genre"],
+                         "filename": row["filename"],
+                         "year": row["year"],
+                         "id": row["id"],
+                         "sentence1": row["sentence1"],
+                         "sentence2": row["sentence2"],
+                         "score": row["score"],
+                     }
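
To try the builder before it ships, the script can also be run from a local path, since `datasets` accepts a path to a loading script in place of a hub name. A sketch; the checksum helper is included only to illustrate how the `download_checksums` in `dataset_infos.json` could be spot-checked against a cached file (the exact cache path is not specified here):

```python
import hashlib

from datasets import load_dataset

# Build the STS config straight from the local script; both TSVs are downloaded.
sts = load_dataset("./kor_nlu.py", "sts")
print(sts)

# sha256 of a downloaded TSV, for comparison with dataset_infos.json.
def sha256sum(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()
```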