system (HF Staff) committed on
Commit 458a04a
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
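
These patterns route large binary artifacts (archives, model weights, Arrow tables, TensorBoard event files) through Git LFS rather than storing them in the repository directly. As a minimal sketch, not part of the commit, here is how such glob patterns classify filenames in Python; note that fnmatch only approximates gitattributes semantics (it gives no special treatment to `**` or path separators), so treat it as illustrative:

from fnmatch import fnmatch

# Excerpt of the patterns above; the full list has 27 entries.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def is_lfs_tracked(filename):
    """Return True if any LFS pattern matches the filename."""
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

for name in ["dummy_data.zip", "kor_nli.py", "events.out.tfevents.123"]:
    print(name, is_lfs_tracked(name))
# dummy_data.zip True / kor_nli.py False / events.out.tfevents.123 True
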
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"multi_nli": {"description": " Korean Natural Language Inference datasets\n", "citation": "@article{ham2020kornli,\n title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},\n author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},\n journal={arXiv preprint arXiv:2004.03289},\n year={2020}\n}\n", "homepage": "https://github.com/kakaobrain/KorNLUDatasets", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "kor_nli", "config_name": "multi_nli", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 87169599, "num_examples": 385494, "dataset_name": "kor_nli"}}, "download_checksums": {"https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip": {"num_bytes": 42113232, "checksum": "b1184d5e78a7d988400eabe3374b8a7e2abf182896f54e6e311c5173bb2c9bf5"}}, "download_size": 42113232, "dataset_size": 87169599, "size_in_bytes": 129282831}}
dummy/multi_nli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63ea1d99733461e2228db79f7cbe91f13d65a43235ab01b27100b1fa08079ead
+ size 1565
dummy/snli/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dd0e7f1ce2e481a25344b31bcd5e4c69dcd900ec3d5fb74bf449caa99ae5259
+ size 1259
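
Both dummy_data.zip files are stored as Git LFS pointers: three plain-text lines giving the pointer spec version, the SHA-256 object id of the real blob, and its size in bytes. A minimal sketch, not part of the commit, that parses such a pointer; the path assumes a local checkout where the pointer text is on disk:

def parse_lfs_pointer(path):
    """Split each 'key value' line of a Git LFS pointer file."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("dummy/multi_nli/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # sha256:63ea... 1565
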
kor_nli.py ADDED
@@ -0,0 +1,111 @@
+ """KorNLI: Korean Natural Language Inference datasets."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{ham2020kornli,
+   title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
+   author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
+   journal={arXiv preprint arXiv:2004.03289},
+   year={2020}
+ }
+ """
+
+ _DESCRIPTION = """ Korean Natural Language Inference datasets
+ """
+ _URL = "https://github.com/kakaobrain/KorNLUDatasets/archive/master.zip"
+
+
+ class KorNLIConfig(datasets.BuilderConfig):
+     """BuilderConfig for KorNLI."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for KorNLI.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # All three configs share version 1.0.0 of the KorNLU release.
+         super(KorNLIConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+
+
+ class KorNli(datasets.GeneratorBasedBuilder):
+     """Korean Natural Language Inference datasets."""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         KorNLIConfig(name="multi_nli", description="Korean MultiNLI dataset"),
+         KorNLIConfig(name="snli", description="Korean SNLI dataset"),
+         KorNLIConfig(name="xnli", description="Korean XNLI dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # Every example is a premise/hypothesis pair with a string label.
+             features=datasets.Features(
+                 {
+                     "sentence1": datasets.Value("string"),
+                     "sentence2": datasets.Value("string"),
+                     "gold_label": datasets.Value("string"),
+                 }
+             ),
+             # There is no canonical (input, target) tuple, so as_supervised
+             # is not supported.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/kakaobrain/KorNLUDatasets",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager that can be used
+         # to download and extract URLs.
+         dl_dir = dl_manager.download_and_extract(_URL)
+         dl_dir = os.path.join(dl_dir, "KorNLUDatasets-master", "KorNLI")
+         if self.config.name == "multi_nli":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "multinli.train.ko.tsv")},
+                 ),
+             ]
+         elif self.config.name == "snli":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "snli_1.0_train.ko.tsv")},
+                 ),
+             ]
+         else:
+             # The Korean XNLI translation ships only dev and test splits.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.dev.ko.tsv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.test.ko.tsv")},
+                 ),
+             ]
+
+     def _generate_examples(self, filepath):
+         """Yields (key, example) tuples read from a tab-separated file."""
+         with open(filepath, encoding="utf-8") as f:
+             data = csv.DictReader(f, dialect="excel-tab")
+             for id_, row in enumerate(data):
+                 # Skip malformed rows where stray tabs yield extra fields.
+                 if len(row) != 3:
+                     continue
+                 yield id_, row
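
With the script published under the kor_nli name, each config loads independently; a usage sketch (split names follow _split_generators above: multi_nli and snli expose only train, while xnli exposes validation and test):

from datasets import load_dataset

multi_nli = load_dataset("kor_nli", "multi_nli")  # train split only
xnli = load_dataset("kor_nli", "xnli")            # validation + test
print(multi_nli["train"][0])
# {'sentence1': '...', 'sentence2': '...', 'gold_label': '...'}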