system HF staff committed on
Commit
207ed8c
0 parents

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4)
  1. .gitattributes +27 -0
  2. art.py +117 -0
  3. dataset_infos.json +1 -0
  4. dummy/anli/0.1.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
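
The patterns above route large binary artifacts (archives, Arrow tables, serialized model files, TensorBoard event logs) through Git LFS, so the repository stores small text pointers instead of the blobs themselves. As a rough illustration, here is a minimal Python sketch of the matching these rules perform; it uses fnmatch-style globbing, which only approximates real gitattributes semantics (for example, saved_model/**/* is a full-path glob and is omitted here). The helper is hypothetical, not part of the commit:

    import fnmatch

    # Subset of the LFS patterns declared above (basename patterns only).
    LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*.parquet", "*tfevents*"]

    def is_lfs_tracked(path: str) -> bool:
        """Approximate check: does this path match any LFS-tracked pattern?"""
        name = path.rsplit("/", 1)[-1]  # slash-free gitattributes patterns match the basename
        return any(fnmatch.fnmatch(name, pattern) for pattern in LFS_PATTERNS)

    print(is_lfs_tracked("dummy/anli/0.1.0/dummy_data.zip"))  # True -> stored as an LFS pointer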
art.py ADDED
@@ -0,0 +1,117 @@
+ """TODO(art): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(art): BibTeX citation
+ _CITATION = """\
+ @InProceedings{anli,
+ author = {Chandra, Bhagavatula and Ronan, Le Bras and Chaitanya, Malaviya and Keisuke, Sakaguchi and Ari, Holtzman
+ and Hannah, Rashkin and Doug, Downey and Scott, Wen-tau Yih and Yejin, Choi},
+ title = {Abductive Commonsense Reasoning},
+ year = {2020}
+ }"""
+
+ # TODO(art):
+ _DESCRIPTION = """\
+ the Abductive Natural Language Inference Dataset from AI2
+ """
+ _DATA_URL = "https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip"
+
+
+ class ArtConfig(datasets.BuilderConfig):
+     """BuilderConfig for Art."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Art.
+         Args:
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(ArtConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
+
+
+ class Art(datasets.GeneratorBasedBuilder):
+     """TODO(art): Short description of my dataset."""
+
+     # TODO(art): Set up version.
+     VERSION = datasets.Version("0.1.0")
+     BUILDER_CONFIGS = [
+         ArtConfig(
+             name="anli",
+             description="""\
+             the Abductive Natural Language Inference Dataset from AI2.
+             """,
+         ),
+     ]
+
+     def _info(self):
+         # TODO(art): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "observation_1": datasets.Value("string"),
+                     "observation_2": datasets.Value("string"),
+                     "hypothesis_1": datasets.Value("string"),
+                     "hypothesis_2": datasets.Value("string"),
+                     "label": datasets.features.ClassLabel(num_classes=3),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://leaderboard.allenai.org/anli/submissions/get-started",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(art): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         dl_dir = dl_manager.download_and_extract(_DATA_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir, "dev.jsonl"),
+                     "labelpath": os.path.join(dl_dir, "dev-labels.lst"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir, "train.jsonl"),
+                     "labelpath": os.path.join(dl_dir, "train-labels.lst"),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, labelpath):
+         """Yields examples."""
+         # TODO(art): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = [json.loads(line) for line in f]
+         labels = []
+         with open(labelpath, encoding="utf-8") as f:
+             for word in f:
+                 labels.append(word.strip())  # drop the trailing newline so ClassLabel can encode "1"/"2"
+         for idx, row in enumerate(data):
+             yield idx, {
+                 "observation_1": row["obs1"],
+                 "observation_2": row["obs2"],
+                 "hypothesis_1": row["hyp1"],
+                 "hypothesis_2": row["hyp2"],
+                 "label": labels[idx],
+             }
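
Once a script like this ships with the datasets library, the dataset is loadable by name. A usage sketch, assuming datasets >= 1.0.0 is installed and the script is registered under the name "art" (the fields and split sizes follow the Features declared in _info and the counts recorded in dataset_infos.json below):

    from datasets import load_dataset

    # Load the "anli" config defined in BUILDER_CONFIGS above.
    dataset = load_dataset("art", "anli")
    print(dataset)  # train (169,654 rows) and validation (1,532 rows) splits

    example = dataset["validation"][0]
    print(example["observation_1"], example["observation_2"])
    print(example["hypothesis_1"], example["hypothesis_2"])

    # "label" is a ClassLabel(num_classes=3), stored as an integer.
    label_feature = dataset["validation"].features["label"]
    print(label_feature.int2str(example["label"]))  # e.g. "1" or "2"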
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"anli": {"description": "the Abductive Natural Language Inference Dataset from AI2\n", "citation": "@InProceedings{anli,\n author = \"Chandra, Bhagavatula\n and Ronan, Le Bras\n and Chaitanya, Malaviya\n and Keisuke, Sakaguchi\n and Ari, Holtzman\n and Hannah, Rashkin\n and Doug, Downey\n and Scott, Wen-tau Yih\n and Yejin, Choi\",\n title = \"Abductive Commonsense Reasoning\",\n year = \"2020\",\n}", "homepage": "https://leaderboard.allenai.org/anli/submissions/get-started", "license": "", "features": {"observation_1": {"dtype": "string", "id": null, "_type": "Value"}, "observation_2": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_1": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["0", "1", "2"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "art", "config_name": "anli", "version": {"version_str": "0.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34046304, "num_examples": 169654, "dataset_name": "art"}, "validation": {"name": "validation", "num_bytes": 312314, "num_examples": 1532, "dataset_name": "art"}}, "download_checksums": {"https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip": {"num_bytes": 5118294, "checksum": "24840b27553e93ec625ae020dbf78d92daeae4be31ebbd469a0c9f6f99ed1c8d"}}, "download_size": 5118294, "dataset_size": 34358618, "size_in_bytes": 39476912}}
dummy/anli/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0357021236ee212b10158db9d335a12b5430657198b841a3f66afae41996534a
+ size 1439
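
Because dummy_data.zip matches the *.zip rule in .gitattributes, the repository holds only this three-line LFS pointer; the actual 1,439-byte archive (used to exercise the loading script in the library's tests) lives in LFS storage. A small sketch that parses such a pointer into its fields (hypothetical helper, not part of the commit):

    POINTER = """\
    version https://git-lfs.github.com/spec/v1
    oid sha256:0357021236ee212b10158db9d335a12b5430657198b841a3f66afae41996534a
    size 1439
    """

    def parse_lfs_pointer(text: str) -> dict:
        """Split each 'key value' line of a Git LFS pointer file."""
        return dict(line.split(" ", 1) for line in text.strip().splitlines())

    fields = parse_lfs_pointer(POINTER)
    print(fields["oid"])        # the sha256 object id of the real file
    print(int(fields["size"]))  # 1439 bytes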