Datasets: trec
Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: expert-generated
Annotations Creators: expert-generated
Source Datasets: original
License: unspecified
Commit 748e977, committed by system (HF staff)
0 parent(s) (initial commit)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4):
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/1.1.0/dummy_data.zip +3 -0
  4. trec.py +170 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "The Text REtrieval Conference (TREC) Question Classification dataset contains 5,500 labeled questions in the training set and another 500 in the test set. The dataset has 6 coarse class labels and 47 fine (level-2) labels. The average sentence length is 10 words and the vocabulary size is 8,700.\n\nThe data are collected from four sources: 4,500 English questions published by USC (Hovy et al., 2001), about 500 manually constructed questions for a few rare classes, 894 TREC 8 and TREC 9 questions, and 500 questions from TREC 10, which serve as the test set.\n", "citation": "@inproceedings{li-roth-2002-learning,\n title = \"Learning Question Classifiers\",\n author = \"Li, Xin and\n Roth, Dan\",\n booktitle = \"{COLING} 2002: The 19th International Conference on Computational Linguistics\",\n year = \"2002\",\n url = \"https://www.aclweb.org/anthology/C02-1150\",\n}\n@inproceedings{hovy-etal-2001-toward,\n title = \"Toward Semantics-Based Answer Pinpointing\",\n author = \"Hovy, Eduard and\n Gerber, Laurie and\n Hermjakob, Ulf and\n Lin, Chin-Yew and\n Ravichandran, Deepak\",\n booktitle = \"Proceedings of the First International Conference on Human Language Technology Research\",\n year = \"2001\",\n url = \"https://www.aclweb.org/anthology/H01-1069\",\n}\n", "homepage": "https://cogcomp.seas.upenn.edu/Data/QA/QC/", "license": "", "features": {"label-coarse": {"num_classes": 6, "names": ["DESC", "ENTY", "ABBR", "HUM", "NUM", "LOC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "label-fine": {"num_classes": 47, "names": ["manner", "cremat", "animal", "exp", "ind", "gr", "title", "def", "date", "reason", "event", "state", "desc", "count", "other", "letter", "religion", "food", "country", "color", "termeq", "city", "body", "dismed", "mount", "money", "product", "period", "substance", "sport", "plant", "techmeth", "volsize", "instru", "abb", "speed", "word", "lang", "perc", "code", "dist", "temp", "symbol", "ord", "veh", "weight", "currency"], "names_file": null, "id": null, "_type": "ClassLabel"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "trec", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 385090, "num_examples": 5452, "dataset_name": "trec"}, "test": {"name": "test", "num_bytes": 27983, "num_examples": 500, "dataset_name": "trec"}}, "download_checksums": {"http://cogcomp.org/Data/QA/QC/train_5500.label": {"num_bytes": 335858, "checksum": "9e4c8bdcaffb96ed61041bd64b564183d52793a8e91d84fc3a8646885f466ec3"}, "http://cogcomp.org/Data/QA/QC/TREC_10.label": {"num_bytes": 23354, "checksum": "033f22c028c2bbba9ca682f68ffe204dc1aa6e1cf35dd6207f2d4ca67f0d0e8e"}}, "download_size": 359212, "dataset_size": 413073, "size_in_bytes": 772285}}
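
For orientation, here is a minimal sketch of how this metadata surfaces to a user of the datasets library (assuming the library is installed; the "label-coarse"/"label-fine" feature names are the ones defined by this revision of trec.py below and may differ in later revisions):

import datasets

# Load the dataset built by this script; split sizes match the metadata above
# (train: 5452 examples, test: 500 examples).
ds = datasets.load_dataset("trec")

# "label-coarse" is a ClassLabel carrying the 6 names recorded in dataset_infos.json.
coarse = ds["train"].features["label-coarse"]
print(coarse.names)                                    # ['DESC', 'ENTY', 'ABBR', 'HUM', 'NUM', 'LOC']
print(coarse.int2str(ds["train"][0]["label-coarse"]))  # decode an integer label back to its name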
dummy/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cceb4211ca3ed001c69c675b3123d7ffc3ec6679e9c28a53f110024ff8d2dd85
+ size 861
trec.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The Text REtrieval Conference (TREC) Question Classification dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{li-roth-2002-learning,
+     title = "Learning Question Classifiers",
+     author = "Li, Xin and
+       Roth, Dan",
+     booktitle = "{COLING} 2002: The 19th International Conference on Computational Linguistics",
+     year = "2002",
+     url = "https://www.aclweb.org/anthology/C02-1150",
+ }
+ @inproceedings{hovy-etal-2001-toward,
+     title = "Toward Semantics-Based Answer Pinpointing",
+     author = "Hovy, Eduard and
+       Gerber, Laurie and
+       Hermjakob, Ulf and
+       Lin, Chin-Yew and
+       Ravichandran, Deepak",
+     booktitle = "Proceedings of the First International Conference on Human Language Technology Research",
+     year = "2001",
+     url = "https://www.aclweb.org/anthology/H01-1069",
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Text REtrieval Conference (TREC) Question Classification dataset contains 5,500 labeled questions in the training set and another 500 in the test set. The dataset has 6 coarse class labels and 47 fine (level-2) labels. The average sentence length is 10 words and the vocabulary size is 8,700.
+
+ The data are collected from four sources: 4,500 English questions published by USC (Hovy et al., 2001), about 500 manually constructed questions for a few rare classes, 894 TREC 8 and TREC 9 questions, and 500 questions from TREC 10, which serve as the test set.
+ """
+
+ _URLs = {
+     "train": "http://cogcomp.org/Data/QA/QC/train_5500.label",
+     "test": "http://cogcomp.org/Data/QA/QC/TREC_10.label",
+ }
+
+ _COARSE_LABELS = ["DESC", "ENTY", "ABBR", "HUM", "NUM", "LOC"]
+
+ _FINE_LABELS = [
+     "manner",
+     "cremat",
+     "animal",
+     "exp",
+     "ind",
+     "gr",
+     "title",
+     "def",
+     "date",
+     "reason",
+     "event",
+     "state",
+     "desc",
+     "count",
+     "other",
+     "letter",
+     "religion",
+     "food",
+     "country",
+     "color",
+     "termeq",
+     "city",
+     "body",
+     "dismed",
+     "mount",
+     "money",
+     "product",
+     "period",
+     "substance",
+     "sport",
+     "plant",
+     "techmeth",
+     "volsize",
+     "instru",
+     "abb",
+     "speed",
+     "word",
+     "lang",
+     "perc",
+     "code",
+     "dist",
+     "temp",
+     "symbol",
+     "ord",
+     "veh",
+     "weight",
+     "currency",
+ ]
+
+
+ class Trec(datasets.GeneratorBasedBuilder):
+     """The TREC Question Classification dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         # Specifies the datasets.DatasetInfo object.
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "label-coarse": datasets.ClassLabel(names=_COARSE_LABELS),
+                     "label-fine": datasets.ClassLabel(names=_FINE_LABELS),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://cogcomp.seas.upenn.edu/Data/QA/QC/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Downloads the data and defines the splits.
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs.
+         dl_files = dl_manager.download_and_extract(_URLs)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": dl_files["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": dl_files["test"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields (key, example) tuples from the dataset."""
+         # Each line holds a "coarse:fine" label, a space, then the question text.
+         with open(filepath, "rb") as f:
+             for id_, row in enumerate(f):
+                 # The raw data contain one stray non-ASCII byte (0xf0, between
+                 # "sister" and "city"); replace it with a space before decoding.
+                 label, _, text = row.replace(b"\xf0", b" ").strip().decode().partition(" ")
+                 coarse_label, _, fine_label = label.partition(":")
+                 yield id_, {
+                     "label-coarse": coarse_label,
+                     "label-fine": fine_label,
+                     "text": text,
+                 }
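
For reference, a standalone sketch of the line-parsing step in _generate_examples above. The sample line is illustrative of the label-file format ("coarse:fine" label, one space, then the question) and is not taken from this commit:

# Sketch of the parsing logic from _generate_examples, run on one example line.
raw = b"NUM:dist How far is it from Denver to Aspen ?"

# Patch the stray 0xf0 byte (a no-op on this line), strip, decode, and split
# the leading "coarse:fine" label from the question text.
label, _, text = raw.replace(b"\xf0", b" ").strip().decode().partition(" ")
coarse_label, _, fine_label = label.partition(":")

print(coarse_label)  # NUM
print(fine_label)    # dist
print(text)          # How far is it from Denver to Aspen ?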