Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: expert-generated
Source Datasets: original
albertvillanova committed
Commit 155b9c7
1 Parent(s): 5f6919a

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (79cc05a408d566ef67222afd05c4728032295384)
- Add plus data files (3731e6933c98beb3bb53342ff11518de99c22f71)
- Add small data files (c78dcdda8e141f0572e2664fa901140c9a3a602f)
- Delete loading script (161a65568f7f274a37e586b41e743a85ee8a48aa)
- Delete data file (c0f23d8450ff2747ae8ccce9f93f659ec885bf07)
- Delete data file (23d3c66a0090ce1f613643e327fc3e4b1ac012ba)
- Delete data file (76be542076d36165b3b9fc8ce0d6d15a0d1b1875)
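
For context, converting a script-based dataset to Parquet is typically a load-and-push operation. Below is a minimal sketch, assuming the `clinc_oos` repo id and Hub write access; it is an illustration of the general workflow, not a record of how this commit was produced.

```python
# Illustrative only: loading each config and pushing it back writes Parquet
# shards such as plus/train-00000-of-00001.parquet and records the `configs`
# mapping in the README metadata. Assumes the `datasets` library is installed
# and you are authenticated to the Hub.
from datasets import load_dataset

for config in ["small", "imbalanced", "plus"]:
    ds = load_dataset("clinc_oos", config)           # DatasetDict: train/validation/test
    ds.push_to_hub("clinc_oos", config_name=config)  # uploads <config>/<split>-*.parquet
```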

README.md CHANGED
@@ -189,7 +189,7 @@ dataset_info:
    - name: test
      num_bytes: 286966
      num_examples: 5500
-   download_size: 246833
+   download_size: 441918
    dataset_size: 994165
  - config_name: plus
    features:
@@ -360,7 +360,7 @@ dataset_info:
    - name: test
      num_bytes: 286966
      num_examples: 5500
-   download_size: 291179
+   download_size: 525729
    dataset_size: 1238511
  - config_name: small
    features:
@@ -531,8 +531,33 @@ dataset_info:
    - name: test
      num_bytes: 286966
      num_examples: 5500
-   download_size: 216522
+   download_size: 385185
    dataset_size: 841388
+ configs:
+ - config_name: imbalanced
+   data_files:
+   - split: train
+     path: imbalanced/train-*
+   - split: validation
+     path: imbalanced/validation-*
+   - split: test
+     path: imbalanced/test-*
+ - config_name: plus
+   data_files:
+   - split: train
+     path: plus/train-*
+   - split: validation
+     path: plus/validation-*
+   - split: test
+     path: plus/test-*
+ - config_name: small
+   data_files:
+   - split: train
+     path: small/train-*
+   - split: validation
+     path: small/validation-*
+   - split: test
+     path: small/test-*
  ---

  # Dataset Card for CLINC150
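
With the `configs` mapping above in the card metadata, the `datasets` library resolves each configuration straight from the Parquet shards, so no loading script is required. A minimal usage sketch (the `clinc_oos` repo id is assumed):

```python
from datasets import load_dataset

# Each config name maps to the Parquet files declared under `data_files` above.
ds = load_dataset("clinc_oos", "plus")  # also available: "small", "imbalanced"
print(ds)                # DatasetDict with train / validation / test splits
print(ds["test"][0])     # {'text': ..., 'intent': ...}
```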
clinc_oos.py DELETED
@@ -1,275 +0,0 @@
- """An Evaluation Dataset for Intent Classification and Out-of-Scope Prediction"""
-
-
- import json
- import textwrap
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{larson-etal-2019-evaluation,
-     title = "An Evaluation Dataset for Intent Classification and Out-of-Scope Prediction",
-     author = "Larson, Stefan and
-       Mahendran, Anish and
-       Peper, Joseph J. and
-       Clarke, Christopher and
-       Lee, Andrew and
-       Hill, Parker and
-       Kummerfeld, Jonathan K. and
-       Leach, Kevin and
-       Laurenzano, Michael A. and
-       Tang, Lingjia and
-       Mars, Jason",
-     booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
-     year = "2019",
-     url = "https://www.aclweb.org/anthology/D19-1131"
- }
- """
-
- _DESCRIPTION = """\
- This dataset is for evaluating the performance of intent classification systems in the
- presence of "out-of-scope" queries. By "out-of-scope", we mean queries that do not fall
- into any of the system-supported intent classes. Most datasets include only data that is
- "in-scope". Our dataset includes both in-scope and out-of-scope data. You might also know
- the term "out-of-scope" by other terms, including "out-of-domain" or "out-of-distribution".
- """
-
- _DESCRIPTIONS = {
-     "small": textwrap.dedent(
-         """\
-         Small, in which there are only 50 training queries per each in-scope intent
-         """
-     ),
-     "imbalanced": textwrap.dedent(
-         """\
-         Imbalanced, in which intents have either 25, 50, 75, or 100 training queries.
-         """
-     ),
-     "plus": textwrap.dedent(
-         """\
-         OOS+, in which there are 250 out-of-scope training examples, rather than 100.
-         """
-     ),
- }
-
- _URL = "https://github.com/clinc/oos-eval/"
-
- # Source:
- # - https://raw.githubusercontent.com/clinc/oos-eval/master/data/data_small.json
- # - https://raw.githubusercontent.com/clinc/oos-eval/master/data/data_imbalanced.json
- # - https://raw.githubusercontent.com/clinc/oos-eval/master/data/data_oos_plus.json
- _DATA_URLS = {
-     "small": "data/data_small.json.gz",
-     "imbalanced": "data/data_imbalanced.json.gz",
-     "plus": "data/data_oos_plus.json.gz",
- }
-
-
- class ClincConfig(datasets.BuilderConfig):
-
-     """BuilderConfig for CLINC150"""
-
-     def __init__(self, description, data_url, citation, url, **kwargs):
-         """
-         Args:
-             description: `string`, brief description of the dataset
-             data_url: `dictionary`, dict with url for each split of data.
-             citation: `string`, citation for the dataset.
-             url: `string`, url for information about the dataset.
-             **kwargs: keyword arguments forwarded to super
-         """
-         super(ClincConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.description = description
-         self.data_url = data_url
-         self.citation = citation
-         self.url = url
-
-
- class ClincOos(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         ClincConfig(
-             name=name, description=_DESCRIPTIONS[name], data_url=_DATA_URLS[name], citation=_CITATION, url=_URL
-         )
-         for name in ["small", "imbalanced", "plus"]
-     ]
-
-     def _info(self):
-         features = {}
-         features["text"] = datasets.Value("string")
-         labels_list = [
-             "restaurant_reviews",
-             "nutrition_info",
-             "account_blocked",
-             "oil_change_how",
-             "time",
-             "weather",
-             "redeem_rewards",
-             "interest_rate",
-             "gas_type",
-             "accept_reservations",
-             "smart_home",
-             "user_name",
-             "report_lost_card",
-             "repeat",
-             "whisper_mode",
-             "what_are_your_hobbies",
-             "order",
-             "jump_start",
-             "schedule_meeting",
-             "meeting_schedule",
-             "freeze_account",
-             "what_song",
-             "meaning_of_life",
-             "restaurant_reservation",
-             "traffic",
-             "make_call",
-             "text",
-             "bill_balance",
-             "improve_credit_score",
-             "change_language",
-             "no",
-             "measurement_conversion",
-             "timer",
-             "flip_coin",
-             "do_you_have_pets",
-             "balance",
-             "tell_joke",
-             "last_maintenance",
-             "exchange_rate",
-             "uber",
-             "car_rental",
-             "credit_limit",
-             "oos",
-             "shopping_list",
-             "expiration_date",
-             "routing",
-             "meal_suggestion",
-             "tire_change",
-             "todo_list",
-             "card_declined",
-             "rewards_balance",
-             "change_accent",
-             "vaccines",
-             "reminder_update",
-             "food_last",
-             "change_ai_name",
-             "bill_due",
-             "who_do_you_work_for",
-             "share_location",
-             "international_visa",
-             "calendar",
-             "translate",
-             "carry_on",
-             "book_flight",
-             "insurance_change",
-             "todo_list_update",
-             "timezone",
-             "cancel_reservation",
-             "transactions",
-             "credit_score",
-             "report_fraud",
-             "spending_history",
-             "directions",
-             "spelling",
-             "insurance",
-             "what_is_your_name",
-             "reminder",
-             "where_are_you_from",
-             "distance",
-             "payday",
-             "flight_status",
-             "find_phone",
-             "greeting",
-             "alarm",
-             "order_status",
-             "confirm_reservation",
-             "cook_time",
-             "damaged_card",
-             "reset_settings",
-             "pin_change",
-             "replacement_card_duration",
-             "new_card",
-             "roll_dice",
-             "income",
-             "taxes",
-             "date",
-             "who_made_you",
-             "pto_request",
-             "tire_pressure",
-             "how_old_are_you",
-             "rollover_401k",
-             "pto_request_status",
-             "how_busy",
-             "application_status",
-             "recipe",
-             "calendar_update",
-             "play_music",
-             "yes",
-             "direct_deposit",
-             "credit_limit_change",
-             "gas",
-             "pay_bill",
-             "ingredients_list",
-             "lost_luggage",
-             "goodbye",
-             "what_can_i_ask_you",
-             "book_hotel",
-             "are_you_a_bot",
-             "next_song",
-             "change_speed",
-             "plug_type",
-             "maybe",
-             "w2",
-             "oil_change_when",
-             "thank_you",
-             "shopping_list_update",
-             "pto_balance",
-             "order_checks",
-             "travel_alert",
-             "fun_fact",
-             "sync_device",
-             "schedule_maintenance",
-             "apr",
-             "transfer",
-             "ingredient_substitution",
-             "calories",
-             "current_location",
-             "international_fees",
-             "calculator",
-             "definition",
-             "next_holiday",
-             "update_playlist",
-             "mpg",
-             "min_payment",
-             "change_user_name",
-             "restaurant_suggestion",
-             "travel_notification",
-             "cancel",
-             "pto_used",
-             "travel_suggestion",
-             "change_volume",
-         ]
-         features["intent"] = datasets.ClassLabel(names=labels_list)
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION + "\n" + self.config.description,
-             features=datasets.Features(features),
-             homepage=self.config.url,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         file_ = dl_manager.download_and_extract(self.config.data_url)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_, "split": "train"}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": file_, "split": "val"}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": file_, "split": "test"}),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         with open(filepath, encoding="utf-8") as f:
-             j = json.load(f)
-             for id_, row in enumerate(j[split] + j["oos_" + split]):
-                 yield id_, {"text": row[0], "intent": row[1]}
data/data_imbalanced.json.gz → imbalanced/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:721c11eb7205c67cdfbf59c7748125b5ed7b9648a8cd3820e6f3b58e8ae429ba
- size 246833
+ oid sha256:3e60e45b25bf86543aa5df8ba4fcc674114164e6184f0197690648c2908d0102
+ size 135844
data/data_oos_plus.json.gz → imbalanced/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:51c408ab015c311d31f4af5141b7ada0a836b60373f332ed32d37dbbddfa4a9d
- size 291179
+ oid sha256:c9a2315625025628a812c0d8788a822518eacb25898edc2febb08e3b800f1251
+ size 228285
data/data_small.json.gz → imbalanced/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3679c8c6171f5ad79c02849af0882dac0964f79d0cd1c74a7f57994997179765
- size 216522
+ oid sha256:fbd545b46c611c4a7ba4b48cae6c7f09bb5b59f33ff56206ad1cd366c85cdfaa
+ size 77789
plus/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e60e45b25bf86543aa5df8ba4fcc674114164e6184f0197690648c2908d0102
+ size 135844
plus/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30188119cf9f86fc9db27e1c22442d091cb5cb0913c9496f945fe11e7a02a28f
+ size 312096
plus/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbd545b46c611c4a7ba4b48cae6c7f09bb5b59f33ff56206ad1cd366c85cdfaa
+ size 77789
small/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e60e45b25bf86543aa5df8ba4fcc674114164e6184f0197690648c2908d0102
+ size 135844
small/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87e11e91c7dde5ba4b4e99db6248bf39e4f14bf9fe7705ce70bcfae38d505bc1
+ size 171552
small/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbd545b46c611c4a7ba4b48cae6c7f09bb5b59f33ff56206ad1cd366c85cdfaa
+ size 77789
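
Because the splits are now plain Parquet files, they can also be read without the `datasets` library, for example with pandas over the Hub filesystem. This is a minimal sketch; it requires `huggingface_hub` for the hf:// protocol, and the repository path shown is an assumption.

```python
import pandas as pd

# Read one shard straight from the Hub as a DataFrame (assumed repo path).
df = pd.read_parquet("hf://datasets/clinc_oos/small/train-00000-of-00001.parquet")
print(df.head())  # columns: text, intent
```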