system HF staff commited on
Commit
4385888
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - crowdsourced
6
+ languages:
7
+ - en
8
+ licenses:
9
+ - unknown
10
+ multilinguality:
11
+ - monolingual
12
+ size_categories:
13
+ - 1K<n<10K
14
+ source_datasets:
15
+ - original
16
+ task_categories:
17
+ - sequence-modeling
18
+ task_ids:
19
+ - dialogue-modeling
20
+ ---
21
+
22
+ # Dataset Card for CraigslistBargains
23
+
24
+ ## Table of Contents
25
+ - [Dataset Description](#dataset-description)
26
+ - [Dataset Summary](#dataset-summary)
27
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
28
+ - [Languages](#languages)
29
+ - [Dataset Structure](#dataset-structure)
30
+ - [Data Instances](#data-instances)
31
+ - [Data Fields](#data-instances)
32
+ - [Data Splits](#data-instances)
33
+ - [Dataset Creation](#dataset-creation)
34
+ - [Curation Rationale](#curation-rationale)
35
+ - [Source Data](#source-data)
36
+ - [Annotations](#annotations)
37
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
38
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
39
+ - [Social Impact of Dataset](#social-impact-of-dataset)
40
+ - [Discussion of Biases](#discussion-of-biases)
41
+ - [Other Known Limitations](#other-known-limitations)
42
+ - [Additional Information](#additional-information)
43
+ - [Dataset Curators](#dataset-curators)
44
+ - [Licensing Information](#licensing-information)
45
+ - [Citation Information](#citation-information)
46
+
47
+ ## Dataset Description
48
+
49
+ - **Homepage:** [Decoupling Strategy and Generation in Negotiation Dialogues](https://worksheets.codalab.org/worksheets/0x453913e76b65495d8b9730d41c7e0a0c/)
50
+ - **Repository:** [Github: Stanford NLP Cocoa](https://github.com/stanfordnlp/cocoa/tree/master)
51
+ - **Paper:** [Decoupling Strategy and Generation in Negotiation Dialogues](https://arxiv.org/abs/1808.09637)
52
+ - **Leaderboard:** []()
53
+ - **Point of Contact:** [He He](hehe@cs.nyu.edu)
54
+
55
+ ### Dataset Summary
56
+
57
+ We study negotiation dialogues where two agents, a buyer and a seller, negotiate over the price of an item for sale. We collected a dataset of more than 6K negotiation dialogues over multiple categories of products scraped from Craigslist. Our goal is to develop an agent that negotiates with humans through such conversations. The challenge is to handle both the negotiation strategy and the rich language for bargaining. To this end, we develop a modular framework which separates strategy learning from language generation. Specifically, we learn strategies in a coarse dialogue act space and instantiate that into utterances conditioned on dialogue history.
58
+
59
+ ### Supported Tasks and Leaderboards
60
+
61
+
62
+
63
+ ### Languages
64
+
65
+ The text in this dataset is in English.
66
+
67
+ ## Dataset Structure
68
+
69
+ ### Data Instances
70
+
71
+ ```
72
+ {
73
+ 'agent_info': {
74
+ 'Bottomline':
75
+ [
76
+ 'None',
77
+ 'None'
78
+ ],
79
+ 'Role':
80
+ [
81
+ 'buyer',
82
+ 'seller'
83
+ ],
84
+ 'Target':
85
+ [
86
+ 7.0,
87
+ 10.0
88
+ ]
89
+ },
90
+ 'agent_turn':
91
+ [
92
+ 0,
93
+ 1,
94
+ ...
95
+ ],
96
+ 'dialogue_acts': {
97
+ 'intent':
98
+ [
99
+ 'init-price',
100
+ 'unknown',
101
+ ...
102
+ ],
103
+ 'price':
104
+ [
105
+ 5.0,
106
+ -1.0,
107
+ ...
108
+ ]
109
+ },
110
+ 'items': {
111
+ 'Category':
112
+ [
113
+ 'phone',
114
+ 'phone'
115
+ ],
116
+ 'Description':
117
+ [
118
+ 'Charge two devices simultaneously on the go...,
119
+ ...
120
+ ],
121
+ 'Images':
122
+ [
123
+ 'phone/6149527852_0.jpg',
124
+ 'phone/6149527852_0.jpg'
125
+ ],
126
+ 'Price':
127
+ [
128
+ 10.0,
129
+ 10.0
130
+ ],
131
+ 'Title':
132
+ [
133
+ 'Verizon Car Charger with Dual Output Micro USB and ...',
134
+ ...
135
+ ]
136
+ },
137
+ 'utterance':
138
+ [
139
+ 'Hi, not sure if the charger would work for my car...'
140
+ 'It will work...',
141
+ ...
142
+ ]
143
+ }
144
+
145
+ ```
146
+
147
+ ### Data Fields
148
+
149
+
150
+ - `agent_info`: Information about each of the agents taking part in the dialogue
151
+ - `Bottomline`: TBD
152
+ - `Role`: Whether the agent is buyer or seller
153
+ - `Target`: Target price that the buyer/seller wants to hit in the negotiation
154
+ - `agent_turn`: Agent taking the current turn in the dialogue (`int` index corresponding to `Role` above)
155
+ - `dialogue_acts`: Rules-based information about the strategy of each agent for each turn
156
+ - `intent`: The intent of the agent at the particular turn (offer, accept, etc.)
157
+ - `price`: The current item price associated with the intent and turn in the bargaining process. Default value for missing: (`-1`)
158
+ - `items`: Information about the item the agents are bargaining for. **Note that there is an element for each of the fields below for each agent**
159
+ - `Category`: Category of the item
160
+ - `Description`: Description(s) of the item
161
+ - `Images`: (comma delimited) strings of image names of the item
162
+ - `Price`: Price(s) of the item. Default value for missing: (`-1`)
163
+ - `Title`: Title(s) of the item
164
+ - `utterance`: Utterance for each turn in the dialogue, corresponding to the agent in `agent_turns`. The utterance may be an empty string (`''`) for some turns if multiple dialogue acts take place after an utterance (e.g. there are often multiple dialogue acts associated with the closing of the bargaining process after all utterances have completed to describe the conclusion of the bargaining).
165
+
166
+ ### Data Splits
167
+
168
+ This dataset contains three splits, `train`, `validation` and `test`. Note that `test` is not provided with `dialogue_acts` information as described above. To ensure schema consistency across dataset splits, the `dialogue_acts` field in the `test` split is populated with the default values: `{"price": -1.0, "intent": ""}`
169
+
170
+ The counts of examples in each split are as follows:
171
+
172
+ | | Train | Valid | Test |
+ | --- | --- | --- | --- |
173
+ | Input Examples | 5247 | 597 | 838 |
174
+ | Average Dialogue Length | 9.14 | 9.17 | 9.24 |
175
+
176
+ Note that the `test` split does not include the original `dialogue_acts` annotations; that field is filled with default values as described above.
177
+
178
+ ## Dataset Creation
179
+
180
+ From the [source paper](https://arxiv.org/pdf/1808.09637.pdf) for this dataset:
181
+
182
+ > To generate the negotiation scenarios, we
183
+ > scraped postings on sfbay.craigslist.org
184
+ > from the 6 most popular categories (housing, furniture, cars, bikes, phones, and electronics). Each
185
+ > posting produces three scenarios with the buyer’s
186
+ > target prices at 0.5x, 0.7x and 0.9x of the listing
187
+ > price. Statistics of the scenarios are shown in Table 2.
188
+ > We collected 6682 human-human dialogues on
189
+ > AMT using the interface shown in Appendix A
190
+ > Figure 2. The dataset statistics in Table 3 show
191
+ > that CRAIGSLISTBARGAIN has longer dialogues
192
+ > and more diverse utterances compared to prior
193
+ > datasets. Furthermore, workers were encouraged
194
+ > to embellish the item and negotiate side offers
195
+ > such as free delivery or pick-up. This highly relatable scenario leads to richer dialogues such as
196
+ > the one shown in Table 1. We also observed various persuasion techniques listed in Table 4 such as
197
+ > embellishment,
198
+
199
+ ### Curation Rationale
200
+
201
+ See **Dataset Creation**
202
+
203
+ ### Source Data
204
+
205
+ See **Dataset Creation**
206
+
207
+ #### Initial Data Collection and Normalization
208
+
209
+ See **Dataset Creation**
210
+
211
+ #### Who are the source language producers?
212
+
213
+ See **Dataset Creation**
214
+
215
+ ### Annotations
216
+
217
+ The `dialogue_acts` annotations are not part of the initial data collection; they were generated afterwards by a rules-based parser, described below.
218
+
219
+ #### Annotation process
220
+
221
+ Annotations for the `dialogue_acts` in `train` and `validation` were generated via a rules-based system which can be found in [this script](https://github.com/stanfordnlp/cocoa/blob/master/craigslistbargain/parse_dialogue.py)
222
+
223
+ #### Who are the annotators?
224
+
225
+ [More Information Needed]
226
+
227
+ ### Personal and Sensitive Information
228
+
229
+ [More Information Needed]
230
+
231
+ ## Considerations for Using the Data
232
+
233
+ [More Information Needed]
234
+
235
+ ### Social Impact of Dataset
236
+
237
+ [More Information Needed]
238
+
239
+ ### Discussion of Biases
240
+
241
+ [More Information Needed]
242
+
243
+ ### Other Known Limitations
244
+
245
+ [More Information Needed]
246
+
247
+ ## Additional Information
248
+
249
+ [More Information Needed]
250
+
251
+ ### Dataset Curators
252
+
253
+ He He and Derek Chen and Anusha Balakrishnan and Percy Liang
254
+ Computer Science Department, Stanford University
255
+ `{hehe,derekchen14,anusha,pliang}@cs.stanford.edu`
256
+
257
+ The work through which this data was produced was supported by
258
+ DARPA Communicating with Computers (CwC)
259
+ program under ARO prime contract no. W911NF15-1-0462
260
+
261
+ ### Licensing Information
262
+
263
+ [More Information Needed]
264
+
265
+ ### Citation Information
266
+
267
+ ```
268
+ @misc{he2018decoupling,
269
+ title={Decoupling Strategy and Generation in Negotiation Dialogues},
270
+ author={He He and Derek Chen and Anusha Balakrishnan and Percy Liang},
271
+ year={2018},
272
+ eprint={1808.09637},
273
+ archivePrefix={arXiv},
274
+ primaryClass={cs.CL}
275
+ }
276
+ ```
277
+
craigslist_bargains.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TODO: Add a description here."""
16
+
17
+ from __future__ import absolute_import, division, print_function
18
+
19
+ import json
20
+
21
+ import datasets
22
+
23
+
24
+ _CITATION = """\
25
+ @misc{he2018decoupling,
26
+ title={Decoupling Strategy and Generation in Negotiation Dialogues},
27
+ author={He He and Derek Chen and Anusha Balakrishnan and Percy Liang},
28
+ year={2018},
29
+ eprint={1808.09637},
30
+ archivePrefix={arXiv},
31
+ primaryClass={cs.CL}
32
+ }
33
+ """
34
+
35
+ _DESCRIPTION = """\
36
+ We study negotiation dialogues where two agents, a buyer and a seller,
37
+ negotiate over the price of an time for sale. We collected a dataset of more
38
+ than 6K negotiation dialogues over multiple categories of products scraped from Craigslist.
39
+ Our goal is to develop an agent that negotiates with humans through such conversations.
40
+ The challenge is to handle both the negotiation strategy and the rich language for bargaining.
41
+ """
42
+
43
+ _HOMEPAGE = "https://stanfordnlp.github.io/cocoa/"
44
+
45
+ _LICENSE = ""
46
+
47
+ _URLs = {
48
+ "train": "https://worksheets.codalab.org/rest/bundles/0xd34bbbc5fb3b4fccbd19e10756ca8dd7/contents/blob/parsed.json",
49
+ "validation": "https://worksheets.codalab.org/rest/bundles/0x15c4160b43d44ee3a8386cca98da138c/contents/blob/parsed.json",
50
+ "test": "https://worksheets.codalab.org/rest/bundles/0x54d325bbcfb2463583995725ed8ca42b/contents/blob/",
51
+ }
52
+
53
+
54
+ class CraigslistBargains(datasets.GeneratorBasedBuilder):
55
+ """
56
+ Dialogue for buyer and a seller negotiating
57
+ the price of an item for sale on Craigslist.
58
+ """
59
+
60
+ VERSION = datasets.Version("1.1.0")
61
+
62
+ def _info(self):
63
+ features = datasets.Features(
64
+ {
65
+ "agent_info": datasets.features.Sequence(
66
+ {
67
+ "Bottomline": datasets.Value("string"),
68
+ "Role": datasets.Value("string"),
69
+ "Target": datasets.Value("float"),
70
+ }
71
+ ),
72
+ "agent_turn": datasets.features.Sequence(datasets.Value("int32")),
73
+ "dialogue_acts": datasets.features.Sequence(
74
+ {"intent": datasets.Value("string"), "price": datasets.Value("float")}
75
+ ),
76
+ "utterance": datasets.features.Sequence(datasets.Value("string")),
77
+ "items": datasets.features.Sequence(
78
+ {
79
+ "Category": datasets.Value("string"),
80
+ "Images": datasets.Value("string"),
81
+ "Price": datasets.Value("float"),
82
+ "Description": datasets.Value("string"),
83
+ "Title": datasets.Value("string"),
84
+ }
85
+ ),
86
+ }
87
+ )
88
+
89
+ return datasets.DatasetInfo(
90
+ # This is the description that will appear on the datasets page.
91
+ description=_DESCRIPTION,
92
+ # This defines the different columns of the dataset and their types
93
+ features=features, # Here we define them above because they are different between the two configurations
94
+ # If there's a common (input, target) tuple from the features,
95
+ # specify them here. They'll be used if as_supervised=True in
96
+ # builder.as_dataset.
97
+ supervised_keys=None,
98
+ # Homepage of the dataset for documentation
99
+ homepage=_HOMEPAGE,
100
+ # License for the dataset if available
101
+ license=_LICENSE,
102
+ # Citation for the dataset
103
+ citation=_CITATION,
104
+ )
105
+
106
+ def _split_generators(self, dl_manager):
107
+ """Returns SplitGenerators."""
108
+
109
+ my_urls = _URLs
110
+ data_dir = dl_manager.download_and_extract(my_urls)
111
+
112
+ return [
113
+ datasets.SplitGenerator(
114
+ name=datasets.Split.TRAIN,
115
+ # These kwargs will be passed to _generate_examples
116
+ gen_kwargs={
117
+ "filepath": data_dir["train"],
118
+ "split": "train",
119
+ },
120
+ ),
121
+ datasets.SplitGenerator(
122
+ name=datasets.Split.TEST,
123
+ # These kwargs will be passed to _generate_examples
124
+ gen_kwargs={"filepath": data_dir["test"], "split": "test"},
125
+ ),
126
+ datasets.SplitGenerator(
127
+ name=datasets.Split.VALIDATION,
128
+ # These kwargs will be passed to _generate_examples
129
+ gen_kwargs={
130
+ "filepath": data_dir["validation"],
131
+ "split": "validation",
132
+ },
133
+ ),
134
+ ]
135
+
136
+ def _generate_examples(self, filepath, split):
137
+ """ Yields examples. """
138
+
139
+ # Set default values for items when the information is missing
140
+ # `items` is the description of the item advertised on craigslist
141
+ # to which the conversation is referring
142
+ default_items = {"Category": "", "Images": "", "Price": -1.0, "Description": "", "Title": ""}
143
+
144
+ # Set default values for the rules-based `metadata` generated by
145
+ # the Stanford NLP Cocoa project for the Craigslist Bargains dataset
146
+ # For more information on producing the `metadata` values for the train
147
+ # and dev sets, see https://worksheets.codalab.org/bundles/0xd34bbbc5fb3b4fccbd19e10756ca8dd7
148
+ default_metadata = {"price": -1.0, "intent": ""}
149
+
150
+ with open(filepath, encoding="utf-8") as f:
151
+ concat_sep = ","
152
+ jsons = json.loads(f.read())
153
+ for id_, j in enumerate(jsons):
154
+
155
+ # Get scenario information.
156
+ # This is nformation about position of each agent
157
+ scenario = j.get("scenario")
158
+ kbs = scenario["kbs"]
159
+ agent_info = [kb["personal"] for kb in kbs]
160
+ agent_info = [{k: str(v) for k, v in ai.items()} for ai in agent_info]
161
+
162
+ # Get item information.
163
+ # This is information about item listing for each agent
164
+ items = [i["item"] for i in kbs]
165
+
166
+ # Flatten `list` elements in items
167
+ # (e.g. if there are multiple image names, descriptions...)
168
+ # to align more easily with arrow schema
169
+ for item in items:
170
+ for k in item:
171
+ if type(item[k]) == list:
172
+ item[k] = concat_sep.join(item[k])
173
+
174
+ # Check for missing elements in `items`
175
+ # and fill with default values
176
+ for item in items:
177
+ for k in default_items:
178
+ if k not in item:
179
+ item[k] = default_items[k]
180
+ elif not item[k]:
181
+ item[k] = default_items[k]
182
+
183
+ # Get interaction information.
184
+ # This is information about messages exchanged
185
+ # and rules-based dialogue acts assigned to each
186
+ # dialogue segment
187
+ events = j.get("events")
188
+ agents = [e.get("agent") for e in events]
189
+ agents = [a if type(a) == int else -1 for a in agents]
190
+ data = [e.get("data") for e in events]
191
+ utterances = [u if type(u) == str else "" for u in data]
192
+
193
+ metadata = [e.get("metadata") for e in events]
194
+ metadata = [m if m else default_metadata for m in metadata]
195
+
196
+ # Check for missing keys in metadata, or missing
197
+ # metadata altogether for test data split.
198
+ # If anything missing, fill with defaults above.
199
+ for m in metadata:
200
+ for k in default_metadata:
201
+ if k not in m:
202
+ m[k] = default_metadata[k]
203
+ elif not m[k]:
204
+ m[k] = default_metadata[k]
205
+
206
+ yield id_, {
207
+ "agent_info": agent_info,
208
+ "agent_turn": agents,
209
+ "dialogue_acts": metadata,
210
+ "utterance": utterances,
211
+ "items": items,
212
+ }
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "We study negotiation dialogues where two agents, a buyer and a seller,\nnegotiate over the price of an time for sale. We collected a dataset of more\nthan 6K negotiation dialogues over multiple categories of products scraped from Craigslist.\nOur goal is to develop an agent that negotiates with humans through such conversations.\nThe challenge is to handle both the negotiation strategy and the rich language for bargaining.\n", "citation": "@misc{he2018decoupling,\n title={Decoupling Strategy and Generation in Negotiation Dialogues},\n author={He He and Derek Chen and Anusha Balakrishnan and Percy Liang},\n year={2018},\n eprint={1808.09637},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://stanfordnlp.github.io/cocoa/", "license": "", "features": {"agent_info": {"feature": {"Bottomline": {"dtype": "string", "id": null, "_type": "Value"}, "Role": {"dtype": "string", "id": null, "_type": "Value"}, "Target": {"dtype": "float32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "agent_turn": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "dialogue_acts": {"feature": {"intent": {"dtype": "string", "id": null, "_type": "Value"}, "price": {"dtype": "float32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "utterance": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "items": {"feature": {"Category": {"dtype": "string", "id": null, "_type": "Value"}, "Images": {"dtype": "string", "id": null, "_type": "Value"}, "Price": {"dtype": "float32", "id": null, "_type": "Value"}, "Description": {"dtype": "string", "id": null, "_type": "Value"}, "Title": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "craigslist_bargains", 
"config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8538836, "num_examples": 5247, "dataset_name": "craigslist_bargains"}, "test": {"name": "test", "num_bytes": 1353933, "num_examples": 838, "dataset_name": "craigslist_bargains"}, "validation": {"name": "validation", "num_bytes": 966032, "num_examples": 597, "dataset_name": "craigslist_bargains"}}, "download_checksums": {"https://worksheets.codalab.org/rest/bundles/0xd34bbbc5fb3b4fccbd19e10756ca8dd7/contents/blob/parsed.json": {"num_bytes": 20148723, "checksum": "34033ff87565b9fc9eb0efe867e9d3e32456dbe1528cd1683f94a84b09f66ace"}, "https://worksheets.codalab.org/rest/bundles/0x15c4160b43d44ee3a8386cca98da138c/contents/blob/parsed.json": {"num_bytes": 2287054, "checksum": "03b35dc18bd90d87dac46893ac4db8ab3eed51786d192975be68d3bab38e306e"}, "https://worksheets.codalab.org/rest/bundles/0x54d325bbcfb2463583995725ed8ca42b/contents/blob/": {"num_bytes": 2937841, "checksum": "c802f15f80ea3066d429375393319d7234daacbd6a26a6ad5afd0ad78a2f7736"}}, "download_size": 25373618, "post_processing_size": null, "dataset_size": 10858801, "size_in_bytes": 36232419}}
dummy/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:367ad59ffbd22883e58cf5e76c204d7f2de74e8ab23f6c0833ed0aff56027690
3
+ size 10381