GEM / Taskmaster
Commit 6a1ec54 (parent: 91e4b27), committed by Sebastian Gehrmann

fix taskmaster loader
Files changed (5):
  1. Taskmaster.py       +38 -31
  2. dataset_infos.json  +87 -0
  3. dev.csv              +2 -2
  4. test.csv             +2 -2
  5. train.csv            +2 -2
Taskmaster.py CHANGED
@@ -20,6 +20,7 @@ import json
 import os
 
 import datasets
+from phonenumbers import example_number
 
 
 # TODO: Add BibTeX citation
@@ -56,9 +57,9 @@ _LICENSE = "CC BY 4.0"
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
-    "train": "https://huggingface.co/datasets/GEM/Taskmaster/train.csv",
-    "dev": "https://huggingface.co/datasets/GEM/Taskmaster/dev.csv",
-    "test": "https://huggingface.co/datasets/GEM/Taskmaster/test.csv",
+    "train": "train.csv",
+    "validation": "dev.csv",
+    "test": "test.csv",
 }
 
 # New addition
@@ -100,10 +101,10 @@ class Taskmaster(datasets.GeneratorBasedBuilder):
         features = datasets.Features(
             {
                 "gem_id": datasets.Value("string"),
-                "0": datasets.Value("string"),
-                "1": datasets.Value("string"),
-                "2": datasets.Value("string"),
-                "id": datasets.Value("string"),
+                "context": datasets.Value("string"),
+                "target": datasets.Value("string"),
+                "references": [datasets.Value("string")],
+                "conversation_id": datasets.Value("string"),
                 # "paragraphs": datasets.features.Sequence(
                 #     datasets.Value("string")),
                 # "summary": datasets.features.Sequence(
@@ -139,43 +140,31 @@ class Taskmaster(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        # _URLs = { "train": "train.csv", "test": "test.csv", "validation": "dev.csv", }
-        my_urls = {
-            "train": "train.csv",
-            "test": "test.csv",
-            "validation": "dev.csv",
-        }  # _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        data_dir = dl_manager.download_and_extract(_URLs)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir["train"], "train-%s.csv"
-                    ),  # % (self.config.name)),
+                    "filepath": data_dir["train"],
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+                name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir["test"], "test-%s.csv"
-                    ),  # % (self.config.name)),
-                    "split": "test",
+                    "filepath": data_dir["validation"],
+                    "split": "validation",
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
+                name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir["validation"], "valid-%s.csv"
-                    ),  # % (self.config.name)),
-                    "split": "dev",
+                    "filepath": data_dir["test"],
+                    "split": "test",
                 },
             ),
         ]
@@ -190,7 +179,25 @@ class Taskmaster(datasets.GeneratorBasedBuilder):
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
         with open(filepath, encoding="utf-8") as f:
-            for row in f:
-                data = csv.loads(row)
-                data["gem_id"] = "GEM-TASKMASTER-%s-%d" % (split, data["id"] + 1)
-                yield data["id"], data
+            reader = csv.DictReader(f)
+            expl_count = 0
+            other_count = 0
+            for row in reader:
+                if not row["response"].startswith("[A]"):
+                    other_count += 1
+                    continue
+                target = row["response"].replace("[A]", "")
+                gem_id = f"Taskmaster-{split}-{expl_count}"
+                expl_count += 1
+
+                ex = {
+                    "gem_id": gem_id,
+                    "context": row["context"],
+                    "target": target,
+                    "references": [target],
+                    "conversation_id": row["conversation_id"]
+                }
+
+                print(f"{other_count}/{other_count+expl_count} total now {expl_count}")
+
+                yield expl_count, ex
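Note on the fix: the absolute huggingface.co URLs are replaced with filenames relative to the dataset repository, which dl_manager.download_and_extract resolves against the repo itself, and the splits now use the standard train/validation/test names. A minimal smoke test for the repaired loader, assuming a datasets version contemporary with this commit (newer releases require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    # Load all three splits through the fixed loader script.
    ds = load_dataset("GEM/Taskmaster")

    # Split names now match the datasets.Split constants used above.
    print(ds["validation"].num_rows)   # 23406, per dataset_infos.json below
    print(ds["train"][0]["gem_id"])    # "Taskmaster-train-0" for the first kept row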
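The rewritten _generate_examples reads each split's CSV with csv.DictReader and keeps only rows whose response field carries the "[A]" (assistant) prefix, using the stripped response as both target and the single reference. A self-contained sketch of that filtering on a toy CSV; the "[U]" user marker and the column values here are illustrative assumptions, not taken from the real files:

    import csv
    import io

    # Two toy rows shaped like the columns the loader expects:
    # context, response, conversation_id.
    toy = io.StringIO(
        "context,response,conversation_id\n"
        '"[U] two tickets for tonight","[A] Sure, which movie?",dlg-0001\n'
        '"[A] Sure, which movie?","[U] Inception, please",dlg-0001\n'
    )

    kept = 0
    for row in csv.DictReader(toy):
        if not row["response"].startswith("[A]"):
            continue  # skip user turns, as the loader does
        target = row["response"].replace("[A]", "").strip()
        print(f"Taskmaster-train-{kept}", target)  # -> Sure, which movie?
        kept += 1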
dataset_infos.json ADDED
@@ -0,0 +1,87 @@
+{
+    "default": {
+        "description": "The Taskmaster-3 (aka TicketTalk) dataset consists of 23,789 movie ticketing dialogs\n(located in Taskmaster/TM-3-2020/data/). By \"movie ticketing\" we mean conversations\nwhere the customer's goal is to purchase tickets after deciding on theater, time,\nmovie name, number of tickets, and date, or opt out of the transaction.\nThe columns are gem_id, 0, 1 for serial numbering, 2 for the text dialog and id\nfor the default id by the authors.\n",
+        "citation": "@article{byrne2020tickettalk,\n title={TicketTalk: Toward human-level performance with end-to-end, transaction-based dialog systems},\n author={Byrne, Bill and Krishnamoorthi, Karthik and Ganesh, Saravanan and Kale, Mihir Sanjay},\n journal={arXiv preprint arXiv:2012.12458},\n year={2020}\n}\n",
+        "homepage": "https://github.com/google-research-datasets/Taskmaster/tree/master/TM-3-2020",
+        "license": "CC BY 4.0",
+        "features": {
+            "gem_id": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "context": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "target": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "references": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "conversation_id": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": "taskmaster",
+        "config_name": "default",
+        "version": {
+            "version_str": "3.0.0",
+            "description": null,
+            "major": 3,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 252278519,
+                "num_examples": 187182,
+                "dataset_name": "taskmaster"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 31416020,
+                "num_examples": 23406,
+                "dataset_name": "taskmaster"
+            },
+            "test": {
+                "name": "test",
+                "num_bytes": 31207515,
+                "num_examples": 23316,
+                "dataset_name": "taskmaster"
+            }
+        },
+        "download_checksums": {
+            "train.csv": {
+                "num_bytes": 365307526,
+                "checksum": "c182fcfe9aa85931fd438d52e5e8dc70b67a8ec1d2bdf5404ebc3c7452528d57"
+            },
+            "dev.csv": {
+                "num_bytes": 45407763,
+                "checksum": "39958c2fe1d383ddf4d032c8d02850f973ca7f2ef05a6b7067df22758227b381"
+            },
+            "test.csv": {
+                "num_bytes": 45430002,
+                "checksum": "95089fd04b2d91c383b9c65f02dcc942ed9f2d209e6e1d8deef4677a8f8ef18d"
+            }
+        },
+        "download_size": 456145291,
+        "post_processing_size": null,
+        "dataset_size": 314902054,
+        "size_in_bytes": 771047345
+    }
+}
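A note on the new file: dataset_infos.json is normally generated by running the loader under the datasets test harness (for example, datasets-cli test with --save_infos) rather than edited by hand, and its aggregate fields must stay consistent with the per-split and per-file entries. A quick consistency check over the committed values, assuming it runs from the repo root:

    import json

    with open("dataset_infos.json") as f:
        info = json.load(f)["default"]

    # dataset_size is the sum of the processed split sizes ...
    assert info["dataset_size"] == sum(
        s["num_bytes"] for s in info["splits"].values()
    )
    # ... download_size is the sum of the raw CSV sizes ...
    assert info["download_size"] == sum(
        c["num_bytes"] for c in info["download_checksums"].values()
    )
    # ... and size_in_bytes is both together (771047345).
    assert info["size_in_bytes"] == info["dataset_size"] + info["download_size"]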
dev.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38894de0ca972fba8e0872fd4d3381c46fa3a690066c2542154e496c51b90d2b
-size 45407772
+oid sha256:39958c2fe1d383ddf4d032c8d02850f973ca7f2ef05a6b7067df22758227b381
+size 45407763
test.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf3b80f0a974a57255f87cd626ccfd8a27154f48a01c295e4d7845311b322bcb
-size 45430011
+oid sha256:95089fd04b2d91c383b9c65f02dcc942ed9f2d209e6e1d8deef4677a8f8ef18d
+size 45430002
train.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d353eb959782ab32db4b7633209df5609179e9326c01027cd01a8f7854e200b
-size 365307535
+oid sha256:c182fcfe9aa85931fd438d52e5e8dc70b67a8ec1d2bdf5404ebc3c7452528d57
+size 365307526
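The three CSVs are Git LFS pointer files, so this commit only changes each pointer's sha256 oid and byte size; the new oids and sizes match the download_checksums block in dataset_infos.json exactly. A sketch for verifying a locally pulled file against its pointer (assumes git lfs pull has already replaced the pointer with the real file):

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        """Stream a file through sha256, matching the oid git-lfs records."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk_size):
                digest.update(block)
        return digest.hexdigest()

    # Should print 39958c2f... for the new dev.csv (45407763 bytes).
    print(sha256_of("dev.csv"))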