Sebastian Gehrmann committed
Commit · b016b17
1 Parent(s): a167b33
- dataset_infos.json +1 -0
- dstc10_track2_task2.py +36 -16
dataset_infos.json ADDED
@@ -0,0 +1 @@
+
{"generation": {"description": "\n", "citation": "@article{kim2020domain,\n title={Beyond Domain APIs: Task-oriented Conversational Modeling with Unstructured Knowledge Access},\n author={Seokhwan Kim and Mihail Eric and Karthik Gopalakrishnan and Behnam Hedayatnia and Yang Liu and Dilek Hakkani-Tur},\n journal={arXiv preprint arXiv:2006.03533}\n year={2020}\n}\n", "homepage": "https://github.com/alexa/alexa-with-dstc10-track2-dataset", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "turns": [{"speaker": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "nbest": [{"hyp": {"dtype": "string", "id": null, "_type": "Value"}, "score": {"dtype": "float32", "id": null, "_type": "Value"}}]}], "knowledge": {"domain": {"dtype": "string", "id": null, "_type": "Value"}, "entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "body": {"dtype": "string", "id": null, "_type": "Value"}}, "response": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "linearized_input": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "dstc10_track2_task2", "config_name": "generation", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50332821, "num_examples": 19184, "dataset_name": "dstc10_track2_task2"}, "validation": {"name": "validation", "num_bytes": 819517, "num_examples": 104, "dataset_name": "dstc10_track2_task2"}, "test": {"name": "test", "num_bytes": 6600484, "num_examples": 683, "dataset_name": "dstc10_track2_task2"}}, "download_checksums": {"https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master/data/train/logs.json": {"num_bytes": 96396446, "checksum": "39a87aafdc70e4adde0fc2e9b6e4caa3ab6ce8668bd7d17ea73b4534d7fa41d3"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master/data/train/labels.json": {"num_bytes": 6496347, "checksum": "615eac39a48f9068a30a92eff03092e301b20c43121cb5198acf8b3d67557d4e"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master/data/knowledge.json": {"num_bytes": 471645, "checksum": "c8490242c23101c4e7c3e3482acd1d6dbf26c788f62c0c87fcaf622ee5360372"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2/data/val/logs.json": {"num_bytes": 2321433, "checksum": "d065b457a53d19cf1d8035e5c953df3540382e87d48ddd50380578638d1943a5"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2/data/val/labels.json": {"num_bytes": 37931, "checksum": "8ff76887a7aea9ecceb0a59bbb477d6bd360df0d508a6ca75adc1de86a757b49"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2/data/knowledge.json": {"num_bytes": 1979588, "checksum": "fb0facf7acc6831707e49e4dec39518a4c627f233ab6c13ffc5e3f3fab00e4ab"}, "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2/data/test/logs.json": {"num_bytes": 27842098, "checksum": "386a78fa7ed0abe8ab2961621f044dd6c7db6f718684604150d3b42b759ae1e2"}, 
"https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2/data/test/labels.json": {"num_bytes": 274884, "checksum": "479d33cc6398730e63c4eed9b205a2e9f7039a2eee88f6f661700164cd69d190"}}, "download_size": 135820372, "post_processing_size": null, "dataset_size": 57752822, "size_in_bytes": 193573194}}
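With this metadata in place, the generation config can be loaded in the usual way. A minimal sketch, assuming the loader is hosted as GEM/dstc10_track2_task2 (the repository id and the split choice are illustrative, not stated in this commit):

import datasets

# Assumed repository id; substitute the path this loader actually lives under.
ds = datasets.load_dataset("GEM/dstc10_track2_task2", "generation", split="validation")

print(ds.features["knowledge"])   # domain, entity_name, title, body, all string values
print(ds[0]["linearized_input"])  # flat text form of the dialogue plus the selected knowledge snippet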
dstc10_track2_task2.py CHANGED
@@ -26,7 +26,9 @@ _DESCRIPTION = """\
 """
 
 _BASE_URL_DSTC10 = "https://raw.githubusercontent.com/alexa/alexa-with-dstc10-track2-dataset/main/task2"
-_BASE_URL_DSTC9 = "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master"
+_BASE_URL_DSTC9 = (
+    "https://raw.githubusercontent.com/alexa/alexa-with-dstc9-track1-dataset/master"
+)
 _URLs = {
     "train": {
         "logs": f"{_BASE_URL_DSTC9}/data/train/logs.json",
@@ -42,9 +44,10 @@ _URLs = {
         "logs": f"{_BASE_URL_DSTC10}/data/test/logs.json",
         "labels": f"{_BASE_URL_DSTC10}/data/test/labels.json",
         "knowledge": f"{_BASE_URL_DSTC10}/data/knowledge.json",
-    }
+    },
 }
 
+
 class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.0.0")
@@ -72,20 +75,22 @@ class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
                             "nbest": [
                                 {
                                     "hyp": datasets.Value("string"),
-                                    "score": datasets.Value("float")
+                                    "score": datasets.Value("float"),
                                 }
-                            ]
+                            ],
                         }
                     ],
-                    "knowledge":
-                    {
-                        "domain": datasets.Value("string"),
-                        "entity_name": datasets.Value("string"),
-                        "title": datasets.Value("string"),
-                        "body": datasets.Value("string")
-                    },
+                    "knowledge": {
+                        "domain": datasets.Value("string"),
+                        "entity_name": datasets.Value("string"),
+                        "title": datasets.Value("string"),
+                        "body": datasets.Value("string"),
+                    },
                     "response": datasets.Value("string"),
                     "source": datasets.Value("string"),
+                    "linearized_input": datasets.Value("string"),
+                    "target": datasets.Value("string"),
+                    "references": [datasets.Value("string")],
                 }
             )
 
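For orientation, a sketch of the record shape this expanded schema describes. The values below are invented for illustration; only the keys follow the features above, and target and references simply mirror response (the linearized_input field is filled in by the _linearize_example helper added further down):

example = {
    "id": "0",
    "gem_id": "dstc10_track2_task2-validation-0",  # invented, GEM-style id
    "turns": [{"speaker": "U", "text": "Does the hotel have free wifi?", "nbest": []}],
    "knowledge": {
        "domain": "hotel",
        "entity_name": "A and B Guest House",
        "title": "Is WiFi available?",
        "body": "Yes, free WiFi is available.",
    },
    "response": "Yes, the WiFi there is free of charge.",
    "source": "multiwoz",
    "target": "Yes, the WiFi there is free of charge.",        # copy of response
    "references": ["Yes, the WiFi there is free of charge."],  # single-reference list
}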
@@ -121,7 +126,10 @@ class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
             else:
                 source = label["source"]
 
-            domain, entity_id, doc_id = (label["knowledge"][0].get(key) for key in ["domain", "entity_id", "doc_id"])
+            domain, entity_id, doc_id = (
+                label["knowledge"][0].get(key)
+                for key in ["domain", "entity_id", "doc_id"]
+            )
             entity_name = knowledge_data[domain][str(entity_id)]["name"]
             snippet = knowledge_data[domain][str(entity_id)]["docs"][str(doc_id)]
 
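The lookup in this hunk follows the knowledge.json layout used by the DSTC9/DSTC10 data, keyed domain -> entity_id -> docs -> doc_id, as implied by the indexing in the two context lines. A self-contained sketch with invented entries:

knowledge_data = {
    "hotel": {
        "1": {
            "name": "A and B Guest House",
            "docs": {"0": {"title": "Is WiFi available?", "body": "Yes, free WiFi is available."}},
        }
    }
}
label = {"knowledge": [{"domain": "hotel", "entity_id": 1, "doc_id": 0}]}

# Same generator-expression unpacking as in the diff above.
domain, entity_id, doc_id = (
    label["knowledge"][0].get(key) for key in ["domain", "entity_id", "doc_id"]
)
entity_name = knowledge_data[domain][str(entity_id)]["name"]           # "A and B Guest House"
snippet = knowledge_data[domain][str(entity_id)]["docs"][str(doc_id)]  # dict with title and body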
@@ -134,11 +142,15 @@ class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
                     "domain": domain,
                     "entity_name": entity_name,
                     "title": snippet["title"],
-                    "body": snippet["body"]
+                    "body": snippet["body"],
                 },
-                "response": label["response"]
+                "response": label["response"],
+                "target": label["response"],
+                "references": [label["response"]],
             }
 
+            x["linearized_input"] = self._linearize_example(x)
+
             i += 1
 
             yield x["id"], x
@@ -152,9 +164,18 @@ class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
 
         return dl_manager.download_and_extract(urls)
 
+    def _linearize_example(self, d):
+        repr_string = ""
+        for t in d["turns"]:
+            repr_string += f"<{t['speaker']}> {t['text']} "
+        repr_string += f"|| knowledge domain: {d['knowledge']['domain']}, entity: {d['knowledge']['entity_name']}, title: {d['knowledge']['title']}, information: {d['knowledge']['body']}"
+        return repr_string
+
     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
         urls_to_download = _URLs
-        downloaded_files = self._download_files(urls_to_download, self.config.data_files, dl_manager)
+        downloaded_files = self._download_files(
+            urls_to_download, self.config.data_files, dl_manager
+        )
         for split in ["train", "validation", "test"]:
             downloaded_files[split]["split"] = split
 
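Given _linearize_example above, linearized_input comes out as one flat string: speaker-tagged turns in order, then the selected knowledge snippet after the || marker. An illustrative input (speaker tags and text are invented) and the string the method would return for it:

example = {
    "turns": [
        {"speaker": "U", "text": "Does the hotel have free wifi?"},
        {"speaker": "S", "text": "Let me check that for you."},
    ],
    "knowledge": {
        "domain": "hotel",
        "entity_name": "A and B Guest House",
        "title": "Is WiFi available?",
        "body": "Yes, free WiFi is available.",
    },
}
# returns: "<U> Does the hotel have free wifi? <S> Let me check that for you. || knowledge domain: hotel, entity: A and B Guest House, title: Is WiFi available?, information: Yes, free WiFi is available."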
@@ -166,4 +187,3 @@ class DSTC10Track2Task2(datasets.GeneratorBasedBuilder):
                 (datasets.Split.TEST, "test"),
             )
         ]
-