Dataset: blended_skill_talk

Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: crowdsourced
Annotations Creators: crowdsourced
Source Datasets: original
ArXiv: 2004.08449
albertvillanova committed
Commit 1eff8bf
Parent: 6536cf2

Delete loading script

Files changed (1):
  blended_skill_talk.py  +0 -146
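
With the script removed, the dataset is no longer loaded by executing repository code; the Hub serves the data files directly (this kind of cleanup typically accompanies conversion of the data to a format such as Parquet that the Hub can stream on its own). A minimal sketch of how loading looks afterwards, assuming a recent `datasets` release:

    from datasets import load_dataset

    # No loading script runs; the data files hosted on the Hub are used directly.
    ds = load_dataset("blended_skill_talk")
    print(ds)  # DatasetDict with train, validation, and test splits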
blended_skill_talk.py DELETED
@@ -1,146 +0,0 @@
- """Loading script for the Blended Skill Talk dataset."""
-
-
- import json
-
- import datasets
-
-
- # BibTeX citation for the Blended Skill Talk paper.
- _CITATION = """\
- @misc{smith2020evaluating,
-     title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},
-     author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},
-     year={2020},
-     eprint={2004.08449},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
- # Description shown on the dataset page.
- _DESCRIPTION = """\
- A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.
- """
- _URL = "http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz"
-
- _TASK = ["convai2", "empathetic_dialogues", "wizard_of_wikipedia"]
-
-
- class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
-     """Builder for Blended Skill Talk: conversations blending personality, empathy, and knowledge."""
-
-     # Dataset version.
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         # Specifies the datasets.DatasetInfo object.
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # Feature schema of every example.
-             features=datasets.Features(
-                 {
-                     "personas": datasets.features.Sequence(datasets.Value("string")),
-                     "additional_context": datasets.Value("string"),
-                     "previous_utterance": datasets.features.Sequence(datasets.Value("string")),
-                     "context": datasets.Value("string"),
-                     "free_messages": datasets.features.Sequence(datasets.Value("string")),
-                     "guided_messages": datasets.features.Sequence(datasets.Value("string")),
-                     "suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK}),
-                     "guided_chosen_suggestions": datasets.features.Sequence(datasets.Value("string")),
-                     "label_candidates": datasets.features.Sequence(
-                         datasets.features.Sequence(datasets.Value("string"))
-                     ),
-                     # label_candidates: one list of candidate responses per turn.
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation.
-             homepage="https://parl.ai/projects/bst/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # Download the archive once and stream its members for each split.
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs.
-         archive = dl_manager.download(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": "train.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": "valid.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples.
-                 gen_kwargs={
-                     "filepath": "test.json",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, files):
-         """Yields (key, example) tuples from the split file inside the archive."""
-         # Dialog turns alternate between the "free" and the "guided" worker.
-         for path, f in files:
-             if path == filepath:
-                 data = json.load(f)
-                 for id_, row in enumerate(data):
-                     personas = [row["personas"][1][0], row["personas"][1][1]]
-                     dialogs = [dialog[1] for dialog in row["dialog"]]
-                     free_messages = []
-                     guided_messages = []
-
-                     for i in range(len(dialogs) // 2):
-                         free_messages.append(dialogs[2 * i])
-                         guided_messages.append(dialogs[2 * i + 1])
-                     context = row["context_dataset"]
-                     add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
-                     previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
-                     suggestions = row["suggestions"]
-                     convai_suggestions = []
-                     empathetic_suggestions = []
-                     wow_suggestions = []
-                     for i in range(len(suggestions) // 2):
-                         convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
-                         empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
-                         wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
-                     chosen_suggestions = row["chosen_suggestions"]
-                     guided_chosen_suggestions = []
-                     for i in range(len(chosen_suggestions) // 2):
-                         guided_chosen_suggestions.append(chosen_suggestions[2 * i + 1])
-                     label_candidates = row.get("label_candidates", [])
-                     yield id_, {
-                         "personas": personas,
-                         "additional_context": add_context,
-                         "previous_utterance": previous_utterance,
-                         "context": context,
-                         "free_messages": free_messages,
-                         "guided_messages": guided_messages,
-                         "suggestions": {
-                             "convai2": convai_suggestions,
-                             "empathetic_dialogues": empathetic_suggestions,
-                             "wizard_of_wikipedia": wow_suggestions,
-                         },
-                         "guided_chosen_suggestions": guided_chosen_suggestions,
-                         "label_candidates": label_candidates,
-                     }
-                 break
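
For readers of the deleted script: the even/odd indexing in `_generate_examples` is what separates the two crowdworkers, since the dialog alternates between the "free" worker (even indices) and the "guided" worker (odd indices), and only the guided worker received response suggestions. A toy sketch of that pairing, with made-up utterances:

    # Turns alternate: the free worker speaks first, the guided worker answers.
    dialogs = ["hi there", "hello!", "I love hiking", "me too, where do you go?"]
    free_messages = dialogs[0::2]    # even indices -> ["hi there", "I love hiking"]
    guided_messages = dialogs[1::2]  # odd indices  -> ["hello!", "me too, where do you go?"]

The same odd-index convention is why the suggestions loop reads `suggestions[2 * i + 1]`: only the entries aligned with the guided worker's turns carry usable suggestions.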