albertvillanova committed
Commit d7b0093
1 Parent(s): 877b97e

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (6536cf28bcc929e1886d9fdf287d5a533302963a)
- Delete loading script (1eff8bf8314ebe3975347a80602a2959f41b325d)
- Delete legacy dataset_infos.json (4c29e7ef281f03449bee40a4cd71735ddbe21cb1)
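With this merge, the Hub serves the dataset directly from the Parquet shards and the `datasets` library no longer executes a loading script. A minimal usage sketch, assuming the canonical `blended_skill_talk` repo id:

from datasets import load_dataset

# Loads the Parquet shards declared in the README's `configs` section;
# no Python loading script is run.
dataset = load_dataset("blended_skill_talk")
print(dataset)                          # DatasetDict with train/validation/test splits
print(dataset["train"][0]["personas"])  # persona strings of the first example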

README.md CHANGED
@@ -9,7 +9,6 @@ license:
 - unknown
 multilinguality:
 - monolingual
-pretty_name: BlendedSkillTalk
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -19,6 +18,7 @@ task_categories:
 task_ids:
 - dialogue-generation
 paperswithcode_id: blended-skill-talk
+pretty_name: BlendedSkillTalk
 dataset_info:
   features:
   - name: personas
@@ -48,16 +48,25 @@ dataset_info:
     sequence: string
   splits:
   - name: train
-    num_bytes: 10831361
+    num_bytes: 10830670
     num_examples: 4819
   - name: validation
-    num_bytes: 43961658
+    num_bytes: 43961447
     num_examples: 1009
   - name: test
-    num_bytes: 44450102
+    num_bytes: 44449895
     num_examples: 980
-  download_size: 38101408
-  dataset_size: 99243121
+  download_size: 10897644
+  dataset_size: 99242012
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for "blended_skill_talk"
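The added `configs` block is what maps each split to its Parquet shard via glob patterns. Equivalently, the shards can be read through the generic `parquet` builder; the globs below mirror the `data_files` entries above and assume a local checkout of the repo:

from datasets import load_dataset

# Direct read of the shards via the generic Parquet builder, bypassing the
# dataset card. The split-to-glob mapping mirrors the README's `configs`.
data_files = {
    "train": "data/train-*",
    "validation": "data/validation-*",
    "test": "data/test-*",
}
dataset = load_dataset("parquet", data_files=data_files)
print({split: ds.num_rows for split, ds in dataset.items()})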
blended_skill_talk.py DELETED
@@ -1,146 +0,0 @@
-"""TODO(blended_skill_talk): Add a description here."""
-
-
-import json
-
-import datasets
-
-
-# TODO(blended_skill_talk): BibTeX citation
-_CITATION = """\
-@misc{smith2020evaluating,
-    title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},
-    author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},
-    year={2020},
-    eprint={2004.08449},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-# TODO(blended_skill_talk):
-_DESCRIPTION = """\
-A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.
-"""
-_URL = "http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz"
-
-_TASK = ["convai2", "empathetic_dialogues", "wizard_of_wikipedia"]
-
-
-class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
-    """TODO(blended_skill_talk): Short description of my dataset."""
-
-    # TODO(blended_skill_talk): Set up version.
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        # TODO(blended_skill_talk): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "personas": datasets.features.Sequence(datasets.Value("string")),
-                    "additional_context": datasets.Value("string"),
-                    "previous_utterance": datasets.features.Sequence(datasets.Value("string")),
-                    "context": datasets.Value("string"),
-                    "free_messages": datasets.features.Sequence(datasets.Value("string")),
-                    "guided_messages": datasets.features.Sequence(datasets.Value("string")),
-                    "suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK}),
-                    "guided_chosen_suggestions": datasets.features.Sequence(datasets.Value("string")),
-                    "label_candidates": datasets.features.Sequence(
-                        datasets.features.Sequence(datasets.Value("string"))
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://parl.ai/projects/bst/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(blended_skill_talk): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        archive = dl_manager.download(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": "train.json",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": "valid.json",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": "test.json",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, files):
-        """Yields examples."""
-        # TODO(blended_skill_talk): Yields (key, example) tuples from the dataset
-        for path, f in files:
-            if path == filepath:
-                data = json.load(f)
-                for id_, row in enumerate(data):
-                    personas = [row["personas"][1][0], row["personas"][1][1]]
-                    dialogs = [dialog[1] for dialog in row["dialog"]]
-                    free_messages = []
-                    guided_messages = []
-
-                    for i in range(len(dialogs) // 2):
-                        free_messages.append(dialogs[2 * i])
-                        guided_messages.append(dialogs[2 * i + 1])
-                    context = row["context_dataset"]
-                    add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
-                    previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
-                    suggestions = row["suggestions"]
-                    convai_suggestions = []
-                    empathetic_suggestions = []
-                    wow_suggestions = []
-                    for i in range(len(suggestions) // 2):
-                        convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
-                        empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
-                        wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
-                    chosen_suggestions = row["chosen_suggestions"]
-                    guided_chosen_suggestions = []
-                    for i in range(len(chosen_suggestions) // 2):
-                        guided_chosen_suggestions.append(chosen_suggestions[2 * i + 1])
-                    label_candidates = row["label_candidates"] if "label_candidates" in row else []
-                    yield id_, {
-                        "personas": personas,
-                        "additional_context": add_context,
-                        "previous_utterance": previous_utterance,
-                        "context": context,
-                        "free_messages": free_messages,
-                        "guided_messages": guided_messages,
-                        "suggestions": {
-                            "convai2": convai_suggestions,
-                            "empathetic_dialogues": empathetic_suggestions,
-                            "wizard_of_wikipedia": wow_suggestions,
-                        },
-                        "guided_chosen_suggestions": guided_chosen_suggestions,
-                        "label_candidates": label_candidates,
-                    }
-                break
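The deleted `_generate_examples` de-interleaved each dialog: even-indexed turns belong to the "free" worker and odd-indexed turns to the "guided" worker, with suggestions likewise taken from the odd (guided) positions. A toy sketch of that pairing logic, with made-up utterances:

# Even turns -> free worker, odd turns -> guided worker, as in the
# deleted script's `_generate_examples`. The dialog below is invented.
dialogs = ["hi there", "hello!", "I love hiking", "me too, where do you go?"]
free_messages, guided_messages = [], []
for i in range(len(dialogs) // 2):
    free_messages.append(dialogs[2 * i])        # turns 0, 2, ...
    guided_messages.append(dialogs[2 * i + 1])  # turns 1, 3, ...
assert free_messages == ["hi there", "I love hiking"]
assert guided_messages == ["hello!", "me too, where do you go?"]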
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03ba2ffafec09e44e690a0ee44b509d207c3bc9c6a1f7456f48565863e2ab8cb
+size 2402776
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a211dce359c9d6af534d3181d9005709b893c14c8908f65cc96a2b763502482
+size 5876073
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72581a5d82a01a165e91efb364ad0b60c15c958984ff3cb1ccb4a22681733c98
+size 2618795
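Each file added above is a Git LFS pointer: only the `version`, `oid`, and `size` fields live in Git, while the Parquet payload is stored in LFS. One way to pull a shard and inspect it with pandas, assuming the canonical repo id and default caching:

import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch the test shard added by this commit and peek at it with pandas.
path = hf_hub_download(
    repo_id="blended_skill_talk",
    repo_type="dataset",
    filename="data/test-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(df.shape)             # expect (980, 9) given the split metadata above
print(df.columns.tolist())  # personas, additional_context, ...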
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.\n", "citation": "@misc{smith2020evaluating,\n title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},\n author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},\n year={2020},\n eprint={2004.08449},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://parl.ai/projects/bst/", "license": "", "features": {"personas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "additional_context": {"dtype": "string", "id": null, "_type": "Value"}, "previous_utterance": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "free_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "guided_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "suggestions": {"feature": {"convai2": {"dtype": "string", "id": null, "_type": "Value"}, "empathetic_dialogues": {"dtype": "string", "id": null, "_type": "Value"}, "wizard_of_wikipedia": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "guided_chosen_suggestions": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "label_candidates": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "blended_skill_talk", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10831361, "num_examples": 4819, "dataset_name": "blended_skill_talk"}, "validation": {"name": "validation", "num_bytes": 43961658, "num_examples": 1009, "dataset_name": "blended_skill_talk"}, "test": {"name": "test", "num_bytes": 44450102, "num_examples": 980, "dataset_name": "blended_skill_talk"}}, "download_checksums": {"http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz": {"num_bytes": 38101408, "checksum": "5fbed0068ee89e2d43b93c3ecb341e784617033efa5e8e911a219d4eda6134a6"}}, "download_size": 38101408, "post_processing_size": null, "dataset_size": 99243121, "size_in_bytes": 137344529}}