Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
system (HF staff) committed
Commit: 76d43f9 (0 parents)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
blended_skill_talk.py ADDED
@@ -0,0 +1,126 @@
+ """TODO(blended_skill_talk): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(blended_skill_talk): BibTeX citation
+ _CITATION = """\
+ @misc{smith2020evaluating,
+     title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},
+     author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},
+     year={2020},
+     eprint={2004.08449},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ # TODO(blended_skill_talk):
+ _DESCRIPTION = """\
+ A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.
+ """
+ _URL = "http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz"
+
+ _TASK = ["convai2", "empathetic_dialogues", "wizard_of_wikipedia"]
+
+
+ class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
+     """TODO(blended_skill_talk): Short description of my dataset."""
+
+     # TODO(blended_skill_talk): Set up version.
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         # TODO(blended_skill_talk): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "personas": datasets.features.Sequence(datasets.Value("string")),
+                     "additional_context": datasets.Value("string"),
+                     "previous_utterance": datasets.features.Sequence(datasets.Value("string")),
+                     "context": datasets.Value("string"),
+                     "free_messages": datasets.features.Sequence(datasets.Value("string")),
+                     "guided_messages": datasets.features.Sequence(datasets.Value("string")),
+                     "suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK})
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://parl.ai/projects/bst/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(blended_skill_talk): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         data_dir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(blended_skill_talk): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for id_, row in enumerate(data):
+                 personas = [row["personas"][1][0], row["personas"][1][1]]
+                 dialogs = [dialog[1] for dialog in row["dialog"]]
+                 free_messages = []
+                 guided_messages = []
+
+                 for i in range(len(dialogs) // 2):
+                     free_messages.append(dialogs[2 * i])
+                     guided_messages.append(dialogs[2 * i + 1])
+                 context = row["context_dataset"]
+                 add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
+                 previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
+                 suggestions = row["suggestions"]
+                 convai_suggestions = []
+                 empathetic_suggestions = []
+                 wow_suggestions = []
+                 for i in range(len(suggestions) // 2):
+                     convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
+                     empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
+                     wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
+                 yield id_, {
+                     "personas": personas,
+                     "additional_context": add_context,
+                     "previous_utterance": previous_utterance,
+                     "context": context,
+                     "free_messages": free_messages,
+                     "guided_messages": guided_messages,
+                     "suggestions": {
+                         "convai2": convai_suggestions,
+                         "empathetic_dialogues": empathetic_suggestions,
+                         "wizard_of_wikipedia": wow_suggestions,
+                     },
+                 }
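
For reference, here is a minimal sketch of consuming this builder through the datasets library. It assumes the dataset is available under the name "blended_skill_talk" and that the splits and feature names match the script above; the printed fields are only illustrative.

from datasets import load_dataset

# Download, generate, and cache the three splits defined in _split_generators.
dataset = load_dataset("blended_skill_talk")
print(dataset)  # expected splits: train, validation, test

# Inspect the fields emitted by _generate_examples for one training example.
example = dataset["train"][0]
print(example["personas"])          # persona strings for the guided speaker
print(example["context"])           # originating task: convai2, empathetic_dialogues, or wizard_of_wikipedia
print(example["free_messages"][0])  # first free-speaker utterance
print(example["suggestions"]["convai2"][0])  # first ConvAI2 suggestion for the guided speaker

Because "suggestions" is declared as a Sequence over a dict of tasks, it is surfaced as a dict of per-task string lists, which is why the last line indexes by task name first.
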
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.\n", "citation": "@misc{smith2020evaluating,\n title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},\n author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},\n year={2020},\n eprint={2004.08449},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://parl.ai/projects/bst/", "license": "", "features": {"personas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "additional_context": {"dtype": "string", "id": null, "_type": "Value"}, "previous_utterance": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "free_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "guided_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "suggestions": {"feature": {"convai2": {"dtype": "string", "id": null, "_type": "Value"}, "empathetic_dialogues": {"dtype": "string", "id": null, "_type": "Value"}, "wizard_of_wikipedia": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "blended_skill_talk", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10621560, "num_examples": 4819, "dataset_name": "blended_skill_talk"}, "validation": {"name": "validation", "num_bytes": 2238122, "num_examples": 1009, "dataset_name": "blended_skill_talk"}, "test": {"name": "test", "num_bytes": 2214985, "num_examples": 980, "dataset_name": "blended_skill_talk"}}, "download_checksums": {"http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz": {"num_bytes": 38101408, "checksum": "5fbed0068ee89e2d43b93c3ecb341e784617033efa5e8e911a219d4eda6134a6"}}, "download_size": 38101408, "dataset_size": 15074667, "size_in_bytes": 53176075}}
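
The JSON above is a single minified line; a short sketch of reading it back to check the recorded split sizes (assuming the file is read from the repository root):

import json

# Load the metadata written by the datasets library for the default config.
with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["default"]

# Print the example count and on-disk size recorded for each split.
for name, split in info["splits"].items():
    print(f"{name}: {split['num_examples']} examples, {split['num_bytes']} bytes")

print("download size:", info["download_size"], "bytes")
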
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33151e4c21cb875a197d01f94d5e8ec07882c7c16aecbba5e11622ddcc3d6454
+ size 55973
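
The three lines above are a Git LFS pointer rather than the zip archive itself: the real dummy_data.zip is stored in LFS and identified by the sha256 oid and byte size recorded here. A small illustrative sketch of reading those fields from a pointer file (the path simply mirrors this repo's layout):

# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("dummy/1.0.0/dummy_data.zip")
print(pointer["oid"])   # sha256:33151e4c...
print(pointer["size"])  # 55973 (bytes)
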