system HF staff committed on
Commit
c6cb8d5
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
daily_dialog.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset"""
16
+
17
+ from __future__ import absolute_import, division, print_function
18
+
19
+ import os
20
+ from zipfile import ZipFile
21
+
22
+ import datasets
23
+
24
+
25
# BibTeX entry for the IJCNLP 2017 paper that introduced DailyDialog.
# NOTE(review): the exact leading whitespace of the citation lines could not
# be recovered from the rendered diff — confirm against the original file.
_CITATION = """\
@InProceedings{li2017dailydialog,
    author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
    title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
    booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},
    year = {2017}
}
"""

# Human-readable summary surfaced through datasets.DatasetInfo(description=...).
_DESCRIPTION = """\
We develop a high-quality multi-turn dialog dataset, DailyDialog, which is intriguing in several aspects. 
The language is human-written and less noisy. The dialogues in the dataset reflect our daily communication way 
and cover various topics about our daily life. We also manually label the developed dataset with communication 
intention and emotion information. Then, we evaluate existing approaches on DailyDialog dataset and hope it 
benefit the research field of dialog systems.
"""

# Location of the full DailyDialog release archive (contains nested per-split zips).
_URL = "http://yanran.li/files/ijcnlp_dailydialog.zip"
43
+
44
# Map raw dialogue-act ids (as string tokens read from the data files) to
# their label names. Slot "0" never occurs in the act files; it is a filler
# so the label list aligns with datasets.ClassLabel integer ids as-is.
act_label = {
    str(idx): name
    for idx, name in enumerate(["__dummy__", "inform", "question", "directive", "commissive"])
}

# Map raw emotion ids (string tokens from the emotion files) to label names.
emotion_label = {
    str(idx): name
    for idx, name in enumerate(
        ["no emotion", "anger", "disgust", "fear", "happiness", "sadness", "surprise"]
    )
}
61
+
62
+
63
class DailyDialog(datasets.GeneratorBasedBuilder):
    """DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset."""

    VERSION = datasets.Version("1.0.0")

    # Separator token that delimits utterances inside one raw dialogue line.
    # (No name mangling applies: the identifier also ends with two underscores.)
    __EOU__ = "__eou__"

    def _info(self):
        """Return the dataset metadata: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "dialog": datasets.features.Sequence(datasets.Value("string")),
                    "act": datasets.features.Sequence(datasets.ClassLabel(names=list(act_label.values()))),
                    "emotion": datasets.features.Sequence(datasets.ClassLabel(names=list(emotion_label.values()))),
                }
            ),
            supervised_keys=None,
            homepage="http://yanran.li/dailydialog",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.

        Downloads the top-level archive, then unpacks the per-split zip
        archives that are nested inside it before handing file paths to
        ``_generate_examples``.
        """
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "ijcnlp_dailydialog")

        # The splits ship as nested zips inside the main archive; extract each
        # next to it. Fix: the original also called zip_file.close() inside the
        # `with` block, which is redundant — the context manager closes it.
        for name in ("train", "validation", "test"):
            zip_fpath = os.path.join(data_dir, f"{name}.zip")
            with ZipFile(zip_fpath) as zip_file:
                zip_file.extractall(path=data_dir)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "train", "dialogues_train.txt"),
                    "act_path": os.path.join(data_dir, "train", "dialogues_act_train.txt"),
                    "emotion_path": os.path.join(data_dir, "train", "dialogues_emotion_train.txt"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "test", "dialogues_test.txt"),
                    "act_path": os.path.join(data_dir, "test", "dialogues_act_test.txt"),
                    "emotion_path": os.path.join(data_dir, "test", "dialogues_emotion_test.txt"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "validation", "dialogues_validation.txt"),
                    "act_path": os.path.join(data_dir, "validation", "dialogues_act_validation.txt"),
                    "emotion_path": os.path.join(data_dir, "validation", "dialogues_emotion_validation.txt"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, file_path, act_path, emotion_path, split):
        """Yields examples.

        Args:
            file_path: path to the dialogues text file (one dialogue per line,
                utterances separated by ``__eou__``).
            act_path: path to the matching dialogue-act id file.
            emotion_path: path to the matching emotion id file.
            split: split tag used to build the example key.

        Yields:
            ``(key, example)`` tuples where ``key`` is ``"{split}-{index}"``.
        """
        # Fix: the file handles were previously named `act` / `emotion`, which
        # the per-line label lists then shadowed inside the loop — confusing,
        # though harmless because zip() already held the handle references.
        with open(file_path, "r", encoding="utf-8") as f, open(act_path, "r", encoding="utf-8") as act_file, open(
            emotion_path, "r", encoding="utf-8"
        ) as emotion_file:
            for i, (line_f, line_act, line_emotion) in enumerate(zip(f, act_file, emotion_file)):
                # A blank line marks the end of usable data; stop there.
                if not line_f.strip():
                    break
                # Each line ends with a trailing separator, so drop the final
                # empty fragment produced by split().
                dialog = line_f.split(self.__EOU__)[:-1]
                acts = line_act.split(" ")[:-1]
                emotions = line_emotion.split(" ")[:-1]

                assert len(dialog) == len(acts) == len(emotions), "Different turns btw dialogue & emotion & action"

                yield f"{split}-{i}", {
                    "dialog": dialog,
                    "act": [act_label[x] for x in acts],
                    "emotion": [emotion_label[x] for x in emotions],
                }
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "We develop a high-quality multi-turn dialog dataset, DailyDialog, which is intriguing in several aspects. \nThe language is human-written and less noisy. The dialogues in the dataset reflect our daily communication way \nand cover various topics about our daily life. We also manually label the developed dataset with communication \nintention and emotion information. Then, we evaluate existing approaches on DailyDialog dataset and hope it \nbenefit the research field of dialog systems.\n", "citation": "@InProceedings{li2017dailydialog,\n author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},\n title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},\n booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},\n year = {2017}\n}\n", "homepage": "http://yanran.li/dailydialog", "license": "", "features": {"dialog": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "act": {"feature": {"num_classes": 5, "names": ["__dummy__", "inform", "question", "directive", "commissive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "emotion": {"feature": {"num_classes": 7, "names": ["no emotion", "anger", "disgust", "fear", "happiness", "sadness", "surprise"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": {"features": null, "resources_checksums": {"train": {}, "test": {}, "validation": {}}}, "supervised_keys": null, "builder_name": "daily_dialog", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7296715, "num_examples": 11118, "dataset_name": "daily_dialog"}, "test": {"name": "test", 
"num_bytes": 655844, "num_examples": 1000, "dataset_name": "daily_dialog"}, "validation": {"name": "validation", "num_bytes": 673943, "num_examples": 1000, "dataset_name": "daily_dialog"}}, "download_checksums": {"http://yanran.li/files/ijcnlp_dailydialog.zip": {"num_bytes": 4475921, "checksum": "c641e88cbf21fd7c1b57289387f9107d33fe8685a2b37fe8066b82776535ea89"}}, "download_size": 4475921, "post_processing_size": 0, "dataset_size": 8626502, "size_in_bytes": 13102423}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b003ded75a7289dfe802829419a2891bf5fa4cf62bdcec6a416c38250c1c2909
3
+ size 3744