Quentin Lhoest committed on
Commit 1864061
1 Parent(s): 5a1ff12

Release: 1.18.1


Commit from https://github.com/huggingface/datasets/commit/218e496519ff14b4bc69ea559616af6f2ef89e57

Files changed (1)
  1. pec.py +175 -175
pec.py CHANGED
"""TODO: Add a description here."""

import os

import datasets


# TODO: Add BibTeX citation
_CITATION = """\
@inproceedings{zhong2020towards,
    title = "Towards Persona-Based Empathetic Conversational Models",
    author = "Zhong, Peixiang and
      Zhang, Chen and
      Wang, Hao and
      Liu, Yong and
      Miao, Chunyan",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    year = "2020",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.531",
    pages = "6556--6566"}
"""

# TODO: Add description of the dataset here
_DESCRIPTION = """\
A dataset of around 350K persona-based empathetic conversations. Each speaker is associated with a persona, which comprises multiple persona sentences. The response of each conversation is empathetic.
"""

_URL = "https://dl.dropboxusercontent.com/s/u04fzuhsnxd0uvw/hf_pec.zip"

# TODO: The name of the dataset usually matches the script name, with CamelCase instead of snake_case.
# Using a specific configuration class is optional; you can also use the base class if you don't need
# to add specific attributes.
# Here we give an example for three subsets of the dataset with different sizes.


class PECConfig(datasets.BuilderConfig):
    """BuilderConfig for PEC"""

    def __init__(self, domain="all", **kwargs):
        """
        Args:
            domain: the domain of our dataset: happy, offmychest, or all
            **kwargs: keyword arguments forwarded to super.
        """
        super(PECConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.domain = domain


class PEC(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.0.0")
    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several subsets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    BUILDER_CONFIG_CLASS = PECConfig
    BUILDER_CONFIGS = [
        PECConfig(name=domain, description=f"A subset of PEC dataset: {domain}", domain=domain)
        for domain in ["happy", "offmychest", "all"]
    ]

    def _info(self):
        # TODO: Specify the datasets.DatasetInfo object
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=datasets.Features(
                {
                    "personas": datasets.features.Sequence(datasets.Value("string")),
                    "context": datasets.features.Sequence(datasets.Value("string")),
                    "context_speakers": datasets.features.Sequence(datasets.Value("string")),
                    "response": datasets.Value("string"),
                    "response_speaker": datasets.Value("string"),
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://github.com/zhongpeixiang/PEC",
            citation=_CITATION,
        )

    def _load_persona(self, paths):
        persona = {}
        is_speaker = True
        sentences = []
        for path in paths:
            with open(path, encoding="utf-8") as f:
                for row in f:
                    if "********************" not in row:
                        if is_speaker:
                            speaker = row.strip()
                            is_speaker = False
                        else:
                            sentences.append(row.strip())
                    else:
                        persona[speaker] = sentences
                        is_speaker = True
                        sentences = []
        return persona
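
    # The parser above assumes persona files of the following shape, inferred
    # from the loop (the speaker id and sentences shown are hypothetical
    # samples): a speaker id on its own line, that speaker's persona
    # sentences, then a separator line of asterisks, repeated per speaker:
    #
    #   some_speaker
    #   i love hiking .
    #   i have two dogs .
    #   ********************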

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # TODO: Download the data and define the splits
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "hf_pec")
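        # Assumed layout of the extracted archive, inferred from the path
        # joins below (nothing beyond these file names is documented in the
        # script itself):
        #
        #   hf_pec/
        #       happy/        persona.txt, train.txt, valid.txt, test.txt
        #       offmychest/   persona.txt, train.txt, valid.txt, test.txt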
        domains = ["happy", "offmychest"] if self.config.domain == "all" else [self.config.domain]  # multiple domains
        persona_paths = [os.path.join(data_dir, domain, "persona.txt") for domain in domains]
        persona = self._load_persona(persona_paths)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": [os.path.join(data_dir, domain, "train.txt") for domain in domains],
                    "split": "train",
                    "persona": persona,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": [os.path.join(data_dir, domain, "test.txt") for domain in domains],
                    "split": "test",
                    "persona": persona,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": [os.path.join(data_dir, domain, "valid.txt") for domain in domains],
                    "split": "dev",
                    "persona": persona,
                },
            ),
        ]

    def _generate_examples(self, filepath, split, persona):
        """Yields examples."""
        # TODO: Yield (key, example) tuples from the dataset
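        # Assumed conversation file format, inferred from the parsing below
        # (the sample lines are hypothetical): each utterance is a
        # "speaker ---+--- text" line, a line without the separator continues
        # the previous utterance, and a line of asterisks closes a
        # conversation whose final utterance is taken as the response:
        #
        #   speaker_a ---+--- how was your day ?
        #   speaker_b ---+--- pretty rough , but thanks for asking .
        #   ********************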
        context_speakers = []
        context = []
        example_id = 0
        for fpath in filepath:
            with open(fpath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    if row.strip() == "":
                        continue
                    if "********************" not in row:
                        if "---+---" in row:
                            speaker, utterance = row.split("---+---")
                            context_speakers.append(speaker.strip())
                            context.append(utterance.strip())
                        else:
                            # the previous utterance contained an inline \n; append the continuation
                            context[-1] = context[-1] + " " + row.strip()
                    else:
                        response_speaker = context_speakers.pop()
                        response = context.pop()
                        yield example_id, {
                            "personas": persona[response_speaker],
                            "context_speakers": context_speakers,
                            "context": context,
                            "response_speaker": response_speaker,
                            "response": response,
                        }
                        context_speakers = []
                        context = []
                        example_id += 1
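
For reference, a builder script like this is consumed through the datasets library. A minimal usage sketch (the dataset id "pec" follows the script name, the config names come from BUILDER_CONFIGS above, and the output fields match the Features defined in _info):

    from datasets import load_dataset

    # Load the "happy" subset; "offmychest" and "all" are the other configs.
    dataset = load_dataset("pec", "happy")

    # Splits follow the SplitGenerators above: train, validation, test.
    print(dataset["train"][0]["response"])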