system (HF staff) committed
Commit: 9c5cfab
1 parent: bf5dc87

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2)
  1. README.md +1 -0
  2. blended_skill_talk.py +50 -40
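
The substantive change in blended_skill_talk.py swaps eager extraction (download_and_extract plus os.path.join) for the lazy archive-iteration pattern that datasets 1.16.0 rolled out across dataset scripts: the archive is only downloaded, and dl_manager.iter_archive(archive) yields (path-inside-archive, file object) pairs that _generate_examples filters by name. A minimal sketch of that pattern, with a hypothetical helper name (read_split) standing in for the script's method:

import json

def read_split(filepath, files):
    # `filepath` is a member name such as "train.json"; `files` is the
    # (path, file object) iterator produced by dl_manager.iter_archive(archive).
    for path, f in files:
        if path == filepath:
            # f is a file-like object read straight out of the archive;
            # json.load accepts it without any extraction to disk.
            data = json.load(f)
            for id_, row in enumerate(data):
                yield id_, row
            break  # each split gets a fresh iterator, so stop after our file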
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: BlendedSkillTalk
 languages:
 - en
 paperswithcode_id: blended-skill-talk
blended_skill_talk.py CHANGED
@@ -2,7 +2,6 @@


 import json
-import os

 import datasets

@@ -66,60 +65,71 @@ class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
         # TODO(blended_skill_talk): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        data_dir = dl_manager.download_and_extract(_URL)
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
+                gen_kwargs={
+                    "filepath": "train.json",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "valid.json")},
+                gen_kwargs={
+                    "filepath": "valid.json",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
+                gen_kwargs={
+                    "filepath": "test.json",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]

-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
         # TODO(blended_skill_talk): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for id_, row in enumerate(data):
-                personas = [row["personas"][1][0], row["personas"][1][1]]
-                dialogs = [dialog[1] for dialog in row["dialog"]]
-                free_messages = []
-                guided_messages = []
+        for path, f in files:
+            if path == filepath:
+                data = json.load(f)
+                for id_, row in enumerate(data):
+                    personas = [row["personas"][1][0], row["personas"][1][1]]
+                    dialogs = [dialog[1] for dialog in row["dialog"]]
+                    free_messages = []
+                    guided_messages = []

-                for i in range(len(dialogs) // 2):
-                    free_messages.append(dialogs[2 * i])
-                    guided_messages.append(dialogs[2 * i + 1])
-                context = row["context_dataset"]
-                add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
-                previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
-                suggestions = row["suggestions"]
-                convai_suggestions = []
-                empathetic_suggestions = []
-                wow_suggestions = []
-                for i in range(len(suggestions) // 2):
-                    convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
-                    empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
-                    wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
-                yield id_, {
-                    "personas": personas,
-                    "additional_context": add_context,
-                    "previous_utterance": previous_utterance,
-                    "context": context,
-                    "free_messages": free_messages,
-                    "guided_messages": guided_messages,
-                    "suggestions": {
-                        "convai2": convai_suggestions,
-                        "empathetic_dialogues": empathetic_suggestions,
-                        "wizard_of_wikipedia": wow_suggestions,
-                    },
-                }
+                    for i in range(len(dialogs) // 2):
+                        free_messages.append(dialogs[2 * i])
+                        guided_messages.append(dialogs[2 * i + 1])
+                    context = row["context_dataset"]
+                    add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
+                    previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
+                    suggestions = row["suggestions"]
+                    convai_suggestions = []
+                    empathetic_suggestions = []
+                    wow_suggestions = []
+                    for i in range(len(suggestions) // 2):
+                        convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
+                        empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
+                        wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
+                    yield id_, {
+                        "personas": personas,
+                        "additional_context": add_context,
+                        "previous_utterance": previous_utterance,
+                        "context": context,
+                        "free_messages": free_messages,
+                        "guided_messages": guided_messages,
+                        "suggestions": {
+                            "convai2": convai_suggestions,
+                            "empathetic_dialogues": empathetic_suggestions,
+                            "wizard_of_wikipedia": wow_suggestions,
+                        },
+                    }
+                break
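
Because _generate_examples now consumes file objects yielded from the archive instead of extracted paths, the script works in streaming mode as well as in the classic download mode. A usage sketch (not part of the commit; assumes datasets >= 1.16.0 and access to the hosted data):

from datasets import load_dataset

# Classic mode: download the archive once, then build the full split.
train = load_dataset("blended_skill_talk", split="train")

# Streaming mode: examples are yielded lazily while the archive is iterated,
# which is exactly what the download + iter_archive pattern above enables.
stream = load_dataset("blended_skill_talk", split="train", streaming=True)
first = next(iter(stream))
print(first["personas"], first["context"])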