suchirsalhan
committed on
Commit
•
696a5bd
1
Parent(s):
61f0fc1
Update BabyLM.py
Browse files
BabyLM.py
CHANGED
@@ -3,15 +3,27 @@ import datasets
|
|
3 |
from typing import List
|
4 |
|
5 |
_DESCRIPTION = """\
|
6 |
-
Dataset for the
|
7 |
The goal is to train a language model from scratch on this data which represents
|
8 |
-
roughly the amount of text and speech data a young child observes.
|
|
|
9 |
"""
|
10 |
|
11 |
-
_HOMEPAGE = "https://babylm.github.io"
|
12 |
|
13 |
filenames = [
|
14 |
-
"aochildes.txt",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
"bnc_spoken.txt",
|
16 |
"cbt.txt",
|
17 |
"children_stories.txt",
|
@@ -22,6 +34,9 @@ filenames = [
|
|
22 |
"switchboard.txt",
|
23 |
"wikipedia.txt"
|
24 |
]
|
|
|
|
|
|
|
25 |
class BabyLM(datasets.GeneratorBasedBuilder):
|
26 |
|
27 |
BUILDER_CONFIGS = [
|
@@ -32,7 +47,7 @@ class BabyLM(datasets.GeneratorBasedBuilder):
|
|
32 |
),
|
33 |
datasets.BuilderConfig(
|
34 |
name="strict_small",
|
35 |
-
description="Cleaned version of the dataset, 10M words,
|
36 |
version="1.0.0",
|
37 |
),
|
38 |
datasets.BuilderConfig(
|
@@ -65,10 +80,53 @@ class BabyLM(datasets.GeneratorBasedBuilder):
|
|
65 |
description="Cleaned version of the dataset, 100M words, gold POS tags",
|
66 |
version="1.0.0",
|
67 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
]
|
69 |
|
70 |
DEFAULT_CONFIG_NAME = "strict_small"
|
71 |
|
|
|
|
|
72 |
def _info(self):
|
73 |
features = datasets.Features(
|
74 |
{
|
@@ -85,25 +143,61 @@ class BabyLM(datasets.GeneratorBasedBuilder):
|
|
85 |
)
|
86 |
|
87 |
|
|
|
|
|
|
|
88 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
89 |
"""
|
90 |
Returns data for different splits
|
91 |
"""
|
92 |
|
93 |
-
if "strict_small" in self.config.name:
|
94 |
train_data_dir = "10M"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
else:
|
96 |
train_data_dir = "100M"
|
97 |
|
98 |
-
folder = 'original_tagged' if 'original' in self.config.name else 'clean_tagged'
|
99 |
-
folder = folder + '_gold' if 'gold' in self.config.name else folder
|
100 |
-
|
|
|
|
|
|
|
101 |
urls_to_download = {
|
102 |
-
"train": [
|
103 |
-
"dev": [
|
104 |
-
"test": [
|
105 |
-
|
106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
107 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
108 |
|
109 |
return [
|
@@ -128,6 +222,7 @@ class BabyLM(datasets.GeneratorBasedBuilder):
|
|
128 |
),
|
129 |
]
|
130 |
|
|
|
131 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
132 |
def _generate_examples(self, split, filepaths):
|
133 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
@@ -155,3 +250,4 @@ class BabyLM(datasets.GeneratorBasedBuilder):
|
|
155 |
else:
|
156 |
text = row
|
157 |
is_tags = True
|
|
|
|
3 |
from typing import List
|
4 |
|
5 |
_DESCRIPTION = """\
|
6 |
+
Dataset for the BabyLM Round2: French, German, Chinese & Japanese Small-Scale LMs
|
7 |
The goal is to train a language model from scratch on this data which represents
|
8 |
+
roughly the amount of text and speech data a young child observes.
|
9 |
+
Author– Suchir Salhan
|
10 |
"""
|
11 |
|
|
|
12 |
|
13 |
filenames = [
|
14 |
+
"aochildes.txt",
|
15 |
+
"aochinese.txt",
|
16 |
+
"aochinese_dev.txt",
|
17 |
+
"aochinese_test.txt",
|
18 |
+
"aofrench.txt",
|
19 |
+
"aofrench_dev.txt",
|
20 |
+
"aofrench_test.txt",
|
21 |
+
"aogerman.txt",
|
22 |
+
"aogerman_dev.txt",
|
23 |
+
"aogerman_test.txt",
|
24 |
+
"aojapanese.txt",
|
25 |
+
"aojapanese_dev.txt",
|
26 |
+
"aojapanese_test.txt",
|
27 |
"bnc_spoken.txt",
|
28 |
"cbt.txt",
|
29 |
"children_stories.txt",
|
|
|
34 |
"switchboard.txt",
|
35 |
"wikipedia.txt"
|
36 |
]
|
37 |
+
|
38 |
+
#Suchir Salhan– addition of French, German, Japanese and Chinese dataset BUILDER_CONFIGS
|
39 |
+
|
40 |
class BabyLM(datasets.GeneratorBasedBuilder):
|
41 |
|
42 |
BUILDER_CONFIGS = [
|
|
|
47 |
),
|
48 |
datasets.BuilderConfig(
|
49 |
name="strict_small",
|
50 |
+
description="Cleaned version of the dataset, 10M words, no POS tags",
|
51 |
version="1.0.0",
|
52 |
),
|
53 |
datasets.BuilderConfig(
|
|
|
80 |
description="Cleaned version of the dataset, 100M words, gold POS tags",
|
81 |
version="1.0.0",
|
82 |
),
|
83 |
+
datasets.BuilderConfig(
|
84 |
+
name="fr_lang_strict_small", #FRENCH
|
85 |
+
description="FRENCH Cleaned version of the dataset, 10M words, unsupervised POS tags",
|
86 |
+
version="1.0.0",
|
87 |
+
),
|
88 |
+
datasets.BuilderConfig(
|
89 |
+
name="ja_lang_strict_small",
|
90 |
+
description="JAPANESE Cleaned version of the dataset, 10M words, unsupervised POS tags",
|
91 |
+
version="1.0.0",
|
92 |
+
),
|
93 |
+
datasets.BuilderConfig(
|
94 |
+
name="zh_lang_strict_small",
|
95 |
+
description="CHINESE Cleaned version of the dataset, 10M words, unsupervised POS tags",
|
96 |
+
version="1.0.0",
|
97 |
+
),
|
98 |
+
datasets.BuilderConfig(
|
99 |
+
name="de_lang_strict_small",
|
100 |
+
description="GERMAN Cleaned version of the dataset, 10M words, unsupervised POS tags",
|
101 |
+
version="1.0.0",
|
102 |
+
),
|
103 |
+
|
104 |
+
datasets.BuilderConfig(
|
105 |
+
name="fr_lang_strict_gold",
|
106 |
+
description="FRENCH Cleaned version of the dataset, 100M words, gold POS tags",
|
107 |
+
version="1.0.0",
|
108 |
+
),
|
109 |
+
datasets.BuilderConfig(
|
110 |
+
name="ja_lang_strict_gold",
|
111 |
+
description="JAPANESE Cleaned version of the dataset, 100M words, gold POS tags",
|
112 |
+
version="1.0.0",
|
113 |
+
),
|
114 |
+
datasets.BuilderConfig(
|
115 |
+
name="de_lang_strict_gold",
|
116 |
+
description="GERMAN Cleaned version of the dataset, 100M words, gold POS tags",
|
117 |
+
version="1.0.0",
|
118 |
+
),
|
119 |
+
datasets.BuilderConfig(
|
120 |
+
name="zh_lang_strict_gold",
|
121 |
+
description="CHINESE Cleaned version of the dataset, 100M words, gold POS tags",
|
122 |
+
version="1.0.0",
|
123 |
+
),
|
124 |
]
|
125 |
|
126 |
DEFAULT_CONFIG_NAME = "strict_small"
|
127 |
|
128 |
+
|
129 |
+
|
130 |
def _info(self):
|
131 |
features = datasets.Features(
|
132 |
{
|
|
|
143 |
)
|
144 |
|
145 |
|
146 |
+
#Suchir Salhan– addition of French, German, Japanese and Chinese datasets
|
147 |
+
|
148 |
+
|
149 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
150 |
"""
|
151 |
Returns data for different splits
|
152 |
"""
|
153 |
|
154 |
+
if "strict_small" in self.config.name and "_lang_" not in self.config.name: #default settings – English; substring test must not swallow the fr/de/zh/ja configs below
|
155 |
train_data_dir = "10M"
|
156 |
+
elif "fr_lang_strict_small" in self.config.name:
|
157 |
+
train_data_dir = "FR"
|
158 |
+
elif "de_lang_strict_small" in self.config.name:
|
159 |
+
train_data_dir = "DE"
|
160 |
+
elif "zh_lang_strict_small" in self.config.name:
|
161 |
+
train_data_dir = "ZH"
|
162 |
+
elif "ja_lang_strict_small" in self.config.name:
|
163 |
+
train_data_dir = "JA"
|
164 |
else:
|
165 |
train_data_dir = "100M"
|
166 |
|
167 |
+
folder = 'original_tagged' if 'original' in self.config.name else 'clean_tagged' #
|
168 |
+
folder = folder + '_gold' if 'gold' in self.config.name else folder #gold tags for french, german, japanese and chinese
|
169 |
+
|
170 |
+
|
171 |
+
#modified urls to download
|
172 |
+
|
173 |
urls_to_download = {
|
174 |
+
"train": [],
|
175 |
+
"dev": [],
|
176 |
+
"test": []
|
177 |
+
}
|
178 |
|
179 |
+
if 'fr_lang_strict_small' in self.config.name:
|
180 |
+
urls_to_download["train"].append(f"{folder}/{train_data_dir}/aofrench.txt")
|
181 |
+
urls_to_download["dev"].append(f"{folder}/dev/aofrench_dev.txt")
|
182 |
+
urls_to_download["test"].append(f"{folder}/test/aofrench_test.txt")
|
183 |
+
elif 'de_lang_strict_small' in self.config.name:
|
184 |
+
urls_to_download["train"].append(f"{folder}/{train_data_dir}/aogerman.txt")
|
185 |
+
urls_to_download["dev"].append(f"{folder}/dev/aogerman_dev.txt")
|
186 |
+
urls_to_download["test"].append(f"{folder}/test/aogerman_test.txt")
|
187 |
+
elif 'zh_lang_strict_small' in self.config.name:
|
188 |
+
urls_to_download["train"].append(f"{folder}/{train_data_dir}/aochinese.txt")
|
189 |
+
urls_to_download["dev"].append(f"{folder}/dev/aochinese_dev.txt")
|
190 |
+
urls_to_download["test"].append(f"{folder}/test/aochinese_test.txt")
|
191 |
+
elif 'ja_lang_strict_small' in self.config.name:
|
192 |
+
urls_to_download["train"].append(f"{folder}/{train_data_dir}/aojapanese.txt")
|
193 |
+
urls_to_download["dev"].append(f"{folder}/dev/aojapanese_dev.txt")
|
194 |
+
urls_to_download["test"].append(f"{folder}/test/aojapanese_test.txt")
|
195 |
+
else:
|
196 |
+
urls_to_download["train"] = [f"{folder}/{train_data_dir}/{fn}" for fn in filenames]
|
197 |
+
urls_to_download["dev"] = [f"{folder}/dev/{fn}" for fn in filenames]
|
198 |
+
urls_to_download["test"] = [f"{folder}/test/{fn}" for fn in filenames]
|
199 |
+
|
200 |
+
|
201 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
202 |
|
203 |
return [
|
|
|
222 |
),
|
223 |
]
|
224 |
|
225 |
+
|
226 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
227 |
def _generate_examples(self, split, filepaths):
|
228 |
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
|
|
250 |
else:
|
251 |
text = row
|
252 |
is_tags = True
|
253 |
+
|