Datasets: yhavinga/mc4_nl_cleaned

Yeb Havinga committed • Commit 1146e77 • 1 Parent(s): 69c5645

Reformat and change # validation files

Browse files: mc4_nl_cleaned.py (+20 -12)

mc4_nl_cleaned.py CHANGED
@@ -51,9 +51,13 @@ _LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
 
 _BASE_URL = "https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned/resolve/main/mc4_nl_cleaned/{split}/c4-nl-cleaned.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
 
-_CONFIGS = dict(
-    …
-)
+_CONFIGS = dict(
+    tiny={"train": 100, "validation": 1},
+    small={"train": 250, "validation": 2},
+    medium={"train": 500, "validation": 2},
+    large={"train": 750, "validation": 3},
+    full={"train": 1024, "validation": 4},
+)
 
 
 class Mc4NlCleanedConfig(datasets.BuilderConfig):
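Each shard URL is produced by formatting _BASE_URL. A minimal sketch of one expansion, assuming the n_shards field in the filename is the fixed total for the split (1024 train / 4 validation, per the "full" entry) while the config only caps how many indices are fetched:

    # Expand the first validation shard URL (assumption: n_shards is the
    # fixed per-split total, not the per-config count).
    url = _BASE_URL.format(split="validation", index=0, n_shards=4)
    # -> ".../mc4_nl_cleaned/validation/c4-nl-cleaned.tfrecord-00000-of-00004.json.gz"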
@@ -79,7 +83,7 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 A tiny cleaned version of the Dutch portion of the multilingual C4 corpus.
                 Estimated size of compressed files: 10GB
                 """
-            )
+            ),
         ),
         Mc4NlCleanedConfig(
             name="small",
@@ -89,7 +93,7 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 A small cleaned version of the Dutch portion of the multilingual C4 corpus.
                 Estimated size of compressed files: 25GB
                 """
-            )
+            ),
         ),
         Mc4NlCleanedConfig(
             name="medium",
@@ -99,7 +103,7 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 A medium cleaned version of the Dutch portion of the multilingual C4 corpus.
                 Estimated size of compressed files: 50GB
                 """
-            )
+            ),
         ),
         Mc4NlCleanedConfig(
             name="large",
@@ -109,7 +113,7 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 A large cleaned version of the Dutch portion of the multilingual C4 corpus.
                 Estimated size of compressed files: 75GB
                 """
-            )
+            ),
         ),
         Mc4NlCleanedConfig(
             name="full",
@@ -119,8 +123,8 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 The full cleaned version of the Dutch portion of the multilingual C4 corpus.
                 Estimated size of compressed files: 103GB
                 """
-            )
-        )
+            ),
+        ),
     ]
 
     def _info(self):
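These five configs trade corpus size for download cost, from roughly 10GB compressed for "tiny" up to 103GB for "full" per the docstrings above. Loading one of them with the standard datasets API looks like:

    from datasets import load_dataset

    # "tiny" downloads the smallest slice (~10GB compressed per the docstring).
    ds = load_dataset("yhavinga/mc4_nl_cleaned", "tiny")

    # For "full" (~103GB), streaming avoids downloading everything up front.
    ds_stream = load_dataset("yhavinga/mc4_nl_cleaned", "full", streaming=True)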
@@ -153,9 +157,13 @@ class Mc4(datasets.GeneratorBasedBuilder):
         train_downloaded_files = dl_manager.download(data_urls["train"])
         validation_downloaded_files = dl_manager.download(data_urls["validation"])
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepaths": train_downloaded_files},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepaths": validation_downloaded_files},
             ),
         ]
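The data_urls mapping consumed above is presumably built earlier in _split_generators from _CONFIGS and _BASE_URL; a hypothetical sketch of that step (the exact shape is an assumption, not part of this diff):

    # Hypothetical reconstruction: build per-split URL lists from the shard
    # counts of the active config. shard_totals is assumed to be the fixed
    # filename totals (see the "full" entry in _CONFIGS).
    shard_totals = {"train": 1024, "validation": 4}
    counts = _CONFIGS[self.config.name]  # e.g. {"train": 100, "validation": 1} for "tiny"
    data_urls = {
        split: [
            _BASE_URL.format(split=split, index=index, n_shards=shard_totals[split])
            for index in range(counts[split])
        ]
        for split in ("train", "validation")
    }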
@@ -169,4 +177,4 @@ class Mc4(datasets.GeneratorBasedBuilder):
                 if line:
                     example = json.loads(line)
                     yield id_, example
-                    id_ += 1
+                    id_ += 1
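The loop above shows that each shard is gzipped JSON Lines, one example per line. A small standalone sketch for inspecting a downloaded shard (the local filename is illustrative):

    import gzip
    import json

    # Print the first three examples of one shard (path is illustrative).
    with gzip.open("c4-nl-cleaned.tfrecord-00000-of-00004.json.gz", "rt", encoding="utf-8") as f:
        for id_, line in zip(range(3), f):
            example = json.loads(line)
            print(id_, example.get("text", "")[:80])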