patrickvonplaten committed on
Commit
52783d2
1 Parent(s): bafe1b7
de_head_0000_2015-49.txt.gz → dummy.txt.gz RENAMED
File without changes
german_common_crawl.py CHANGED
@@ -34,13 +34,14 @@ _CITATION = """\
34
  _DESCRIPTION = """\
35
  German Only Extract from Common Crawl
36
 
37
- This Dataset is for pretraining a German Language Model (Unsupervised) or tune a Multilingual Model specifically to German
38
  """
39
 
40
  REPO_URL = "https://huggingface.co/datasets/flax-community/german_common_crawl/resolve/main/"
41
 
42
  _URL_FIRST = [REPO_URL + file_name for file_name in [
43
- "de_head_0000_2015-48.txt.gz",
 
44
  ]]
45
 
46
  #TODO convert & upload all those files correctly
@@ -153,12 +154,11 @@ _URL_HEAD = [REPO_URL + file_name for file_name in [
153
  _URL_MIDDLE = [REPO_URL + file_name for file_name in [
154
  ]]
155
 
 
156
  class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
157
  """TODO: Short description of my dataset."""
158
 
159
  VERSION = datasets.Version("1.1.0")
160
-
161
-
162
  BUILDER_CONFIGS = [
163
  datasets.BuilderConfig(name="first", version=VERSION, description="Only the first data file"),
164
  datasets.BuilderConfig(name="head", version=VERSION, description=""), #TODO fill description
@@ -200,16 +200,16 @@ class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
200
  )
201
 
202
  def _split_generators(self, dl_manager):
203
- """Returns SplitGenerators."""
204
- if self.config.name == "first":
205
- data_files = dl_manager.download(_URL_FIRST)
206
  elif self.config.name == "head":
207
- data_files = dl_manager.download(_URL_HEAD)
208
  elif self.config.name == "middle":
209
- data_files = dl_manager.download(_URL_MIDDLE)
210
  else:
211
- data_files = dl_manager.download(_URL_HEAD + _URL_MIDDLE)
212
-
213
  return [
214
  datasets.SplitGenerator(
215
  name=datasets.Split.TRAIN,
@@ -221,9 +221,8 @@ class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
221
 
222
  def _generate_examples(self, data_files):
223
  """This function returns the examples in the raw (text) form by iterating on all the files."""
224
- for filepath in data_files:
225
  with gzip.open(filepath, "rt", encoding="utf-8") as f:
226
- import ipdb; ipdb.set_trace()
227
  for id_, line in enumerate(f):
228
  item = literal_eval(line)
229
  yield id_, {
@@ -239,4 +238,7 @@ class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
239
  "original_nlines": item["original_nlines"],
240
  "original_length": item["original_length"],
241
  "language": item["language"],
 
 
 
242
  }
 
34
  _DESCRIPTION = """\
35
  German Only Extract from Common Crawl
36
 
37
+ This Dataset is for pretraining a German Language Model (Unsupervised) or tune a Multilingual Model specifically to German
38
  """
39
 
40
  REPO_URL = "https://huggingface.co/datasets/flax-community/german_common_crawl/resolve/main/"
41
 
42
  _URL_FIRST = [REPO_URL + file_name for file_name in [
43
+ # "de_head_0000_2015-48.txt.gz",
44
+ "dummy.txt.gz",
45
  ]]
46
 
47
  #TODO convert & upload all those files correctly
 
154
  _URL_MIDDLE = [REPO_URL + file_name for file_name in [
155
  ]]
156
 
157
+
158
  class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
159
  """TODO: Short description of my dataset."""
160
 
161
  VERSION = datasets.Version("1.1.0")
 
 
162
  BUILDER_CONFIGS = [
163
  datasets.BuilderConfig(name="first", version=VERSION, description="Only the first data file"),
164
  datasets.BuilderConfig(name="head", version=VERSION, description=""), #TODO fill description
 
200
  )
201
 
202
  def _split_generators(self, dl_manager):
203
+ """Returns SplitGenerators."""
204
+ if self.config.name == "first":
205
+ data_files = dl_manager.download(_URL_FIRST)
206
  elif self.config.name == "head":
207
+ data_files = dl_manager.download(_URL_HEAD)
208
  elif self.config.name == "middle":
209
+ data_files = dl_manager.download(_URL_MIDDLE)
210
  else:
211
+ data_files = dl_manager.download(_URL_HEAD + _URL_MIDDLE)
212
+
213
  return [
214
  datasets.SplitGenerator(
215
  name=datasets.Split.TRAIN,
 
221
 
222
  def _generate_examples(self, data_files):
223
  """This function returns the examples in the raw (text) form by iterating on all the files."""
224
+ for filepath in data_files:
225
  with gzip.open(filepath, "rt", encoding="utf-8") as f:
 
226
  for id_, line in enumerate(f):
227
  item = literal_eval(line)
228
  yield id_, {
 
238
  "original_nlines": item["original_nlines"],
239
  "original_length": item["original_length"],
240
  "language": item["language"],
241
+ "language_score": item["language_score"],
242
+ "perplexity": item["perplexity"],
243
+ "bucket": item["bucket"],
244
  }