patrickvonplaten committed on
Commit 30d835f
1 Parent(s): d617661
convert_file.sh ADDED
@@ -0,0 +1,11 @@
+ #!/usr/bin/env bash
+ file_name=${1}
+
+ local_file_name=$(echo "${file_name}" | cut -f8 -d/)
+ bare_local_file_name=$(echo "${local_file_name}" | cut -f1 -d.)
+
+ wget ${file_name}
+
+ ./write_to_filtered_file.py ${local_file_name}
+
+ gzip -c "${bare_local_file_name}.txt" > "${bare_local_file_name}.txt.gz"
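For reference, the helper above boils down to three steps: download the raw dump, filter it with write_to_filtered_file.py, and re-compress the filtered text. A minimal Python sketch of the same pipeline, assuming the filter script is executable in the working directory (the URL and intermediate file names below are illustrative, not part of the commit):

import gzip
import shutil
import subprocess
import urllib.request

url = "https://huggingface.co/datasets/flax-community/german_common_crawl/resolve/main/de_head_0000_2015-48.txt.gz"  # example input
local_name = url.split("/")[-1]       # e.g. de_head_0000_2015-48.txt.gz
bare_name = local_name.split(".")[0]  # e.g. de_head_0000_2015-48

urllib.request.urlretrieve(url, local_name)                               # wget ${file_name}
subprocess.run(["./write_to_filtered_file.py", local_name], check=True)   # writes the filtered .txt file
with open(bare_name + ".txt", "rb") as src, gzip.open(bare_name + ".txt.gz", "wb") as dst:
    shutil.copyfileobj(src, dst)                                          # gzip -c ... > ....txt.gz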
de_head_0000_2015-48.txt.gz CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:07d753f6a078ca3274a5b08620118a043597a25f71021ab8b28185322fc7ee52
- size 1229259841
+ oid sha256:f81d28cbb4c1bfaa26699540c498edaf56ca642cb74d3cd5aaad44b71ce53650
+ size 1066575032
german_common_crawl.py CHANGED
@@ -15,10 +15,10 @@
  """German Common Crawl"""
 
  from __future__ import absolute_import, division, print_function
-
- import os
-
  import datasets
+ import itertools
+ import gzip
+ from ast import literal_eval
 
 
  # Find for instance the citation on arxiv or on the dataset repo/website
@@ -38,114 +38,116 @@ German Only Extract from Common Crawl
  This Dataset is for pretraining a German Language Model (Unsupervised) or tune a Multilingual Model specifically to German
  """
 
- _URL_FIRST = [
- "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2015-48.txt.gz",
- ]
-
- _URL_HEAD = [
- "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2015-48.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2016-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2016-44.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2017-13.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2017-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2017-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0000_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2016-44.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2017-13.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2017-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2017-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0001_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2016-44.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2017-13.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2017-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2017-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0002_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2016-44.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2017-13.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2017-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2017-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0003_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2016-44.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2017-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2017-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0004_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2017-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0005_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2018-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2018-17.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2018-39.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2019-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0006_2020-10.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2018-30.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2018-51.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2019-09.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2019-18.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2019-47.txt.gz",
- # "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/flax-community/german_common_crawl/de_head_0007_2020-10.txt.gz",
- ]
+ REPO_URL = "https://huggingface.co/datasets/flax-community/german_common_crawl/resolve/main/"
+
+ _URL_FIRST = [REPO_URL + file_name for file_name in [
+ "de_head_0000_2015-48.txt.gz",
+ ]]
+
+ _URL_HEAD = [REPO_URL + file_name for file_name in [
+ "de_head_0000_2015-48.txt.gz",
+ # "de_head_0000_2016-18.txt.gz",
+ # "de_head_0000_2016-44.txt.gz",
+ # "de_head_0000_2017-13.txt.gz",
+ # "de_head_0000_2017-30.txt.gz",
+ # "de_head_0000_2017-39.txt.gz",
+ # "de_head_0000_2017-51.txt.gz",
+ # "de_head_0000_2018-09.txt.gz",
+ # "de_head_0000_2018-17.txt.gz",
+ # "de_head_0000_2018-30.txt.gz",
+ # "de_head_0000_2018-39.txt.gz",
+ # "de_head_0000_2018-51.txt.gz",
+ # "de_head_0000_2019-18.txt.gz",
+ # "de_head_0000_2019-30.txt.gz",
+ # "de_head_0000_2019-47.txt.gz",
+ # "de_head_0000_2020-10.txt.gz",
+ # "de_head_0001_2016-44.txt.gz",
+ # "de_head_0001_2017-13.txt.gz",
+ # "de_head_0001_2017-30.txt.gz",
+ # "de_head_0001_2017-39.txt.gz",
+ # "de_head_0001_2017-51.txt.gz",
+ # "de_head_0001_2018-09.txt.gz",
+ # "de_head_0001_2018-17.txt.gz",
+ # "de_head_0001_2018-30.txt.gz",
+ # "de_head_0001_2018-39.txt.gz",
+ # "de_head_0001_2018-51.txt.gz",
+ # "de_head_0001_2019-09.txt.gz",
+ # "de_head_0001_2019-18.txt.gz",
+ # "de_head_0001_2019-30.txt.gz",
+ # "de_head_0001_2019-47.txt.gz",
+ # "de_head_0001_2020-10.txt.gz",
+ # "de_head_0002_2016-44.txt.gz",
+ # "de_head_0002_2017-13.txt.gz",
+ # "de_head_0002_2017-30.txt.gz",
+ # "de_head_0002_2017-39.txt.gz",
+ # "de_head_0002_2017-51.txt.gz",
+ # "de_head_0002_2018-09.txt.gz",
+ # "de_head_0002_2018-17.txt.gz",
+ # "de_head_0002_2018-30.txt.gz",
+ # "de_head_0002_2018-39.txt.gz",
+ # "de_head_0002_2018-51.txt.gz",
+ # "de_head_0002_2019-09.txt.gz",
+ # "de_head_0002_2019-18.txt.gz",
+ # "de_head_0002_2019-30.txt.gz",
+ # "de_head_0002_2019-47.txt.gz",
+ # "de_head_0002_2020-10.txt.gz",
+ # "de_head_0003_2016-44.txt.gz",
+ # "de_head_0003_2017-13.txt.gz",
+ # "de_head_0003_2017-30.txt.gz",
+ # "de_head_0003_2017-39.txt.gz",
+ # "de_head_0003_2017-51.txt.gz",
+ # "de_head_0003_2018-09.txt.gz",
+ # "de_head_0003_2018-17.txt.gz",
+ # "de_head_0003_2018-30.txt.gz",
+ # "de_head_0003_2018-39.txt.gz",
+ # "de_head_0003_2018-51.txt.gz",
+ # "de_head_0003_2019-09.txt.gz",
+ # "de_head_0003_2019-18.txt.gz",
+ # "de_head_0003_2019-30.txt.gz",
+ # "de_head_0003_2019-47.txt.gz",
+ # "de_head_0003_2020-10.txt.gz",
+ # "de_head_0004_2016-44.txt.gz",
+ # "de_head_0004_2017-30.txt.gz",
+ # "de_head_0004_2017-39.txt.gz",
+ # "de_head_0004_2017-51.txt.gz",
+ # "de_head_0004_2018-09.txt.gz",
+ # "de_head_0004_2018-17.txt.gz",
+ # "de_head_0004_2018-30.txt.gz",
+ # "de_head_0004_2018-39.txt.gz",
+ # "de_head_0004_2018-51.txt.gz",
+ # "de_head_0004_2019-09.txt.gz",
+ # "de_head_0004_2019-18.txt.gz",
+ # "de_head_0004_2019-30.txt.gz",
+ # "de_head_0004_2019-47.txt.gz",
+ # "de_head_0004_2020-10.txt.gz",
+ # "de_head_0005_2017-51.txt.gz",
+ # "de_head_0005_2018-09.txt.gz",
+ # "de_head_0005_2018-17.txt.gz",
+ # "de_head_0005_2018-30.txt.gz",
+ # "de_head_0005_2018-39.txt.gz",
+ # "de_head_0005_2018-51.txt.gz",
+ # "de_head_0005_2019-09.txt.gz",
+ # "de_head_0005_2019-18.txt.gz",
+ # "de_head_0005_2019-30.txt.gz",
+ # "de_head_0005_2019-47.txt.gz",
+ # "de_head_0005_2020-10.txt.gz",
+ # "de_head_0006_2018-09.txt.gz",
+ # "de_head_0006_2018-17.txt.gz",
+ # "de_head_0006_2018-30.txt.gz",
+ # "de_head_0006_2018-39.txt.gz",
+ # "de_head_0006_2018-51.txt.gz",
+ # "de_head_0006_2019-09.txt.gz",
+ # "de_head_0006_2019-18.txt.gz",
+ # "de_head_0006_2019-30.txt.gz",
+ # "de_head_0006_2019-47.txt.gz",
+ # "de_head_0006_2020-10.txt.gz",
+ # "de_head_0007_2018-30.txt.gz",
+ # "de_head_0007_2018-51.txt.gz",
+ # "de_head_0007_2019-09.txt.gz",
+ # "de_head_0007_2019-18.txt.gz",
+ # "de_head_0007_2019-47.txt.gz",
+ # "de_head_0007_2020-10.txt.gz",
+ ]]
 
  class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
  """TODO: Short description of my dataset."""
@@ -161,33 +163,25 @@ class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
  ]
 
  def _info(self):
- import ipdb; ipdb.set_trace()
- if self.config.name == "data_only": # This is the name of the configuration selected in BUILDER_CONFIGS above
- features = datasets.Features(
- {
- "raw_content": datasets.Value("string"),
- }
- )
- else: # This is an example to show how to have different features for "first_domain" and "second_domain"
- features = datasets.Features(
- {
- "text": datasets.Value("string"),
- "url": datasets.Value("string"),
- "digest": datasets.Value("string"),
- "length": datasets.Value("int32"),
- "nlines": datasets.Value("int32"),
- "source_domain": datasets.Value("string"),
- "title": datasets.Value("string"),
- "raw_content": datasets.Value("string"),
- "cc_segment": datasets.Value("string"),
- "original_nlines": datasets.Value("int32"),
- "original_length": datasets.Value("int32"),
- "language": datasets.Value("string"),
- "perplexity": datasets.Value("int32"),
- "bucket": datasets.Value("int32"),
-
- }
- )
+ features = datasets.Features(
+ {
+ "url": datasets.Value("string"),
+ "date_download": datasets.Value("string"),
+ "digest": datasets.Value("string"),
+ "length": datasets.Value("int32"),
+ "nlines": datasets.Value("int32"),
+ "source_domain": datasets.Value("string"),
+ "title": datasets.Value("string"),
+ "raw_content": datasets.Value("string"),
+ "cc_segment": datasets.Value("string"),
+ "original_nlines": datasets.Value("int32"),
+ "original_length": datasets.Value("int32"),
+ "language": datasets.Value("string"),
+ "language_score": datasets.Value("int32"),
+ "perplexity": datasets.Value("int32"),
+ "bucket": datasets.Value("string"),
+ }
+ )
  return datasets.DatasetInfo(
  # This is the description that will appear on the datasets page.
  description=_DESCRIPTION,
@@ -203,41 +197,32 @@ class GermanCommonCrawl(datasets.GeneratorBasedBuilder):
 
  def _split_generators(self, dl_manager):
  """Returns SplitGenerators."""
- if self.config.name == "head":
- data_dir = dl_manager.download_and_extract(_URL_HEAD)
+ if self.config.name == "first":
+ data_files = dl_manager.download(_URL_FIRST)
  else:
- raise NotImplementedError("just `head` works for now")
+ raise NotImplementedError("just `first` works for now")
 
- import ipdb; ipdb.set_trace()
  return [
  datasets.SplitGenerator(
  name=datasets.Split.TRAIN,
- # These kwargs will be passed to _generate_examples
  gen_kwargs={
- "folderpath": data_dir,
- "split": "train",
+ "data_files": data_files,
  },
  ),
  ]
 
- def _generate_examples(self, folderpath, split):
- """ Yields examples. """
- import ipdb; ipdb.set_trace()
-
- files = os.listdir(folderpath)
-
- if self.config == "first_part":
- files = os.path.join(folderpath, files[0])
- else:
- files = [os.path.join(folderpath, file) for file in files]
-
- for filepath in files:
- with gzip.open(filepath, 'rt', encoding="utf-8") as f:
- for id_, row in enumerate(f):
- data = eval(row)
- if self.config.name == "data_only":
- yield id_, {
- "raw_content": data["raw_content"],
- }
- else:
- yield id_, data
+ def _generate_examples(self, data_files):
+ """This function returns the examples in the raw (text) form by iterating on all the files."""
+ for filepath in data_files:
+ with gzip.open(filepath, "rt", encoding="utf-8") as f:
+ for id_, line in enumerate(f):
+ item = literal_eval(line)
+ import ipdb; ipdb.set_trace()
+ yield id_, {
+
+
+
+
+
+
+ }
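Note that the committed _generate_examples still carries a leftover "import ipdb; ipdb.set_trace()" call and an empty yield body. A minimal sketch of how the loop could be completed, reusing the gzip, itertools, and literal_eval imports added in this commit and assuming each gzipped line is a dict literal whose keys include the features declared in _info (this completion is an assumption, not part of the commit):

    def _generate_examples(self, data_files):
        """Yields one example per parsed line, across all downloaded files."""
        key = itertools.count()  # running key so example ids stay unique across files
        for filepath in data_files:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    item = literal_eval(line)  # each line holds one Python dict literal
                    # Assumption: keep exactly the fields declared in _info's features.
                    yield next(key), {name: item.get(name) for name in (
                        "url", "date_download", "digest", "length", "nlines",
                        "source_domain", "title", "raw_content", "cc_segment",
                        "original_nlines", "original_length", "language",
                        "language_score", "perplexity", "bucket",
                    )}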
 
 
 
 
 
 
german_common_crawl.py.lock ADDED
File without changes
write_to_filtered_file.py ADDED
@@ -0,0 +1,25 @@
+ #!/usr/bin/env python3
+ import gzip
+ from ast import literal_eval
+ from tqdm import tqdm
+ import sys
+
+ if __name__ == '__main__':
+ file_name = sys.argv[1]
+ new_file_name = ".".join(file_name.split(".")[:1] + ["txt"])
+ with gzip.open(file_name, 'rt') as f:
+ a = f.readline()
+ a = a.split("{'url'")
+ a = [("{'url'" + item) for item in a]
+
+ b = []
+ for item in tqdm(a):
+ try:
+ if literal_eval(item)['language_score'] > 0.98:
+ b.append(item)
+ except:
+ None
+
+ with open(new_file_name, 'wt') as file_new:
+ for part in b:
+ file_new.write(part + '\n')