---
language:
- nl
size_categories:
- 10B<n<100B
task_categories:
- text-generation
- text2text-generation
pretty_name: Filtered CulturaX + Wikipedia for Dutch
dataset_info:
- config_name: 100M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 738455828.5851797
    num_examples: 1018200
  - name: test
    num_bytes: 7458534.414820259
    num_examples: 10284
  download_size: 411183119
  dataset_size: 745914363.0
- config_name: 100k
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 745955.3074739829
    num_examples: 1047
  - name: test
    num_bytes: 7124.692526017029
    num_examples: 10
  download_size: 366788
  dataset_size: 753080.0
- config_name: 10B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 66539945646.34457
    num_examples: 40176566
  - name: test
    num_bytes: 105996030.65543362
    num_examples: 64000
  download_size: 42132184504
  dataset_size: 66645941677.0
- config_name: 10M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 76734151.72157606
    num_examples: 139851
  - name: test
    num_bytes: 774743.2784239326
    num_examples: 1412
  download_size: 37995388
  dataset_size: 77508895.0
- config_name: 10k
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 72048.30379746835
    num_examples: 78
  - name: test
    num_bytes: 5896
    num_examples: 1
  download_size: 47197
  dataset_size: 77944.30379746835
- config_name: 15B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 99730049355.25276
    num_examples: 59584123
  - name: test
    num_bytes: 107121206.74724333
    num_examples: 64000
  download_size: 63139415312
  dataset_size: 99837170562.0
- config_name: 1B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 6797502496.392602
    num_examples: 5102360
  - name: test
    num_bytes: 68660322.60739774
    num_examples: 51538
  download_size: 4260450464
  dataset_size: 6866162819.0
- config_name: 1M
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 7442665.619329753
    num_examples: 10694
  - name: test
    num_bytes: 75164.38067024625
    num_examples: 108
  download_size: 3845466
  dataset_size: 7517830.0
- config_name: 20B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 132920704365.75093
    num_examples: 78991679
  - name: test
    num_bytes: 107693939.24907027
    num_examples: 64000
  download_size: 84141456153
  dataset_size: 133028398305.0
- config_name: 25B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 166111586295.01904
    num_examples: 98399236
  - name: test
    num_bytes: 108040894.98094498
    num_examples: 64000
  download_size: 105147418131
  dataset_size: 166219627190.0
- config_name: 30B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 199302582477.5805
    num_examples: 117806793
  - name: test
    num_bytes: 108273597.41950662
    num_examples: 64000
  download_size: 126152714564
  dataset_size: 199410856075.0
- config_name: 35B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 232493644456.181
    num_examples: 137214350
  - name: test
    num_bytes: 108440503.81899258
    num_examples: 64000
  download_size: 147149925109
  dataset_size: 232602084960.0
- config_name: 40B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 265684747781.7734
    num_examples: 156621907
  - name: test
    num_bytes: 108566063.22660531
    num_examples: 64000
  download_size: 168152290262
  dataset_size: 265793313845.0
- config_name: 45B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 298875877641.391
    num_examples: 176029463
  - name: test
    num_bytes: 108663946.60903454
    num_examples: 64000
  download_size: 189159571162
  dataset_size: 298984541588.0
- config_name: 50B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 332067028077.12775
    num_examples: 195437020
  - name: test
    num_bytes: 108742395.87226707
    num_examples: 64000
  download_size: 210160621183
  dataset_size: 332175770473.0
- config_name: 55B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 365258192681.75964
    num_examples: 214844577
  - name: test
    num_bytes: 108806676.24034382
    num_examples: 64000
  download_size: 231164757019
  dataset_size: 365366999358.0
- config_name: 5B
  features:
  - name: text
    dtype: string
  - name: url
    dtype: string
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 33351938314.309906
    num_examples: 20769009
  - name: test
    num_bytes: 102774477.69009268
    num_examples: 64000
  download_size: 21119808690
  dataset_size: 33454712792.0
configs:
- config_name: 100M
  data_files:
  - split: train
    path: 100M/train-*
  - split: test
    path: 100M/test-*
- config_name: 100k
  data_files:
  - split: train
    path: 100k/train-*
  - split: test
    path: 100k/test-*
- config_name: 10B
  data_files:
  - split: train
    path: 10B/train-*
  - split: test
    path: 10B/test-*
- config_name: 10M
  data_files:
  - split: train
    path: 10M/train-*
  - split: test
    path: 10M/test-*
- config_name: 10k
  data_files:
  - split: train
    path: 10k/train-*
  - split: test
    path: 10k/test-*
- config_name: 15B
  data_files:
  - split: train
    path: 15B/train-*
  - split: test
    path: 15B/test-*
- config_name: 1B
  data_files:
  - split: train
    path: 1B/train-*
  - split: test
    path: 1B/test-*
- config_name: 1M
  data_files:
  - split: train
    path: 1M/train-*
  - split: test
    path: 1M/test-*
- config_name: 20B
  data_files:
  - split: train
    path: 20B/train-*
  - split: test
    path: 20B/test-*
- config_name: 25B
  data_files:
  - split: train
    path: 25B/train-*
  - split: test
    path: 25B/test-*
- config_name: 30B
  data_files:
  - split: train
    path: 30B/train-*
  - split: test
    path: 30B/test-*
- config_name: 35B
  data_files:
  - split: train
    path: 35B/train-*
  - split: test
    path: 35B/test-*
- config_name: 40B
  data_files:
  - split: train
    path: 40B/train-*
  - split: test
    path: 40B/test-*
- config_name: 45B
  data_files:
  - split: train
    path: 45B/train-*
  - split: test
    path: 45B/test-*
- config_name: 50B
  data_files:
  - split: train
    path: 50B/train-*
  - split: test
    path: 50B/test-*
- config_name: 55B
  data_files:
  - split: train
    path: 55B/train-*
  - split: test
    path: 55B/test-*
- config_name: 5B
  data_files:
  - split: train
    path: 5B/train-*
  - split: test
    path: 5B/test-*
---

# Filtered CulturaX + Wikipedia for Dutch

This is a combined and filtered version of [CulturaX](https://huggingface.co/datasets/uonlp/CulturaX) and [Wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia), only including Dutch. It is intended for the training of LLMs.

Different configs are available based on the number of tokens (see the overview in the section below). This is useful if you want to know exactly how many tokens you are training on, and it also works well as a streaming dataset. Tokens are counted as whitespace-separated tokens, so depending on your tokenizer, you will likely end up with more tokens than indicated here.
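
Loading a specific config works like any other Hub dataset (a minimal sketch; the repository ID below is a placeholder for this dataset's actual ID on the Hub):

```python
from datasets import load_dataset

# Placeholder repo ID -- replace with this dataset's actual ID on the Hub.
ds = load_dataset("your-org/culturax-wikipedia-nl", "10M", split="train", streaming=True)

# Token counts in this card are whitespace tokens, e.g.:
sample = next(iter(ds))
print(len(sample["text"].split()), "whitespace tokens in the first sample")
```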

Every config also has a test set (for validation) of 1% of the total size of the dataset, with a minimum of 1 and a maximum of 64,000 samples (~16M tokens).
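
The sizing rule can be written out as follows (a small illustration; it reproduces the per-config sample counts listed in the next section):

```python
def test_set_size(num_samples: int) -> int:
    # 1% of the samples (truncated), at least 1 and at most 64,000 samples.
    return max(1, min(64_000, int(0.01 * num_samples)))
```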

Wikipedia and CulturaX were shuffled before merging, and the test set creation was shuffled as well. Wikipedia is given priority to favor knowledge-heavy content, so the smaller configs consist exclusively of Wikipedia, while the larger configs are augmented with CulturaX. Every config builds on the previous one, so each config contains the same data as the smaller ones plus more. However, the train/test splits differ between configs, so the test set of one config may overlap with the training set of another. This is usually not a problem, but make sure that you do not train on one config's training set and evaluate on another config's test set.

## Configs

### `10k` -- 79 samples -- 10,087 tokens
- ratio_wikipedia: 100.00%
- total_num_tokens: 10,087
- train_num_tokens: 9,205
- test_num_tokens: 882
- total_num_samples: 79
- train_num_samples: 78
- test_num_samples: 1

### `100k` -- 1,057 samples -- 100,075 tokens
- ratio_wikipedia: 100.00%
- total_num_tokens: 100,075
- train_num_tokens: 98,044
- test_num_tokens: 2,031
- total_num_samples: 1,057
- train_num_samples: 1,047
- test_num_samples: 10

### `1M` -- 10,802 samples -- 1,000,239 tokens
- ratio_wikipedia: 100.00%
- total_num_tokens: 1,000,239
- train_num_tokens: 991,119
- test_num_tokens: 9,120
- total_num_samples: 10,802
- train_num_samples: 10,694
- test_num_samples: 108

### `10M` -- 141,263 samples -- 10,000,022 tokens
- ratio_wikipedia: 100.00%
- total_num_tokens: 10,000,022
- train_num_tokens: 9,874,772
- test_num_tokens: 125,250
- total_num_samples: 141,263
- train_num_samples: 139,851
- test_num_samples: 1,412

### `100M` -- 1,028,484 samples -- 100,000,047 tokens
- ratio_wikipedia: 100.00%
- total_num_tokens: 100,000,047
- train_num_tokens: 99,013,372
- test_num_tokens: 986,675
- total_num_samples: 1,028,484
- train_num_samples: 1,018,200
- test_num_samples: 10,284

### `1B` -- 5,153,898 samples -- 1,000,000,187 tokens
- ratio_wikipedia: 61.21%
- total_num_tokens: 1,000,000,187
- train_num_tokens: 989,990,190
- test_num_tokens: 10,009,997
- total_num_samples: 5,153,898
- train_num_samples: 5,102,360
- test_num_samples: 51,538

### `5B` -- 20,833,009 samples -- 5,000,000,076 tokens
- ratio_wikipedia: 25.35%
- total_num_tokens: 5,000,000,076
- train_num_tokens: 4,984,493,654
- test_num_tokens: 15,506,422
- total_num_samples: 20,833,009
- train_num_samples: 20,769,009
- test_num_samples: 64,000

### `10B` -- 40,240,566 samples -- 10,000,000,115 tokens
- ratio_wikipedia: 18.41%
- total_num_tokens: 10,000,000,115
- train_num_tokens: 9,984,156,828
- test_num_tokens: 15,843,287
- total_num_samples: 40,240,566
- train_num_samples: 40,176,566
- test_num_samples: 64,000

### `15B` -- 59,648,123 samples -- 15,000,000,154 tokens
- ratio_wikipedia: 15.98%
- total_num_tokens: 15,000,000,154
- train_num_tokens: 14,983,970,518
- test_num_tokens: 16,029,636
- total_num_samples: 59,648,123
- train_num_samples: 59,584,123
- test_num_samples: 64,000

### `20B` -- 79,055,679 samples -- 20,000,000,009 tokens
- ratio_wikipedia: 14.75%
- total_num_tokens: 20,000,000,009
- train_num_tokens: 19,983,799,357
- test_num_tokens: 16,200,652
- total_num_samples: 79,055,679
- train_num_samples: 78,991,679
- test_num_samples: 64,000

### `25B` -- 98,463,236 samples -- 25,000,000,048 tokens
- ratio_wikipedia: 14.00%
- total_num_tokens: 25,000,000,048
- train_num_tokens: 24,983,765,326
- test_num_tokens: 16,234,722
- total_num_samples: 98,463,236
- train_num_samples: 98,399,236
- test_num_samples: 64,000

### `30B` -- 117,870,793 samples -- 30,000,000,087 tokens
- ratio_wikipedia: 13.50%
- total_num_tokens: 30,000,000,087
- train_num_tokens: 29,983,707,932
- test_num_tokens: 16,292,155
- total_num_samples: 117,870,793
- train_num_samples: 117,806,793
- test_num_samples: 64,000

### `35B` -- 137,278,350 samples -- 35,000,000,126 tokens
- ratio_wikipedia: 13.14%
- total_num_tokens: 35,000,000,126
- train_num_tokens: 34,983,914,739
- test_num_tokens: 16,085,387
- total_num_samples: 137,278,350
- train_num_samples: 137,214,350
- test_num_samples: 64,000

### `40B` -- 156,685,907 samples -- 40,000,000,165 tokens
- ratio_wikipedia: 12.87%
- total_num_tokens: 40,000,000,165
- train_num_tokens: 39,983,508,625
- test_num_tokens: 16,491,540
- total_num_samples: 156,685,907
- train_num_samples: 156,621,907
- test_num_samples: 64,000

### `45B` -- 176,093,463 samples -- 45,000,000,020 tokens
- ratio_wikipedia: 12.66%
- total_num_tokens: 45,000,000,020
- train_num_tokens: 44,983,608,118
- test_num_tokens: 16,391,902
- total_num_samples: 176,093,463
- train_num_samples: 176,029,463
- test_num_samples: 64,000

### `50B` -- 195,501,020 samples -- 50,000,000,059 tokens
- ratio_wikipedia: 12.49%
- total_num_tokens: 50,000,000,059
- train_num_tokens: 49,983,567,461
- test_num_tokens: 16,432,598
- total_num_samples: 195,501,020
- train_num_samples: 195,437,020
- test_num_samples: 64,000

### `55B` -- 214,908,577 samples -- 55,000,000,098 tokens
- ratio_wikipedia: 12.35%
- total_num_tokens: 55,000,000,098
- train_num_tokens: 54,983,723,278
- test_num_tokens: 16,276,820
- total_num_samples: 214,908,577
- train_num_samples: 214,844,577
- test_num_samples: 64,000


## Filtering

While CulturaX has already done a lot of filtering, some additional filtering can be done to improve the quality of the corpus. These filters are described below.

The baseline ratios (punctuation, uppercase, digits) were calculated on the SONAR-500 corpus (excluding WRPEA, WRPED, WRUEA, WRUED, and WRUEB).

**CulturaX**:
- removed documents that contain the text "rechten voorbehouden" or "rights reserved"
- removed documents whose URL contains "wikipedia.org" (because we include a cleaned version of Wikipedia ourselves)
- removed documents that contain a "bad word" (see the section below)
- removed documents that contain any non-Latin characters. The idea is that "knowledge"-based information (e.g. the original writing of a name) is allowed when the data comes from Wikipedia, but not from any other web crawl, to avoid unsolicited noise.
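
A minimal sketch of these document-level rules (an illustration, not the exact script used; `BAD_PHRASES_DOC_LEVEL` is the set from the "Bad words" section below, and the third-party `regex` package provides Unicode script classes):

```python
import regex  # third-party `regex` package, supports \p{...} script classes

# Anything outside the Latin script that is not a "common" (punctuation,
# digits, whitespace) or "inherited" (combining marks) character.
NON_LATIN_RE = regex.compile(r"[^\p{Latin}\p{Common}\p{Inherited}]")

def drop_culturax_doc(text: str, url: str) -> bool:
    """Return True if a CulturaX document should be removed."""
    lowered = text.lower()
    if "rechten voorbehouden" in lowered or "rights reserved" in lowered:
        return True
    if "wikipedia.org" in url:
        return True
    # Token-level matching is an assumption; the rule above only says "contain".
    if any(token in BAD_PHRASES_DOC_LEVEL for token in lowered.split()):
        return True
    return NON_LATIN_RE.search(text) is not None
```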

**CulturaX + Wikipedia**:
- removed documents where the ratio of punctuation marks to non-whitespace characters is higher than 0.2
- removed documents where the ratio of uppercase characters to non-whitespace characters is higher than 0.22
- removed documents where the ratio of digits to non-whitespace characters is higher than 0.16
- removed documents where the average token length is < 2 or > 20
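
A sketch of these ratio and token-length filters (assuming whitespace tokenization and Python's `string.punctuation`; the original pipeline may define the character classes slightly differently):

```python
import string

def keep_doc(text: str) -> bool:
    """Return True if a document passes the ratio and length filters above."""
    non_ws = [c for c in text if not c.isspace()]
    if not non_ws:
        return False
    n = len(non_ws)
    if sum(c in string.punctuation for c in non_ws) / n > 0.2:
        return False
    if sum(c.isupper() for c in non_ws) / n > 0.22:
        return False
    if sum(c.isdigit() for c in non_ws) / n > 0.16:
        return False
    tokens = text.split()
    avg_token_len = sum(len(t) for t in tokens) / len(tokens)
    return 2 <= avg_token_len <= 20
```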

## Bad words

```python
BAD_PHRASES_DOC_LEVEL = {
    # https://en.wikipedia.org/wiki/Dutch_profanity
    "achterlijk",
    "debiel",
    "downie",
    "idioot",
    "kankerlijer",
    "klere",
    "kolere",
    "minkukel",
    "pestkop",
    "pleuris",
    "pleuritis",
    "teringlijer",
    "tyfuslijer",
    "gadver",
    "getver",
    "godver",
    "godskolere",
    "godverork",
    "graftak",
    "kopvod",
    "verdomme",
    "anaalgeneraal",
    "bitch",
    "dikzak",
    "flikker",
    "fok",
    "fuck",
    "hoer",
    "klootzak",
    "klote",
    "kreng",
    "kringspiermusketier",
    "kut",
    "lamzak",
    "lul",
    "manwijf",
    "matennaai",
    "neuken",
    "neuker",
    "ouwehoer",
    "reet",
    "reetkever",
    "reetridder",
    "rotzak",
    "schijt",
    "shit",
    "slet",
    "slijmbal",
    "slons",
    "sodemieter",
    "stoephoer",
    "swaffel",
    "teef",
    "trut",
    "tut",
    "zak",
    "uilskuiken",
    "zeik",
    "bamivreter",
    "bosneger",
    "neger",
    "fransoos",
    "geitenneuker",
    "kaaskop",
    "kakker",
    "koelie",
    "lijp",
    "medelander",
    "mocro",
    "mof",
    "nikker",
    "poepchinees",
    "roetmop",
    "spaghettivreter",
    "loempiavouwer",
    "spanjool",
    "spleetoog",
    "tatta",
    "tokkie",
    "zandneger",
    "zwartzak",
    "halvezool",
    "kenau",
    "klootviool",
    "knuppel",
    "koekert",
    "koekwaus",
    "oelewapper",
    "smeerlap",
    "sukkel",
    "sul",
    "wappie",
    "wijf",
    "zooi",
    # xxx (among others, from https://gitlab.com/yhavinga/c4nlpreproc/-/blob/master/clean/badwords_ennl.py?ref_type=heads)
    "xxx",
    "anal",
    "blowjob",
    "buttplug",
    "cock",
    "cunt",
    "geil",
    "sex",  # Standaardnederlands = seks, maybe we catch some porn or socialmedia sites with this misspelling
    "porn",
    # extra
    "nigger",
    "nigga",
    "hoerig",
    "klojo",
}
```

## License information

- For CulturaX: https://huggingface.co/datasets/uonlp/CulturaX#license-information
- For Wikipedia: https://huggingface.co/datasets/wikimedia/wikipedia#licensing-information