dlwh committed on
Commit 1e342a3
1 Parent(s): 4b6ce68

initial commit

Files changed (2)
  1. README.md +99 -0
  2. eu_wikipedias.py +121 -0
README.md ADDED
@@ -0,0 +1,99 @@
+ ---
+ annotations_creators:
+ - no-annotation
+ language_creators:
+ - crowdsourced
+ pretty_name: Wikipedia
+ paperswithcode_id: null
+ license:
+ - cc-by-sa-3.0
+ - gfdl
+ task_categories:
+ - text-generation
+ - fill-mask
+ task_ids:
+ - language-modeling
+ - masked-language-modeling
+ source_datasets:
+ - original
+ multilinguality:
+ - multilingual
+ size_categories:
+ - n<1K
+ - 1K<n<10K
+ - 10K<n<100K
+ - 100K<n<1M
+ - 1M<n<10M
+ language:
+ - bg
+ - cs
+ - da
+ - de
+ - el
+ - en
+ - es
+ - et
+ - fi
+ - fr
+ - ga
+ - hr
+ - hu
+ - it
+ - lt
+ - lv
+ - mt
+ - nl
+ - pl
+ - pt
+ - ro
+ - sk
+ - sl
+ - sv
+ ---
+
+ # Dataset Card for Wikipedia
+
+ This repo is a wrapper around [olm/wikipedia](https://huggingface.co/datasets/olm/wikipedia) that concatenates the data for the 24 official EU languages into a single dataset.
+ Please refer to olm/wikipedia for a complete data card.
+
+ The EU languages we include are listed below, followed by a sketch of what the wrapper does conceptually:
+ - bg
+ - cs
+ - da
+ - de
+ - el
+ - en
+ - es
+ - et
+ - fi
+ - fr
+ - ga
+ - hr
+ - hu
+ - it
+ - lt
+ - lv
+ - mt
+ - nl
+ - pl
+ - pt
+ - ro
+ - sk
+ - sl
+ - sv
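+
+ Conceptually, this wrapper is equivalent to loading each language from olm/wikipedia and concatenating the results yourself. A minimal sketch (not the exact implementation; extend `langs` to the full list above):
+
+ ```python
+ from datasets import load_dataset, concatenate_datasets
+
+ langs = ["bg", "cs", "da"]  # extend with the full EU language list above
+ parts = [
+     load_dataset("olm/wikipedia", language=lang, date="20221101", split="train")
+     for lang in langs
+ ]
+ wiki = concatenate_datasets(parts)
+ ```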
+
+ As with `olm/wikipedia`, you will need to install a few dependencies:
+
+ ```bash
+ pip install mwparserfromhell==0.6.4 multiprocess==0.70.13
+ ```
+
+ Then load the dataset, passing the dump date:
+
+ ```python
+ from datasets import load_dataset
+
+ load_dataset("dlwh/eu_wikipedias", date="20221101")
+ ```
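+
+ The builder exposes a single `train` split whose examples have `id`, `url`, `title`, and `text` fields, so you can peek at a record like this (a small sketch):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load just the train split and inspect the first record.
+ wiki = load_dataset("dlwh/eu_wikipedias", date="20221101", split="train")
+ print(wiki[0]["title"], wiki[0]["url"])
+ ```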
+
+ Again, please refer to the original [olm/wikipedia](https://huggingface.co/datasets/olm/wikipedia) for a complete data card.
eu_wikipedias.py ADDED
@@ -0,0 +1,121 @@
+ # This is a Hugging Face dataset script that loads the Wikipedias for the EU languages using olm/wikipedia:
+ # https://huggingface.co/datasets/olm/wikipedia/blob/main/wikipedia.py
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @ONLINE {wikidump,
+     author = {Wikimedia Foundation},
+     title = {Wikimedia Downloads},
+     url = {https://dumps.wikimedia.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Wikipedia dataset containing cleaned articles of the EU languages.
+ The dataset is built from the Wikipedia dumps
+ (https://dumps.wikimedia.org/), with all EU languages concatenated into a
+ single train split. Each example contains the content of one full Wikipedia
+ article, cleaned to strip markdown and unwanted sections (references, etc.).
+ """
+
+ _LICENSE = (
+     "This work is licensed under the Creative Commons Attribution-ShareAlike "
+     "3.0 Unported License. To view a copy of this license, visit "
+     "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
+     "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
+ )
+
+ _VERSION = datasets.Version("1.0.0", "")
+
+
+ # The 24 official EU languages, matching the list in the README.
+ eu_languages = [
+     "bg",
+     "cs",
+     "da",
+     "de",
+     "el",
+     "en",
+     "es",
+     "et",
+     "fi",
+     "fr",
+     "ga",
+     "hr",
+     "hu",
+     "it",
+     "lt",
+     "lv",
+     "mt",
+     "nl",
+     "pl",
+     "pt",
+     "ro",
+     "sk",
+     "sl",
+     "sv",
+ ]
+
+
+ class WikipediaConfig(datasets.BuilderConfig):
+     """BuilderConfig for EuWikipedia."""
+
+     def __init__(self, date=None, version=_VERSION, **kwargs):
+         """BuilderConfig for Wikipedia.
+         Args:
+             date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
+                 available dates can be found at https://dumps.wikimedia.org/enwiki/.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             name=f"{date}",
+             description=f"Wikipedia dataset for EU languages, parsed from {date} dump.",
+             version=version,
+             **kwargs,
+         )
+         self.date = date
+
+
+ _DATE = "20221101"
+
+
+ class EuWikipedia(datasets.GeneratorBasedBuilder):
+     """Wikipedia dataset."""
+
+     BUILDER_CONFIG_CLASS = WikipediaConfig
+     BUILDER_CONFIGS = [WikipediaConfig(date=_DATE)]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             # No default supervised_keys.
+             supervised_keys=None,
+             homepage="https://dumps.wikimedia.org",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"date": self.config.date}
+             )
+         ]
+
+     def _generate_examples(self, date):
+         # Defer to olm/wikipedia for parsing; chain the languages together.
+         for lang in eu_languages:
+             # Load the train split explicitly: iterating the bare DatasetDict
+             # would yield split names, not examples.
+             wiki = datasets.load_dataset("olm/wikipedia", language=lang, date=date, split="train")
+             for example in wiki:
+                 # Prefix keys with the language so they stay unique across wikis.
+                 yield f"{lang}-{example['id']}", example
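+
+
+ if __name__ == "__main__":
+     # Minimal smoke test (a sketch: assumes this script is run from the repo
+     # root and the mwparserfromhell/multiprocess dependencies are installed).
+     ds = datasets.load_dataset(__file__, date=_DATE, split="train")
+     print(ds)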