Datasets:

Languages:
Polish
License:
pkedzia committed on
Commit
502e191
1 Parent(s): ecd15f6

Added 20230401 dump

Browse files
Files changed (3) hide show
  1. README.md +7 -0
  2. corpus.jsonl.gz +3 -0
  3. wikipedia-pl.py +54 -0
README.md CHANGED
@@ -1,3 +1,10 @@
1
  ---
2
  license: cc-by-4.0
 
 
3
  ---
 
 
 
 
 
 
1
  ---
2
  license: cc-by-4.0
3
+ language:
4
+ - pl
5
  ---
6
+
7
+ Polish Wikipedia, 20230401 dump, converted from parquet format.
8
+ Thanks to [chrisociepa](https://huggingface.co/chrisociepa) for providing the Polish Wikipedia in [parquet](https://huggingface.co/datasets/chrisociepa/wikipedia-pl-20230401) format.
9
+
10
+
corpus.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64d73cf01f236a7916dbd97801ee38b0d2c8829c49abc16c7af156161147d1cb
3
+ size 1141171885
wikipedia-pl.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

# Shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """
This is the Polish Wikipedia dataset. This dump is converted from parquet format.
"""

# The single split exposed by this loader.
_SPLITS = ["corpus"]

# Relative location of the data file for each split.
_URLs = {"corpus": "corpus.jsonl.gz"}
class PLWikiedia(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Polish Wikipedia corpus (20230401 dump).

    Reads a gzipped JSON-Lines file where each line is one article with
    ``id``, ``url``, ``title`` and ``text`` string fields.
    """
    # NOTE(review): the class name keeps the original "PLWikiedia" spelling
    # (sic) so any external references to it keep working.

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="corpus",
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        """Return the dataset metadata and its four-string-column schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "url": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download and decompress the data file, emitting one split.

        ``download_and_extract`` returns the path to the decompressed
        JSONL file, which is forwarded to ``_generate_examples``.
        """
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(line_index, example)`` pairs, one per JSON line.

        Streams the file instead of the original ``f.readlines()``,
        which materialized the entire corpus (over 1 GB extracted) in
        memory before yielding the first example. Blank lines (e.g. a
        trailing newline) are skipped rather than crashing json.loads.
        """
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                if not line.strip():
                    continue
                yield i, json.loads(line)