nreimers committed on
Commit ea632ef
1 Parent(s): eaca589

first upload

simple/000.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dd7d76f6c7b46275eb9ee3f93903d92db60f1e91b40167e5c95f13dd0112246
+size 15262500
simple/000.jsonl.gz.lock ADDED
File without changes
simple/001.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66a92703462e9315588522e97f88d684697073084a41d14c88cce5c37c5efeb9
+size 14844594
simple/001.jsonl.gz.lock ADDED
File without changes
simple/002.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1060598d27f0843bc8d152d501fc9c7c5e80abefc41680b521245625d9aef97
+size 14010991
simple/002.jsonl.gz.lock ADDED
File without changes
simple/003.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94f8315d154ee76c1f4e6d01062bfa783c1870b93beb112fc1081a9cfccf0c6c
+size 13186503
simple/003.jsonl.gz.lock ADDED
File without changes
simple/004.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e816552b1395fe29ff1befd97e0158ce0234f6d04b5a50af78dc2de8df39bd53
+size 10239399
simple/004.jsonl.gz.lock ADDED
File without changes
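
Each of the five simple/*.jsonl.gz entries above is a Git LFS pointer file, not the data itself: the three added lines record the LFS spec version, the SHA-256 oid of the real object, and its size in bytes, while the shard contents live in LFS storage. Below is a minimal sketch of parsing such a pointer and checking a downloaded shard against it; the helper names are illustrative, not part of Git LFS or any library.

import hashlib

def read_lfs_pointer(path):
    # A pointer file is a few 'key value' lines:
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<hex digest>
    #   size <bytes>
    fields = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

def verify_lfs_object(pointer_path, object_path):
    # Recompute the SHA-256 digest and byte count of the downloaded
    # object and compare them to what the pointer promises.
    fields = read_lfs_pointer(pointer_path)
    expected_oid = fields['oid'].split(':', 1)[1]
    expected_size = int(fields['size'])
    digest = hashlib.sha256()
    size = 0
    with open(object_path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size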
wikipedia-22-12.py ADDED
@@ -0,0 +1,105 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+import json
+
+import datasets
+from dataclasses import dataclass
+import numpy as np
+
+_CITATION = '''
+'''
+
+languages2filesize = {
+    'ar': 32,
+    'de': 150,
+    'en': 352,
+    'es': 102,
+    'fr': 134,
+    'hi': 5,
+    'it': 83,
+    'ja': 47,
+    'ko': 13,
+    'simple': 5,
+    'zh': 23
+}
+
+_DESCRIPTION = 'dataset load script'
+
+_DATASET_URLS = {
+    lang: [f'https://huggingface.co/datasets/Cohere/wikipedia-22-12/resolve/main/{lang}/{str(i).zfill(3)}.jsonl.gz' for i in range(n)]
+    for lang, n in languages2filesize.items()
+}
+
+
+class WikiCorpus(datasets.GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            version=datasets.Version('1.0.0'),
+            name=lang,
+            description=f'Wiki dataset in language {lang}.'
+        ) for lang in languages2filesize
+    ]
+
+    def _info(self):
+
+        features = datasets.Features({
+            'id': datasets.Value('int32'),
+            'title': datasets.Value('string'),
+            'text': datasets.Value('string'),
+            'url': datasets.Value('string'),
+            'wiki_id': datasets.Value('string'),
+            'views': datasets.Value('float32'),
+            'paragraph_id': datasets.Value('int32'),
+            'langs': datasets.Value('int32'),
+            #'emb': datasets.Sequence(datasets.Value("float32"))
+        })
+
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features, # Here we define them above because they are different between the two configurations
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage='https://www.cohere.ai',
+            # License for the dataset if available
+            license='',
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        lang = self.config.name
+        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])
+
+        splits = [
+            datasets.SplitGenerator(
+                name='train',
+                gen_kwargs={
+                    'filepaths': downloaded_files,
+                },
+            ),
+        ]
+        return splits
+
+    def _generate_examples(self, filepaths):
+        for filepath in filepaths:
+            with open(filepath, encoding="utf-8") as f:
+                for line in f:
+                    data = json.loads(line)
+                    yield data['id'], data
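
With the script in place, each key of languages2filesize becomes a loadable config, and for the 'simple' config _DATASET_URLS expands to the five shard URLs .../simple/000.jsonl.gz through .../simple/004.jsonl.gz uploaded above. A usage sketch via the standard datasets API follows; note that, depending on the installed version of the datasets library, trust_remote_code=True may be required before it will execute a script-based dataset like this one.

from datasets import load_dataset

# Downloads and decompresses the five simple/*.jsonl.gz shards, then
# streams them through _generate_examples to build the 'train' split.
docs = load_dataset('Cohere/wikipedia-22-12', 'simple', split='train')
print(docs[0]['title'], docs[0]['paragraph_id'])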