ToluClassics committed
Commit 6f33416
1 Parent(s): 4594e31

Upload 4 files

.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ masakhane_wiki_100-english/corpus.jsonl filter=lfs diff=lfs merge=lfs -text
+ masakhane_wiki_100-french/corpus.jsonl filter=lfs diff=lfs merge=lfs -text
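
The two added patterns route the corpus files through Git LFS. As an illustration only (not part of the commit), a minimal Python sketch of the matching these literal-path patterns imply; full .gitattributes pattern semantics are richer than `fnmatch`:

```python
# Sketch only: check which repo paths the new .gitattributes
# patterns would route through Git LFS.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "masakhane_wiki_100-english/corpus.jsonl",
    "masakhane_wiki_100-french/corpus.jsonl",
]

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("masakhane_wiki_100-english/corpus.jsonl"))  # True
print(is_lfs_tracked("README.md"))                                # False
```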
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ language:
+ - en
+ - fr
+
+
+ multilinguality:
+ - multilingual
+
+ task_categories:
+ - text-retrieval
+ license: apache-2.0
+
+ viewer: true
+ ---
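
For context (not part of the commit): the card metadata above pairs with the loading script added later in this commit, which defines one configuration per language. A hedged usage sketch, assuming the standard `datasets` API (newer `datasets` versions may additionally require `trust_remote_code=True` for script-backed datasets):

```python
from datasets import load_dataset

# Configuration names ('english', 'french') come from the loading script,
# not from the 'en'/'fr' language tags in the card metadata.
corpus = load_dataset("ToluClassics/masakhane_wiki_100", "english", split="train")
print(corpus[0]["id"], corpus[0]["contents"][:80])
```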
masakhane_wiki_100-english/corpus.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4aa20d13636d49dec99b6096de45f82f8e97fc14c2f01d4adf744261dff5e9f
+ size 18899343471
masakhane_wiki_100-french/corpus.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76af431c66658d98da7d2f477139fa78c54ace5e68b4559a645190603a4a4250
+ size 7099025578
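
Both corpus files are committed as Git LFS pointers in the format shown above; the actual objects live in LFS storage. A minimal sketch (not part of the commit) of parsing such a pointer into its oid and size:

```python
# Parse a Git LFS pointer file: three "key value" lines
# (version / oid / size), as shown in the two pointers above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:76af431c66658d98da7d2f477139fa78c54ace5e68b4559a645190603a4a4250
size 7099025578"""

info = parse_lfs_pointer(pointer)
print(info["sha256"][:12], info["size_bytes"])  # French corpus, ~7.1 GB
```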
masakhane_wiki_100.py ADDED
@@ -0,0 +1,89 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+
+ import json
+
+ import datasets
+
+ _CITATION = '''
+ coming soon ...
+ '''
+
+ languages = [
+     'english',
+     'french',
+ ]
+
+ _DESCRIPTION = '100-token-long Wikipedia passages from April/May 2022'
+
+ _DATASET_URLS = {
+     lang: {
+         'collection': f'https://huggingface.co/datasets/ToluClassics/masakhane_wiki_100/resolve/main/masakhane_wiki_100-{lang}/corpus.jsonl',
+     } for lang in languages
+ }
+
+
+ class MasakhaneWiki100Corpus(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             version=datasets.Version('1.1.0'),
+             name=lang,
+             description=f'Wikipedia passages in language {lang}.'
+         ) for lang in languages
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'id': datasets.Value('string'),
+             'contents': datasets.Value('string'),
+         })
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # The columns of the dataset; both language configurations share this schema.
+             features=features,
+             supervised_keys=None,
+             # Homepage of the dataset for documentation.
+             homepage='https://huggingface.co/datasets/ToluClassics/masakhane_wiki_100',
+             # License for the dataset.
+             license='apache-2.0',
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         lang = self.config.name
+         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])
+
+         splits = [
+             datasets.SplitGenerator(
+                 name='train',
+                 gen_kwargs={
+                     # The key must match the one used in _DATASET_URLS above.
+                     'filepath': downloaded_files['collection'],
+                 },
+             ),
+         ]
+         return splits
+
+     def _generate_examples(self, filepath):
+         # Each line of corpus.jsonl is a JSON object with 'id' and 'contents' fields.
+         with open(filepath, encoding='utf-8') as f:
+             for line in f:
+                 data = json.loads(line)
+                 yield data['id'], data
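
Given the sizes recorded in the LFS pointers (~18.9 GB English, ~7.1 GB French), a hedged sketch (not part of the commit) of iterating the corpus without a full upfront download, using the standard streaming mode of `datasets`:

```python
from datasets import load_dataset

# Streaming avoids materializing the full corpus.jsonl before iterating.
corpus = load_dataset(
    "ToluClassics/masakhane_wiki_100",
    "french",          # one of the two configurations defined above
    split="train",
    streaming=True,
)

# Records follow the schema declared in _info(): 'id' and 'contents'.
for i, passage in enumerate(corpus):
    print(passage["id"], passage["contents"][:80])
    if i == 2:
        break
```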