system HF staff committed on
Commit 7322776
1 Parent(s): f8575bd

import from S3

Files changed (2)
  1. ar.py +102 -0
  2. ar.py.lock +0 -0
ar.py ADDED
@@ -0,0 +1,102 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The Arabic United Nations Corpus dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ The corpus is a part of the MultiUN corpus.
+ It is a collection of translated documents from the United Nations.
+ The corpus is downloaded from the following website:
+ [open parallel corpus](http://opus.nlpl.eu/)
+ """
+
+ _CITATION = """\
+ @inproceedings{eisele2010multiun,
+     title={MultiUN: A Multilingual Corpus from United Nation Documents.},
+     author={Eisele, Andreas and Chen, Yu},
+     booktitle={LREC},
+     year={2010}
+ }
+ """
+
+ URL = "https://object.pouta.csc.fi/OPUS-MultiUN/v1/mono/ar.txt.gz"
+
+
+ class AracorpusConfig(datasets.BuilderConfig):
+     """BuilderConfig for Aracorpus."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Aracorpus.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(AracorpusConfig, self).__init__(
+             version=datasets.Version("1.0.0", "New split API (https://tensorflow.org/datasets/splits)"), **kwargs
+         )
+
+
+ class Aracorpus(datasets.GeneratorBasedBuilder):
+     """The Arabic MultiUN corpus dataset."""
+
+     BUILDER_CONFIGS = [AracorpusConfig(name="plain_text", description="Plain text")]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({"text": datasets.Value("string")}),
+             supervised_keys=None,
+             homepage="http://opus.nlpl.eu/",
+             citation=_CITATION,
+         )
+
+     def _vocab_text_gen(self, archive):
+         for _, ex in self._generate_examples(archive):
+             yield ex["text"]
+
+     def _split_generators(self, dl_manager):
+         # download_and_extract decompresses ar.txt.gz and returns the path to the extracted text file
+         arch_path = dl_manager.download_and_extract(URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": arch_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         # The extracted archive is a single plain-text file with one example per line.
+         _id = 0
+         with open(filepath, mode="r", encoding="utf-8") as f:
+             for line in f:
+                 yield _id, {"text": line.strip()}
+                 _id += 1
ar.py.lock ADDED
File without changes
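
For reference, a minimal sketch of how a loading script like this is typically exercised with the `datasets` library of this era; the local path `./ar.py` is a placeholder assumption, while the `plain_text` config name and the single train split come from the script in this commit:

from datasets import load_dataset

# "./ar.py" is a hypothetical local copy of the script above.
# Builds the "plain_text" config and loads the single TRAIN split
# produced by Aracorpus._split_generators.
dataset = load_dataset("./ar.py", name="plain_text", split="train")

# Each example is a dict with one "text" field, as declared in _info().
print(dataset[0]["text"])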