initial commit
- README.md +11 -0
- opus_raw.py +94 -0
README.md
ADDED
@@ -0,0 +1,11 @@
+## Load mono corpora from OPUS
+
+This dataset script lets you load any raw mono corpus from [opus.nlpl.eu](https://opus.nlpl.eu/). Please check [opus.nlpl.eu](https://opus.nlpl.eu/) for the available corpora and their licenses. The targeted corpus type is called a "raw" corpus on OPUS.
+
+To use it, you need the name of the corpus, its version, and the target language code. The corpus name and version are passed as a single string separated by a space (e.g. 'News-Commentary v16'). All of these can be found on [opus.nlpl.eu](https://opus.nlpl.eu/).
+
+## Example:
+```python
+from datasets import load_dataset
+dataset = load_dataset('badranx/opus_raw', corpus="News-Commentary v16", lang="de")
+```
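Every raw corpus is loaded into the same shape, so a quick way to inspect the result looks roughly like this (a sketch: the single `train` split and the `id`/`text` columns come from the script below, while the actual rows depend on the corpus you pick):

```python
print(dataset)              # DatasetDict with a single "train" split
print(dataset["train"][0])  # e.g. {'id': '0', 'text': '...first line of the corpus...'}
```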
opus_raw.py
ADDED
@@ -0,0 +1,94 @@
+# coding=utf-8
+# Copyright 2020 HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import datasets
+
+
+_DESCRIPTION = """\
+Raw mono corpora from http://opus.nlpl.eu/. Please check http://opus.nlpl.eu/ for the available corpora and licenses.
+"""
+
+_HOMEPAGE_URL = "http://opus.nlpl.eu"
+
+_CITATION = """\
+J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012)
+"""
+
+# Download URL template: corpus name, corpus version, language code.
+_BASE_URL = "https://object.pouta.csc.fi/OPUS-{}/{}/mono/{}.txt.gz"
+
+# Only one default triple is listed here. Any (corpus, version, language)
+# combination available on OPUS can be requested through the config.
+_LANGUAGES = [
+    ("OpenSubtitles", "v2018", "en"),
+]
+
+
+class OpusRawConfig(datasets.BuilderConfig):
+    def __init__(self, *args, corpus=None, lang=None, **kwargs):
+        # The corpus is passed as one string holding the corpus name and its
+        # version separated by a space, e.g. "News-Commentary v16".
+        corpus, corpus_version = corpus.strip().split()
+
+        super().__init__(
+            *args,
+            name=f"{corpus}-{corpus_version}-{lang}",
+            **kwargs,
+        )
+        self.corpus = corpus
+        self.corpus_version = corpus_version
+        self.lang = lang
+
+
+class OpusRaw(datasets.GeneratorBasedBuilder):
+
+    BUILDER_CONFIG_CLASS = OpusRawConfig
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                },
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE_URL,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        def _base_url(corpus, corpus_version, lang):
+            return _BASE_URL.format(corpus, corpus_version, lang)
+
+        download_url = _base_url(self.config.corpus, self.config.corpus_version, self.config.lang)
+        path = dl_manager.download_and_extract(download_url)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"datapath": path},
+            )
+        ]
+
+    def _generate_examples(self, datapath):
+        # Yield one example per line of the downloaded plain-text corpus.
+        with open(datapath, encoding="utf-8") as f:
+            for text_counter, line in enumerate(f):
+                yield text_counter, {
+                    "id": str(text_counter),
+                    "text": line.strip(),
+                }
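For reference, this standalone sketch mirrors how the script assembles the download URL from the config (the string parsing in `OpusRawConfig.__init__` plus the `_BASE_URL` template used by `_split_generators`); it can be handy for checking that a (corpus, version, language) triple actually exists on OPUS before loading:

```python
# Mirrors OpusRawConfig's parsing and _split_generators' URL construction.
_BASE_URL = "https://object.pouta.csc.fi/OPUS-{}/{}/mono/{}.txt.gz"

corpus, corpus_version = "News-Commentary v16".strip().split()
print(_BASE_URL.format(corpus, corpus_version, "de"))
# -> https://object.pouta.csc.fi/OPUS-News-Commentary/v16/mono/de.txt.gz
```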