Datasets:

Multilinguality: translation
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: found
Source Datasets: original
Tags:
License:
albertvillanova (HF staff) committed
Commit: f92f826
Parent: b6d68c3

Delete loading script

Files changed (1):
  1. opus_fiskmo.py +0 -94
opus_fiskmo.py DELETED
@@ -1,94 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""fiskmo, a massive parallel corpus for Finnish and Swedish."""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th International\
-Conference on Language Resources and Evaluation (LREC 2012)"""
-
-
-_DESCRIPTION = """\
-fiskmo, a massive parallel corpus for Finnish and Swedish."""
-
-
-_HOMEPAGE = "http://opus.nlpl.eu/fiskmo.php"
-
-
-_LICENSE = ""
-
-
-_URLs = {"train": "https://object.pouta.csc.fi/OPUS-fiskmo/v2/moses/fi-sv.txt.zip"}
-
-
-class OpusFiskmo(datasets.GeneratorBasedBuilder):
-    """fiskmo, a massive parallel corpus for Finnish and Swedish."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="fi-sv", version=VERSION, description="fiskmo, a massive parallel corpus for Finnish and Swedish"
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
-            ),
-            supervised_keys=None,
-            homepage="http://opus.nlpl.eu/fiskmo.php",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "source_file": os.path.join(data_dir["train"], "fiskmo.fi-sv.fi"),
-                    "target_file": os.path.join(data_dir["train"], "fiskmo.fi-sv.sv"),
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, source_file, target_file, split):
-        """This function returns the examples in the raw (text) form."""
-        with open(source_file, encoding="utf-8") as f:
-            source_sentences = f.read().split("\n")
-        with open(target_file, encoding="utf-8") as f:
-            target_sentences = f.read().split("\n")
-
-        assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-            len(source_sentences),
-            len(target_sentences),
-            source_file,
-            target_file,
-        )
-
-        source, target = tuple(self.config.name.split("-"))
-        for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-            result = {"translation": {source: l1, target: l2}}
-            yield idx, result
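
For context, the deleted builder pairs each line of the Moses-format files fiskmo.fi-sv.fi and fiskmo.fi-sv.sv into one translation example. Below is a minimal usage sketch, not part of this commit, assuming the canonical opus_fiskmo dataset remains loadable from the Hub after the script is removed; the config name and output layout are taken from the deleted script above.

# Minimal sketch: load the fi-sv configuration and inspect one example.
# Assumes the Hub still serves data for "opus_fiskmo" without this script.
from datasets import load_dataset

ds = load_dataset("opus_fiskmo", "fi-sv", split="train")

# Each example follows the Translation feature declared in _info():
#   {"translation": {"fi": "<Finnish sentence>", "sv": "<Swedish sentence>"}}
example = ds[0]
print(example["translation"]["fi"])
print(example["translation"]["sv"])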