Datasets:

Task Categories: sequence-modeling
Multilinguality: multilingual
Size Categories: unknown
Licenses: unknown
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: original
open_subtitles_monolingual / open_subtitles_monolingual.py
1 # coding=utf-8
2 # Copyright 2020 The HuggingFace Datasets Authors and Antoine Simoulin.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import csv
17 import json
18 import os
19
20 import datasets
21
22
23 _CITATION = """\
24 @inproceedings{lison_16,
25 author = {Pierre Lison and
26 J{\"{o}}rg Tiedemann},
27 editor = {Nicoletta Calzolari and
28 Khalid Choukri and
29 Thierry Declerck and
30 Sara Goggi and
31 Marko Grobelnik and
32 Bente Maegaard and
33 Joseph Mariani and
34 H{\'{e}}l{\`{e}}ne Mazo and
35 Asunci{\'{o}}n Moreno and
36 Jan Odijk and
37 Stelios Piperidis},
38 title = {OpenSubtitles2016: Extracting Large Parallel Corpora from Movie and
39 {TV} Subtitles},
40 booktitle = {Proceedings of the Tenth International Conference on Language Resources
41 and Evaluation {LREC} 2016, Portoro{\v{z}}, Slovenia, May 23-28, 2016},
42 publisher = {European Language Resources Association {(ELRA)}},
43 year = {2016},
44 url = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/947.html},
45 }
46 """
47
48 _DESCRIPTION = """\
49 This is a new collection of translated movie subtitles from http://www.opensubtitles.org/.
50 IMPORTANT: If you use the OpenSubtitle corpus: Please, add a link to http://www.opensubtitles.org/ to your website and to your reports and publications produced with the data!
51 This is a slightly cleaner version of the subtitle collection using improved sentence alignment and better language checking.
52 62 languages, 1,782 bitexts
53 total number of files: 3,735,070
54 total number of tokens: 22.10G
55 total number of sentence fragments: 3.35G
56 """
57
58 _HOMEPAGE_URL = "http://opus.nlpl.eu/OpenSubtitles.php"
59
60 _URLs = {
61 language: './{}.jsonl.gz'.format(language) \
62 for language in ['fr', 'en', 'zh_cn', 'pt', 'es', 'ar']
63 }
64
65
# Module-level alias for the dataset version.  A class-level list comprehension
# cannot see class attributes (Python comprehension scoping), so referencing
# ``VERSION`` inside BUILDER_CONFIGS would raise NameError — which is likely why
# ``version=`` was previously commented out.  Defining the version here lets the
# comprehension pass it through.
_VERSION = datasets.Version("1.1.0")


class OpenSubtitlesMonolingual(datasets.GeneratorBasedBuilder):
    """Collection of translated movie subtitles from http://www.opensubtitles.org/.

    One ``BuilderConfig`` per language key in ``_URLs``; each example is one
    subtitle document (a JSON line in the per-language ``.jsonl.gz`` file)
    together with minimal metadata (year, IMDb id, subtitle id).
    """

    VERSION = _VERSION

    # One config per supported language (fr, en, zh_cn, pt, es, ar).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=language,
            version=_VERSION,
            description="{} subtitles".format(language),
        )
        for language in _URLs
    ]

    def _info(self):
        """Return the ``DatasetInfo`` (features, citation, homepage)."""
        features = datasets.Features(
            {
                "subtitle": datasets.Value("string"),
                "meta": {
                    "year": datasets.Value("int32"),
                    "imdbId": datasets.Value("int32"),
                    "subtitleId": datasets.Value("int32"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,  # no default (input, label) pairing
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive for this config's language; single TRAIN split."""
        url = _URLs[self.config.name]
        data_file = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_file},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` tuples, one JSON document per line of *filepath*."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "subtitle": data["subtitles"],
                    "meta": {
                        "year": data["year"],
                        "imdbId": data["IMDbs"],
                        # Filenames look like "<subtitleId>.xml"; strip the suffix.
                        # NOTE(review): int() raises ValueError if the stem is not
                        # numeric — assumed always numeric upstream; confirm.
                        "subtitleId": int(data["filename"][: -len(".xml")]),
                    },
                }
127
128