# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datasets


_DESCRIPTION = """\
Monolingual corpus from http://www.opensubtitles.org/.
Please check http://www.opensubtitles.org/ for the available corpora and licenses.
"""

_HOMEPAGE_URL = "http://opus.nlpl.eu"

_CITATION = """\
P. Lison and J. Tiedemann, 2016, OpenSubtitles2016: Extracting Large Parallel Corpora
from Movie and TV Subtitles. In Proceedings of the 10th International Conference on
Language Resources and Evaluation (LREC 2016)
"""

_BASE_URL = "https://object.pouta.csc.fi/OPUS-{}/{}/mono/{}.txt.gz"

# Please note that only a few corpus/version/language combinations are listed
# here. You can use a config to generate data for any combination hosted on OPUS.
_LANGUAGES = [
    ("OpenSubtitles", "v2018", "en"),
]


class OpenSubtitlesConfig(datasets.BuilderConfig):
    def __init__(self, *args, corpus=None, lang=None, **kwargs):
        # `corpus` is expected to contain the corpus name and version
        # separated by whitespace, e.g. "OpenSubtitles v2018".
        corpus = corpus.strip()
        splits = corpus.split()
        corpus = splits[0]
        corpus_version = splits[1]
        super().__init__(
            *args,
            name=f"{corpus}-{corpus_version}-{lang}",
            **kwargs,
        )
        self.corpus = corpus
        self.corpus_version = corpus_version
        self.lang = lang


class OpenSubtitles(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = OpenSubtitlesConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        def _base_url(corpus, corpus_version, lang):
            return _BASE_URL.format(corpus, corpus_version, lang)

        download_url = _base_url(self.config.corpus, self.config.corpus_version, self.config.lang)
        path = dl_manager.download_and_extract(download_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path},
            )
        ]

    def _generate_examples(self, datapath):
        # Each line of the extracted text file becomes one example; the
        # enumeration index serves as both the example key and the "id" field.
        with open(datapath, encoding="utf-8") as f:
            for text_counter, line in enumerate(f):
                line = line.strip()
                yield text_counter, {"id": str(text_counter), "text": line}
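
if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself; the guard
    # keeps it inert when the module is loaded by `datasets`. It assumes this
    # file is saved locally as `open_subtitles_mono.py` (a hypothetical
    # filename) and that the corresponding `.txt.gz` file exists on the OPUS
    # server. Depending on your `datasets` version, loading a local script may
    # also require `trust_remote_code=True`.
    from datasets import load_dataset

    # `corpus` must contain the corpus name and version separated by
    # whitespace, matching the parsing in `OpenSubtitlesConfig.__init__`.
    dataset = load_dataset(
        "open_subtitles_mono.py",
        corpus="OpenSubtitles v2018",
        lang="en",
    )
    print(dataset["train"][0])  # e.g. {"id": "0", "text": "..."}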