"""HuggingFace `datasets` loading script for the OPUS OpenSubtitles v2018 parallel corpus.

Downloads Moses-format bitext archives from the OPUS object store and groups
aligned sentence pairs by IMDb id, yielding one example per movie.
"""

import os
from collections import defaultdict

import datasets

_DESCRIPTION = """\
This is a new collection of translated movie subtitles from http://www.opensubtitles.org/.

IMPORTANT: If you use the OpenSubtitle corpus: Please, add a link to http://www.opensubtitles.org/ to your website and to your reports and publications produced with the data!

This is a slightly cleaner version of the subtitle collection using improved sentence alignment and better language checking.

62 languages, 1,782 bitexts
total number of files: 3,735,070
total number of tokens: 22.10G
total number of sentence fragments: 3.35G
"""

_HOMEPAGE_URL = "http://opus.nlpl.eu/OpenSubtitles.php"
_CITATION = """\
P. Lison and J. Tiedemann, 2016, OpenSubtitles2016: Extracting Large Parallel Corpora from Movie and TV Subtitles. In Proceedings of the 10th International Conference on Language Resources and Evaluation (LREC 2016)
"""
_VERSION = "2018.0.0"
# OpenSubtitles.{lang-pair-folder}.{lang-or-"ids"} -> file name inside the archive
_BASE_NAME = "OpenSubtitles.{}.{}"
# {lang1}-{lang2} must be alphabetically ordered in the URL
_BASE_URL = "https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/moses/{}-{}.txt.zip"


class OpenSubtitlesConfig(datasets.BuilderConfig):
    """BuilderConfig for a set of OpenSubtitles languages.

    The config name is derived deterministically from the sorted language
    codes (e.g. ``de-en-fr``), and ``self.languages`` is stored sorted so
    downstream pairing logic can rely on alphabetical order.
    """

    def __init__(self, *args, languages=None, **kwargs):
        super().__init__(
            *args,
            name="-".join(sorted(languages)),
            **kwargs,
        )
        self.languages = sorted(languages)


class OpenSubtitles(datasets.GeneratorBasedBuilder):
    """Builder that yields one example per movie (IMDb id) with aligned translations."""

    BUILDER_CONFIGS = [
        OpenSubtitlesConfig(
            languages=["en", "fr", "de"],
            description="Translating between en, fr, and de",
            version=datasets.Version(_VERSION),
        )
    ]
    BUILDER_CONFIG_CLASS = OpenSubtitlesConfig

    def _info(self):
        features = {
            "imdbId": datasets.Value("uint32"),
            "meta": {
                "year": datasets.Value("uint32"),
                "subtitleId": {lang: datasets.Value("uint32") for lang in self.config.languages},
                "sentenceIds": {lang: datasets.Sequence(datasets.Value("uint32")) for lang in self.config.languages},
                "sentence_count": datasets.Value("int32"),
            },
            # Keys here define the schema every generated example must match.
            "translations": datasets.Sequence(
                {
                    "source": datasets.Value("string"),
                    "target": datasets.Value("string"),
                }
            ),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract one Moses archive per language pair; single TRAIN split."""

        def _base_url(lang1, lang2):
            return _BASE_URL.format(lang1, lang2)

        download_paths = {}
        for i, lang1 in enumerate(self.config.languages):
            for lang2 in self.config.languages[i + 1 :]:
                # languages is sorted, so lang1 < lang2 always holds; the
                # guard is kept defensively because the URL requires
                # alphabetical ordering of the pair.
                if lang1 < lang2:
                    download_url = _base_url(lang1, lang2)
                else:
                    download_url = _base_url(lang2, lang1)
                path = dl_manager.download_and_extract(download_url)
                download_paths[(lang1, lang2)] = path
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"download_paths": download_paths},
            )
        ]

    @classmethod
    def _extract_info(cls, sentence_id):
        """Parse an OPUS document id like ``xx/1973/12345/6789.xml.gz``.

        Drops the leading language segment and returns the remaining path
        components as ints: ``(year, imdb_id, subtitle_id)``.
        """
        parts = sentence_id[: -len(".xml.gz")].split("/")
        parts.pop(0)  # discard the language-code prefix
        return tuple(map(int, parts))

    def _generate_examples(self, download_paths):
        """Yield ``(imdb_id, example)`` pairs aggregated across all language pairs.

        Only movies that have a subtitle for every configured language are
        emitted, so multi-language configs produce fully-covered examples.
        """
        # Per-movie accumulator, keyed by IMDb id.
        subtitles = defaultdict(
            lambda: {
                "meta": {
                    "year": None,
                    "subtitleId": {lang: None for lang in self.config.languages},
                    "sentenceIds": {lang: [] for lang in self.config.languages},
                    "sentence_count": 0,
                },
                "translations": [],
            }
        )

        for (lang1, lang2), path in download_paths.items():
            # File names inside the archive use the alphabetical pair order.
            if lang1 > lang2:
                lang1, lang2 = lang2, lang1
            folder = f"{lang1}-{lang2}"
            l1_path = os.path.join(path, _BASE_NAME.format(folder, lang1))
            l2_path = os.path.join(path, _BASE_NAME.format(folder, lang2))
            ids_path = os.path.join(path, _BASE_NAME.format(folder, "ids"))

            with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2, open(
                ids_path, encoding="utf-8"
            ) as f3:
                # The three files are line-aligned: sentence, translation, alignment ids.
                for x, y, _id in zip(f1, f2, f3):
                    x = x.strip()
                    y = y.strip()
                    # strip() drops the trailing newline so the last field parses cleanly
                    l1_id, l2_id, l1_sid, l2_sid = _id.strip().split("\t")
                    year, imdb_id, l1_subtitle_id = self._extract_info(l1_id)
                    _, _, l2_subtitle_id = self._extract_info(l2_id)
                    l1_sentence_ids = list(map(int, l1_sid.split(" ")))
                    l2_sentence_ids = list(map(int, l2_sid.split(" ")))

                    entry = subtitles[imdb_id]
                    entry["meta"]["year"] = year
                    entry["meta"]["subtitleId"][lang1] = l1_subtitle_id
                    entry["meta"]["subtitleId"][lang2] = l2_subtitle_id
                    entry["meta"]["sentenceIds"][lang1].extend(l1_sentence_ids)
                    entry["meta"]["sentenceIds"][lang2].extend(l2_sentence_ids)
                    entry["meta"]["sentence_count"] += 1
                    # FIX: keys must match the "translations" feature schema
                    # declared in _info() ("source"/"target"); the original
                    # used "one"/"two", which fails feature encoding.
                    entry["translations"].append({"source": x, "target": y})

        for imdb_id, data in subtitles.items():
            # Emit only movies covered in every configured language.
            if all(data["meta"]["subtitleId"][lang] is not None for lang in self.config.languages):
                yield imdb_id, {
                    "imdbId": imdb_id,
                    "meta": data["meta"],
                    "translations": data["translations"],
                }