# coding=utf-8
# JParaCrawl Dataset
# Lint as: python3
"""JParaCrawl: A Large Scale Web-Based English-Japanese Parallel Corpus."""
from functools import partial
from pathlib import Path
import csv
import unicodedata
import packaging.version
import pandas as pd
import datasets
from datasets.features import Translation, Value
_DESCRIPTION = """\
JParaCrawl is the largest publicly available English-Japanese parallel corpus created by NTT.
It was created by largely crawling the web and automatically aligning parallel sentences.
"""
_CITATION = """\
@inproceedings{morishita-etal-2020-jparacrawl,
title = "{JP}ara{C}rawl: A Large Scale Web-Based {E}nglish-{J}apanese Parallel Corpus",
author = "Morishita, Makoto and
Suzuki, Jun and
Nagata, Masaaki",
booktitle = "Proceedings of The 12th Language Resources and Evaluation Conference",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://www.aclweb.org/anthology/2020.lrec-1.443",
pages = "3603--3609",
ISBN = "979-10-95546-34-4",
}
@misc{morishita2022jparacrawl,
title={JParaCrawl v3.0: A Large-scale English-Japanese Parallel Corpus},
author={Makoto Morishita and Katsuki Chousa and Jun Suzuki and Masaaki Nagata},
year={2022},
eprint={2202.12607},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_LICENSE = """\
Terms of Use for Bilingual Data, Monolingual Data and Trained Models
Nippon Telegraph and Telephone Corporation (Hereinafter referred to as "our company".) will provide bilingual data, monolingual data and trained models (Hereinafter referred to as "this data.") subject to your acceptance of these Terms of Use. We assume that you have agreed to these Terms of Use when you start using this data (including downloads).
Article 1 (Use conditions)
This data can only be used for research purposes involving information analysis (Including, but not limited to, replication and distribution. Hereinafter the same in this article.). The same applies to the derived data created based on this data. However, this data is not available for commercial use, including the sale of translators trained using this data.
Article 2 (Disclaimer)
Our company does not warrant the quality, performance or any other aspects of this data. We shall not be liable for any direct or indirect damages caused by the use of this data. Our company shall not be liable for any damage to the system caused by the installation of this data.
Article 3 (Other).
This data may be changed in whole or in part, or provision of this data may be interrupted or stopped at our company’s discretion without prior notice.
"""
_VERSION = "3.0.0" # 2.0 for zh-ja pair
_DATA_URL = "http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/release/%s/bitext/%s-ja.tar.gz"
_HOMEPAGE = "https://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/"
_LANGUAGE_PAIRS = [("en", "ja"), ("zh", "ja")]
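
# For reference, substituting _VERSION into _DATA_URL, the en-ja v3.0 archive
# resolves to:
# http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/release/3.0/bitext/en-ja.tar.gz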
class JParaCrawlConfig(datasets.BuilderConfig):
"""BuilderConfig for JParaCrawl."""
def __init__(self, language_pair=(None, None), **kwargs):
"""BuilderConfig for JParaCrawl.
        Args:
          language_pair: pair of languages that will be used for translation.
            Should contain 2-letter coded strings. The first is used as the
            source and the second as the target in supervised mode. For
            example: ("en", "ja").
**kwargs: keyword arguments forwarded to super.
"""
source, target = language_pair
super(JParaCrawlConfig, self).__init__(
name=f"{source}-{target}",
description=f"{source}-{target} Translation dataset",
version=datasets.Version(_VERSION, ""),
**kwargs,
)
# Validate language pair.
assert "ja" in language_pair, (
"Config language pair must contain `ja`, got: %s", language_pair)
non_ja = source if target == "ja" else source
assert non_ja in ["en", "zh"], ("Invalid non-ja language in pair: %s", non_ja)
self.language_pair = language_pair
class JParaCrawl(datasets.GeneratorBasedBuilder):
"""JParaCrawl machine translation dataset."""
BUILDER_CONFIGS = [
JParaCrawlConfig(language_pair=("en", "ja")),
JParaCrawlConfig(language_pair=("zh", "ja")),
]
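
    # Config names resolve to "en-ja" and "zh-ja" (see JParaCrawlConfig.__init__),
    # so a language pair is selected by that name when loading the dataset.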
def _info(self):
source, target = self.config.language_pair
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"domain": Value(dtype="string", id=None),
"url": Value(dtype="string", id=None),
"probability": Value(dtype="float32", id=None),
"translation": Translation(languages=self.config.language_pair),
}
),
supervised_keys=(source, target),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
source, target = self.config.language_pair
        non_ja = source if target == "ja" else target
        # Per the note on _VERSION, the zh-ja pair is published as release 2.0.
        v = packaging.version.parse("2.0" if non_ja == "zh" else _VERSION)
archive = dl_manager.download_and_extract(
_DATA_URL % (f"{v.major}.{v.minor}", non_ja)
)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"extracted_path": archive}
)
]
def _generate_examples(self, extracted_path):
"""This function returns the examples in the raw form."""
source, target = self.config.language_pair
        non_ja = source if target == "ja" else target
df = None
for path in Path(extracted_path).glob("**/*"):
if path.name == f"{non_ja}-ja.bicleaner05.txt":
                df = pd.read_csv(
                    path,
                    header=None,
                    index_col=None,
                    # "\t\t" should not occur within a record; it acts as a
                    # dummy delimiter so each raw line lands in a single column
                    # and is split by hand below.
                    sep="\t\t",
                    encoding="utf8",
                    engine="python",
                    quoting=csv.QUOTE_NONE,  # keep quote characters as plain text
                )
break
        assert df is not None, f"bitext file not found under {extracted_path}"
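        # Each raw line carries five tab-separated fields:
        #   domain \t url \t bicleaner probability \t <non-ja text> \t <ja text>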
def _split(line: str, col: int) -> str:
return line.split('\t', 4)[col].strip()
df['domain'] = df[0].apply(partial(_split, col=0))
df['url'] = df[0].apply(partial(_split, col=1))
df['probability'] = df[0].apply(partial(_split, col=2))
df[non_ja] = df[0].apply(partial(_split, col=3))
df['ja'] = df[0].apply(partial(_split, col=4))
df = df.drop_duplicates(subset=[non_ja, 'ja'])
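        # NFKC normalization folds compatibility characters (e.g. the full-width
        # Latin letters common in Japanese web text) into their canonical forms.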
def _normalize(s: str) -> str:
return unicodedata.normalize("NFKC", s).replace('\t', ' ').strip()
_id = 0
        for _, row in df.iterrows():
result = {
"domain": row["domain"],
"url": row["url"],
"probability": float(row["probability"]),
"translation": {
non_ja: _normalize(row[non_ja]),
"ja": _normalize(row["ja"]),
},
}
            # Make sure that both translations are non-empty.
            if all(result["translation"].values()):
yield _id, result
_id += 1
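

if __name__ == "__main__":
    # Minimal usage sketch, assuming this script is saved locally as
    # "jparacrawl.py"; recent `datasets` releases may additionally require
    # trust_remote_code=True for script-based datasets. The archive download
    # is large, so this is illustrative rather than a quick smoke test.
    ds = datasets.load_dataset("jparacrawl.py", "en-ja", split="train")
    print(ds[0])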