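"""Hugging Face `datasets` loading script for the English-Japanese Translation
Alignment Data (Utiyama & Takahashi, 2003): parallel passages from novels.

A minimal usage sketch (the local script path is a placeholder):

    import datasets as ds

    dataset = ds.load_dataset("path/to/this_script.py", split="train")
"""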
import re
import unicodedata
from pathlib import Path
from typing import List, Tuple

from bs4 import BeautifulSoup

import datasets as ds

_DESCRIPTION = "Parallel passages from novels."

_CITATION = """
内山将夫,高橋真弓.(2003) 日英対訳文対応付けデータ.
Masao Utiyama and Mayumi Takahashi. (2003) English-Japanese Translation Alignment Data.
""".strip()

_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/"

_LICENSE = None

_DOWNLOAD_URL = (
    "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/download/align-070215.zip"
)


def preprocess(text: str) -> str:
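    """Clean one table cell: strip translator notes, ruby, and Aozora
    Bunko-style markup, and normalize dashes and quotes."""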
    # Remove translator's note markers such as <注1>.
    text = re.sub(r"<注[0-9]+>", "", text.strip())
    # Remove ruby (furigana) annotations: 《...》.
    text = re.sub(r"《.*?》", "", text)
    # Remove Aozora Bunko editorial markup: ［＃...］.
    text = re.sub(r"［＃.*?］", "", text)
    # Remove full-width-parenthesized hiragana readings: （...）.
    text = re.sub(r"（[\u3040-\u309F]+）", "", text)
    # Normalize spaced minus-sign dashes to full-width double dashes.
    text = re.sub(r" − (.+) − ", r"――\1――", text)
    # Strip underscore emphasis markers.
    text = re.sub(r"_(.+)_", r"\1", text)
    text = re.sub(r" ``$", "''", text.strip())
    text = re.sub(r"^――", "", text.strip())
    # Drop the two characters preceding a leading 第 (chapter marker).
    text = re.sub(r"^..第", "第", text.strip())
    return text.strip()


def parse_html_table(path: Path) -> Tuple[List[str], List[str]]:
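    """Extract aligned (English, Japanese) passages from one alignment page.

    Pages contain three-column alignment tables (the middle column is unused
    here). Consecutive rows are merged until both sides look like complete,
    balanced sentences, so one passage may span several table rows.
    """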
    # Page encodings vary; try each candidate and skip files none can decode.
    for encoding in ("shift_jis", "utf-8", "cp932"):
        try:
            content = path.read_text(encoding=encoding)
            break
        except UnicodeDecodeError:
            continue
    else:
        return [], []

    soup = BeautifulSoup(content, "lxml")
    tables = soup.find_all("table")

    texts_en, texts_ja = [], []
    cur_text_en, cur_text_ja = "", ""

    # Open/close counts used to decide whether the accumulated pair forms a
    # complete passage; a pair is flushed only when all of these balance.
    cur_left_parens, cur_right_parens = 0, 0  # ( and ) on the English side
    cur_left_quote, cur_right_quote = 0, 0  # `` and '' on the English side
    cur_left_parens_ja, cur_right_parens_ja = 0, 0  # 「 and 」
    cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0  # 『 and 』

    for table in tables:
        for tr in table.find_all("tr"):
            # Each row holds three cells; the middle one is not used.
            text_en, _, text_ja = (preprocess(td.text) for td in tr.find_all("td"))
            text_ja = unicodedata.normalize("NFKC", text_ja)

            cur_left_parens += text_en.count("(")
            cur_right_parens += text_en.count(")")

            cur_left_quote += text_en.count("``")
            cur_right_quote += text_en.count("''")

            cur_left_parens_ja += text_ja.count("「")
            cur_right_parens_ja += text_ja.count("」")

            cur_left_parens_ja2 += text_ja.count("『")
            cur_right_parens_ja2 += text_ja.count("』")

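            # Flush the buffered pair only when both sides end in
            # sentence-final punctuation and every bracket/quote pair is
            # balanced; otherwise this row is treated as a fragment and
            # merged into the next one.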
            if (
                text_ja.strip().endswith("。")
                and text_en.strip().endswith(".")
                and cur_left_parens == cur_right_parens
                and cur_left_quote == cur_right_quote
                and cur_left_parens_ja == cur_right_parens_ja
                and cur_left_parens_ja2 == cur_right_parens_ja2
            ):
                texts_en.append((cur_text_en + " " + text_en).strip())
                texts_ja.append((cur_text_ja + text_ja).strip())
                cur_text_en, cur_text_ja = "", ""
                cur_left_parens, cur_right_parens = 0, 0
                cur_left_quote, cur_right_quote = 0, 0
                cur_left_parens_ja, cur_right_parens_ja = 0, 0
                cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
            else:
                cur_text_en += " " + text_en
                cur_text_ja += text_ja

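    # Keep whatever is still buffered at the end of the page as a final,
    # possibly unterminated passage; empty strings are filtered out later.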
    texts_en.append(cur_text_en.strip())
    texts_ja.append(cur_text_ja.strip())

    return texts_en, texts_ja


class EnJaAlignDataset(ds.GeneratorBasedBuilder):
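    """Builder for the English-Japanese Translation Alignment Data."""
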
    VERSION = ds.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            name="default",
            version=VERSION,
            description="",
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        if self.config.name == "default":
            features = ds.Features(
                {
                    "id": ds.Value("string"),
                    "en": ds.Value("string"),
                    "ja": ds.Value("string"),
                    "source": ds.Value("string"),
                }
            )

        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        paths = list(Path(data_path, "align/htmPages").glob("*.htm"))

        # All pages go into a single train split.
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"paths": paths},
            )
        ]

    def _preprocess_ja(self, text: str) -> str:
        # Remove section/chapter numbering like "1." or "1.2.".
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        text = re.sub(r"^――", "", text).strip()
        return text

    def _preprocess_en(self, text: str) -> str:
        # Remove section/chapter numbering like "1." or "1.2.".
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        # Collapse a tripled backtick (and its stray closing quote) into ``.
        text = re.sub(r"```(.*?)'", r"``\1", text).strip()
        # Convert TeX-style ``...'' quotes to plain double quotes.
        text = re.sub(r"``(.*?)''", r'"\1"', text).strip()
        return text

    def _generate_examples(self, paths: List[Path]):
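        """Yield aligned passages keyed by page name and running index.

        ``strict=True`` on ``zip`` (Python 3.10+) is a consistency check
        only; ``parse_html_table`` returns lists of equal length.
        """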
        for path in paths:
            idx = 0
            texts_en, texts_ja = parse_html_table(path)
            for text_en, text_ja in zip(texts_en, texts_ja, strict=True):
                row = {
                    "id": f"{path.stem}/{idx}",
                    "en": self._preprocess_en(text_en),
                    "ja": self._preprocess_ja(text_ja),
                    "source": path.name,
                }

                # Emit only pairs where both sides survive cleanup non-empty.
                if (
                    isinstance(row["en"], str)
                    and isinstance(row["ja"], str)
                    and len(row["en"]) > 0
                    and len(row["ja"]) > 0
                ):
                    yield f"{path.name}/{idx}", row
                    idx += 1