#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
import json
import os
from pathlib import Path
import re
import sys
import tempfile
from typing import List
import zipfile
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))
from datasets import load_dataset, DownloadMode
import requests
from tqdm import tqdm
from xml.etree import ElementTree
from language_identification import LANGUAGE_MAP
from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_url",
        default="http://optima.jrc.it/Resources/ECDC-TM/ECDC-TM.zip",
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/europa_ecdc_tm.jsonl"),
        type=str
    )
    args = parser.parse_args()
    return args


_AVAILABLE_LANGUAGES = (
    "bg",
    "cs",
    "da",
    "de",
    "el",
    "es",
    "en",
    "et",
    "fi",
    "fr",
    "ga",
    "hu",
    "is",
    "it",
    "lt",
    "lv",
    "mt",
    "nl",
    "no",
    "pl",
    "pt",
    "ro",
    "sk",
    "sl",
    "sv",
)
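

# Illustration only (an assumed shape, inferred from the XPath in _find_sentence
# below, not verified against the downloaded archive): each <tu> translation unit
# in the TMX body is expected to look roughly like
#
#   <tu>
#     <tuv xml:lang="EN"><seg>sentence in English</seg></tuv>
#     <tuv xml:lang="DE"><seg>sentence in German</seg></tuv>
#     ...
#   </tu>
#
# _find_sentence() binds the "xml" prefix to the XML namespace and returns the
# <seg> text for the requested language, or None if the unit has no such variant.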
def _find_sentence(translation, language):
    namespaces = {"xml": "http://www.w3.org/XML/1998/namespace"}
    seg_tag = translation.find(path=f".//tuv[@xml:lang='{language.upper()}']/seg", namespaces=namespaces)
    if seg_tag is not None:
        return seg_tag.text
    return None


def main():
    args = get_args()

    data_url = Path(args.data_url)

    # download the zip archive into the system temp directory (skipped if already cached)
    filename = os.path.join(tempfile.gettempdir(), data_url.name)
    print(filename)
    if not os.path.exists(filename):
        resp = requests.get(args.data_url)
        with open(filename, "wb") as f:
            f.write(resp.content)

    # unzip
    unzip_dir = "data/{}".format(data_url.stem)
    if not os.path.exists(unzip_dir):
        zip_file = zipfile.ZipFile(filename)
        zip_file.extractall(unzip_dir)

    filepath = os.path.join(unzip_dir, "ECDC-TM", "ECDC.tmx")

    xml_element_tree = ElementTree.parse(filepath)

    xml_body_tag = xml_element_tree.getroot().find("body")
    assert xml_body_tag is not None, f"Invalid data: <body></body> tag not found in {filepath}"

    # Translations are stored in <tu>...</tu> tags
    translation_units = xml_body_tag.iter("tu")

    # deduplicate sentences and count how many rows are written per split
    text_set = set()
    counter = defaultdict(int)
    split = "train"
    with open(args.output_file, "w", encoding="utf-8") as f:
        for _id, translation in enumerate(translation_units):
            for language in _AVAILABLE_LANGUAGES:
                text = _find_sentence(translation=translation, language=language)
                if text is None:
                    continue
                text = text.strip()

                # skip sentences that were already emitted for another translation unit
                if text in text_set:
                    continue
                text_set.add(text)

                if language not in LANGUAGE_MAP.keys():
                    raise AssertionError("language: {}, text: {}".format(language, text))

                row = {
                    "text": text,
                    "language": language,
                    "data_source": "europa_ecdc_tm",
                    "split": split
                }
                row = json.dumps(row, ensure_ascii=False)
                f.write("{}\n".format(row))
                counter[split] += 1

    print("counter: {}".format(counter))
    return


if __name__ == "__main__":
    main()
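

# Example invocation (a sketch; it assumes the script is run from the project
# root and that project_settings is importable, and both defaults above are
# used when the flags are omitted):
#
#   python3 examples/preprocess/preprocess_europa_ecdc_tm.py \
#       --data_url http://optima.jrc.it/Resources/ECDC-TM/ECDC-TM.zip \
#       --output_file data/europa_ecdc_tm.jsonl
#
# Each output line is one JSON record, e.g.
#   {"text": "...", "language": "en", "data_source": "europa_ecdc_tm", "split": "train"}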