Datasets:
ArXiv:
License:
File size: 3,464 Bytes
bba7c8f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
import json
import os
from pathlib import Path
import re
import sys
import tempfile
from typing import List
import zipfile
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))
from datasets import load_dataset, DownloadMode
import requests
from tqdm import tqdm
from xml.etree import ElementTree
from language_identification import LANGUAGE_MAP
from project_settings import project_path
def get_args():
    """Parse command-line options for the ECDC-TM conversion script."""
    parser = argparse.ArgumentParser()
    # Source archive published by the EU Joint Research Centre.
    parser.add_argument(
        "--data_url",
        type=str,
        default="http://optima.jrc.it/Resources/ECDC-TM/ECDC-TM.zip",
    )
    # Destination JSON-lines file, resolved relative to the project root.
    parser.add_argument(
        "--output_file",
        type=str,
        default=(project_path / "data/europa_ecdc_tm.jsonl"),
    )
    return parser.parse_args()
_AVAILABLE_LANGUAGES = (
"bg",
"cs",
"da",
"de",
"el",
"es",
"en",
"et",
"fi",
"fr",
"ga",
"hu",
"is",
"it",
"lt",
"lv",
"mt",
"nl",
"no",
"pl",
"pt",
"ro",
"sk",
"sl",
"sv",
)
def _find_sentence(translation, language):
namespaces = {"xml": "http://www.w3.org/XML/1998/namespace"}
seg_tag = translation.find(path=f".//tuv[@xml:lang='{language.upper()}']/seg", namespaces=namespaces)
if seg_tag is not None:
return seg_tag.text
return None
def main():
    """Download the ECDC-TM archive, extract it, and convert the TMX body
    into a JSON-lines language-identification corpus (one record per
    unique sentence, fields: text / language / data_source / split).
    """
    args = get_args()

    data_url = Path(args.data_url)

    # Download the archive into the system temp dir (skipped when cached).
    filename = os.path.join(tempfile.gettempdir(), data_url.name)
    print(filename)
    if not os.path.exists(filename):
        # Stream the download so the whole archive is never held in memory,
        # and fail loudly on HTTP errors instead of silently saving an
        # error page as the zip file.
        with requests.get(args.data_url, stream=True) as resp:
            resp.raise_for_status()
            with open(filename, "wb") as f:
                for chunk in resp.iter_content(chunk_size=1 << 20):
                    f.write(chunk)

    # Unzip (skipped when the extraction dir already exists).
    unzip_dir = "data/{}".format(data_url.stem)
    if not os.path.exists(unzip_dir):
        # `with` closes the archive handle; the original leaked it.
        with zipfile.ZipFile(filename) as zip_file:
            zip_file.extractall(unzip_dir)

    filepath = os.path.join(unzip_dir, "ECDC-TM", "ECDC.tmx")

    xml_element_tree = ElementTree.parse(filepath)
    xml_body_tag = xml_element_tree.getroot().find("body")
    # Explicit raise instead of `assert`, which is stripped under `python -O`.
    if xml_body_tag is None:
        raise AssertionError(f"Invalid data: <body></body> tag not found in {filepath}")

    # Translations are stored in <tu>...</tu> tags
    translation_units = xml_body_tag.iter("tu")

    text_set = set()
    counter = defaultdict(int)
    split = "train"
    with open(args.output_file, "w", encoding="utf-8") as f:
        for translation in translation_units:
            for language in _AVAILABLE_LANGUAGES:
                text = _find_sentence(translation=translation, language=language)
                if text is None:
                    continue
                text = text.strip()
                # Deduplicate: keep only the first occurrence of a sentence.
                if text in text_set:
                    continue
                text_set.add(text)

                if language not in LANGUAGE_MAP.keys():
                    raise AssertionError("language: {}, text: {}".format(language, text))

                row = {
                    "text": text,
                    "language": language,
                    "data_source": "europa_ecdc_tm",
                    "split": split
                }
                row = json.dumps(row, ensure_ascii=False)
                f.write("{}\n".format(row))
                counter[split] += 1

    print("counter: {}".format(counter))
    return


if __name__ == "__main__":
    main()
|