#!/usr/bin/python3
# -*- coding: utf-8 -*-
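"""
Flatten the language-pair subsets of the `setimes` dataset into a single JSONL
file, writing one {"text", "language", "data_source", "split"} record per
unique sentence for the language_identification project.
"""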
import argparse
from collections import defaultdict
import json
import os
import sys
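
# Make the project root importable so the local imports below resolve when the
# script is run directly.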
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))

import datasets
from datasets import load_dataset, DownloadMode
from tqdm import tqdm

from language_identification import LANGUAGE_MAP
from project_settings import project_path

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_path", default="setimes", type=str)
    parser.add_argument(
        "--dataset_cache_dir",
        default=(project_path / "hub_datasets").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/setimes.jsonl").as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args

def main():
    args = get_args()
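
    # Every language-pair configuration of the dataset; each subset yields
    # sentence-aligned pairs under a "translation" field.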
    name_list = [
"bg-bs",
"bg-el",
"bg-en",
"bg-hr",
"bg-mk",
"bg-ro",
"bg-sq",
"bg-sr",
"bg-tr",
"bs-el",
"bs-en",
"bs-hr",
"bs-mk",
"bs-ro",
"bs-sq",
"bs-sr",
"bs-tr",
"el-en",
"el-hr",
"el-mk",
"el-ro",
"el-sq",
"el-sr",
"el-tr",
"en-hr",
"en-mk",
"en-ro",
"en-sq",
"en-sr",
"en-tr",
"hr-mk",
"hr-ro",
"hr-sq",
"hr-sr",
"hr-tr",
"mk-ro",
"mk-sq",
"mk-sr",
"mk-tr",
"ro-sq",
"ro-sr",
"ro-tr",
"sq-sr",
"sq-tr",
"sr-tr",
]

    # TODO: http://nlp.ffzg.hr is unreachable.
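    # Deduplicate sentences globally across subsets and splits, and count how
    # many rows are written per split.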
    text_set = set()
    counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as f:
        for name in name_list:
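            # Skip subsets whose data cannot be downloaded or built instead of
            # aborting the whole export.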
            try:
                dataset_dict = load_dataset(
                    path=args.dataset_path,
                    name=name,
                    cache_dir=args.dataset_cache_dir,
                    # download_mode=DownloadMode.FORCE_REDOWNLOAD
                )
            except datasets.builder.DatasetGenerationError:
                print("skip subset: {}".format(name))
                continue
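
            # A DatasetDict maps split names to Dataset objects; only the
            # standard splits are exported.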
            for k, v in dataset_dict.items():
                split = k
                if split not in ("train", "validation", "test"):
                    print("skip split: {}".format(split))
                    continue
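
                # Emit each side of the aligned sentence pair as its own
                # monolingual record.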
                for sample in tqdm(v):
                    translation = sample["translation"]
                    for language, text in translation.items():
                        text = text.strip()
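                        # Replace non-breaking spaces and soft hyphens with
                        # plain ASCII equivalents.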
                        text = text.replace("\u00a0", " ")
                        text = text.replace("\u00ad", "-")

                        if text in text_set:
                            continue
                        text_set.add(text)
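
                        # Fail loudly on a language code that LANGUAGE_MAP does
                        # not know, rather than writing a mislabeled row.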
                        if language not in LANGUAGE_MAP.keys():
                            raise AssertionError("language: {}, text: {}".format(language, text))

                        row = {
                            "text": text,
                            "language": language,
                            "data_source": "setimes",
                            "split": split
                        }
                        row = json.dumps(row, ensure_ascii=False)
                        f.write("{}\n".format(row))
                        counter[split] += 1
print("counter: {}".format(counter))
return

if __name__ == "__main__":
    main()