#!/usr/bin/python3
# -*- coding: utf-8 -*-
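"""
Flatten the XNLI dataset (all_languages config) into a single JSONL file:
one deduplicated text per line, tagged with its language, the data source
("xnli"), and the split it came from.
"""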
import argparse
from collections import defaultdict
import json
import os
import sys
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))
from datasets import load_dataset, DownloadMode
from tqdm import tqdm
from language_identification import LANGUAGE_MAP
from project_settings import project_path

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_path", default="xnli", type=str)
    parser.add_argument("--dataset_name", default="all_languages", type=str)
    parser.add_argument(
        "--dataset_cache_dir",
        default=(project_path / "hub_datasets").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_file",
        # Convert the Path to a string so the default matches type=str,
        # consistent with --dataset_cache_dir above.
        default=(project_path / "data/xnli.jsonl").as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args

def main():
    args = get_args()

    dataset_dict = load_dataset(
        path=args.dataset_path,
        name=args.dataset_name,
        cache_dir=args.dataset_cache_dir,
        # download_mode=DownloadMode.FORCE_REDOWNLOAD
    )
    print(dataset_dict)

    # Normalize split names to "train" / "validation" / "test".
    split_map = {
        "dev": "validation",
        "validate": "validation"
    }

    # XNLI uses "zh"; the project-wide LANGUAGE_MAP expects "zh-cn".
    language_map = {
        "zh": "zh-cn"
    }

    text_set = set()
    counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as f:
        for k, v in dataset_dict.items():
            split = split_map.get(k, k)
            if split not in ("train", "validation", "test"):
                print("skip split: {}".format(split))
                continue

            for sample in tqdm(v):
                hypothesis = sample["hypothesis"]
                premise = sample["premise"]

                # `premise` maps language code -> text, while `hypothesis`
                # holds parallel "language" and "translation" lists.
                premise_language_list = list()
                premise_text_list = list()
                for language, text in premise.items():
                    premise_language_list.append(language)
                    premise_text_list.append(text)

                language_list = hypothesis["language"] + premise_language_list
                translation_list = hypothesis["translation"] + premise_text_list

                for language, translation in zip(language_list, translation_list):
                    text = translation.strip()
                    language = language_map.get(language, language)

                    # Deduplicate texts across all splits.
                    if text in text_set:
                        continue
                    text_set.add(text)

                    if language not in LANGUAGE_MAP.keys():
                        raise AssertionError(language)

                    row = {
                        "text": text,
                        "language": language,
                        "data_source": "xnli",
                        "split": split
                    }
                    row = json.dumps(row, ensure_ascii=False)
                    f.write("{}\n".format(row))
                    counter[split] += 1

    print("counter: {}".format(counter))
    return


if __name__ == '__main__':
    main()
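
# Example usage (the file name xnli.py is illustrative; the output directory
# data/ under project_path must already exist):
#
#   python3 xnli.py --dataset_path xnli --dataset_name all_languages
#
# Each line of the resulting data/xnli.jsonl is one JSON object of the form:
#
#   {"text": "...", "language": "en", "data_source": "xnli", "split": "train"}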