#!/usr/bin/python3
# -*- coding: utf-8 -*-
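"""
Convert the BUCC 2018 shared-task corpora (de-en, fr-en, ru-en, zh-en)
into a single JSONL file for language identification, one record per
unique sentence:

    {"text": "...", "language": "de", "data_source": "bucc2018", "split": "train"}

Illustrative invocation (the script and directory names here are assumptions):

    python3 bucc2018.py --dataset_dir /path/to/bucc2018 --output_file data/bucc2018.jsonl
"""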
import argparse
from collections import defaultdict
import json
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile

# make the project root importable (for language_identification, project_settings)
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))

from language_identification import LANGUAGE_MAP
from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_dir",
        default=r"E:\programmer\nlp_datasets\bucc2018",
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/bucc2018.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    dataset_dir = Path(args.dataset_dir)

    # extract the .tar.bz2 archives into a temporary directory;
    # skipped when a previous run already created it.
    out_root = Path(tempfile.gettempdir()) / "bucc2018"
    if not out_root.exists():
        out_root.mkdir(parents=True, exist_ok=True)
        print(out_root.as_posix())

        # both the training-gold and the test archives for every pair
        archive_files = [
            "bucc2018-de-en.training-gold.tar.bz2",
            "bucc2018-fr-en.training-gold.tar.bz2",
            "bucc2018-ru-en.training-gold.tar.bz2",
            "bucc2018-zh-en.training-gold.tar.bz2",
            "bucc2018-de-en.test.tar.bz2",
            "bucc2018-fr-en.test.tar.bz2",
            "bucc2018-ru-en.test.tar.bz2",
            "bucc2018-zh-en.test.tar.bz2",
        ]
        for archive_file in archive_files:
            file_path = dataset_dir / archive_file

            with tarfile.open(file_path, "r:bz2") as tar:
                tar.extractall(path=out_root.as_posix())

    # read the extracted files; each pair directory holds one file per
    # language and split, e.g. "bucc2018/de-en/de-en.training.de".
    root_path = out_root / "bucc2018"
    name_list = [
        "de-en", "fr-en", "ru-en", "zh-en"
    ]

    # normalize the BUCC split name "training" to "train"
    split_map = {
        "training": "train",
    }

    text_set = set()
    counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as fout:
        for name in name_list:
            name_path = root_path / name

            for split_ in ["training", "test"]:
                for language in name.split("-"):
                    split_file = name_path / "{}.{}.{}".format(name, split_, language)

                    with open(split_file, "r", encoding="utf-8") as fin:
                        # each line is "<sentence id>\t<sentence text>"
                        for row in fin:
                            row = row.strip()
                            splits = row.split("\t")

                            if len(splits) != 2:
                                print("skip row: {}".format(row))
                                continue

                            text = splits[1]
                            text = text.strip()
                            # normalize characters left over in the source text:
                            # U+00A0 non-breaking space and U+00AD soft hyphen
                            text = text.replace("\u00a0", " ")
                            text = text.replace("\u00ad", "-")

                            # skip duplicates across pairs and splits
                            if text in text_set:
                                continue
                            text_set.add(text)

                            # every language code must be known to LANGUAGE_MAP
                            if language not in LANGUAGE_MAP:
                                raise AssertionError(language)

                            split = split_map.get(split_, split_)
                            row = {
                                "text": text,
                                "language": language,
                                "data_source": "bucc2018",
                                "split": split
                            }
                            row = json.dumps(row, ensure_ascii=False)
                            fout.write("{}\n".format(row))
                            counter[split] += 1

    print("counter: {}".format(counter))
    # remove the temporary extraction directory
    shutil.rmtree(out_root.as_posix())
    return


if __name__ == '__main__':
    main()
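
# Usage sketch for the output (illustrative, not part of the original script):
# the JSONL written above can be loaded back with the `datasets` library.
#
#     from datasets import load_dataset
#
#     # every record has "text", "language", "data_source" and "split" fields
#     dataset = load_dataset("json", data_files="data/bucc2018.jsonl", split="train")
#
#     # keep only rows that came from the BUCC "training" files
#     train_rows = dataset.filter(lambda row: row["split"] == "train")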