Commit 6c980bf by HoneyTian (1 parent: 4cba128)
README.md CHANGED
@@ -19,8 +19,8 @@ license: apache-2.0
  | scandi_langid | | TRAIN: 239618, TEST: 59840 | | [kardosdrur/scandi-langid](https://huggingface.co/datasets/kardosdrur/scandi-langid) |
  | nordic_langid | [Discriminating Between Similar Nordic Languages](https://aclanthology.org/2021.vardial-1.8/) | TRAIN: 226159, TEST: 10700 | Focuses on the distinction between six Nordic languages: Danish, Swedish, Norwegian (Nynorsk), Norwegian (Bokmål), Faroese, and Icelandic. | [strombergnlp/nordic_langid](https://huggingface.co/datasets/strombergnlp/nordic_langid) |
  | mike0307 | [Mike0307/language-detection](https://huggingface.co/datasets/Mike0307/language-detection) | TRAIN: 33095, VALID: 4040, TEST: 4048 | | |
- | tatoeba | [tatoeba](https://tatoeba.org/); [Tatoeba Paper](https://arxiv.org/abs/1812.10464v2) | | Tatoeba is a collection of sentences and translations. | [tatoeba](https://huggingface.co/datasets/tatoeba) |
- | bucc2018 | [bucc2018](https://comparable.limsi.fr/bucc2018/bucc2018-task.html) | | Shared task: identifying parallel sentences in comparable corpora. | |
+ | tatoeba | [tatoeba](https://tatoeba.org/); [Tatoeba Paper](https://arxiv.org/abs/1812.10464v2) | TRAIN: 702895 | Tatoeba is a collection of sentences and translations. | [tatoeba](https://huggingface.co/datasets/tatoeba) |
+ | bucc2018 | [bucc2018](https://comparable.limsi.fr/bucc2018/bucc2018-task.html) | TRAIN: 2173318, TEST: 2125879 | Shared task: identifying parallel sentences in comparable corpora. Languages: de, en, fr, ru, zh. | |
 
 
 
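Each row in the two new data files is a JSON object with `text`, `language`, `data_source`, and `split` fields, as written by the preprocessing scripts added below. A minimal sketch of one such row (the sentence itself is a hypothetical example):

```python
import json

# Hypothetical sample row; the field names match what the
# preprocessing scripts in this commit write to the JSONL files.
row = {
    "text": "Dies ist ein Beispielsatz.",
    "language": "de",
    "data_source": "bucc2018",
    "split": "train",
}
print(json.dumps(row, ensure_ascii=False))
```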
data/ducc2018.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8768a7e674e4aeb919fd745847a09feff90b641de8e0fcbb7dddc1a60cadbc6f
+ size 877661718
data/tatoeba.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:027bab142da675b190f9bd09e72a20529232729fde8f532938057b32d690b52a
+ size 82855499
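Both `.jsonl` files are tracked with Git LFS, so the repository stores only the pointer files above (oid and size), not the data itself. Once the actual objects have been fetched, the files can be read with the pinned `datasets` library; a minimal sketch, assuming the files have been pulled into `data/`:

```python
from datasets import load_dataset

# Minimal sketch: read the LFS-backed JSONL files with the generic
# "json" loader. The local paths assume the LFS objects were fetched.
dataset = load_dataset(
    "json",
    data_files={
        "tatoeba": "data/tatoeba.jsonl",
        "bucc2018": "data/ducc2018.jsonl",
    },
)
print(dataset)
```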
examples/preprocess/preprocess_bucc2018.py ADDED
@@ -0,0 +1,133 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ from pathlib import Path
+ import shutil
+ import sys
+ import tarfile
+ import tempfile
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from language_identification import LANGUAGE_MAP
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--dataset_dir",
+         default=r"E:\programmer\nlp_datasets\ducc2018",
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/ducc2018.jsonl"),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     dataset_dir = Path(args.dataset_dir)
+
+     # extract the downloaded archives into a temporary directory
+     out_root = Path(tempfile.gettempdir()) / "bucc2018"
+     if not out_root.exists():
+         out_root.mkdir(parents=True, exist_ok=True)
+     print(out_root.as_posix())
+
+     train_files = [
+         "bucc2018-de-en.training-gold.tar.bz2",
+         "bucc2018-fr-en.training-gold.tar.bz2",
+         "bucc2018-ru-en.training-gold.tar.bz2",
+         "bucc2018-zh-en.training-gold.tar.bz2",
+         "bucc2018-de-en.test.tar.bz2",
+         "bucc2018-fr-en.test.tar.bz2",
+         "bucc2018-ru-en.test.tar.bz2",
+         "bucc2018-zh-en.test.tar.bz2",
+     ]
+     for train_file in train_files:
+         file_path = dataset_dir / train_file
+
+         with tarfile.open(file_path, "r:bz2") as tar:
+             tar.extractall(path=out_root.as_posix())
+
+     # read the extracted monolingual files; write one JSONL row per unique sentence
+     root_path = out_root / "bucc2018"
+     name_list = [
+         "de-en", "fr-en", "ru-en", "zh-en"
+     ]
+
+     split_map = {
+         "training": "train",
+     }
+
+     language_map = {
+         "zh": "zh-cn"
+     }
+
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as fout:
+         for name in name_list:
+             name_path = root_path / name
+
+             for split_ in ["training", "test"]:
+                 for language in name.split("-"):
+                     train_file = name_path / "{}.{}.{}".format(name, split_, language)
+
+                     with open(train_file, "r", encoding="utf-8") as fin:
+                         for row in fin:
+                             row = str(row).strip()
+                             splits = row.split("\t")
+
+                             if len(splits) != 2:
+                                 print("skip row: {}".format(row))
+                                 continue
+
+                             text = splits[1]
+                             text = text.strip()
+                             text = text.replace("\u00a0", " ")  # non-breaking space -> regular space
+                             text = text.replace("\u00ad", "-")  # soft hyphen -> hyphen
+
+                             if text in text_set:
+                                 continue
+                             text_set.add(text)
+
+                             language = language_map.get(language, language)
+                             if language not in LANGUAGE_MAP.keys():
+                                 raise AssertionError(language)
+
+                             if split_ in split_map.keys():
+                                 split = split_map[split_]
+                             else:
+                                 split = split_
+                             row = {
+                                 "text": text,
+                                 "language": language,
+                                 "data_source": "bucc2018",
+                                 "split": split
+                             }
+                             row = json.dumps(row, ensure_ascii=False)
+                             fout.write("{}\n".format(row))
+                             counter[split] += 1
+
+     print("counter: {}".format(counter))
+     shutil.rmtree(out_root.as_posix())
+     return
+
+
+ if __name__ == '__main__':
+     main()
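The script expects each extracted monolingual file (e.g. `de-en.training.de`) to contain one tab-separated (sentence id, sentence) pair per line and keeps only the sentence. A minimal sketch of that parsing step, with a hypothetical input line:

```python
# Hypothetical input line in the BUCC2018 tab-separated layout:
# a sentence id, a tab, then the sentence itself.
line = "de-en.de.000001\tDies ist ein Beispielsatz.\n"

splits = line.strip().split("\t")
assert len(splits) == 2  # rows with a different shape are skipped
sentence_id, text = splits
print(sentence_id)  # de-en.de.000001
print(text)         # Dies ist ein Beispielsatz.
```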
examples/preprocess/preprocess_nbnn.py CHANGED
@@ -41,6 +41,7 @@ def main():
          path=args.dataset_path,
          cache_dir=args.dataset_cache_dir,
          # download_mode=DownloadMode.FORCE_REDOWNLOAD
+         streaming=True
      )
      print(dataset_dict)
 
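The added `streaming=True` makes `load_dataset` return `IterableDataset` objects, so samples are yielded on the fly instead of the whole dataset being downloaded and materialized in the cache first, which helps with large corpora. A minimal sketch of iterating a streamed split (the dataset path is a placeholder, not the actual nbnn dataset path):

```python
from datasets import load_dataset

# Minimal sketch: with streaming=True the splits are IterableDatasets
# and are consumed by iteration rather than random access.
dataset_dict = load_dataset(path="path/to/dataset", streaming=True)
for idx, sample in enumerate(dataset_dict["train"]):
    print(sample)
    if idx >= 2:  # peek at the first few samples only
        break
```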
examples/preprocess/preprocess_tatoeba.py ADDED
@@ -0,0 +1,91 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from language_identification import LANGUAGE_MAP
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset_path", default="tatoeba", type=str)
+     parser.add_argument(
+         "--dataset_cache_dir",
+         default=(project_path / "hub_datasets").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/tatoeba.jsonl"),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     name_list = ["en-mr", "eo-nl", "es-gl", "es-pt", "fr-ru"]
+
+     dataset_dict_list = list()
+     for name in name_list:
+         dataset_dict = load_dataset(
+             path=args.dataset_path,
+             name=name,
+             cache_dir=args.dataset_cache_dir,
+             # download_mode=DownloadMode.FORCE_REDOWNLOAD
+         )
+         print(dataset_dict)
+         dataset_dict_list.append((name, dataset_dict))
+
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as f:
+         for _, dataset_dict in dataset_dict_list:
+             for k, v in dataset_dict.items():
+                 split = k
+                 if split not in ("train", "validation", "test"):
+                     print("skip split: {}".format(split))
+                     continue
+
+                 for sample in tqdm(v):
+                     translation = sample["translation"]
+                     for language, text in translation.items():
+                         text = text.strip()
+
+                         if text in text_set:
+                             continue
+                         text_set.add(text)
+
+                         if language not in LANGUAGE_MAP.keys():
+                             raise AssertionError(language)
+
+                         row = {
+                             "text": text,
+                             "language": language,
+                             "data_source": "tatoeba",
+                             "split": split
+                         }
+                         row = json.dumps(row, ensure_ascii=False)
+                         f.write("{}\n".format(row))
+                         counter[split] += 1
+
+     print("counter: {}".format(counter))
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
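Each Tatoeba sample holds a `translation` dict keyed by language code, one entry per side of the pair, which is why the loop above emits a separate row per language. A minimal sketch of that fan-out with a hypothetical sample:

```python
# Hypothetical Tatoeba-style sample from the "es-pt" config:
# the translation dict maps language codes to parallel sentences.
sample = {"translation": {"es": "Hola, mundo.", "pt": "Olá, mundo."}}

for language, text in sample["translation"].items():
    row = {
        "text": text.strip(),
        "language": language,
        "data_source": "tatoeba",
        "split": "train",
    }
    print(row)  # one language-identification row per side of the pair
```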
language_identification.py CHANGED
@@ -39,9 +39,11 @@ LANGUAGE_MAP = {
      "de": "german",
      "el": "modern greek",
      "en": "english",
+     "eo": "esperanto",
      "es": "spanish",
      "fo": "faroese",
      "fr": "french",
+     "gl": "galician",
      "hi": "hindi",
      "is": "icelandic",
      "it": "italian",
@@ -50,6 +52,7 @@ LANGUAGE_MAP = {
      "no": "norwegian",
      "no-b": "norwegian (bokmål)",
      "no-n": "norwegian (nynorsk)",
+     "mr": "marathi",
      "pl": "polish",
      "pt": "portuguese",
      "ru": "russian",
requirements.txt CHANGED
@@ -1,5 +1,6 @@
- datasets==2.4.0
+ datasets==2.10.1
  fsspec==2023.9.2
+ pyarrow==9.0.0
  tqdm==4.66.1
  pandas==2.0.3
  xlrd==1.2.0