Commit c7ca5de by qgyd2021 (1 parent: 0e1af0f)
README.md CHANGED
@@ -14,7 +14,10 @@ license: apache-2.0
  | :--- | :---: | :---: | :---: | :---: |
  | amazon_reviews_multi | [Multilingual Amazon Reviews Corpus](https://github.com/awslabs/open-data-docs/tree/main/docs/amazon-reviews-ml); [2010.02573](https://arxiv.org/abs/2010.02573) | TRAIN: 1191160, VALID: 29665, TEST: 29685 | We present the Multilingual Amazon Reviews Corpus (MARC), a large-scale collection of Amazon reviews for multilingual text classification. The corpus contains reviews in English, Japanese, German, French, Spanish, and Chinese, collected between 2015 and 2019. | [amazon_reviews_multi](https://huggingface.co/datasets/amazon_reviews_multi) |
  | xnli | [XNLI](https://github.com/facebookresearch/XNLI); [D18-1269.pdf](https://aclanthology.org/D18-1269.pdf) | TRAIN: 7702055, VALID: 49750, TEST: 100129 | We hope that our dataset, XNLI, will catalyze research in cross-lingual sentence understanding by providing an informative standard evaluation task. | [xnli](https://huggingface.co/datasets/xnli) |
- | stsb_multi_mt | [SemEval-2017 Task 1](https://arxiv.org/abs/1708.00055) | sample count | **Shuffle before use.** Available languages: de, en, es, fr, it, nl, pl, pt, ru, zh | [stsb_multi_mt](https://huggingface.co/datasets/stsb_multi_mt) |
+ | stsb_multi_mt | [SemEval-2017 Task 1](https://arxiv.org/abs/1708.00055) | TRAIN: 104117, VALID: 25943, TEST: 22457 | **Shuffle before use.** Available languages: de, en, es, fr, it, nl, pl, pt, ru, zh | [stsb_multi_mt](https://huggingface.co/datasets/stsb_multi_mt) |
+ | nbnn | [oai-nb-no-sbr-80](https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-80/) | TRAIN: 1556212, VALID: 1957, TEST: 1944 | This corpus contains news text from the Norwegian News Agency (NTB) translated from Bokmål to Nynorsk. | [NbAiLab/nbnn_language_detection](https://huggingface.co/datasets/NbAiLab/nbnn_language_detection) |
+ | scandi_langid | | TRAIN: 239618, TEST: 59840 | | [kardosdrur/scandi-langid](https://huggingface.co/datasets/kardosdrur/scandi-langid) |
+ | nordic_langid | [Discriminating Between Similar Nordic Languages](https://aclanthology.org/2021.vardial-1.8/) | TRAIN: 226159, TEST: 10700 | Focuses on discriminating between six similar Nordic languages: Danish, Swedish, Norwegian (Nynorsk), Norwegian (Bokmål), Faroese, and Icelandic. | [strombergnlp/nordic_langid](https://huggingface.co/datasets/strombergnlp/nordic_langid) |
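
For quick inspection, each dataset listed above loads directly from the Hub. A minimal sketch (split names follow the table; `name="en"` for stsb_multi_mt is one of its listed languages):

```python
from datasets import load_dataset

# Bokmål/Nynorsk detection corpus added in this commit (splits: train/dev/test).
nbnn = load_dataset("NbAiLab/nbnn_language_detection")
print(nbnn)

# stsb_multi_mt ships one config per language and its rows are not shuffled,
# hence the "shuffle before use" note in the table.
stsb = load_dataset("stsb_multi_mt", name="en", split="train").shuffle(seed=42)
```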
data/nbnn.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43179b610ba2ccc1d1993b816a729e2e89e6b189de7517616673de9079c3a638
+ size 271343856
data/nordic_langid.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5108a3bdab7c7f5490794689d7c712e671f1d4803522217652ddd2a9e7528f47
+ size 47863139
data/scandi_langid.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12ce5fbcd39461933a27063d775057e37f8f8dc2f51b3cfc7883d5e55835a428
+ size 54767792
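
The three `data/*.jsonl` payloads are stored via Git LFS, so the diffs above show only pointer files. A quick way to check that a fetched file matches its pointer (a sketch; paths assume the repository root):

```python
import hashlib

def verify_lfs_pointer(data_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size recorded in its LFS pointer."""
    sha256 = hashlib.sha256()
    size = 0
    with open(data_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha256.update(chunk)
            size += len(chunk)
    return sha256.hexdigest() == expected_oid and size == expected_size

# oid/size taken from the data/nbnn.jsonl pointer above
print(verify_lfs_pointer(
    "data/nbnn.jsonl",
    "43179b610ba2ccc1d1993b816a729e2e89e6b189de7517616673de9079c3a638",
    271343856,
))
```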
examples/preprocess/preprocess_nbnn.py ADDED
@@ -0,0 +1,92 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from language_identification import LANGUAGE_MAP
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset_path", default="NbAiLab/nbnn_language_detection", type=str)
+     parser.add_argument(
+         "--dataset_cache_dir",
+         default=(project_path / "hub_datasets").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/nbnn.jsonl"),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     dataset_dict = load_dataset(
+         path=args.dataset_path,
+         cache_dir=args.dataset_cache_dir,
+         # download_mode=DownloadMode.FORCE_REDOWNLOAD
+     )
+     print(dataset_dict)
+
+     # the source dataset labels samples with ISO 639-3 codes; map them to ISO 639-1
+     language_map = {
+         "nno": "nn",
+         "nob": "nb"
+     }
+
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as f:
+         for k, v in dataset_dict.items():
+             if k not in ("train", "dev", "test"):
+                 continue
+             if k == "dev":
+                 k = "validation"
+
+             for sample in tqdm(v):
+                 text = sample["text"]
+                 language = sample["language"]
+                 language = language_map[language]
+
+                 text = text.strip()
+
+                 # skip texts already seen in any split
+                 if text in text_set:
+                     continue
+                 text_set.add(text)
+
+                 if language not in LANGUAGE_MAP.keys():
+                     raise AssertionError(language)
+
+                 row = {
+                     "text": text,
+                     "language": language,
+                     "data_source": "nbnn",
+                     "split": k
+                 }
+                 row = json.dumps(row, ensure_ascii=False)
+                 f.write("{}\n".format(row))
+                 counter[k] += 1
+
+     print("counter: {}".format(counter))
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
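
A quick sanity check of the script's output, given the row schema it writes (a sketch; run after the script from the repository root):

```python
import json

# Read back the first record written by preprocess_nbnn.py.
with open("data/nbnn.jsonl", "r", encoding="utf-8") as f:
    row = json.loads(next(f))

assert set(row) == {"text", "language", "data_source", "split"}
assert row["language"] in ("nn", "nb")  # remapped from the source's "nno"/"nob"
```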
examples/preprocess/preprocess_nordic_langid.py ADDED
@@ -0,0 +1,85 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from language_identification import LANGUAGE_MAP
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset_path", default="strombergnlp/nordic_langid", type=str)
+     parser.add_argument(
+         "--dataset_cache_dir",
+         default=(project_path / "hub_datasets").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/nordic_langid.jsonl"),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     dataset_dict = load_dataset(
+         path=args.dataset_path,
+         name="50k",
+         cache_dir=args.dataset_cache_dir,
+         # download_mode=DownloadMode.FORCE_REDOWNLOAD
+     )
+     print(dataset_dict)
+
+     # the "language" column is a class index; this list mirrors the dataset's label order
+     index_to_language = ["nb", "is", "nn", "sv", "fo", "da"]
+
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as f:
+         for k, v in dataset_dict.items():
+             for sample in tqdm(v):
+                 text = sample["sentence"]
+                 language_index = sample["language"]
+                 language = index_to_language[language_index]
+
+                 text = text.strip()
+
+                 if text in text_set:
+                     continue
+                 text_set.add(text)
+
+                 if language not in LANGUAGE_MAP.keys():
+                     raise AssertionError(language)
+
+                 row = {
+                     "text": text,
+                     "language": language,
+                     "data_source": "nordic_langid",
+                     "split": k
+                 }
+                 row = json.dumps(row, ensure_ascii=False)
+                 f.write("{}\n".format(row))
+                 counter[k] += 1
+
+     print("counter: {}".format(counter))
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
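
The hardcoded `index_to_language` list has to mirror the label order of the dataset's `language` feature. Rather than trusting it blindly, the mapping can be read off the loaded dataset (a sketch, assuming the column is a `datasets.ClassLabel` as the integer indices suggest):

```python
from datasets import load_dataset

dataset_dict = load_dataset("strombergnlp/nordic_langid", name="50k")
feature = dataset_dict["train"].features["language"]

# For a ClassLabel feature, .names is the index-to-name list that
# index_to_language above must mirror; int2str decodes a single index.
print(feature.names)
print(feature.int2str(0))
```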
examples/preprocess/preprocess_scandi_langid.py ADDED
@@ -0,0 +1,81 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from language_identification import LANGUAGE_MAP
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset_path", default="kardosdrur/scandi-langid", type=str)
+     parser.add_argument(
+         "--dataset_cache_dir",
+         default=(project_path / "hub_datasets").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/scandi_langid.jsonl"),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     dataset_dict = load_dataset(
+         path=args.dataset_path,
+         cache_dir=args.dataset_cache_dir,
+         # download_mode=DownloadMode.FORCE_REDOWNLOAD
+     )
+     print(dataset_dict)
+
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as f:
+         for k, v in dataset_dict.items():
+             for sample in tqdm(v):
+                 text = sample["text"]
+                 language = sample["lang"]
+
+                 text = text.strip()
+
+                 if text in text_set:
+                     continue
+                 text_set.add(text)
+
+                 if language not in LANGUAGE_MAP.keys():
+                     raise AssertionError(language)
+
+                 row = {
+                     "text": text,
+                     "language": language,
+                     "data_source": "scandi_langid",
+                     "split": k
+                 }
+                 row = json.dumps(row, ensure_ascii=False)
+                 f.write("{}\n".format(row))
+                 counter[k] += 1
+
+     print("counter: {}".format(counter))
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
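
All three scripts emit the same row schema, so the generated files can be tallied and compared against the TRAIN/VALID/TEST counts in the README table (a sketch; run from the repository root):

```python
import json
from collections import Counter

counts = Counter()
for name in ("nbnn", "scandi_langid", "nordic_langid"):
    with open("data/{}.jsonl".format(name), "r", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            counts[(row["data_source"], row["split"])] += 1

for key in sorted(counts):
    print(key, counts[key])
```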
language_identification.py CHANGED
@@ -28,19 +28,28 @@ _CITATION = """\
  LANGUAGE_MAP = {
      "ar": "arabic",
      "bg": "bulgarian",
+     "da": "danish",
      "de": "german",
      "el": "modern greek",
      "en": "english",
      "es": "spanish",
+     "fo": "faroese",
      "fr": "french",
      "hi": "hindi",
+     "is": "icelandic",
      "it": "italian",
      "ja": "japanese",
      "nl": "dutch",
+     # "nno": "norwegian (nynorsk)",
+     "nn": "norwegian (nynorsk)",
+     "no": "norwegian",
+     # "nob": "norwegian (bokmål)",
+     "nb": "norwegian (bokmål)",
      "pl": "polish",
      "pt": "portuguese",
      "ru": "russian",
      "sw": "swahili",
+     "sv": "swedish",
      "th": "thai",
      "tr": "turkish",
      "ur": "urdu",
@@ -105,9 +114,13 @@ class LanguageIdentification(datasets.GeneratorBasedBuilder):
              if sample["split"] != split:
                  continue

+             language = sample["language"]
+             if language not in LANGUAGE_MAP.keys():
+                 raise AssertionError(language)
+
              yield idx, {
                  "text": sample["text"],
-                 "language": sample["language"],
+                 "language": language,
                  "data_source": sample["data_source"],
              }
              idx += 1
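
With the check added above, any row whose language code is missing from LANGUAGE_MAP now fails fast during generation instead of yielding a bad label. The builder can be exercised against the local script (a sketch; `name="nbnn"` is a guess, since the builder's config definitions are outside this diff):

```python
from datasets import load_dataset

# "nbnn" as a config name is an assumption; the available configs are
# defined elsewhere in language_identification.py.
dataset = load_dataset("language_identification.py", name="nbnn", split="train")
print(dataset[0])  # {"text": ..., "language": ..., "data_source": ...}
```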