Commit 0e1af0f by qgyd2021
1 Parent(s): 9b760bc
README.md CHANGED
@@ -4,6 +4,7 @@ license: apache-2.0
 ## Language Identification
 
 
+
 ### Data Sources
 
 The datasets were collected and organized from the web as follows:
@@ -12,6 +13,9 @@ license: apache-2.0
 | Dataset | Original data / project | Sample counts | Original data description | Alternative download |
 | :--- | :---: | :---: | :---: | :---: |
 | amazon_reviews_multi | [Multilingual Amazon Reviews Corpus](https://github.com/awslabs/open-data-docs/tree/main/docs/amazon-reviews-ml); [2010.02573](https://arxiv.org/abs/2010.02573) | TRAIN: 1191160, VALID: 29665, TEST: 29685 | We present the Multilingual Amazon Reviews Corpus (MARC), a large-scale collection of Amazon reviews for multilingual text classification, containing reviews in English, Japanese, German, French, Spanish, and Chinese collected between 2015 and 2019. | [amazon_reviews_multi](https://huggingface.co/datasets/amazon_reviews_multi) |
+| xnli | [XNLI](https://github.com/facebookresearch/XNLI); [D18-1269.pdf](https://aclanthology.org/D18-1269.pdf) | TRAIN: 7702055, VALID: 49750, TEST: 100129 | We hope that our dataset, XNLI, will catalyze research in cross-lingual sentence understanding by providing an informative standard evaluation task. | [xnli](https://huggingface.co/datasets/xnli) |
+| stsb_multi_mt | [SemEval-2017 Task 1](https://arxiv.org/abs/1708.00055) | (sample counts not listed) | **Shuffle before use.** Available languages: de, en, es, fr, it, nl, pl, pt, ru, zh | [stsb_multi_mt](https://huggingface.co/datasets/stsb_multi_mt) |
+
 
 
 ### References
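Both rows added above feed the same pipeline: each preprocess script emits JSONL rows with a shared schema ("text", "language", "data_source", "split"), and stsb_multi_mt is written one language at a time, hence the shuffle warning. A minimal sketch of loading and shuffling the output, assuming the default output path (the seed is arbitrary):

# Sketch: load the merged JSONL output and shuffle it before use.
# Path and seed are illustrative; rows follow the schema written by
# the preprocess scripts (text / language / data_source / split).
import json
import random

rows = []
with open("data/stsb_multi_mt.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        rows.append(json.loads(line))

random.seed(3407)  # arbitrary seed, just for reproducibility
random.shuffle(rows)
print(rows[0]["language"], rows[0]["text"][:40])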
data/stsb_multi_mt.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5484958b97b92ab66cd9693dabad7d3391b82de0b8b229fabce6629407256399
+size 23988370
data/xnli.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45be37c2bf28d8110af51a6784c1e5fc207ba90887e8016482a4a0666b674983
+size 1408899727
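Both .jsonl files are committed as Git LFS pointers, so the three lines in each hunk above are the entire file contents; the actual data is resolved via the oid/size pair. A hedged sketch of reading such a pointer file, assuming the un-resolved pointer (not the large resolved file) is what sits at the path:

# Sketch: parse a Git LFS pointer file into its key/value fields.
# Pointer files contain exactly the "version", "oid", and "size" lines.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            # each line is "key value", e.g. "size 1408899727"
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = parse_lfs_pointer("data/xnli.jsonl")  # assumes the pointer, not the resolved data
print(pointer["oid"], pointer["size"])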
examples/preprocess/preprocess_amazon_reviews_multi.py CHANGED
@@ -12,6 +12,7 @@ sys.path.append(os.path.join(pwd, "../../"))
 from datasets import load_dataset, DownloadMode
 from tqdm import tqdm
 
+from language_identification import LANGUAGE_MAP
 from project_settings import project_path
 
 
@@ -59,6 +60,9 @@ def main():
                         continue
                     text_set.add(text)
 
+                    if language not in LANGUAGE_MAP.keys():
+                        raise AssertionError(language)
+
                     row = {
                         "text": text,
                         "language": language,
examples/preprocess/preprocess_stsb_multi_mt.py ADDED
@@ -0,0 +1,91 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, "../../"))
+
+from datasets import load_dataset, DownloadMode
+from tqdm import tqdm
+
+from language_identification import LANGUAGE_MAP
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset_path", default="stsb_multi_mt", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/stsb_multi_mt.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    name_list = ["de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "zh"]
+
+    dataset_dict_list = list()
+    for name in name_list:
+        dataset_dict = load_dataset(
+            path=args.dataset_path,
+            name=name,
+            cache_dir=args.dataset_cache_dir,
+            # download_mode=DownloadMode.FORCE_REDOWNLOAD
+        )
+        print(dataset_dict)
+        dataset_dict_list.append((name, dataset_dict))
+
+    text_set = set()
+    counter = defaultdict(int)
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for language, dataset_dict in dataset_dict_list:
+            for k, v in dataset_dict.items():
+                for sample in tqdm(v):
+                    if k == "dev":
+                        k = "validation"
+
+                    sentence1 = sample["sentence1"]
+                    sentence2 = sample["sentence2"]
+
+                    for text in [sentence1, sentence2]:
+                        text = text.strip()
+
+                        if text in text_set:
+                            continue
+                        text_set.add(text)
+
+                        if language not in LANGUAGE_MAP.keys():
+                            raise AssertionError(language)
+
+                        row = {
+                            "text": text,
+                            "language": language,
+                            "data_source": "stsb_multi_mt",
+                            "split": k
+                        }
+                        row = json.dumps(row, ensure_ascii=False)
+                        f.write("{}\n".format(row))
+                        counter[k] += 1
+
+    print("counter: {}".format(counter))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
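One design choice worth noting in the script above: on the Hub, stsb_multi_mt appears to name its splits train, dev, and test, so the `if k == "dev": k = "validation"` rename keeps the "split" field consistent with the other sources merged into this repo. Run from the project root (e.g. `python3 examples/preprocess/preprocess_stsb_multi_mt.py`), it writes all ten languages, in order, into data/stsb_multi_mt.jsonl — which is why the README advises shuffling.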
examples/preprocess/preprocess_xnli.py ADDED
@@ -0,0 +1,92 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, "../../"))
+
+from datasets import load_dataset, DownloadMode
+from tqdm import tqdm
+
+from language_identification import LANGUAGE_MAP
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset_path", default="xnli", type=str)
+    parser.add_argument("--dataset_name", default="all_languages", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/xnli.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        name=args.dataset_name,
+        cache_dir=args.dataset_cache_dir,
+        # download_mode=DownloadMode.FORCE_REDOWNLOAD
+    )
+    print(dataset_dict)
+
+    text_set = set()
+    counter = defaultdict(int)
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for k, v in dataset_dict.items():
+            for sample in tqdm(v):
+
+                hypothesis = sample["hypothesis"]
+                premise = sample["premise"]
+                premise_language_list = list()
+                premise_text_list = list()
+                for language, text in premise.items():
+                    premise_language_list.append(language)
+                    premise_text_list.append(text)
+
+                language_list = hypothesis["language"] + premise_language_list
+                translation_list = hypothesis["translation"] + premise_text_list
+                for language, translation in zip(language_list, translation_list):
+
+                    text = translation.strip()
+
+                    if text in text_set:
+                        continue
+                    text_set.add(text)
+
+                    if language not in LANGUAGE_MAP.keys():
+                        raise AssertionError(language)
+
+                    row = {
+                        "text": text,
+                        "language": language,
+                        "data_source": "xnli",
+                        "split": k
+                    }
+                    row = json.dumps(row, ensure_ascii=False)
+                    f.write("{}\n".format(row))
+                    counter[k] += 1
+
+    print("counter: {}".format(counter))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
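For context on the asymmetric handling above: in an `all_languages` xnli sample, `premise` maps language code to text directly, while `hypothesis` carries parallel `language` and `translation` lists, so the script flattens both into aligned lists before zipping. Abbreviated shape of one sample, as the code consumes it (values shortened for illustration):

# Shape of one xnli "all_languages" sample, abbreviated:
sample = {
    "premise": {"ar": "...", "bg": "...", "zh": "..."},  # dict: language code -> text
    "hypothesis": {
        "language": ["ar", "bg", "zh"],                  # parallel lists
        "translation": ["...", "...", "..."],
    },
    "label": 0,  # entailment / neutral / contradiction
}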
language_identification.py CHANGED
@@ -25,6 +25,30 @@ _CITATION = """\
 """
 
 
+LANGUAGE_MAP = {
+    "ar": "arabic",
+    "bg": "bulgarian",
+    "de": "german",
+    "el": "modern greek",
+    "en": "english",
+    "es": "spanish",
+    "fr": "french",
+    "hi": "hindi",
+    "it": "italian",
+    "ja": "japanese",
+    "nl": "dutch",
+    "pl": "polish",
+    "pt": "portuguese",
+    "ru": "russian",
+    "sw": "swahili",
+    "th": "thai",
+    "tr": "turkish",
+    "ur": "urdu",
+    "vi": "vietnamese",
+    "zh": "chinese",
+}
+
+
 class LanguageIdentification(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
 
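Placing LANGUAGE_MAP at module level, above the builder class, lets the preprocess scripts do `from language_identification import LANGUAGE_MAP` without touching the `datasets` builder machinery. A minimal sketch of the whitelist pattern they all share, with an abbreviated stand-in map:

# Abbreviated stand-in for the full 20-entry LANGUAGE_MAP above.
LANGUAGE_MAP = {"en": "english", "zh": "chinese"}

def validate_language(code: str) -> None:
    # Fail fast on codes the project does not know about,
    # mirroring the guard added to each preprocess script.
    if code not in LANGUAGE_MAP.keys():
        raise AssertionError(code)

validate_language("zh")   # passes silently
# validate_language("xx")  # would raise AssertionError("xx")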