Commit 821e463, committed by qgyd2021 (initial commit, 0 parents)
.gitattributes ADDED
@@ -0,0 +1,56 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,23 @@
+
+ .git/
+ .idea/
+
+ **/flagged/
+ **/log/
+ **/logs/
+ **/__pycache__/
+
+ data/
+ docs/
+ dotenv/
+ examples/preprocess/data
+ hub_datasets/
+ trained_models/
+ temp/
+
+ data/**/README.md
+ data/**/*.json
+ data/**/*.wav
+
+ **/*.wav
+ **/*.xlsx
README.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ license: apache-2.0
+ ---
+ ## Language Identification
+
+
+ ### Data Sources
+
+ The dataset was collected and organized from the web as follows:
+
+
+ | Dataset | Original Data / Project URL | Sample Count | Original Data Description | Alternative Download URL |
+ | :--- | :---: | :---: | :---: | :---: |
+ | amazon_reviews_multi | [Multilingual Amazon Reviews Corpus](https://github.com/awslabs/open-data-docs/tree/main/docs/amazon-reviews-ml); [2010.02573](https://arxiv.org/abs/2010.02573) | TRAIN: 1191160, VALID: 29665, TEST: 29685 | The Multilingual Amazon Reviews Corpus (MARC) is a large-scale collection of Amazon reviews for multilingual text classification. The corpus contains English, Japanese, German, French, Spanish, and Chinese reviews collected between 2015 and 2019. | [amazon_reviews_multi](https://huggingface.co/datasets/amazon_reviews_multi) |
+
+
+ ### References
+ <details>
+ <summary>Reference data sources (expand to view)</summary>
+ <pre><code>
+ https://huggingface.co/datasets/papluca/language-identification
+ https://huggingface.co/datasets/unklefedor/language-identification
+
+ </code></pre>
+ </details>
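
The README above documents the data sources but not how to consume the published dataset. A minimal loading sketch follows; the Hub repo id `qgyd2021/language_identification` is an assumption inferred from the committer name, so adjust it to the actual repository.

```python
from datasets import load_dataset

# A sketch, not canonical usage: the repo id
# "qgyd2021/language_identification" is assumed from the committer name.
dataset = load_dataset(
    "qgyd2021/language_identification",
    name="amazon_reviews_multi",
    split="train",
)
print(dataset[0])  # {"text": ..., "language": ..., "data_source": ...}
```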
examples/preprocess/preprocess_amazon_reviews_multi.py ADDED
@@ -0,0 +1,78 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ import json
+ import os
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--dataset_path", default="amazon_reviews_multi", type=str)
+     parser.add_argument(
+         "--dataset_cache_dir",
+         default=(project_path / "hub_datasets").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--output_file",
+         default=(project_path / "data/amazon_reviews_multi.jsonl").as_posix(),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     dataset_dict = load_dataset(
+         path=args.dataset_path,
+         cache_dir=args.dataset_cache_dir,
+         revision="refs/convert/parquet",
+         # download_mode=DownloadMode.FORCE_REDOWNLOAD
+     )
+     print(dataset_dict)
+
+     # Deduplicate on the stripped review text across all splits.
+     text_set = set()
+     counter = defaultdict(int)
+     with open(args.output_file, "w", encoding="utf-8") as f:
+         for k, v in dataset_dict.items():
+             for sample in tqdm(v):
+                 text = sample["review_body"]
+                 language = sample["language"]
+
+                 text = text.strip()
+
+                 if text in text_set:
+                     continue
+                 text_set.add(text)
+
+                 row = {
+                     "text": text,
+                     "language": language,
+                     "data_source": "amazon_reviews_multi",
+                     "split": k
+                 }
+                 row = json.dumps(row, ensure_ascii=False)
+                 f.write("{}\n".format(row))
+                 counter[k] += 1
+
+     print("counter: {}".format(counter))
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
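
Each emitted line is one JSON object with `text`, `language`, `data_source`, and `split` keys. A small sketch of reading the output back, assuming it is run from the project root (the record shown in the comment is hypothetical):

```python
import json

# A representative (hypothetical) record:
# {"text": "...", "language": "en",
#  "data_source": "amazon_reviews_multi", "split": "train"}
with open("data/amazon_reviews_multi.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        sample = json.loads(line)
        print(sample["split"], sample["language"], sample["text"][:60])
        break  # only peek at the first record
```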
language_identification.py ADDED
@@ -0,0 +1,93 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import json
+ from pathlib import Path
+
+ import datasets
+
+
+ _URLS = {
+     "amazon_reviews_multi": "data/amazon_reviews_multi.jsonl",
+ }
+
+
+ _CITATION = """\
+ @dataset{language_identification,
+   author    = {Xing Tian},
+   title     = {language_identification},
+   month     = aug,
+   year      = 2024,
+   publisher = {Xing Tian},
+   version   = {1.0},
+ }
+ """
+
+
+ class LanguageIdentification(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"),
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "language": datasets.Value("string"),
+                 "data_source": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             features=features,
+             supervised_keys=None,
+             homepage="",
+             license="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         url = _URLS[self.config.name]
+         dl_path = dl_manager.download(url)
+         archive_path = dl_path
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"archive_path": archive_path, "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"archive_path": archive_path, "split": "validation"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"archive_path": archive_path, "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, archive_path, split):
+         # All splits live in a single JSONL file; each generator filters
+         # rows by their "split" field.
+         archive_path = Path(archive_path)
+
+         idx = 0
+         with open(archive_path, "r", encoding="utf-8") as f:
+             for row in f:
+                 sample = json.loads(row)
+
+                 if sample["split"] != split:
+                     continue
+
+                 yield idx, {
+                     "text": sample["text"],
+                     "language": sample["language"],
+                     "data_source": sample["data_source"],
+                 }
+                 idx += 1
+
+
+ if __name__ == '__main__':
+     pass
main.py ADDED
@@ -0,0 +1,21 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ from datasets import load_dataset, DownloadMode
+ from tqdm import tqdm
+
+
+ dataset = load_dataset(
+     "language_identification.py",
+     name="amazon_reviews_multi",
+     split="train",
+     # streaming=True,
+     cache_dir=None,
+     download_mode=DownloadMode.FORCE_REDOWNLOAD
+ )
+
+ for sample in tqdm(dataset):
+     print(sample)
+
+
+ if __name__ == '__main__':
+     pass
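
main.py exercises the loader script above and, with `FORCE_REDOWNLOAD`, regenerates the full split on every run. For a quick look without materializing everything, the commented-out `streaming=True` path could be used instead; a sketch, assuming the same local loader script:

```python
from datasets import load_dataset

# A sketch of the streaming variant hinted at above: samples are
# yielded lazily as an IterableDataset instead of being written
# to the cache first.
dataset = load_dataset(
    "language_identification.py",
    name="amazon_reviews_multi",
    split="train",
    streaming=True,
)
for sample in dataset.take(3):
    print(sample)
```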
project_settings.py ADDED
@@ -0,0 +1,12 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import os
+ from pathlib import Path
+
+
+ project_path = os.path.abspath(os.path.dirname(__file__))
+ project_path = Path(project_path)
+
+
+ if __name__ == '__main__':
+     pass
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ datasets==2.4.0
+ fsspec==2023.9.2
+ tqdm==4.66.1
+ pandas==2.0.3
+ xlrd==1.2.0
+ openpyxl==3.0.9