minskiter committed
Commit d258b9f
0 Parent(s)

feat(weibo.py): update loading script

.gitattributes ADDED
@@ -0,0 +1,57 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ # BMES files - uncompressed
+ *.bmes filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: apache-2.0
+ ---
data/test.char.bmes ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54d4eae480d0175b08c2967d7b325471789038948f2b43bac21900b66b8a4960
+ size 93851
data/train.char.bmes ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64a6cddf1cc3e0238aae5da70e50b95ef85d236235562ddad42e1ebe1bb9d7fe
+ size 461990
data/validation.char.bmes ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9515ea6d85c9304b7f172f8b352b462b15d80d369a8d9607a8dfda1599740a92
+ size 91251
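Note: these three .char.bmes files are tracked with Git LFS, so the diff shows only their pointer contents (spec version, object hash, size) rather than the data itself. Judging from the _generate_examples logic in weibo.py below, each resolved file is expected to hold one "<character> <BMES tag>" pair per line, with a blank line separating sentences. A minimal, hypothetical parsing sketch of that format follows; the two-character sample is invented purely for illustration:

# Hypothetical two-character sentence in the assumed "<char> <tag>" format;
# in the real files a blank line marks the end of each sentence.
sample_block = "微 B-ORG.NAM\n博 E-ORG.NAM\n"

text, labels = [], []
for line in sample_block.splitlines():
    if not line.strip():
        continue                     # skip sentence separators
    char, tag = line.split(" ")      # same split used by _generate_examples below
    text.append(char)
    labels.append(tag)

print("".join(text), labels)         # -> 微博 ['B-ORG.NAM', 'E-ORG.NAM']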
weibo.py ADDED
@@ -0,0 +1,131 @@
+ import datasets
+ from datasets.download.download_manager import DownloadManager
+
+ _DESCRIPTION = """\
+ The Weibo NER dataset is a Chinese Named Entity Recognition dataset
+ drawn from the social media website Sina Weibo.
+ """
+
+ _CITATION = """\
+ @inproceedings{peng-dredze-2015-named,
+     title = "Named Entity Recognition for {C}hinese
+     Social Media with Jointly Trained Embeddings",
+     author = "Peng, Nanyun and Dredze, Mark",
+     booktitle = "Proceedings of the 2015 Conference on
+     Empirical Methods in Natural Language Processing",
+     month = sep,
+     year = "2015",
+     address = "Lisbon, Portugal",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/D15-1064",
+     doi = "10.18653/v1/D15-1064",
+     pages = "548--554",
+ }
+ """
+
+ _URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
+ _URLS = {
+     "train": _URL + "data/train.char.bmes",
+     "validation": _URL + "data/validation.char.bmes",
+     "test": _URL + "data/test.char.bmes",
+ }
+
+
+ class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "labels": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 'O',
+                                 'B-PER.NAM',
+                                 'I-PER.NAM',
+                                 'E-PER.NAM',
+                                 'S-PER.NAM',
+                                 'B-ORG.NAM',
+                                 'I-ORG.NAM',
+                                 'E-ORG.NAM',
+                                 'S-ORG.NAM',
+                                 'B-LOC.NAM',
+                                 'I-LOC.NAM',
+                                 'E-LOC.NAM',
+                                 'S-LOC.NAM',
+                                 'B-GPE.NAM',
+                                 'I-GPE.NAM',
+                                 'E-GPE.NAM',
+                                 'S-GPE.NAM',
+                                 'B-PER.NOM',
+                                 'I-PER.NOM',
+                                 'E-PER.NOM',
+                                 'S-PER.NOM',
+                                 'B-ORG.NOM',
+                                 'I-ORG.NOM',
+                                 'E-ORG.NOM',
+                                 'S-ORG.NOM',
+                                 'B-LOC.NOM',
+                                 'I-LOC.NOM',
+                                 'E-LOC.NOM',
+                                 'S-LOC.NOM',
+                                 'B-GPE.NOM',
+                                 'I-GPE.NOM',
+                                 'E-GPE.NOM',
+                                 'S-GPE.NOM',
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://aclanthology.org/D15-1064/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         urls_to_download = _URLS
+         download_files = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": download_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": download_files["validation"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": download_files["test"]},
+             ),
+         ]
+
+     def _default_example(self):
+         return {"text": [], "labels": []}
+
+     def _generate_examples(self, filepath):
+         with open(filepath, "r", encoding="utf-8") as f:
+             example = self._default_example()
+             _id = 0
+             for line in f:
+                 if len(line.strip()) == 0:
+                     if len(example["text"]) > 0:
+                         example["text"] = "".join(example["text"])
+                         yield _id, example
+                         example = self._default_example()
+                         _id += 1
+                     continue
+                 char, label = line.split(" ")
+                 char = char.strip()
+                 if char == "":
+                     char = " "
+                 label = label.strip()
+                 example["text"].append(char)
+                 example["labels"].append(label)
+             if len(example["text"]) > 0:
+                 example["text"] = "".join(example["text"])
+                 yield _id, example
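For context (not part of the commit): a minimal usage sketch of how the uploaded script would typically be consumed with the datasets library, assuming the repository id minskiter/weibo taken from _URL above. Each example carries the raw sentence in "text" and a sequence of ClassLabel ids in "labels", which can be mapped back to the BMES tag names declared in _info:

from datasets import load_dataset

# This is a script-based dataset; recent versions of `datasets` may additionally
# require passing trust_remote_code=True to load_dataset.
ds = load_dataset("minskiter/weibo")

sample = ds["train"][0]
label_names = ds["train"].features["labels"].feature.names  # ClassLabel id -> tag name
print(sample["text"])
print([label_names[i] for i in sample["labels"]])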