Languages: Lao
holylovenia committed commit b07b100 (1 parent: a4117f9)

Upload yunshan_cup_2020.py with huggingface_hub

Files changed (1):
  1. yunshan_cup_2020.py +167 -0
yunshan_cup_2020.py ADDED
@@ -0,0 +1,167 @@
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{DBLP:journals/corr/abs-2204-02658,
  author     = {Yingwen Fu and
                Jinyi Chen and
                Nankai Lin and
                Xixuan Huang and
                Xin Ying Qiu and
                Shengyi Jiang},
  title      = {Yunshan Cup 2020: Overview of the Part-of-Speech Tagging Task for
                Low-resourced Languages},
  journal    = {CoRR},
  volume     = {abs/2204.02658},
  year       = {2022},
  url        = {https://doi.org/10.48550/arXiv.2204.02658},
  doi        = {10.48550/arXiv.2204.02658},
  eprinttype = {arXiv},
  eprint     = {2204.02658},
  timestamp  = {Tue, 12 Apr 2022 18:42:14 +0200},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2204-02658.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org}
}
"""

_DATASETNAME = "yunshan_cup_2020"

_DESCRIPTION = """\
A Lao part-of-speech tagging dataset of 11,000 sentences, released as part of the
Yunshan-Cup-2020 evaluation track.
"""

_HOMEPAGE = "https://github.com/GKLMIP/Yunshan-Cup-2020"

_LOCAL = False
_LANGUAGES = ["lao"]

_LICENSE = Licenses.UNKNOWN.value

_URLS = {
    "train": "https://raw.githubusercontent.com/GKLMIP/Yunshan-Cup-2020/main/train.txt",
    "val": "https://raw.githubusercontent.com/GKLMIP/Yunshan-Cup-2020/main/dev.txt",
    "test": "https://raw.githubusercontent.com/GKLMIP/Yunshan-Cup-2020/main/test.txt",
}

_SUPPORTED_TASKS = [Tasks.POS_TAGGING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class YunshanCup2020Dataset(datasets.GeneratorBasedBuilder):
    """A Lao POS tagging dataset of 11,000 sentences, released as part of the Yunshan-Cup-2020 evaluation track."""

    class_labels = ["IAC", "COJ", "ONM", "PRE", "PRS", "V", "DBQ", "IBQ", "FIX", "N", "ADJ", "DMN", "IAQ", "CLF", "PRA", "DAN", "NEG", "NTR", "REL", "PVA", "TTL", "DAQ", "PRN", "ADV", "PUNCT", "CNM"]

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description="yunshan_cup_2020 source schema",
            schema="source",
            subset_id="yunshan_cup_2020",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description="yunshan_cup_2020 SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id="yunshan_cup_2020",
        ),
    ]

    DEFAULT_CONFIG_NAME = "yunshan_cup_2020_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "pos_tags": [datasets.Value("string")],
                }
            )
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(self.class_labels)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        path_dict = dl_manager.download_and_extract(_URLS)
        train_path, val_path, test_path = path_dict["train"], path_dict["val"], path_dict["test"]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": val_path,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        df = load_postagging_data(filepath)
        if self.config.schema == "source":
            for i, row in enumerate(df):
                ex = {
                    "index": str(i),
                    "tokens": row["sentence"],
                    "pos_tags": row["label"],
                }
                yield i, ex

        elif self.config.schema == "seacrowd_seq_label":
            for i, row in enumerate(df):
                ex = {
                    "id": str(i),
                    "tokens": row["sentence"],
                    "labels": row["label"],
                }
                yield i, ex


def load_postagging_data(file_path):
    """Parse a CoNLL-style file: one "token label" pair per line, with blank lines separating sentences."""
    with open(file_path, "r", encoding="utf-8") as f:
        data = f.readlines()
    dataset = []
    sentence, seq_label = [], []
    for line in data:
        if len(line.strip()) > 0:
            # A line with a single field carries only a label; the token is a literal space.
            token, label = " ", ""
            if len(line.strip().split(" ")) < 2:
                label = line.strip()
            else:
                token, label = line.rstrip("\n").split(" ")
            sentence.append(token)
            seq_label.append(label)
        else:
            dataset.append({"sentence": sentence, "label": seq_label})
            sentence = []
            seq_label = []
    # Flush the last sentence if the file does not end with a blank line.
    if sentence:
        dataset.append({"sentence": sentence, "label": seq_label})
    return dataset
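
For reference, a minimal usage sketch follows. It assumes the seacrowd utilities package is installed and that the script above is saved locally as yunshan_cup_2020.py; the config names come from BUILDER_CONFIGS above, and trust_remote_code is needed on recent versions of the datasets library when running a loading script.

# Minimal usage sketch (assumptions: seacrowd is installed; the loader is
# saved locally as yunshan_cup_2020.py).
import datasets

# "yunshan_cup_2020_source" keeps the raw schema; swap in
# "yunshan_cup_2020_seacrowd_seq_label" for the SEACrowd seq_label schema.
dset = datasets.load_dataset(
    "yunshan_cup_2020.py",
    name="yunshan_cup_2020_source",
    trust_remote_code=True,
)

sample = dset["train"][0]
print(sample["tokens"][:5])    # first five Lao tokens of the first sentence
print(sample["pos_tags"][:5])  # their POS tags, drawn from class_labels

With the seq_label config, the fields are "id", "tokens", and "labels" instead, matching the keys yielded in _generate_examples.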