holylovenia committed
Commit 33e7c03 · verified · 1 Parent(s): 98a0599

Upload uit_vion.py with huggingface_hub
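
The commit message indicates the script was pushed with the huggingface_hub client. For reference, a minimal upload sketch under assumed values (the repo id, local path, and token setup below are illustrative, not taken from this commit):

# Minimal sketch of pushing a loading script with huggingface_hub.
# The repo_id and local path are assumptions for illustration only.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token stored by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="uit_vion.py",   # local dataset script
    path_in_repo="uit_vion.py",      # destination path inside the repo
    repo_id="SEACrowd/uit_vion",     # assumed dataset repo id
    repo_type="dataset",
    commit_message="Upload uit_vion.py with huggingface_hub",
)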

Files changed (1)
  1. uit_vion.py +170 -0
uit_vion.py ADDED
@@ -0,0 +1,170 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{fujita2021empirical,
    title={An Empirical Investigation of Online News Classification on an Open-Domain, Large-Scale and High-Quality Dataset in Vietnamese},
    author={Fujita, H and Perez-Meana, H},
    booktitle={New Trends in Intelligent Software Methodologies, Tools and Techniques: Proceedings of the 20th International Conference on New Trends in Intelligent Software Methodologies, Tools and Techniques (SoMeT_21)},
    volume={337},
    pages={367},
    year={2021},
    organization={IOS Press}
}
"""

_DATASETNAME = "uit_vion"

_DESCRIPTION = """\
UIT-ViON (Vietnamese Online Newspaper) is a dataset collected from well-known online newspapers in Vietnamese.
UIT-ViON is an open-domain, large-scale, and high-quality dataset consisting of 260,000 textual data
points annotated with 13 different categories for evaluating Vietnamese short text classification.
The dataset is split into training, validation, and test sets containing 208,000, 26,000,
and 26,000 texts, respectively.
"""

_HOMEPAGE = "https://github.com/kh4nh12/UIT-ViON-Dataset"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://github.com/kh4nh12/UIT-ViON-Dataset/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [Tasks.TOPIC_MODELING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class UitVion(datasets.GeneratorBasedBuilder):
    """UIT-ViON (Vietnamese Online Newspaper) is a dataset collected from well-known online newspapers in Vietnamese."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    LABEL_CLASSES = [i for i in range(13)]

    SEACROWD_SCHEMA_NAME = "text"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "link": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=self.LABEL_CLASSES),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text_features(self.LABEL_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        # The GitHub archive contains a nested data.zip holding the CSV splits; extract it as well.
        file_dir = os.path.join("UIT-ViON-Dataset-main", "data.zip")
        data_dir = os.path.join(data_dir, file_dir)
        data_dir = dl_manager.download_and_extract(data_dir)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "UIT-ViON_train.csv"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "UIT-ViON_test.csv"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "UIT-ViON_dev.csv"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        data = pd.read_csv(filepath)

        if self.config.schema == "source":
            for i, row in data.iterrows():
                yield i, {
                    "title": str(row["title"]),
                    "link": str(row["link"]),
                    "label": row["label"],
                }

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for i, row in data.iterrows():
                yield i, {
                    "id": str(i),
                    "text": str(row["title"]),
                    "label": int(row["label"]),
                }
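
Once uploaded, the builder above can be exercised with the datasets library. A minimal usage sketch, assuming the script is available locally as uit_vion.py and that the seacrowd package (which provides the imported schema helpers) is installed; recent datasets releases also require trust_remote_code=True for script-based datasets:

# Usage sketch: load both configs defined by the builder above.
from datasets import load_dataset

# Source schema: "title", "link", and "label" columns.
source = load_dataset("uit_vion.py", name="uit_vion_source", trust_remote_code=True)

# SEACrowd text schema: "id", "text", and "label" columns.
seacrowd = load_dataset("uit_vion.py", name="uit_vion_seacrowd_text", trust_remote_code=True)

print(source["train"][0])
print(seacrowd["validation"].features)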