Languages: Khmer
holylovenia committed
Commit b03d524
1 parent: 7eb5daf

Upload gklmip_newsclass.py with huggingface_hub
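For context, a minimal sketch of the upload step named in the commit message, using huggingface_hub's HfApi.upload_file; the repo id below is a hypothetical placeholder, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()  # assumes prior authentication, e.g. via `huggingface-cli login`
api.upload_file(
    path_or_fileobj="gklmip_newsclass.py",    # local dataset script to push
    path_in_repo="gklmip_newsclass.py",       # destination path inside the repo
    repo_id="<namespace>/gklmip_newsclass",   # hypothetical repo id
    repo_type="dataset",
)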

Files changed (1)
  1. gklmip_newsclass.py +171 -0
gklmip_newsclass.py ADDED
@@ -0,0 +1,171 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import numpy as np
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{jiang2021pretrained,
+ author="Jiang, Shengyi
+ and Fu, Sihui
+ and Lin, Nankai
+ and Fu, Yingwen",
+ title="Pre-trained Models and Evaluation Data for the Khmer Language",
+ year="2021",
+ journal="Tsinghua Science and Technology",
+ }
+ """
+
+ _DATASETNAME = "gklmip_newsclass"
+
+ _DESCRIPTION = """\
+ The GKLMIP Khmer News Dataset is scraped from the Voice of America Khmer website. \
+ The news articles in the dataset are categorized into 8 categories: culture, economics, education, \
+ environment, health, politics, rights and science.
+ """
+
+ _HOMEPAGE = "https://github.com/GKLMIP/Pretrained-Models-For-Khmer"
+ _LANGUAGES = ["khm"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://github.com/GKLMIP/Pretrained-Models-For-Khmer/raw/main/NewsDataset.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TOPIC_MODELING]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+ _TAGS = ["culture", "economic", "education", "environment", "health", "politics", "right", "science"]  # one-hot label columns in the CSVs, in fixed order
+
+
+ class GklmipNewsclass(datasets.GeneratorBasedBuilder):
+     """\
+     The GKLMIP Khmer News Dataset is scraped from the Voice of America Khmer website. \
+     The news articles in the dataset are categorized into 8 categories: culture, economics, education, \
+     environment, health, politics, rights and science.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+     SEACROWD_SCHEMA_NAME = "text"
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "culture": datasets.Value("bool"),
+                     "economic": datasets.Value("bool"),
+                     "education": datasets.Value("bool"),
+                     "environment": datasets.Value("bool"),
+                     "health": datasets.Value("bool"),
+                     "politics": datasets.Value("bool"),
+                     "right": datasets.Value("bool"),
+                     "science": datasets.Value("bool"),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text_features(_TAGS)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.csv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.csv"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.csv"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         dataset = pd.read_csv(filepath)
+         reverse_encoding = dict(zip(range(len(_TAGS)), _TAGS))  # column index -> tag name
+         if self.config.schema == "source":
+             for i, row in dataset.iterrows():
+                 yield i, {
+                     "text": row["text"],
+                     "culture": row["culture"],
+                     "economic": row["economic"],
+                     "education": row["education"],
+                     "environment": row["environment"],
+                     "health": row["health"],
+                     "politics": row["politics"],
+                     "right": row["right"],
+                     "science": row["science"],
+                 }
+
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             for i, row in dataset.iterrows():
+                 yield i, {"id": i, "text": row["text"], "label": reverse_encoding[np.argmax(row[_TAGS])]}  # argmax over the one-hot columns -> single label name
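
For reference, a minimal usage sketch, assuming a local copy of this script, the seacrowd package it imports, and a datasets version that still supports script-based loading (recent versions additionally require trust_remote_code=True). The config names come from BUILDER_CONFIGS above:

import datasets

# Source schema: raw text plus eight boolean one-hot label columns.
source = datasets.load_dataset("gklmip_newsclass.py", name="gklmip_newsclass_source")

# SEACrowd text schema: id / text / a single label decoded from the one-hot columns.
seacrowd = datasets.load_dataset("gklmip_newsclass.py", name="gklmip_newsclass_seacrowd_text")

print(seacrowd["train"][0])  # inspect one decoded example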