holylovenia committed on
Commit
64afe1b
·
verified ·
1 Parent(s): fe62c96

Upload dengue_filipino.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. dengue_filipino.py +136 -0
dengue_filipino.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Tuple
2
+
3
+ import datasets
4
+
5
+ from seacrowd.utils import schemas
6
+ from seacrowd.utils.configs import SEACrowdConfig
7
+ from seacrowd.utils.constants import Licenses, Tasks
8
+
9
# Citation for the original Dengue dataset paper (Livelo & Cheng, 2018).
# Fixed: the original string had a stray extra "}" after the doi line, which
# made the BibTeX entry invalid ("doi={...}}" already closes both the field
# and the @INPROCEEDINGS entry).
_CITATION = """\
@INPROCEEDINGS{8459963,
author={E. D. {Livelo} and C. {Cheng}},
booktitle={2018 IEEE International Conference on Agents (ICA)},
title={Intelligent Dengue Infoveillance Using Gated Recurrent Neural Learning and Cross-Label Frequencies},
year={2018},
volume={},
number={},
pages={2-7},
doi={10.1109/AGENTS.2018.8459963}}
"""

# ISO 639-3 code for Filipino.
_LANGUAGES = ["fil"]

# copied from https://huggingface.co/datasets/dengue_filipino/blob/main/dengue_filipino.py
# NOTE(review): _URL is declared but never downloaded in this script —
# _generate_examples re-loads the upstream Hub dataset by name instead.
# Kept for provenance; confirm before removing.
_URL = "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/dengue/dengue_raw.zip"
_DATASETNAME = "dengue_filipino"

_DESCRIPTION = """\
Benchmark dataset for low-resource multi-label classification, with 4,015 training, 500 testing, and 500 validation examples, each labeled as part of five classes. Each sample can be a part of multiple classes. Collected as tweets.
"""

_HOMEPAGE = "https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks"

# Upstream repository does not state a license.
_LICENSE = Licenses.UNKNOWN.value

_SUPPORTED_TASKS = [Tasks.DOMAIN_KNOWLEDGE_MULTICLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

# Data is fetched remotely; no local files required.
_LOCAL = False
42
+
43
+
44
class DengueFilipinoDataset(datasets.GeneratorBasedBuilder):
    """Dengue Dataset: low-resource multi-label text classification dataset in Filipino.

    Each tweet carries five binary topic labels; a sample may belong to several
    classes at once (multi-label).
    """

    # Label columns, in the order exposed by both schemas. Factored out so the
    # feature spec and both example generators cannot drift apart.
    LABEL_COLUMNS = ("absent", "dengue", "health", "mosquito", "sick")

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text_multi",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema text multi",
            schema="seacrowd_text_multi",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo for the active schema.

        "source" mirrors the upstream columns (text + one binary ClassLabel per
        topic); "seacrowd_text_multi" uses the shared SEACrowd multi-label schema.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # One binary flag per topic, in LABEL_COLUMNS order.
                    **{label: datasets.features.ClassLabel(names=["0", "1"]) for label in self.LABEL_COLUMNS},
                }
            )
        elif self.config.schema == "seacrowd_text_multi":
            features = schemas.text_multi_features(["0", "1"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Declare train/validation/test splits; data is loaded lazily in _generate_examples."""
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"split": str(split)})
            for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
        ]

    def _generate_examples(self, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs for the requested split.

        NOTE(review): examples are re-read from the upstream `dengue_filipino`
        dataset on the Hugging Face Hub rather than downloaded from _URL.
        """
        dataset = datasets.load_dataset(_DATASETNAME, split=split)
        # `idx` instead of `id`: the original shadowed the builtin `id`.
        for idx, row in enumerate(dataset):
            if self.config.schema == "source":
                example = {"text": row["text"]}
                example.update({label: row[label] for label in self.LABEL_COLUMNS})
                yield idx, example

            elif self.config.schema == "seacrowd_text_multi":
                yield idx, {
                    "id": idx,
                    "text": row["text"],
                    "labels": [row[label] for label in self.LABEL_COLUMNS],
                }