holylovenia committed 6a0992a (1 parent: 4dcaf5a)

Upload thai_depression.py with huggingface_hub

Files changed (1):
  1. thai_depression.py (+145, -0)
thai_depression.py ADDED
@@ -0,0 +1,145 @@
import json
from pathlib import Path
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Licenses, Tasks

_DATASETNAME = "thai_depression"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["tha"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False
# A raw string keeps the BibTeX umlaut escapes ({\"a}) intact; in a plain
# triple-quoted string, Python would swallow the backslashes.
_CITATION = r"""
@inproceedings{hamalainen-etal-2021-detecting,
    title = "Detecting Depression in Thai Blog Posts: a Dataset and a Baseline",
    author = {H{\"a}m{\"a}l{\"a}inen, Mika and
      Patpong, Pattama and
      Alnajjar, Khalid and
      Partanen, Niko and
      Rueter, Jack},
    editor = "Xu, Wei and
      Ritter, Alan and
      Baldwin, Tim and
      Rahimi, Afshin",
    booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
    month = nov,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.wnut-1.3",
    doi = "10.18653/v1/2021.wnut-1.3",
    pages = "20--25",
    abstract = "We present the first openly available corpus for detecting depression in Thai. Our corpus is compiled by expert verified cases of depression in several online blogs.
We experiment with two different LSTM based models and two different BERT based models. We achieve a 77.53% accuracy with a Thai BERT model in detecting depression.
This establishes a good baseline for future researcher on the same corpus. Furthermore, we identify a need for Thai embeddings that have been trained on a more varied corpus than Wikipedia.
Our corpus, code and trained models have been released openly on Zenodo.",
}
"""

_DESCRIPTION = """\
We present the first openly available corpus for detecting depression in Thai. Our corpus is compiled from expert-verified cases of depression in several online blogs.
We experiment with two different LSTM-based models and two different BERT-based models. We achieve a 77.53% accuracy with a Thai BERT model in detecting depression.
This establishes a good baseline for future research on the same corpus. Furthermore, we identify a need for Thai embeddings that have been trained on a more varied corpus than Wikipedia.
Our corpus, code, and trained models have been released openly on Zenodo.
"""

_HOMEPAGE = "https://zenodo.org/records/4734552"

_LICENSE = Licenses.CC_BY_NC_ND_4_0.value

_URLs = "https://zenodo.org/records/4734552/files/data.zip?download=1"

_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class ThaiDepressionDataset(datasets.GeneratorBasedBuilder):
    """Thai depression detection dataset."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} seacrowd schema",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
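    # The two configs above resolve to the names "thai_depression_source" and
    # "thai_depression_seacrowd_text": "source" exposes the raw text/label
    # fields as-is, while "seacrowd_text" remaps them into SEACrowd's unified
    # text-classification schema.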

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["depression", "no_depression"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        path = Path(dl_manager.download_and_extract(_URLs))
        data_files = {
            "train": path / "splits/train.json",
            "test": path / "splits/test.json",
            "valid": path / "splits/valid.json",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"]},
            ),
        ]

    def _parse_and_label(self, file_path):
        """Read one split file and attach field names to each [text, label] pair."""
        with open(file_path, "r", encoding="utf-8") as file:
            data = json.load(file)

        parsed_data = []
        for item in data:
            parsed_data.append({"text": item[0], "label": item[1]})

        return parsed_data
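    # For illustration only (structure inferred from the parsing logic above,
    # not taken from the released archive itself), each split file is assumed
    # to be a JSON array of [text, label] pairs, e.g.:
    #   [["<Thai blog post>", "depression"], ["<Thai blog post>", "no_depression"]]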

    def _generate_examples(self, filepath: Path):
        # idx serves as the example key and, for the seacrowd schema, the id field.
        for idx, row in enumerate(self._parse_and_label(filepath)):
            if self.config.schema == "source":
                yield idx, {"text": row["text"], "label": row["label"]}
            elif self.config.schema == "seacrowd_text":
                yield idx, {"id": str(idx), "text": row["text"], "label": row["label"]}
            else:
                raise ValueError(f"Invalid config: {self.config.name}")
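
For context, a minimal sketch of how this loader would typically be invoked once the script is on the Hub. The repository id "SEACrowd/thai_depression" is an assumption inferred from this commit, and trust_remote_code=True reflects how recent versions of the datasets library handle script-based datasets; neither detail is stated in the diff itself.

from datasets import load_dataset

# Assumed repo id; adjust if the dataset lives under a different namespace.
# trust_remote_code=True is required for script-based datasets in recent
# versions of the datasets library.
ds = load_dataset(
    "SEACrowd/thai_depression",
    name="thai_depression_seacrowd_text",
    trust_remote_code=True,
)

print(ds["train"][0])  # e.g. {"id": "0", "text": "...", "label": "depression"}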