# coding=utf-8
"""Loading script for the DEPTWEET dataset (tweets labeled with depression severity)."""

import json
import os

import datasets

_DESCRIPTION = """\
Deptweet Dataset. Unknown License.
"""

_CITATION = """\
@article{kabir2022deptweet,
title = {{DEPTWEET: A typology for social media texts to detect depression severities}},
journal = {{Computers in Human Behavior}},
pages = {107503},
year = {2022},
issn = {0747-5632},
doi = {10.1016/j.chb.2022.107503},
url = {https://www.sciencedirect.com/science/article/pii/S0747563222003235},
author = {Mohsinul Kabir and Tasnim Ahmed and Md. Bakhtiar Hasan and Md Tahmid Rahman Laskar and Tarun Kumar Joarder and Hasan Mahmud and Kamrul Hasan},
keywords = {Social media, Mental health, Depression severity, Dataset},
abstract = {Mental health research through data-driven methods has been hindered by a lack of standard typology and scarcity of adequate data. In this study, we leverage the clinical articulation of depression to build a typology for social media texts for detecting the severity of depression. It emulates the standard clinical assessment procedure Diagnostic and Statistical Manual of Mental Disorders (DSM-5) and Patient Health Questionnaire (PHQ-9) to encompass subtle indications of depressive disorders from tweets. Along with the typology, we present a new dataset of 40191 tweets labeled by expert annotators. Each tweet is labeled as ‘non-depressed’ or ‘depressed’. Moreover, three severity levels are considered for ‘depressed’ tweets: (1) mild, (2) moderate, and (3) severe. An associated confidence score is provided with each label to validate the quality of annotation. We examine the quality of the dataset via representing summary statistics while setting strong baseline results using attention-based models like BERT and DistilBERT. Finally, we extensively address the limitations of the study to provide directions for further research.}
}
"""

# BUGFIX: the Hub's "blob" URLs return the HTML file-viewer page, not the raw
# file; raw-content downloads require "resolve" URLs.
_URLs = {
    "train": "https://huggingface.co/datasets/wdli/deptweet_dataset/resolve/main/deptweet_dataset/train/deptweet_dataset_train.json",
    "val": "https://huggingface.co/datasets/wdli/deptweet_dataset/resolve/main/deptweet_dataset/val/deptweet_dataset_val.json",
    "test": "https://huggingface.co/datasets/wdli/deptweet_dataset/resolve/main/deptweet_dataset/test/deptweet_dataset_test.json",
}

# Column subset emitted by each builder config; "all" (and any unknown config
# name, matching the original's default branch) falls back to _ALL_COLUMNS.
_CONFIG_COLUMNS = {
    "onlyTweet": ("id", "tweet"),
    "tweetAndLabel": ("id", "tweet", "label"),
    "labelAndConfidence": ("id", "tweet", "label", "confidence_score"),
}

_ALL_COLUMNS = (
    "id",
    "tweet",
    "replies_count",
    "retweets_count",
    "likes_count",
    "target",
    "label",
    "confidence_score",
)

# Feature types for every known column.
# int32 for the count columns: int8 caps at 127, which real reply/retweet/like
# counts routinely exceed. float32 for confidence_score: float16 needlessly
# truncates the annotation confidence values.
_FEATURE_TYPES = {
    "id": datasets.Value("string"),
    "tweet": datasets.Value("string"),
    "replies_count": datasets.Value("int32"),
    "retweets_count": datasets.Value("int32"),
    "likes_count": datasets.Value("int32"),
    "target": datasets.Value("int8"),
    "label": datasets.Value("string"),
    "confidence_score": datasets.Value("float32"),
}


class Deptweet_Dataset(datasets.GeneratorBasedBuilder):
    """DEPTWEET: tweets annotated for depression severity with confidence scores."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="onlyTweet",
            description="only tweet content",
            version=VERSION,
        ),
        datasets.BuilderConfig(
            name="tweetAndLabel",
            description="tweet and label",
            version=VERSION,
        ),
        datasets.BuilderConfig(
            name="labelAndConfidence",
            description="tweet, label and confidence_score",
            version=VERSION,
        ),
        datasets.BuilderConfig(
            name="all",
            description="tweet, label, confidence_score and reply_count",
            version=VERSION,
        ),
    ]

    def _columns(self):
        """Return the column names the active config emits."""
        return _CONFIG_COLUMNS.get(self.config.name, _ALL_COLUMNS)

    def _info(self):
        """Dataset metadata; features mirror exactly what _generate_examples yields.

        BUGFIX: the original declared all eight features for every config while
        the generator produced only a per-config subset, so the declared schema
        never matched the generated examples for the narrower configs.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: _FEATURE_TYPES[name] for name in self._columns()}
            ),
            supervised_keys=None,
            homepage="https://github.com/mohsinulkabir14/DEPTWEET",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/test/validation JSON files."""
        # download() accepts a dict and returns local paths in the same shape.
        paths = dl_manager.download(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths["val"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example_dict) pairs for the active config.

        BUGFIX: the original yielded bare tuples (e.g. ``yield id_, tweet,
        label``); GeneratorBasedBuilder requires each yield to be a
        ``(key, dict)`` pair whose dict matches the declared features.
        """
        columns = self._columns()
        with open(filepath, encoding="utf-8") as input_file:
            records = json.load(input_file)
        for record in records:
            yield record["id"], {name: record[name] for name in columns}