# Depression_Severity_Dataset / load_script.py
# coding=utf-8
import json

import datasets

_DESCRIPTION = """Depression Severity Dataset: Reddit posts annotated with depression severity levels. License unknown.
"""
_CITATION = """
@inproceedings{naseem2022early,
title={Early Identification of Depression Severity Levels on Reddit Using Ordinal Classification},
author={Naseem, Usman and Dunn, Adam G and Kim, Jinman and Khushi, Matloob},
booktitle={Proceedings of the ACM Web Conference 2022},
pages={2563--2572},
year={2022}
}
"""
# Use the "resolve" endpoint so the raw JSON files are downloaded (a "blob" URL returns the HTML page).
_URLs = {
    "whole": "https://huggingface.co/datasets/siyangliu/Depression_Severity_Dataset/resolve/main/Reddit_depression_dataset.json",
    "train": "https://huggingface.co/datasets/siyangliu/Depression_Severity_Dataset/resolve/main/Reddit_depression_dataset_train.json",
    "val": "https://huggingface.co/datasets/siyangliu/Depression_Severity_Dataset/resolve/main/Reddit_depression_dataset_val.json",
    "test": "https://huggingface.co/datasets/siyangliu/Depression_Severity_Dataset/resolve/main/Reddit_depression_dataset_test.json",
}
class Reddit_depression(datasets.GeneratorBasedBuilder):
    """Reddit_depression dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="withoutLabel",
            description="Posts only, without severity labels.",
            version=VERSION,
        ),
        datasets.BuilderConfig(
            name="withLabel",
            description="Posts paired with their depression severity labels.",
            version=VERSION,
        ),
    ]
    def _info(self):
        # Only the "withLabel" config exposes the severity label.
        features = {"text": datasets.Value("string")}
        if self.config.name == "withLabel":
            features["label"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage="https://github.com/usmaann/Depression_Severity_Dataset",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # download_and_extract returns a dict keyed like _URLs ("train", "val", "test", "whole").
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir["val"]},
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples matching the declared features."""
        with open(filepath, encoding="utf-8") as input_file:
            dataset = json.load(input_file)
        for idx, meta_data in enumerate(dataset):
            if self.config.name == "withoutLabel":
                yield idx, {"text": meta_data["text"]}
            elif self.config.name == "withLabel":
                yield idx, {"text": meta_data["text"], "label": meta_data["label"]}
            else:
                raise ValueError(f"Not a valid config name: {self.config.name}")
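

# A minimal usage sketch, not part of the original script: it assumes a version of the
# `datasets` library that still supports local dataset loading scripts and that this
# file is run directly from disk. The "withLabel" config name comes from
# BUILDER_CONFIGS above.
if __name__ == "__main__":
    # Build all three splits through this loading script and peek at one example.
    dataset = datasets.load_dataset(__file__, name="withLabel")
    print(dataset)
    print(dataset["train"][0])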