# Loading script for the CaSERa dataset.
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """ """
_DESCRIPTION = """ The CaSERA dataset is a Catalan corpus from the forum Racó Català annotated with Emotions and Dynamic Stance. The dataset contains 15.782 unique sentences grouped in 10.745 pairs of sentences, paired as original messages and answers to these messages.
"""
_HOMEPAGE = """ https://huggingface.co/datasets/projecte-aina/CaSERA-catalan-stance-emotions-raco """
_URL = "https://huggingface.co/datasets/projecte-aina/CaSERa-catalan-stance-emotions-raco/resolve/main/"
_FILE = "data.jsonl"
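
# For reference, each line of data.jsonl is expected to hold one JSON object
# with the fields declared in _info() below. An illustrative (made-up) record:
#   {"id_conversation": "...", "id_reply": "...",
#    "parent_text": "...", "reply_text": "...",
#    "dynamic_stance": "Agree",
#    "parent_emotion": ["..."], "reply_emotion": ["..."]}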
class CaSERAConfig(datasets.BuilderConfig):
    """Builder config for the CaSERA dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for CaSERA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CaSERAConfig, self).__init__(**kwargs)
class CaSERA(datasets.GeneratorBasedBuilder):
    """CaSERA Dataset."""

    BUILDER_CONFIGS = [
        CaSERAConfig(
            name="CaSERA",
            version=datasets.Version("1.0.0"),
            description="CaSERA dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id_conversation": datasets.Value("string"),
                    "id_reply": datasets.Value("string"),
                    "parent_text": datasets.Value("string"),
                    "reply_text": datasets.Value("string"),
                    "dynamic_stance": datasets.features.ClassLabel(
                        names=["Agree", "Disagree", "Elaborate", "Query", "Neutral", "Unrelated", "NA"]
                    ),
                    "parent_emotion": datasets.Sequence(datasets.Value("string")),
                    "reply_emotion": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "data": f"{_URL}{_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["data"]}),
        ]
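    # Note: the corpus ships as a single JSONL file, so only a TRAIN split is
    # exposed above. If dev/test partitions are needed, they can be derived
    # after loading, e.g. with Dataset.train_test_split(); that is a downstream
    # choice, not something this script performs.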
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = [json.loads(line) for line in f]
        for id_, pair in enumerate(data):
            yield id_, {
                "id_conversation": pair["id_conversation"],
                "id_reply": pair["id_reply"],
                "parent_text": pair["parent_text"],
                "reply_text": pair["reply_text"],
                "dynamic_stance": pair["dynamic_stance"],
                "parent_emotion": pair["parent_emotion"],
                "reply_emotion": pair["reply_emotion"],
            }
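
# Minimal usage sketch (not part of the loading logic above): with the
# `datasets` library installed, the builder can be exercised by loading the
# dataset from the Hub. The repository id comes from _URL and the split name
# from _split_generators(); the printed fields are illustrative. Depending on
# the installed version of `datasets`, loading a script-based dataset may also
# require passing trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("projecte-aina/CaSERa-catalan-stance-emotions-raco", split="train")
    print(dataset)  # row count and feature schema
    example = dataset[0]
    print(example["parent_text"])
    print(example["reply_text"])
    # dynamic_stance is a ClassLabel, so the stored integer can be mapped back to its name.
    print(dataset.features["dynamic_stance"].int2str(example["dynamic_stance"]))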