import json

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """ """


_DESCRIPTION = """ The CaSET dataset is a Catalan corpus of Tweets annotated with Emotions, Static Stance, and Dynamic Stance. The dataset contains 11k unique sentence on five polemical topics, grouped in 6k pairs of sentences, paired as original messages and answers to these messages.  """


_HOMEPAGE = """ https://huggingface.co/datasets/projecte-aina/CaSET-catalan-stance-emotions-twitter/ """



_URL = "https://huggingface.co/datasets/projecte-aina/CaSET-catalan-stance-emotions-twitter/resolve/main/"
_FILE = "data.jsonl"


class CaSETConfig(datasets.BuilderConfig):
    """Builder config for the CaSET dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for CaSET.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class CaSET(datasets.GeneratorBasedBuilder):
    """CaSET dataset."""

    BUILDER_CONFIGS = [
        CaSETConfig(
            name="CaSET",
            version=datasets.Version("1.0.0"),
            description="CaSET dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id_parent": datasets.Value("string"),
                    "id_reply": datasets.Value("string"),
                    "parent_text": datasets.Value("string"),
                    "reply_text": datasets.Value("string"),
                    "topic": datasets.features.ClassLabel(
                        names=["aeroport", "vaccines", "lloguer", "benidormfest", "subrogada"]
                    ),
                    "dynamic_stance": datasets.features.ClassLabel(
                        names=["Agree", "Disagree", "Elaborate", "Query", "Neutral", "Unrelated", "NA"]
                    ),
                    "parent_stance": datasets.features.ClassLabel(
                        names=["FAVOUR", "AGAINST", "NEUTRAL", "NA"]
                    ),
                    "reply_stance": datasets.features.ClassLabel(
                        names=["FAVOUR", "AGAINST", "NEUTRAL", "NA"]
                    ),
                    "parent_emotion": datasets.Sequence(datasets.Value("string")),
                    "reply_emotion": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "data": f"{_URL}{_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["data"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            # Each line of data.jsonl holds one parent/reply pair.
            for id_, line in enumerate(f):
                pair = json.loads(line)
                yield id_, {
                    "id_parent": pair["id_parent"],
                    "id_reply": pair["id_reply"],
                    "parent_text": pair["parent_text"],
                    "reply_text": pair["reply_text"],
                    "topic": pair["topic"],
                    "dynamic_stance": pair["dynamic_stance"],
                    "parent_stance": pair["parent_stance"],
                    "reply_stance": pair["reply_stance"],
                    "parent_emotion": pair["parent_emotion"],
                    "reply_emotion": pair["reply_emotion"],
                }
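

# A minimal usage sketch, not part of the loading script itself: assuming the
# `datasets` library is installed and this script lives in the Hub repository
# referenced by _URL above, the corpus can be loaded by its Hub id. Recent
# `datasets` releases require trust_remote_code=True to run script-based
# builders such as this one.
if __name__ == "__main__":
    # Downloads data.jsonl and builds the single "train" split declared
    # in _split_generators.
    dataset = datasets.load_dataset(
        "projecte-aina/CaSET-catalan-stance-emotions-twitter",
        trust_remote_code=True,
    )
    print(dataset["train"][0])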