from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks

_CITATION = """\
@article{karo2022sentiment,
  title={Sentiment Analysis in Karonese Tweet using Machine Learning},
  author={Karo, Ichwanul Muslim Karo and Fudzee, Mohd Farhan Md and Kasim, Shahreen and Ramli, Azizul Azhar},
  journal={Indonesian Journal of Electrical Engineering and Informatics (IJEEI)},
  volume={10},
  number={1},
  pages={219--231},
  year={2022}
}
"""

_LANGUAGES = ["btx"]
_LOCAL = False

_DATASETNAME = "karonese_sentiment"

_DESCRIPTION = """\
Karonese sentiment was crawled from Twitter between 1 January 2021 and 31 October 2021.
The first crawling process used several keywords related to the Karonese, such as
"deleng sinabung, Sinabung mountain", "mejuah-juah, greeting welcome", "Gundaling",
and so on. However, because these keywords yielded an insufficient number of tweets,
a second crawling process was done based on several hashtags, such as #kalakkaro,
#antonyginting, and #lyodra.
"""

_HOMEPAGE = "http://section.iaesonline.com/index.php/IJEEI/article/view/3565"

_LICENSE = "Unknown"

_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/aliakbars/karonese/main/karonese_sentiment.csv",
}

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class KaroneseSentimentDataset(datasets.GeneratorBasedBuilder):
    """Karonese sentiment was crawled from Twitter between 1 January 2021 and 31 October 2021.
    The dataset consists of 397 negative, 342 neutral, and 260 positive tweets.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="karonese_sentiment_source",
            version=SOURCE_VERSION,
            description="Karonese Sentiment source schema",
            schema="source",
            subset_id="karonese_sentiment",
        ),
        SEACrowdConfig(
            name="karonese_sentiment_seacrowd_text",
            version=SEACROWD_VERSION,
            description="Karonese Sentiment Nusantara schema",
            schema="seacrowd_text",
            subset_id="karonese_sentiment",
        ),
    ]

    DEFAULT_CONFIG_NAME = "sentiment_nathasa_review_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "no": datasets.Value("string"),
                    "tweet": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["negative", "neutral", "positive"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # The dataset ships with no predefined splits, so everything goes into TRAIN.
        data_path = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""
        # Drop the original running-number column and normalize the column names.
        df = pd.read_csv(filepath).drop("no", axis=1)
        df.columns = ["text", "label"]

        if self.config.schema == "source":
            for idx, row in df.iterrows():
                example = {
                    "no": str(idx+1),
                    "tweet": row.text,
                    "label": row.label,
                }
                yield idx, example
        elif self.config.schema == "seacrowd_text":
            for idx, row in df.iterrows():
                example = {
                    "id": str(idx+1),
                    "text": row.text,
                    "label": row.label,
                }
                yield idx, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")