holylovenia committed on
Commit
f97919d
1 Parent(s): 9ad057f

Upload karonese_sentiment.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. karonese_sentiment.py +135 -0
karonese_sentiment.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from nusacrowd.utils import schemas
8
+ from nusacrowd.utils.configs import NusantaraConfig
9
+ from nusacrowd.utils.constants import Tasks
10
+
11
+ _CITATION = """\
12
+ @article{karo2022sentiment,
13
+ title={Sentiment Analysis in Karonese Tweet using Machine Learning},
14
+ author={Karo, Ichwanul Muslim Karo and Fudzee, Mohd Farhan Md and Kasim, Shahreen and Ramli, Azizul Azhar},
15
+ journal={Indonesian Journal of Electrical Engineering and Informatics (IJEEI)},
16
+ volume={10},
17
+ number={1},
18
+ pages={219--231},
19
+ year={2022}
20
+ }
21
+ """
22
+
23
+ _LANGUAGES = ["btx"]
24
+ _LOCAL = False
25
+
26
+ _DATASETNAME = "karonese_sentiment"
27
+
28
+ _DESCRIPTION = """\
29
+ Karonese sentiment was crawled from Twitter between 1 January 2021 and 31 October 2021.
30
+ The first crawling process used several keywords related to the Karonese, such as
31
+ "deleng sinabung, Sinabung mountain", "mejuah-juah, greeting welcome", "Gundaling",
32
+ and so on. However, due to the insufficient number of tweets obtained using such
33
+ keywords, a second crawling process was done based on several hashtags, such as
34
+ #kalakkaro, # #antonyginting, and #lyodra.
35
+ """
36
+
37
+ _HOMEPAGE = "http://section.iaesonline.com/index.php/IJEEI/article/view/3565"
38
+
39
+ _LICENSE = "Unknown"
40
+
41
+ _URLS = {
42
+ _DATASETNAME: "https://raw.githubusercontent.com/aliakbars/karonese/main/karonese_sentiment.csv",
43
+ }
44
+
45
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
46
+
47
+ _SOURCE_VERSION = "1.0.0"
48
+
49
+ _NUSANTARA_VERSION = "1.0.0"
50
+
51
+
52
+ class KaroneseSentimentDataset(datasets.GeneratorBasedBuilder):
53
+ """Karonese sentiment was crawled from Twitter between 1 January 2021 and 31 October 2021.
54
+ The dataset consists of 397 negative, 342 neutral, and 260 positive tweets.
55
+ """
56
+
57
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
58
+ NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
59
+
60
+ BUILDER_CONFIGS = [
61
+ NusantaraConfig(
62
+ name="karonese_sentiment_source",
63
+ version=SOURCE_VERSION,
64
+ description="Karonese Sentiment source schema",
65
+ schema="source",
66
+ subset_id="karonese_sentiment",
67
+ ),
68
+ NusantaraConfig(
69
+ name="karonese_sentiment_nusantara_text",
70
+ version=NUSANTARA_VERSION,
71
+ description="Karonese Sentiment Nusantara schema",
72
+ schema="nusantara_text",
73
+ subset_id="karonese_sentiment",
74
+ ),
75
+ ]
76
+
77
+ DEFAULT_CONFIG_NAME = "sentiment_nathasa_review_source"
78
+
79
+ def _info(self) -> datasets.DatasetInfo:
80
+ if self.config.schema == "source":
81
+ features = datasets.Features(
82
+ {
83
+ "no": datasets.Value("string"),
84
+ "tweet": datasets.Value("string"),
85
+ "label": datasets.Value("string"),
86
+ }
87
+ )
88
+ elif self.config.schema == "nusantara_text":
89
+ features = schemas.text_features(["negative", "neutral", "positive"])
90
+
91
+ return datasets.DatasetInfo(
92
+ description=_DESCRIPTION,
93
+ features=features,
94
+ homepage=_HOMEPAGE,
95
+ license=_LICENSE,
96
+ citation=_CITATION,
97
+ )
98
+
99
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
100
+ """Returns SplitGenerators."""
101
+ # Dataset does not have predetermined split, putting all as TRAIN
102
+ data_dir = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME]))
103
+
104
+ return [
105
+ datasets.SplitGenerator(
106
+ name=datasets.Split.TRAIN,
107
+ gen_kwargs={
108
+ "filepath": data_dir,
109
+ },
110
+ ),
111
+ ]
112
+
113
+ def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
114
+ """Yields examples as (key, example) tuples."""
115
+ df = pd.read_csv(filepath).drop("no", axis=1)
116
+ df.columns = ["text", "label"]
117
+
118
+ if self.config.schema == "source":
119
+ for idx, row in df.iterrows():
120
+ example = {
121
+ "no": str(idx+1),
122
+ "tweet": row.text,
123
+ "label": row.label,
124
+ }
125
+ yield idx, example
126
+ elif self.config.schema == "nusantara_text":
127
+ for idx, row in df.iterrows():
128
+ example = {
129
+ "id": str(idx+1),
130
+ "text": row.text,
131
+ "label": row.label,
132
+ }
133
+ yield idx, example
134
+ else:
135
+ raise ValueError(f"Invalid config: {self.config.name}")