Datasets:

Languages:
Filipino
ArXiv:
License:
holylovenia committed on
Commit
ab5c843
1 Parent(s): d465587

Upload typhoon_yolanda_tweets.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. typhoon_yolanda_tweets.py +136 -0
typhoon_yolanda_tweets.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# BibTeX entry for the paper introducing the corpus (Imperial et al., 2019).
_CITATION = """\
@misc{imperial2019sentiment,
title={Sentiment Analysis of Typhoon Related Tweets using Standard and Bidirectional Recurrent Neural Networks},
author={Joseph Marvin Imperial and Jeyrome Orosco and Shiela Mae Mazo and Lany Maceda},
year={2019},
eprint={1908.01765},
archivePrefix={arXiv},
primaryClass={cs.NE}
}
"""

_DATASETNAME = "typhoon_yolanda_tweets"

_DESCRIPTION = """\
The dataset contains annotated typhoon and disaster-related tweets in Filipino collected before, during,
and after one month of Typhoon Yolanda in 2013. The dataset has been annotated by an expert into three
sentiment categories: positive, negative, and neutral.
"""

_HOMEPAGE = "https://github.com/imperialite/Philippine-Languages-Online-Corpora/tree/master/Tweets/Annotated%20Yolanda"

_LOCAL = False
_LANGUAGES = ["fil"]

_LICENSE = Licenses.CC_BY_4_0.value

_ROOT_URL = "https://raw.githubusercontent.com/imperialite/Philippine-Languages-Online-Corpora/master/Tweets/Annotated%20Yolanda/"

# One plain-text file per sentiment code per split; codes are -1/0/1
# (presumably negative/neutral/positive — confirm against the source repo).
_URLS = {split: {code: f"{_ROOT_URL}{split}/{code}.txt" for code in (-1, 0, 1)} for split in ("train", "test")}

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
class TyphoonYolandaTweets(datasets.GeneratorBasedBuilder):
    """Loader for sentiment-annotated Filipino tweets about Typhoon Yolanda.

    The dataset contains annotated typhoon and disaster-related tweets in Filipino collected before, during, and
    after one month of Typhoon Yolanda in 2013. The dataset has been annotated by an expert into three sentiment
    categories: positive, negative, and neutral.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="typhoon_yolanda_tweets_source",
            version=SOURCE_VERSION,
            description="Typhoon Yolanda Tweets source schema",
            schema="source",
            subset_id="typhoon_yolanda_tweets",
        ),
        SEACrowdConfig(
            name="typhoon_yolanda_tweets_seacrowd_text",
            version=SEACROWD_VERSION,
            description="Typhoon Yolanda Tweets SEACrowd schema",
            schema="seacrowd_text",
            subset_id="typhoon_yolanda_tweets",
        ),
    ]

    DEFAULT_CONFIG_NAME = "typhoon_yolanda_tweets_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            # Labels are the raw sentiment codes used by the upstream file names.
            features = schemas.text_features(["-1", "0", "1"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download one file per sentiment code for each split.

        Each split's ``filepath`` kwarg is a mapping of sentiment code
        (-1 / 0 / 1) to the local path of the downloaded text file.
        """
        if self.config.name not in ("typhoon_yolanda_tweets_source", "typhoon_yolanda_tweets_seacrowd_text"):
            # Fail loudly here; previously an unknown config fell through and
            # hit a confusing NameError on the undefined path variables below.
            raise ValueError(f"Invalid config: {self.config.name}")

        codes = [-1, 0, 1]
        train_path = dl_manager.download_and_extract({code: _URLS["train"][code] for code in codes})
        test_path = dl_manager.download_and_extract({code: _URLS["test"][code] for code in codes})

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Dict[int, Path], split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs for every tweet in the downloaded files.

        Args:
            filepath: mapping of sentiment code (-1/0/1) to the local file that
                holds one tweet per line for that code.
            split: split name ("train" or "test"); unused, kept for the
                datasets gen_kwargs API.
        """
        if self.config.schema not in ("source", "seacrowd_text"):
            raise ValueError(f"Invalid config: {self.config.name}")

        # Stream the files directly instead of accumulating a DataFrame via
        # repeated pd.concat on an initially empty frame (quadratic and
        # FutureWarning-prone); the yielded examples are identical.
        idx = 0
        for code, file in filepath.items():
            # Explicit encoding: the previous open() used the locale default,
            # which mis-decodes UTF-8 tweet text on some platforms.
            with open(file, encoding="utf-8") as f:
                for line in f:
                    # NOTE: the trailing newline is deliberately retained to
                    # match the original readlines()-based behavior.
                    yield idx, {"id": str(idx), "text": line, "label": str(code)}
                    idx += 1