holylovenia committed on
Commit
40b58f1
1 Parent(s): 564be85

Upload emotcmt.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. emotcmt.py +123 -0
emotcmt.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import List
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from nusacrowd.utils import schemas
8
+ from nusacrowd.utils.configs import NusantaraConfig
9
+ from nusacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_NUSANTARA_VIEW_NAME
10
+
11
# Canonical dataset identifier; reused in config names and subset ids below.
_DATASETNAME = "emotcmt"
# Default view names provided by the NusaCrowd framework constants.
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False  # Data is fetched from a public URL, not supplied by the user.

# BibTeX entries for the two papers associated with this corpus.
_CITATION = """\
@inproceedings{barik-etal-2019-normalization,
title = "Normalization of {I}ndonesian-{E}nglish Code-Mixed {T}witter Data",
author = "Barik, Anab Maulana and
Mahendra, Rahmad and
Adriani, Mirna",
booktitle = "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-5554",
doi = "10.18653/v1/D19-5554",
pages = "417--424"
}

@article{Yulianti2021NormalisationOI,
title={Normalisation of Indonesian-English Code-Mixed Text and its Effect on Emotion Classification},
author={Evi Yulianti and Ajmal Kurnia and Mirna Adriani and Yoppy Setyo Duto},
journal={International Journal of Advanced Computer Science and Applications},
year={2021}
}
"""

_DESCRIPTION = """\
EmotCMT is an emotion classification Indonesian-English code-mixing dataset created through an Indonesian-English code-mixed Twitter data pipeline consisting of 4 processing steps, i.e., tokenization, language identification, lexical normalization, and translation. The dataset consists of 825 tweets, 22.736 tokens with 11.204 Indonesian tokens and 5.613 English tokens. Each tweet is labelled with an emotion, i.e., cinta (love), takut (fear), sedih (sadness), senang (joy), or marah (anger).
"""

_HOMEPAGE = "https://github.com/ir-nlp-csui/emotcmt"

_LICENSE = "MIT"

# The corpus ships as a single CSV; it is exposed as the "test" split only.
_URLs = {
    "test": "https://raw.githubusercontent.com/ir-nlp-csui/emotcmt/main/codeswitch_emotion.csv"
}

_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
57
+
58
+
59
class EmotCMT(datasets.GeneratorBasedBuilder):
    """Loader for EmotCMT, an Indonesian-English code-mixed emotion
    classification corpus exposed as a single ``test`` split.

    Two schemas are supported: the raw ``source`` view (tweet + label) and
    the unified ``nusantara_text`` view with five emotion classes.
    """

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="emotcmt_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="EmotCMT source schema",
            schema="source",
            subset_id="emotcmt",
        ),
        NusantaraConfig(
            name="emotcmt_nusantara_text",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="EmotCMT Nusantara schema",
            schema="nusantara_text",
            subset_id="emotcmt",
        ),
    ]

    DEFAULT_CONFIG_NAME = "emotcmt_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return the ``DatasetInfo`` matching the active config's schema."""
        if self.config.schema == "source":
            # Raw view: tweet text and its emotion label, both plain strings.
            features = datasets.Features(
                {
                    "tweet": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_text":
            # Unified text-classification view over the five emotion classes.
            features = schemas.text_features(["cinta", "takut", "sedih", "senang", "marah"])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the single CSV and expose it as the TEST split."""
        csv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": csv_path},
            )
        ]

    def _generate_examples(self, filepath: Path):
        """Yield ``(key, example)`` pairs according to the configured schema."""
        # reset_index() materializes the default RangeIndex as an id column,
        # so each row carries (id, label, sentence).
        frame = pd.read_csv(filepath).reset_index()
        frame.columns = ["id", "label", "sentence"]

        if self.config.schema == "source":
            for ex_id, label, sentence in frame.itertuples(index=False, name=None):
                yield ex_id, {"tweet": sentence, "label": label}
        elif self.config.schema == "nusantara_text":
            for ex_id, label, sentence in frame.itertuples(index=False, name=None):
                yield ex_id, {
                    "id": str(ex_id),
                    "text": sentence,
                    "label": label,
                }
        else:
            raise ValueError(f"Invalid config: {self.config.name}")