holylovenia committed on
Commit
949b8d9
1 Parent(s): 8e96e7d

Upload indolem_sentiment.py with huggingface_hub

Files changed (1)
  1. indolem_sentiment.py +263 -0
indolem_sentiment.py ADDED
@@ -0,0 +1,263 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ """
+ Dataset loading script for the IndoLEM sentiment analysis dataset.
+ 
+ IndoLEM sentiment is a binary (positive/negative) sentence classification task for
+ Indonesian, sourced from Twitter and hotel reviews. This script exposes the data in
+ two configurations: the original ("source") schema and the unified Nusantara text
+ classification schema ("nusantara_text").
+ 
+ Full documentation on writing dataset loading scripts can be found here:
+ https://huggingface.co/docs/datasets/add_dataset.html
+ """
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+ 
+ import datasets
+ 
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_NUSANTARA_VIEW_NAME
+ 
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2011-00677,
+   author     = {Fajri Koto and
+                 Afshin Rahimi and
+                 Jey Han Lau and
+                 Timothy Baldwin},
+   title      = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language
+                 Model for Indonesian {NLP}},
+   journal    = {CoRR},
+   volume     = {abs/2011.00677},
+   year       = {2020},
+   url        = {https://arxiv.org/abs/2011.00677},
+   eprinttype = {arXiv},
+   eprint     = {2011.00677},
+   timestamp  = {Fri, 06 Nov 2020 15:32:47 +0100},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+ 
+ _DATASETNAME = "indolem_sentiment"
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+ _UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+ 
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+ 
+ _DESCRIPTION = """\
+ IndoLEM (Indonesian Language Evaluation Montage) is a comprehensive benchmark that comprises seven tasks for the Indonesian language, grouped into three pillars of NLP: morpho-syntax, semantics, and discourse.
+ 
+ This dataset is a binary sentiment classification task (positive and negative), with the following distribution:
+ * Train: 3638 sentences
+ * Development: 399 sentences
+ * Test: 1011 sentences
+ 
+ The data is sourced from 1) Twitter [(Koto and Rahmaningtyas, 2017)](https://www.researchgate.net/publication/321757985_InSet_Lexicon_Evaluation_of_a_Word_List_for_Indonesian_Sentiment_Analysis_in_Microblogs)
+ and 2) [hotel reviews](https://github.com/annisanurulazhar/absa-playground/).
+ 
+ The original experiments are based on 5-fold cross-validation.
+ """
+ 
+ _HOMEPAGE = "https://indolem.github.io/"
+ 
+ _LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
+ 
+ # These URLs are passed to dl_manager in _split_generators.
+ # In most cases the URLs are the same for the source and nusantara configs;
+ # if different files are needed per config, this dict can hold multiple entries
+ # (it may be an arbitrarily nested dict/list of URLs).
+ _URLS = {
+     _DATASETNAME: {
+         'train': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/train0.csv',
+         'dev': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/dev0.csv',
+         'test': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/test0.csv'
+     }
+ }
+ 
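+ # Note: the URLs above point to files named train0/dev0/test0.csv. Given the 5-fold
+ # cross-validation setup described in _DESCRIPTION, these presumably correspond to a
+ # single fold of the split (an assumption based on the file names, not confirmed here).
+ 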
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
+ 
+ _SOURCE_VERSION = "1.0.0"
+ 
+ _NUSANTARA_VERSION = "1.0.0"
+ 
+ 
+ class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):
+ 
+     label_classes = ['negative', 'positive']
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+ 
+     # You will be able to load the "source" or "nusantara" configurations with
+     # ds_source = datasets.load_dataset('my_dataset', name='source')
+     # ds_nusantara = datasets.load_dataset('my_dataset', name='nusantara')
+ 
+     # For local datasets you can make use of the `data_dir` and `data_files` kwargs
+     # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
+     # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
+     # ds_nusantara = datasets.load_dataset('my_dataset', name='nusantara', data_dir="/path/to/data/files")
+ 
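+     # For this script specifically, a hypothetical usage sketch would be
+     # (the config names are the ones defined in BUILDER_CONFIGS below):
+     # ds_source = datasets.load_dataset('indolem_sentiment.py', name='indolem_sentiment_source')
+     # ds_nusantara = datasets.load_dataset('indolem_sentiment.py', name='indolem_sentiment_nusantara_text')
+ 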
+     # Two configs are defined below: the raw "source" schema and the unified
+     # "nusantara_text" schema; both use "indolem_sentiment" as the subset_id.
+ 
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indolem_sentiment_source",
+             version=SOURCE_VERSION,
+             description="indolem_sentiment source schema",
+             schema="source",
+             subset_id="indolem_sentiment",
+         ),
+         NusantaraConfig(
+             name="indolem_sentiment_nusantara_text",
+             version=NUSANTARA_VERSION,
+             description="indolem_sentiment Nusantara schema",
+             schema="nusantara_text",
+             subset_id="indolem_sentiment",
+         ),
+     ]
+ 
+     DEFAULT_CONFIG_NAME = "indolem_sentiment_source"
+ 
+     def _info(self) -> datasets.DatasetInfo:
+ 
+         # The source schema keeps all keys/information/labels as close to the original dataset as possible.
+         # You can arbitrarily nest lists and dictionaries.
+         # For iterables, use lists over tuples or `datasets.Sequence`.
+ 
+         if self.config.schema == "source":
+             features = datasets.Features({"sentence": datasets.Value("string"), "sentiment": datasets.Value("int32")})
+         elif self.config.schema == "nusantara_text":
+             features = schemas.text_features(self.label_classes)
+ 
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # This method downloads the data and defines the splits depending on the configuration.
+         # If you need to access the "source" or "nusantara" config choice, it is in self.config.name.
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs;
+         # see the DownloadManager docs here: https://huggingface.co/docs/datasets/package_reference/builder_classes.html#datasets.DownloadManager
+         # dl_manager can accept any type of nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+ 
+         urls = _URLS[_DATASETNAME]
+         train_data = Path(dl_manager.download(urls['train']))
+         test_data = Path(dl_manager.download(urls['test']))
+         dev_data = Path(dl_manager.download(urls['dev']))
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever is put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": train_data,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": test_data,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": dev_data,
+                     "split": "dev",
+                 },
+             ),
+         ]
+ 
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # This method reads the files prepared in _split_generators and yields (key, example) tuples.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+ 
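+         # Assumed input format (inferred from the parsing below, not documented in this
+         # script): each file is a CSV whose first line is a header, and every following
+         # row looks like "<sentence>",<label> where <label> is 0 (negative) or 1 (positive),
+         # e.g. "<some Indonesian sentence>",1 (illustrative only). The slicing below relies
+         # on that layout: line[:-2] drops the trailing ,<label> and line[-1] is the label digit.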
+         with filepath.open('r', encoding='utf-8') as f:
+             line = f.readline()
+             id = 0
+             while line:
+                 line = f.readline().strip()
+                 if len(line) == 0: break
+ 
+                 ex = {}
+                 id += 1
+                 sentence = line[:-2].strip('"')
+                 sentiment = int(line[-1])
+                 if self.config.schema == 'source':
+                     ex = {'sentence': sentence, 'sentiment': sentiment}
+                 elif self.config.schema == 'nusantara_text':
+                     ex = {'id': str(id), 'text': str(sentence), 'label': self.label_classes[sentiment]}
+                 else:
+                     raise ValueError(f"Invalid config: {self.config.name}")
+ 
+                 yield id, ex
+ 
+ # This template is based on the following template from the datasets package:
+ # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py