alexcadillon committed on
Commit 3517760
1 Parent(s): aa1a292

Upload 6 files

ABSA_Gold_TestData/Laptops_Test_Gold.xml ADDED
The diff for this file is too large to render. See raw diff
 
ABSA_Gold_TestData/Restaurants_Test_Gold.xml ADDED
The diff for this file is too large to render. See raw diff
 
SemEval'14-ABSA-TrainData_v2 & AnnotationGuidelines/Laptop_Train_v2.xml ADDED
The diff for this file is too large to render. See raw diff
 
SemEval'14-ABSA-TrainData_v2 & AnnotationGuidelines/Restaurants_Train_v2.xml ADDED
The diff for this file is too large to render. See raw diff
 
SemEval'14-ABSA-TrainData_v2 & AnnotationGuidelines/SemEval14_ABSA_AnnotationGuidelines.pdf ADDED
Binary file (621 kB). View file
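
The four XML diffs above are too large to render inline, so here is a minimal, made-up fragment in the shape that the SemEval2014Task4.py script added below expects: sentence elements with a text child plus aspectTerm and aspectCategory annotations carried as attributes. The sentence, offsets, and attribute set are illustrative assumptions inferred from what the loading script parses; the real files may differ in detail.

# Hedged sketch: a made-up one-sentence fragment in the shape the loading
# script below parses; real files contain thousands of sentences.
import xml.etree.ElementTree as ET

SAMPLE = """<sentences>
  <sentence id="813">
    <text>The staff was friendly but the pasta was bland.</text>
    <aspectTerms>
      <aspectTerm term="staff" polarity="positive" from="4" to="9"/>
      <aspectTerm term="pasta" polarity="negative" from="31" to="36"/>
    </aspectTerms>
    <aspectCategories>
      <aspectCategory category="service" polarity="positive"/>
      <aspectCategory category="food" polarity="negative"/>
    </aspectCategories>
  </sentence>
</sentences>"""

root = ET.fromstring(SAMPLE)
for sentence in root.iter("sentence"):
    print(sentence.attrib.get("id"), sentence.find("text").text)
    for term in sentence.iter("aspectTerm"):
        print("  term:", term.attrib)          # term, polarity, from, to
    for category in sentence.iter("aspectCategory"):
        print("  category:", category.attrib)  # category, polarity (restaurants only)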
 
SemEval2014Task4.py ADDED
@@ -0,0 +1,193 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """These are the datasets for Aspect Based Sentiment Analysis (ABSA), Task 4 of SemEval-2014."""
+
+
+ import xml.etree.ElementTree as ET
+ import os
+
+ import datasets
+ from datasets import ClassLabel
+
+
+ _CITATION = """\
+ @inproceedings{pontiki-etal-2014-semeval,
+     title = "{S}em{E}val-2014 Task 4: Aspect Based Sentiment Analysis",
+     author = "Pontiki, Maria  and
+       Galanis, Dimitris  and
+       Pavlopoulos, John  and
+       Papageorgiou, Harris  and
+       Androutsopoulos, Ion  and
+       Manandhar, Suresh",
+     booktitle = "Proceedings of the 8th International Workshop on Semantic Evaluation ({S}em{E}val 2014)",
+     month = aug,
+     year = "2014",
+     address = "Dublin, Ireland",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/S14-2004",
+     doi = "10.3115/v1/S14-2004",
+     pages = "27--35",
+ }
+ """
+
+ _DESCRIPTION = """\
+ These are the datasets for Aspect Based Sentiment Analysis (ABSA), Task 4 of SemEval-2014.
+ """
+
+ _HOMEPAGE = "https://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "restaurants": {"train": "SemEval'14-ABSA-TrainData_v2 & AnnotationGuidelines/Restaurants_Train_v2.xml",
+                     "test": "ABSA_Gold_TestData/Restaurants_Test_Gold.xml"},
+     "laptops": {"train": "SemEval'14-ABSA-TrainData_v2 & AnnotationGuidelines/Laptop_Train_v2.xml",
+                 "test": "ABSA_Gold_TestData/Laptops_Test_Gold.xml"},
+ }
+
+
+ class SemEval2014Task4(datasets.GeneratorBasedBuilder):
+     """These are the datasets for Aspect Based Sentiment Analysis (ABSA), Task 4 of SemEval-2014."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'restaurants')
+     # data = datasets.load_dataset('my_dataset', 'laptops')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="restaurants", version=VERSION, description="Restaurant review sentences"),
+         datasets.BuilderConfig(name="laptops", version=VERSION, description="Laptop review sentences"),
+     ]
+
+     # DEFAULT_CONFIG_NAME = "restaurants"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset
+         if self.config.name == "restaurants":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+             features = datasets.Features(
+                 {'sentenceId': datasets.Value(dtype='string'),
+                  'text': datasets.Value(dtype='string'),
+                  'aspectTerms': [
+                      {'term': datasets.Value(dtype='string'),
+                       'polarity': ClassLabel(num_classes=4, names=['positive', 'negative', 'neutral', 'conflict']),
+                       'from': datasets.Value(dtype='string'),
+                       'to': datasets.Value(dtype='string')}
+                  ],
+                  'aspectCategories': [
+                      {'category': ClassLabel(num_classes=5, names=['food', 'service', 'price', 'ambience', 'anecdotes/miscellaneous']),
+                       'polarity': ClassLabel(num_classes=4, names=['positive', 'negative', 'neutral', 'conflict'])}
+                  ],
+                  'domain': ClassLabel(num_classes=2, names=['restaurants', 'laptops'])
+                  }
+             )
+         elif self.config.name == "laptops":
+             features = datasets.Features(
+                 {'sentenceId': datasets.Value(dtype='string'),
+                  'text': datasets.Value(dtype='string'),
+                  'aspectTerms': [
+                      {'term': datasets.Value(dtype='string'),
+                       'polarity': ClassLabel(num_classes=4, names=['positive', 'negative', 'neutral', 'conflict']),
+                       'from': datasets.Value(dtype='string'),
+                       'to': datasets.Value(dtype='string')}
+                  ],
+                  'domain': ClassLabel(num_classes=2, names=['restaurants', 'laptops'])
+                  }
+             )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir['train'],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir['test'],
+                     "split": "test"
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `id_` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         tree = ET.parse(filepath)
+         root = tree.getroot()
+         for id_, sentence in enumerate(root.iter("sentence")):
+             sentenceId = sentence.attrib.get("id")
+             text = sentence.find("text").text
+             aspectTerms = []
+             for aspectTerm in sentence.iter("aspectTerm"):
+                 aspectTerms.append(aspectTerm.attrib)
+             if self.config.name == "restaurants":
+                 aspectCategories = []
+                 for aspectCategory in sentence.iter("aspectCategory"):
+                     aspectCategories.append(aspectCategory.attrib)
+                 yield id_, {
+                     "sentenceId": sentenceId,
+                     "text": text,
+                     "aspectTerms": aspectTerms,
+                     "aspectCategories": aspectCategories,
+                     "domain": self.config.name,
+                 }
+             elif self.config.name == 'laptops':
+                 yield id_, {
+                     "sentenceId": sentenceId,
+                     "text": text,
+                     "aspectTerms": aspectTerms,
+                     "domain": self.config.name,
+                 }
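
For reference, a hedged sketch of how this loading script would typically be consumed. The hub repository id is an assumption based on the committer's namespace; pointing load_dataset at the local SemEval2014Task4.py file works the same way on versions of the datasets library that still run loading scripts.

# Hedged usage sketch. The repo id below is assumed, not confirmed by this commit;
# a local path to SemEval2014Task4.py can be used instead. Recent datasets
# releases may additionally require trust_remote_code=True.
from datasets import load_dataset

restaurants = load_dataset("alexcadillon/SemEval2014Task4", "restaurants")

example = restaurants["train"][0]
print(example["text"])
print(example["aspectTerms"])        # list of dicts: term, polarity (class id), from, to
print(example["aspectCategories"])   # restaurants config only; the laptops config has no aspect categories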