Basvoju committed on
Commit
ac9d2c0
1 Parent(s): 05b26e3

Create SemEval2018Task7.py

Files changed (1)
  1. SemEval2018Task7.py +273 -0
SemEval2018Task7.py ADDED
@@ -0,0 +1,273 @@
+ # I am trying to understand the following code. Do not use this for any purpose, as I do not support it.
+ # Use the original source from https://huggingface.co/datasets/DFKI-SLT/science_ie/raw/main/science_ie.py
+
+
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """SemEval 2018 Task 7: the first shared task on semantic relation extraction and classification in scientific paper abstracts."""
+
+
+ import datasets
+ import xml.etree.ElementTree as ET
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @inproceedings{gabor-etal-2018-semeval,
+     title = "{S}em{E}val-2018 Task 7: Semantic Relation Extraction and Classification in Scientific Papers",
+     author = {G{\'a}bor, Kata and
+       Buscaldi, Davide and
+       Schumann, Anne-Kathrin and
+       QasemiZadeh, Behrang and
+       Zargayouna, Ha{\"\i}fa and
+       Charnois, Thierry},
+     booktitle = "Proceedings of the 12th International Workshop on Semantic Evaluation",
+     month = jun,
+     year = "2018",
+     address = "New Orleans, Louisiana",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/S18-1111",
+     doi = "10.18653/v1/S18-1111",
+     pages = "679--688",
+     abstract = "This paper describes the first task on semantic relation extraction and classification in scientific paper abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations and includes three different subtasks. The subtasks were designed so as to compare and quantify the effect of different pre-processing steps on the relation classification results. We expect the task to be relevant for a broad range of researchers working on extracting specialized knowledge from domain corpora, for example but not limited to scientific or bio-medical information extraction. The task attracted a total of 32 participants, with 158 submissions across different scenarios.",
+ }
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This paper describes the first task on semantic relation extraction and classification in scientific paper abstracts at SemEval 2018. The challenge focuses on domain-specific semantic relations and includes three different subtasks. The subtasks were designed so as to compare and quantify the effect of different pre-processing steps on the relation classification results. We expect the task to be relevant for a broad range of researchers working on extracting specialized knowledge from domain corpora, for example but not limited to scientific or bio-medical information extraction. The task attracted a total of 32 participants, with 158 submissions across different scenarios.
+ """
+
+ # Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://github.com/gkata/SemEval2018Task7/tree/testing"
+
+ # Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "clean": {
+         "train": {
+             "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.relations.txt",
+             "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.text.xml",
+         },
+         "test": {
+             "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.relations.txt",
+             "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.1.test.text.xml",
+         },
+     },
+     "noisy": {
+         "train": {
+             "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.relations.txt",
+             "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.text.xml",
+         },
+         "test": {
+             "relations": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.relations.txt",
+             "text": "https://raw.githubusercontent.com/gkata/SemEval2018Task7/testing/1.2.test.text.xml",
+         },
+     },
+ }
+
+
+ def all_text_nodes(root):
+     # Yields the text of an XML element and the tail text of each of its children.
+     # Note: this helper is not used anywhere else in this script.
+     if root.text is not None:
+         yield root.text
+     for child in root:
+         if child.tail is not None:
+             yield child.tail
+
+
+ def reading_entity_data(string_conver):
+     """Strip <entity id="..."> ... </entity> markup from an abstract string and
+     record the character offsets of each entity in the cleaned text
+     (char_end is the inclusive index of an entity's last character)."""
+     parsing_tag = False
+     final_string = ""
+     tag_string = ""
+     current_tag_id = ""
+     current_tag_starting_pos = 0
+     current_tag_ending_pos = 0
+     entity_mapping_list = []
+
+     for i in string_conver:
+         if i == '<':
+             parsing_tag = True
+             # A '<' while an id is pending is the closing tag: the entity
+             # text ends at the previous character of the cleaned string.
+             if current_tag_id != "":
+                 current_tag_ending_pos = len(final_string) - 1
+                 entity_mapping_list.append({"id": current_tag_id,
+                                             "char_start": current_tag_starting_pos,
+                                             "char_end": current_tag_ending_pos})
+                 current_tag_id = ""
+             tag_string = ""
+
+         elif i == '>':
+             parsing_tag = False
+             # An opening tag carries id="..."; the first quoted value is the entity id.
+             tag_string_split = tag_string.split('"')
+             if len(tag_string_split) > 1:
+                 current_tag_id = tag_string_split[1]
+                 current_tag_starting_pos = len(final_string)
+
+         else:
+             if not parsing_tag:
+                 final_string = final_string + i
+             else:
+                 tag_string = tag_string + i
+
+     return {"abstract": final_string, "entities": entity_mapping_list}
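+
+ # Illustrative check of the helper above (hypothetical input; offsets worked out by hand):
+ #   reading_entity_data('We present an <entity id="H01-1001.1">algorithm</entity> for parsing.')
+ # would return
+ #   {"abstract": "We present an algorithm for parsing.",
+ #    "entities": [{"id": "H01-1001.1", "char_start": 14, "char_end": 22}]}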
+
+
+ class Semeval2018Task7(datasets.GeneratorBasedBuilder):
+     """
+     Semeval2018Task7 is a dataset for semantic relation extraction and classification in scientific paper abstracts.
+     """
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="clean", version=VERSION,
+                                description="Relation classification on clean data"),
+         datasets.BuilderConfig(name="noisy", version=VERSION,
+                                description="Relation classification on noisy data"),
+     ]
+     DEFAULT_CONFIG_NAME = "clean"
+
+     def _info(self):
+         class_labels = ["USAGE", "RESULT", "MODEL-FEATURE", "PART_WHOLE", "TOPIC", "COMPARE"]
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "abstract": datasets.Value("string"),
+                 "entities": [
+                     {
+                         "id": datasets.Value("string"),
+                         "char_start": datasets.Value("int32"),
+                         "char_end": datasets.Value("int32"),
+                     }
+                 ],
+                 "relation": [
+                     {
+                         "label": datasets.ClassLabel(names=class_labels),
+                         "arg1": datasets.Value("string"),
+                         "arg2": datasets.Value("string"),
+                         "reverse": datasets.Value("bool"),
+                     }
+                 ],
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # If several configurations are possible (listed in BUILDER_CONFIGS),
+         # the configuration selected by the user is in self.config.name.
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It can accept any type or nested list/dict and will give back the same structure
+         # with each URL replaced by a path to the local file.
+         urls = _URLS[self.config.name]
+         downloaded_files = dl_manager.download(urls)
+
+         # TODO: the test split ships without gold relations, so only the train split is generated for now.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "relation_filepath": downloaded_files["train"]["relations"],
+                     "text_filepath": downloaded_files["train"]["text"],
+                 },
+             )
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, relation_filepath, text_filepath):
+         # This method yields (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         # Each line of the relations file looks like LABEL(ARG1,ARG2) or, for
+         # reversed pairs, LABEL(ARG1,ARG2,REVERSE), e.g. USAGE(H01-1001.9,H01-1001.10,REVERSE).
+         with open(relation_filepath, encoding="utf-8") as f:
+             relations = []
+             for key, row in enumerate(f):
+                 if not row.strip():
+                     continue  # skip blank lines
+                 row_split = row.strip("\n").split("(")
+                 use_case = row_split[0]
+                 second_half = row_split[1].strip(")")
+                 second_half_splits = second_half.split(",")
+                 size = len(second_half_splits)
+
+                 X = second_half_splits[0]
+                 Y = second_half_splits[1]
+
+                 relation = {
+                     "label": use_case,
+                     "arg1": X,
+                     "arg2": Y,
+                     # A third element ("REVERSE") marks a reversed argument order.
+                     "reverse": size == 3,
+                 }
+                 relations.append(relation)
+
+         doc2 = ET.parse(text_filepath)
+         root = doc2.getroot()
+
+         for child in root:
+             # child.attrib is a dict of the element's attributes; skip elements without any.
+             if child.attrib:
+                 text_id = child.attrib
+             else:
+                 continue
+
+             if child.find("title") is not None:
+                 title = child.find("title").text
+                 child_abstract = child.find("abstract")
+             else:
+                 continue
+
+             if child_abstract is not None:
+                 # Serialize the <abstract> element back to a string, then drop the
+                 # wrapping tags so only the entity-annotated text remains.
+                 prev = ET.tostring(child_abstract, "utf-8").decode("utf-8")
+                 prev = prev.replace("<abstract>", "")
+                 prev = prev.replace("</abstract>", "")
+                 final_list = reading_entity_data(prev)
+             else:
+                 continue
+
+             # Note: as in the original script, every example carries the full relation
+             # list for the whole file, not only the relations of this abstract.
+             yield text_id["id"], {
+                 "id": text_id["id"],
+                 "title": title,
+                 "abstract": final_list["abstract"],
+                 "entities": final_list["entities"],
+                 "relation": relations,
+             }
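
A minimal usage sketch, assuming the script is saved locally as SemEval2018Task7.py and the GitHub URLs above are still reachable (recent versions of the datasets library may also require trust_remote_code=True to run script-based datasets):

    from datasets import load_dataset

    dataset = load_dataset("SemEval2018Task7.py", name="clean")  # or name="noisy"
    example = dataset["train"][0]
    print(example["title"])
    print(example["entities"][:3])   # character spans into example["abstract"]
    print(example["relation"][0])    # {"label": ..., "arg1": ..., "arg2": ..., "reverse": ...}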