Basvoju committed on
Commit
21d8a8c
1 Parent(s): f2eea07

Update SemEval2018Task7.py

Browse files
Files changed (1) hide show
  1. SemEval2018Task7.py +42 -20
SemEval2018Task7.py CHANGED
@@ -15,7 +15,7 @@
15
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
  # See the License for the specific language governing permissions and
17
  # limitations under the License.
18
- """Semeval2018Task7 is a dataset that describes the Semantic Relation Extraction and Classification in Scientific Papers"""
19
 
20
 
21
 
@@ -108,8 +108,15 @@ def all_text_nodes(root):
108
  yield child.tail
109
 
110
 
111
- def reading_entity_data(string_conver):
112
-
 
 
 
 
 
 
 
113
  parsing_tag = False
114
  final_string = ""
115
  tag_string= ""
@@ -117,8 +124,8 @@ def reading_entity_data(string_conver):
117
  current_tag_starting_pos = 0
118
  current_tag_ending_pos= 0
119
  entity_mapping_list=[]
120
-
121
- for i in string_conver:
122
  if i=='<':
123
  parsing_tag = True
124
  if current_tag_id!="":
@@ -143,8 +150,7 @@ def reading_entity_data(string_conver):
143
  else:
144
  tag_string = tag_string + i
145
 
146
- return {"abstract":final_string, "entities":entity_mapping_list}
147
-
148
 
149
 
150
 
@@ -212,7 +218,7 @@ class Semeval2018Task7(datasets.GeneratorBasedBuilder):
212
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
213
  urls = _URLS[self.config.name]
214
  downloaded_files = dl_manager.download(urls)
215
- print(downloaded_files)
216
 
217
  return [
218
  datasets.SplitGenerator(
@@ -232,14 +238,14 @@ class Semeval2018Task7(datasets.GeneratorBasedBuilder):
232
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
233
  with open(relation_filepath, encoding="utf-8") as f:
234
  relations = []
235
-
236
  for key, row in enumerate(f):
237
  row_split = row.strip("\n").split("(")
238
  use_case = row_split[0]
239
  second_half = row_split[1].strip(")")
240
  second_half_splits = second_half.split(",")
241
  size = len(second_half_splits)
242
-
243
  relation = {
244
  "label": use_case,
245
  "arg1": second_half_splits[0],
@@ -247,6 +253,20 @@ class Semeval2018Task7(datasets.GeneratorBasedBuilder):
247
  "reverse": True if size == 3 else False
248
  }
249
  relations.append(relation)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
250
 
251
  doc2 = ET.parse(text_filepath)
252
  root = doc2.getroot()
@@ -255,23 +275,25 @@ class Semeval2018Task7(datasets.GeneratorBasedBuilder):
255
  if child.find("title")==None:
256
  continue
257
  text_id = child.attrib
 
258
 
259
  if child.find("abstract")==None:
260
  continue
261
  title = child.find("title").text
262
  child_abstract = child.find("abstract")
263
 
264
- prev=ET.tostring(child_abstract,"utf-8")
265
- prev= prev.decode('utf8').replace("b\'","")
266
- prev= prev.replace("<abstract>","")
267
- prev= prev.replace("</abstract>","")
268
- final_list= reading_entity_data(prev)
269
 
270
-
 
 
 
 
 
 
271
  yield text_id['id'], {
272
  "id": text_id['id'],
273
- "title": title,
274
- "abstract": final_list['abstract'],
275
- "entities": final_list['entities'],
276
- "relation": relations
277
  }
 
15
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
  # See the License for the specific language governing permissions and
17
  # limitations under the License.
18
+ """Semeval2018Task7 is a dataset that describes the first task on semantic relation extraction and classification in scientific paper abstracts"""
19
 
20
 
21
 
 
108
  yield child.tail
109
 
110
 
111
+ def reading_entity_data(ET_data_to_convert):
112
+ parsed_data = ET.tostring(ET_data_to_convert,"utf-8")
113
+ parsed_data= parsed_data.decode('utf8').replace("b\'","")
114
+ parsed_data= parsed_data.replace("<abstract>","")
115
+ parsed_data= parsed_data.replace("</abstract>","")
116
+ parsed_data= parsed_data.replace("<title>","")
117
+ parsed_data= parsed_data.replace("</title>","")
118
+ parsed_data = parsed_data.replace("\n\n\n","")
119
+
120
  parsing_tag = False
121
  final_string = ""
122
  tag_string= ""
 
124
  current_tag_starting_pos = 0
125
  current_tag_ending_pos= 0
126
  entity_mapping_list=[]
127
+
128
+ for i in parsed_data:
129
  if i=='<':
130
  parsing_tag = True
131
  if current_tag_id!="":
 
150
  else:
151
  tag_string = tag_string + i
152
 
153
+ return {"text_data":final_string, "entities":entity_mapping_list}
 
154
 
155
 
156
 
 
218
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
219
  urls = _URLS[self.config.name]
220
  downloaded_files = dl_manager.download(urls)
221
+ #print(downloaded_files)
222
 
223
  return [
224
  datasets.SplitGenerator(
 
238
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
239
  with open(relation_filepath, encoding="utf-8") as f:
240
  relations = []
241
+ text_id_to_relations_map= {}
242
  for key, row in enumerate(f):
243
  row_split = row.strip("\n").split("(")
244
  use_case = row_split[0]
245
  second_half = row_split[1].strip(")")
246
  second_half_splits = second_half.split(",")
247
  size = len(second_half_splits)
248
+
249
  relation = {
250
  "label": use_case,
251
  "arg1": second_half_splits[0],
 
253
  "reverse": True if size == 3 else False
254
  }
255
  relations.append(relation)
256
+
257
+ arg_id = second_half_splits[0].split(".")[0]
258
+ if arg_id not in text_id_to_relations_map:
259
+ text_id_to_relations_map[arg_id] = [relation]
260
+ else:
261
+ text_id_to_relations_map[arg_id].append(relation)
262
+ #print("result", text_id_to_relations_map)
263
+
264
+ #for arg_id, values in text_id_to_relations_map.items():
265
+ #print(f"ID: {arg_id}")
266
+ # for value in values:
267
+ # (value)
268
+
269
+
270
 
271
  doc2 = ET.parse(text_filepath)
272
  root = doc2.getroot()
 
275
  if child.find("title")==None:
276
  continue
277
  text_id = child.attrib
278
+ #print("text_id", text_id)
279
 
280
  if child.find("abstract")==None:
281
  continue
282
  title = child.find("title").text
283
  child_abstract = child.find("abstract")
284
 
 
 
 
 
 
285
 
286
+ abstract_text_and_entities = reading_entity_data(child.find("abstract"))
287
+ title_text_and_entities = reading_entity_data(child.find("title"))
288
+
289
+ text_relations = []
290
+ if text_id['id'] in text_id_to_relations_map:
291
+ text_relations = text_id_to_relations_map[text_id['id']]
292
+
293
  yield text_id['id'], {
294
  "id": text_id['id'],
295
+ "title": title_text_and_entities['text_data'],
296
+ "abstract": abstract_text_and_entities['text_data'],
297
+ "entities": abstract_text_and_entities['entities'] + title_text_and_entities['entities'],
298
+ "relation": text_relations
299
  }