Basvoju committed on
Commit a07919f · 1 Parent(s): 42889e7

Upload DWIE.py

Files changed (1)
  1. DWIE.py +346 -0
DWIE.py ADDED
@@ -0,0 +1,346 @@
+ # I am trying to understand the following code. Do not use it for any purpose, as I do not support it.
+ # Use the original source from https://huggingface.co/datasets/DFKI-SLT/science_ie/raw/main/science_ie.py
+
+
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities on the level of the complete document."""
+
+ import datasets
+ import json
+ import requests
+ import hashlib
+ from collections import OrderedDict
+ from time import sleep
+
+ # Needed only for the (disabled) should_tokenize branch in _split_generators:
+ #from dataset.utils.tokenizer import TokenizerCPN
+
+
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @article{ZAPOROJETS2021102563,
+     title = {{DWIE}: An entity-centric dataset for multi-task document-level information extraction},
+     journal = {Information Processing & Management},
+     volume = {58},
+     number = {4},
+     pages = {102563},
+     year = {2021},
+     issn = {0306-4573},
+     doi = {https://doi.org/10.1016/j.ipm.2021.102563},
+     url = {https://www.sciencedirect.com/science/article/pii/S0306457321000662},
+     author = {Klim Zaporojets and Johannes Deleu and Chris Develder and Thomas Demeester}
+ }
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities
+ on the level of the complete document. This contrasts with currently dominant mention-driven approaches that start
+ from the detection and classification of named entity mentions in individual sentences. Also, the dataset was
+ randomly sampled from a news platform (English online content from Deutsche Welle), and the annotation scheme
+ was generated to cover that content. This makes the setting more realistic than in datasets with pre-determined
+ annotation schemes, and non-uniform sampling of content to obtain balanced annotations."""
+
+ # Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://github.com/klimzaporojets/DWIE"
+
+ # Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "Task_1": {
+         "url": "https://github.com/klimzaporojets/DWIE/archive/refs/heads/master.zip"
+     }
+ }
+
+
+ class DWIE(datasets.GeneratorBasedBuilder):
+     """
+     DWIE is conceived as an entity-centric dataset that describes interactions and properties of conceptual entities on the level of the complete document.
+     """
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="Task_1", version=VERSION,
+                                description="Relation classification"),
+     ]
+     DEFAULT_CONFIG_NAME = "Task_1"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "content": datasets.Value("string"),
+                 "tags": datasets.Value("string"),
+                 "mentions": [
+                     {
+                         "begin": datasets.Value("int32"),
+                         "end": datasets.Value("int32"),
+                         "text": datasets.Value("string"),
+                         "concept": datasets.Value("int32"),
+                         "candidates": datasets.Sequence(datasets.Value("string")),
+                         "scores": datasets.Sequence(datasets.Value("float32"))
+                     }
+                 ],
+                 "concepts": [
+                     {
+                         "concept": datasets.Value("int32"),
+                         "text": datasets.Value("string"),
+                         "keyword": datasets.Value("bool"),
+                         "count": datasets.Value("int32"),
+                         "link": datasets.Value("string"),
+                         "tags": datasets.Sequence(datasets.Value("string")),
+                     }
+                 ],
+                 "relations": [
+                     {
+                         "s": datasets.Value("int32"),
+                         "p": datasets.Value("string"),
+                         "o": datasets.Value("int32"),
+                     }
+                 ],
+                 "frames": [
+                     {
+                         "type": datasets.Value("string"),
+                         "slots": [{
+                             "name": datasets.Value("string"),
+                             "value": datasets.Value("int32")
+                         }]
+                     }
+                 ],
+                 "iptc": datasets.Sequence(datasets.Value("string"))
+             }
+         )
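+         # Shape sketch of one record under this schema (all values invented for illustration):
+         #     {"id": "DW_40663341", "content": "...", "tags": "...",
+         #      "mentions": [{"begin": 0, "end": 6, "text": "Berlin", "concept": 3,
+         #                    "candidates": ["Berlin"], "scores": [0.9]}],
+         #      "concepts": [{"concept": 3, "text": "Berlin", "keyword": True, "count": 2,
+         #                    "link": "...", "tags": ["..."]}],
+         #      "relations": [{"s": 3, "p": "based_in", "o": 5}],
+         #      "frames": [{"type": "...", "slots": [{"name": "...", "value": 3}]}],
+         #      "iptc": ["..."]}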
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+
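+         # Illustrative note (mine, not from the original script): download_and_extract
+         # maps each URL in the dict to a local extraction path, e.g. (cache path hypothetical)
+         #     {"url": "https://github.com/.../master.zip"} -> {"url": "/cache/extracted/abc123"}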
+         urls = _URLS[self.config.name]
+         # download and extract the archive for the selected configuration
+         downloaded = dl_manager.download_and_extract(urls)
+         article_id_to_url_json = json.load(open(downloaded['url'] + '/DWIE-master/data/article_id_to_url.json'))
+         ids_to_new_ids = dict()
+         # some ids seem to be different, for now only this one:
+         ids_to_new_ids[18525950] = 19026607
+
+         should_tokenize = False
+
+         # known differences between the current dw.com text and the text that was annotated;
+         # the fetched content is rewritten below to match the annotated version
+         content_to_new_content = {'DW_40663341': [('starting with Sunday\'s', 'starting Sunday\'s'),
+                                                   ('$1 million (€840,000)', 'one million dollars (840,000 euros)'),
+                                                   ('who kneel in protest during', 'to kneel in protest during')]}
+
+         articles_done = 0
+         total_articles = len(article_id_to_url_json)
+         problematic_articles = set()
+         problematic_hash_articles = set()
+         all_annos = []
+         for curr_article in article_id_to_url_json:
+             article_id = curr_article['id']
+             article_url = curr_article['url']
+             article_id_nr = int(article_id[3:])
+             if article_id_nr in ids_to_new_ids:
+                 article_url = article_url.replace(str(article_id_nr), str(ids_to_new_ids[article_id_nr]))
+             article_hash = curr_article['hash']
+             #print('fetching {} out of {} articles -'.format(articles_done, total_articles), curr_article)
+
+             annos_only_art_path = downloaded['url'] + '/DWIE-master/data/annos/' + curr_article['id'] + '.json'
+             annos_only_json = json.load(open(annos_only_art_path))
+             done = False
+             attempts = 0
+             # the original script wrapped the request in try/except and retried on failure;
+             # with that removed, this loop effectively runs once
+             while not done and attempts <= 3:
+                 # try:
+                 a = requests.get(article_url, allow_redirects=True).json()
+                 if 'name' in a:
+                     article_title = a['name']
+                 else:
+                     print('WARNING: no name detected for ', article_id)
+                     article_title = ''
+                 if 'teaser' in a:
+                     article_teaser = a['teaser']
+                 else:
+                     print('WARNING: no teaser detected for ', article_id)
+                     article_teaser = ''
+
+                 if 'text' in a:
+                     article_text = a['text']
+                 else:
+                     print('WARNING: no text detected for ', article_id)
+                     article_text = ''
+
+                 article_content_no_strip = '{}\n{}\n{}'.format(article_title, article_teaser, article_text)
+                 article_content = article_content_no_strip
+
+                 if article_id in content_to_new_content:
+                     for str_dw, str_dwie in content_to_new_content[article_id]:
+                         article_content = article_content.replace(str_dw, str_dwie)
+
+                 if 'mentions' in annos_only_json:
+                     for idx_mention, curr_mention in enumerate(annos_only_json['mentions']):
+                         # normalize non-breaking spaces and strip zero-width spaces
+                         curr_mention_text = curr_mention['text'].replace('\xa0', ' ')
+                         curr_mention_text = curr_mention_text.replace('\u200b', '')
+                         solved = False
+                         # fill in defaults for fields the Features schema expects
+                         if "begin" not in curr_mention:
+                             curr_mention["begin"] = 0
+                         if "end" not in curr_mention:
+                             curr_mention["end"] = 0
+                         if "text" not in curr_mention:
+                             curr_mention["text"] = ""
+                         if "concept" not in curr_mention:
+                             curr_mention["concept"] = 0
+                         if "candidates" not in curr_mention:
+                             curr_mention["candidates"] = []
+                         if "scores" not in curr_mention:
+                             curr_mention["scores"] = []
+
+                         if article_content[curr_mention['begin']:curr_mention['end']] != curr_mention_text:
+                             curr_mention_begin = curr_mention['begin']
+                             curr_mention_end = curr_mention['end']
+                             offset = 0
+
+                             # the original script tries several heuristics to re-align the span
+                             # before giving up; that logic is absent here, so `solved` stays False
+                             if not solved:
+                                 print('--------------------------------')
+                                 print('ERROR ALIGNMENT: texts don\'t match for {}: "{}" vs "{}", the textual content of '
+                                       'the files won\'t be complete '
+                                       .format(article_id, article_content[curr_mention['begin']:curr_mention['end']],
+                                               curr_mention_text))
+                                 print('--------------------------------')
+                                 problematic_articles.add(article_id)
+                             else:
+                                 if "candidates" not in curr_mention:
+                                     curr_mention["candidates"] = []
+
+                                 curr_mention['begin'] = curr_mention_begin - offset
+                                 curr_mention['end'] = curr_mention_end - offset
+                 if 'concepts' in annos_only_json:
+                     for idx_concept, curr_concept in enumerate(annos_only_json['concepts']):
+                         if "concept" not in curr_concept:
+                             curr_concept["concept"] = 0
+                         if "text" not in curr_concept:
+                             curr_concept["text"] = ""
+                         if "keyword" not in curr_concept:
+                             # the Features schema also declares "keyword"; default it like the other fields
+                             curr_concept["keyword"] = False
+                         if "count" not in curr_concept:
+                             curr_concept["count"] = 0
+                         if "link" not in curr_concept:
+                             curr_concept["link"] = ""
+                         if "tags" not in curr_concept:
+                             curr_concept["tags"] = []
+
+                 if not should_tokenize:
+                     annos_json = {'id': annos_only_json['id'],
+                                   'content': article_content,
+                                   'tags': annos_only_json['tags'],
+                                   'mentions': annos_only_json['mentions'],
+                                   'concepts': annos_only_json['concepts'],
+                                   'relations': annos_only_json['relations'],
+                                   'frames': annos_only_json['frames'],
+                                   'iptc': annos_only_json['iptc']}
+                     all_annos.append(annos_json)
+
+                     #print("annos_json",annos_json)
+                 else:
+                     # requires the TokenizerCPN import at the top; `tokenizer` is otherwise undefined
+                     tokenized = tokenizer.tokenize(article_content)
+                     tokens = list()
+                     begin = list()
+                     end = list()
+                     for curr_token in tokenized:
+                         tokens.append(curr_token['token'])
+                         begin.append(curr_token['offset'])
+                         end.append(curr_token['offset'] + curr_token['length'])
+                     annos_json = OrderedDict({'id': annos_only_json['id'],
+                                               'content': article_content,
+                                               'tokenization': OrderedDict({'tokens': tokens, 'begin': begin, 'end': end}),
+                                               'tags': annos_only_json['tags'],
+                                               'mentions': annos_only_json['mentions'],
+                                               'concepts': annos_only_json['concepts'],
+                                               'relations': annos_only_json['relations'],
+                                               'frames': annos_only_json['frames'],
+                                               'iptc': annos_only_json['iptc']})
+
+                 # verify that the re-fetched content matches the version that was annotated
+                 hash_content = hashlib.sha1(article_content.encode("UTF-8")).hexdigest()
+
+                 if hash_content != article_hash:
+                     print('!!ERROR - hash doesn\'t match for ', article_id)
+                     problematic_hash_articles.add(article_id)
+                 attempts += 1
+
+                 sleep(.1)
+                 done = True
+             if done:
+                 articles_done += 1
+
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "all_annos": all_annos,
+                 }
+             )
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, all_annos):
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         for data in all_annos:
+             yield data['id'], {
+                 "id": data['id'],
+                 "content": data['content'],
+                 "tags": data['tags'],
+                 "mentions": data['mentions'],
+                 "concepts": data['concepts'],
+                 "relations": data['relations'],
+                 "frames": data['frames'],
+                 "iptc": data['iptc']
+             }
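A minimal usage sketch (not part of this commit, and untested): assuming the script above is saved locally as DWIE.py, it can be loaded through the standard datasets API. "Task_1" is the only configuration and the script produces a single train split; note that it re-fetches every article from dw.com while building, so it needs network access and takes a while.

    import datasets

    # "Task_1" is the only config in BUILDER_CONFIGS; the script yields one TRAIN split
    dwie = datasets.load_dataset("./DWIE.py", "Task_1", split="train")

    example = dwie[0]
    print(example["id"])
    print(len(example["mentions"]), "mentions,", len(example["relations"]), "relations")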