khulnasoft committed
Commit: b39ef90
Parent: eb9e7bf

Create carb.py

Files changed (1):
  1. evaluation_data/carb/carb.py +386 -0
evaluation_data/carb/carb.py ADDED
@@ -0,0 +1,386 @@
'''
Usage:
   benchmark --gold=GOLD_OIE (--openiefive=OPENIE5 | --stanford=STANFORD_OIE | --ollie=OLLIE_OIE | --reverb=REVERB_OIE | --clausie=CLAUSIE_OIE | --openiefour=OPENIEFOUR_OIE | --props=PROPS_OIE | --tabbed=TABBED_OIE | --benchmarkGold=BENCHMARK_GOLD | --allennlp=ALLENNLP_OIE ) [--exactMatch | --predMatch | --lexicalMatch | --binaryMatch | --simpleMatch | --strictMatch] [--error-file=ERROR_FILE] [--binary]

Options:
  --gold=GOLD_OIE                 The gold reference Open IE file (by default, it should be under ./oie_corpus/all.oie).
  --benchmarkGold=BENCHMARK_GOLD  The benchmark's gold reference.
  # --out=OUTPUT_FILE             The output file, into which the precision recall curve will be written.
  --clausie=CLAUSIE_OIE           Read ClausIE format from file CLAUSIE_OIE.
  --ollie=OLLIE_OIE               Read OLLIE format from file OLLIE_OIE.
  --openiefour=OPENIEFOUR_OIE     Read Open IE 4 format from file OPENIEFOUR_OIE.
  --openiefive=OPENIE5            Read Open IE 5 format from file OPENIE5.
  --props=PROPS_OIE               Read PropS format from file PROPS_OIE.
  --reverb=REVERB_OIE             Read ReVerb format from file REVERB_OIE.
  --stanford=STANFORD_OIE         Read Stanford format from file STANFORD_OIE.
  --tabbed=TABBED_OIE             Read simple tab format file, where each line consists of:
                                  sent, prob, pred, arg1, arg2, ...
  --exactMatch                    Use exact match when judging whether an extraction is correct.
'''
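# Example command-line invocation (the prediction file name is illustrative,
# not part of the repository; the gold path is the documented default):
#   python carb.py --gold=./oie_corpus/all.oie --tabbed=my_system_output.txt --exactMatch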
from __future__ import division
import docopt
import string
import numpy as np
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
import re
import logging
import pdb
import ipdb
from collections import defaultdict
logging.basicConfig(level=logging.INFO)

from oie_readers.allennlpReader import AllennlpReader
from oie_readers.stanfordReader import StanfordReader
from oie_readers.ollieReader import OllieReader
from oie_readers.reVerbReader import ReVerbReader
from oie_readers.clausieReader import ClausieReader
from oie_readers.openieFourReader import OpenieFourReader
from oie_readers.openieFiveReader import OpenieFiveReader
from oie_readers.propsReader import PropSReader
from oie_readers.tabReader import TabReader
from oie_readers.benchmarkGoldReader import BenchmarkGoldReader

from oie_readers.goldReader import GoldReader
from matcher import Matcher
from operator import itemgetter
import pprint
from copy import copy
pp = pprint.PrettyPrinter(indent=4)

class Benchmark:
    ''' Compare the gold OIE dataset against a predicted equivalent '''
    def __init__(self, gold_fn):
        ''' Load gold Open IE, this will serve to compare against using the compare function '''
        gr = GoldReader()
        gr.read(gold_fn)
        self.gold = gr.oie

    def compare(self, predicted, matchingFunc, output_fn=None, error_file=None, binary=False):
        ''' Compare gold against predicted using a specified matching function.
            Outputs PR curve to output_fn '''

        y_true = []
        y_scores = []
        errors = []
        correct = 0
        incorrect = 0

        correctTotal = 0
        unmatchedCount = 0
        predicted = Benchmark.normalizeDict(predicted)
        gold = Benchmark.normalizeDict(self.gold)
        if binary:
            predicted = Benchmark.binarize(predicted)
            gold = Benchmark.binarize(gold)
        # gold = self.gold

        # taking all distinct values of confidences as thresholds
        confidence_thresholds = set()
        for sent in predicted:
            for predicted_ex in predicted[sent]:
                confidence_thresholds.add(predicted_ex.confidence)

        confidence_thresholds = sorted(list(confidence_thresholds))
        num_conf = len(confidence_thresholds)

        results = {}
        p = np.zeros(num_conf)
        pl = np.zeros(num_conf)
        r = np.zeros(num_conf)
        rl = np.zeros(num_conf)

        for sent, goldExtractions in gold.items():

            if sent in predicted:
                predictedExtractions = predicted[sent]
            else:
                predictedExtractions = []

            scores = [[None for _ in predictedExtractions] for __ in goldExtractions]

            # print("***Gold Extractions***")
            # print("\n".join([goldExtractions[i].pred + ' ' + " ".join(goldExtractions[i].args) for i in range(len(goldExtractions))]))
            # print("***Predicted Extractions***")
            # print("\n".join([predictedExtractions[i].pred + " ".join(predictedExtractions[i].args) for i in range(len(predictedExtractions))]))

            for i, goldEx in enumerate(goldExtractions):
                for j, predictedEx in enumerate(predictedExtractions):
                    score = matchingFunc(goldEx, predictedEx, ignoreStopwords=True, ignoreCase=True)
                    scores[i][j] = score

            # OPTIMISED GLOBAL MATCH
            sent_confidences = [extraction.confidence for extraction in predictedExtractions]
            sent_confidences.sort()
            prev_c = 0
            for conf in sent_confidences:
                c = confidence_thresholds.index(conf)
                ext_indices = []
                for ext_indx, extraction in enumerate(predictedExtractions):
                    if extraction.confidence >= conf:
                        ext_indices.append(ext_indx)

                recall_numerator = 0
                for i, row in enumerate(scores):
                    max_recall_row = max([row[ext_indx][1] for ext_indx in ext_indices], default=0)
                    recall_numerator += max_recall_row

                precision_numerator = 0

                selected_rows = []
                selected_cols = []
                num_precision_matches = min(len(scores), len(ext_indices))
                for t in range(num_precision_matches):
                    matched_row = -1
                    matched_col = -1
                    matched_precision = -1  # initialised to <0 so that it updates whenever precision is 0 as well
                    for i in range(len(scores)):
                        if i in selected_rows:
                            continue
                        for ext_indx in ext_indices:
                            if ext_indx in selected_cols:
                                continue
                            if scores[i][ext_indx][0] > matched_precision:
                                matched_precision = scores[i][ext_indx][0]
                                matched_row = i
                                matched_col = ext_indx

                    selected_rows.append(matched_row)
                    selected_cols.append(matched_col)
                    precision_numerator += scores[matched_row][matched_col][0]

                p[prev_c:c+1] += precision_numerator
                pl[prev_c:c+1] += len(ext_indices)
                r[prev_c:c+1] += recall_numerator
                rl[prev_c:c+1] += len(scores)

                prev_c = c+1

            # for indices beyond the maximum sentence confidence, len(scores) has to be added to the denominator of recall
            rl[prev_c:] += len(scores)

        prec_scores = [a/b if b > 0 else 1 for a, b in zip(p, pl)]
        rec_scores = [a/b if b > 0 else 0 for a, b in zip(r, rl)]

        f1s = [Benchmark.f1(p, r) for p, r in zip(prec_scores, rec_scores)]
        try:
            optimal_idx = np.nanargmax(f1s)
            optimal = (prec_scores[optimal_idx], rec_scores[optimal_idx], f1s[optimal_idx])
            return np.round(optimal, 3)
        except ValueError:
            # When there is no prediction, report zero precision/recall/F1,
            # keeping the same 3-tuple shape as the normal return value.
            optimal = (0, 0, 0)
            return np.round(optimal, 3)

        # In order to calculate auc, we need to add the point corresponding to precision=1, recall=0 to the PR-curve
        # temp_rec_scores = rec_scores.copy()
        # temp_prec_scores = prec_scores.copy()
        # temp_rec_scores.append(0)
        # temp_prec_scores.append(1)
        # # print("AUC: {}\t Optimal (precision, recall, F1): {}".format(np.round(auc(temp_rec_scores, temp_prec_scores), 3), np.round(optimal, 3)))
        #
        # with open(output_fn, 'w') as fout:
        #     fout.write('{0}\t{1}\t{2}\n'.format("Precision", "Recall", "Confidence"))
        #     for cur_p, cur_r, cur_conf in sorted(zip(prec_scores, rec_scores, confidence_thresholds), key=lambda cur: cur[1]):
        #         fout.write('{0}\t{1}\t{2}\n'.format(cur_p, cur_r, cur_conf))
        #
        # if len(f1s) > 0:
        #     return np.round(auc(temp_rec_scores, temp_prec_scores), 3), np.round(optimal, 3)
        # else:
        #     # When there is no prediction
        #     return 0, (0, 0, 0)

    @staticmethod
    def binarize(extrs):
        res = defaultdict(lambda: [])
        for sent, extr in extrs.items():
            for ex in extr:
                # Add (a1, r, a2)
                temp = copy(ex)
                temp.args = ex.args[:2]
                res[sent].append(temp)

                if len(ex.args) <= 2:
                    continue

                # Add (a1, r a2, a3 ...)
                for arg in ex.args[2:]:
                    temp = copy(ex)  # fresh copy per extra argument, so already-appended extractions are not mutated
                    temp.args = [ex.args[0]]
                    temp.pred = ex.pred + ' ' + ex.args[1]
                    words = arg.split()

                    # Add preposition of arg to rel
                    if words[0].lower() in Benchmark.PREPS:
                        temp.pred += ' ' + words[0]
                        words = words[1:]
                    temp.args.append(' '.join(words))
                    res[sent].append(temp)

        return res

    @staticmethod
    def f1(prec, rec):
        try:
            return 2*prec*rec / (prec+rec)
        except ZeroDivisionError:
            return 0

    @staticmethod
    def aggregate_scores_greedily(scores):
        # Greedy match: pick the prediction/gold match with the best f1 and exclude
        # them both, until nothing left matches. Each input cell is a [prec, rec]
        # pair. Returns precision and recall as score-and-denominator pairs.
        matches = []
        while True:
            max_s = 0
            gold, pred = None, None
            for i, gold_ss in enumerate(scores):
                if i in [m[0] for m in matches]:
                    # Those are already taken rows
                    continue
                for j, pred_s in enumerate(scores[i]):
                    if j in [m[1] for m in matches]:
                        # Those are used columns
                        continue
                    if pred_s and Benchmark.f1(*pred_s) > max_s:
                        max_s = Benchmark.f1(*pred_s)
                        gold = i
                        pred = j
            if max_s == 0:
                break
            matches.append([gold, pred])
        # Now that matches are determined, compute final scores.
        prec_scores = [scores[i][j][0] for i, j in matches]
        rec_scores = [scores[i][j][1] for i, j in matches]
        total_prec = sum(prec_scores)
        total_rec = sum(rec_scores)
        scoring_metrics = {"precision": [total_prec, len(scores[0])],
                           "recall": [total_rec, len(scores)],
                           "precision_of_matches": prec_scores,
                           "recall_of_matches": rec_scores
                           }
        return scoring_metrics

    # Helper functions:
    @staticmethod
    def normalizeDict(d):
        return dict([(Benchmark.normalizeKey(k), v) for k, v in d.items()])

    @staticmethod
    def normalizeKey(k):
        # return Benchmark.removePunct(unicode(Benchmark.PTB_unescape(k.replace(' ', '')), errors='ignore'))
        return Benchmark.removePunct(str(Benchmark.PTB_unescape(k.replace(' ', ''))))

    @staticmethod
    def PTB_escape(s):
        for u, e in Benchmark.PTB_ESCAPES:
            s = s.replace(u, e)
        return s

    @staticmethod
    def PTB_unescape(s):
        for u, e in Benchmark.PTB_ESCAPES:
            s = s.replace(e, u)
        return s

    @staticmethod
    def removePunct(s):
        return Benchmark.regex.sub('', s)

    # CONSTANTS
    regex = re.compile('[%s]' % re.escape(string.punctuation))

    # Penn treebank bracket escapes
    # Taken from: https://github.com/nlplab/brat/blob/master/server/src/gtbtokenize.py
    PTB_ESCAPES = [('(', '-LRB-'),
                   (')', '-RRB-'),
                   ('[', '-LSB-'),
                   (']', '-RSB-'),
                   ('{', '-LCB-'),
                   ('}', '-RCB-'),]

    PREPS = ['above','across','against','along','among','around','at','before','behind','below','beneath','beside','between','by','for','from','in','into','near','of','off','on','to','toward','under','upon','with','within']

def f_beta(precision, recall, beta=1):
    """
    Get F_beta score from precision and recall.
    """
    beta = float(beta)  # Make sure that results are in float
    return (1 + pow(beta, 2)) * (precision * recall) / ((pow(beta, 2) * precision) + recall)


if __name__ == '__main__':
    args = docopt.docopt(__doc__)
    logging.debug(args)

    if args['--allennlp']:
        predicted = AllennlpReader()
        predicted.read(args['--allennlp'])

    if args['--stanford']:
        predicted = StanfordReader()
        predicted.read(args['--stanford'])

    if args['--props']:
        predicted = PropSReader()
        predicted.read(args['--props'])

    if args['--ollie']:
        predicted = OllieReader()
        predicted.read(args['--ollie'])

    if args['--reverb']:
        predicted = ReVerbReader()
        predicted.read(args['--reverb'])

    if args['--clausie']:
        predicted = ClausieReader()
        predicted.read(args['--clausie'])

    if args['--openiefour']:
        predicted = OpenieFourReader()
        predicted.read(args['--openiefour'])

    if args['--openiefive']:
        predicted = OpenieFiveReader()
        predicted.read(args['--openiefive'])

    if args['--benchmarkGold']:
        predicted = BenchmarkGoldReader()
        predicted.read(args['--benchmarkGold'])

    if args['--tabbed']:
        predicted = TabReader()
        predicted.read(args['--tabbed'])

    if args['--binaryMatch']:
        matchingFunc = Matcher.binary_tuple_match

    elif args['--simpleMatch']:
        matchingFunc = Matcher.simple_tuple_match

    elif args['--exactMatch']:
        matchingFunc = Matcher.argMatch

    elif args['--predMatch']:
        matchingFunc = Matcher.predMatch

    elif args['--lexicalMatch']:
        matchingFunc = Matcher.lexicalMatch

    elif args['--strictMatch']:
        matchingFunc = Matcher.tuple_match

    else:
        matchingFunc = Matcher.binary_linient_tuple_match

    b = Benchmark(args['--gold'])
    # out_filename = args['--out']

    optimal_f1_point = b.compare(predicted=predicted.oie,
                                 matchingFunc=matchingFunc,
                                 error_file=args["--error-file"],
                                 binary=args["--binary"])

    print("Precision: {}, Recall: {}, F1-score: {}".format(optimal_f1_point[0], optimal_f1_point[1], optimal_f1_point[2]))
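For programmatic use, the same pipeline that the script's __main__ block wires together can be driven directly from Python. The sketch below is illustrative only: the gold and prediction paths are placeholders, and it assumes the oie_readers and matcher modules that ship alongside carb.py are importable from the working directory.

    from carb import Benchmark
    from oie_readers.tabReader import TabReader
    from matcher import Matcher

    # Placeholder paths: substitute a real gold file and a system-output file
    # in the simple tab format (sent, prob, pred, arg1, arg2, ...).
    gold_path = "gold/dev.tsv"
    pred_path = "my_system_output.txt"

    reader = TabReader()
    reader.read(pred_path)            # fills reader.oie with predicted extractions

    benchmark = Benchmark(gold_path)  # loads gold extractions via GoldReader
    precision, recall, f1 = benchmark.compare(
        predicted=reader.oie,
        matchingFunc=Matcher.binary_linient_tuple_match,  # the script's default matcher
        binary=False,
    )
    print("Precision: {}, Recall: {}, F1-score: {}".format(precision, recall, f1))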