{
    "paper_id": "M98-1022",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T03:16:01.655508Z"
    },
    "title": "DESCRIPTION OF THE UPENN CAMP SYSTEM AS USED FOR COREFERENCE",
    "authors": [
        {
            "first": "Breck",
            "middle": [],
            "last": "Baldwin",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Tom",
            "middle": [],
            "last": "Morton",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": "tsmortong@linc.cis.upenn.edu"
        },
        {
            "first": "Amit",
            "middle": [],
            "last": "Bagga",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Jason",
            "middle": [],
            "last": "Baldridge",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Raman",
            "middle": [],
            "last": "Chandraseker",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Alexis",
            "middle": [],
            "last": "Dimitriadis",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Kieran",
            "middle": [],
            "last": "Snyder",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        },
        {
            "first": "Magdalena",
            "middle": [],
            "last": "Wolska",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Cognitive Science",
                "location": {
                    "addrLine": "3401 Walnut St. 400C Philadelphia",
                    "postCode": "19104",
                    "region": "PA",
                    "country": "USA"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "",
    "pdf_parse": {
        "paper_id": "M98-1022",
        "_pdf_hash": "",
        "abstract": [],
        "body_text": [
            {
                "text": "In this paper we present some advances made to the CAMP system since it's inception for MUC-6. Although the infrastructure has been completely re-implemented, the architecture has remained fundamentally the same consequently we will focus some advances we h a v e made in our understanding of coreference and then discuss the performance of the system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Scoring the performance of a system is an extremely important aspect of coreference algorithm performance. The score for a particular run is the single strongest measure of how well the system is performing and it can strongly determine directions for further improvements. In this paper, we present several di erent scoring algorithms and detail their respective strengths and weaknesses for varying classes of processing. In particular, we describe and analyze the coreference scoring algorithm used to evaluate the coreference systems in the sixth Message Understanding Conference 95 . We also present two shortcomings of this algorithm. In addition, we present a new coreference scoring algorithm, our B-CUBED algorithm, which w as designed to overcome the shortcomings of the MUC-6 algorithm.",
                "cite_spans": [
                    {
                        "start": 584,
                        "end": 586,
                        "text": "95",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "Scoring in MUC-6 7: Vilain et al.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "Prior to Vilain et al.'s coreference scoring algorithm Vilain, 95 there had been a graph based scoring algorithm Sundheim et al. which produced unintuitive results for even very simple cases. Vilain, 95 substituted a model-theoretic scoring algorithm which produced very intuitive results for the type of scoring desired in MUC-6. This algorithm computes computes the recall error by taking each equivalence class S de ned by the links in the answer key and determining the number of coreference links m that would have to be added to the response to place all the entities in S into the same equivalence class in the response. Recall error then is the sum of m's divided by the number of links in the key. Precision error is computed by reversing the roles of the answer key and the response.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "The full details of the algorithm are discussed next.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "The Model Theoretic Approach To The Vilain et. al Algorithm 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "1 The exposition of this scorer has been taken nearly entirely from Vilain, 95 In the description of the model theoretic algorithm, the terms key,\" and response\" are de ned in the following way:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "key refers to the manually annotated coreference chains the truth. response refers to the coreference chains output by a system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "An equivalence set is the transitive closure of a coreference chain. The algorithm computes recall in the following way.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "First, let S be an equivalence set generated by the key, and let R 1 : : : R m be equivalence classes generated by the response. Then we de ne the following functions over S:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "pS is a partition of S relative to the response. Each subset of S in the partition is formed by intersecting S and those response sets R i that overlap S. Note that the equivalence classes de ned by the response may include implicit singleton sets -these correspond to elements that are mentioned in the key but not in the response. For example, say the key generates the equivalence class S = fA B C D g , and the response is simply A-B . The relative partition pS is then fA B g f C g and fDg.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "cS is the minimal number of correct\" links necessary to generate the equivalence class S. It is clear that cS is one less than the cardinality of S, i.e., cS = jSj , 1 :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "mS is the number of missing\" links in the response relative to the key set S. As noted above, this is the number of links necessary to fully reunite any components of the pS partition. We note that this is simply one fewer than the number of elements in the partition, that is, mS = jpSj , 1 :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "Looking in isolation at a single equivalence class in the key, the recall error for that class is just the number of missing links divided by the number of correct links, i.e., mS cS :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "Recall in turn is cS , mS cS ; which equals jSj , 1 , jpSj , 1 jSj , 1 :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "The whole expression can now be simpli ed to jSj , j pSj jSj , 1 :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "1 Finally, extending this measure from a single key equivalence class to an entire set T simply requires summing over the key equivalence classes. That is,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "R T = P jS i j , j pS i j P jS i j , 1 : 2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "Precision is computed by switching the roles of the key and response in the above formulation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Scoring Coreference Output",
                "sec_num": null
            },
            {
                "text": "For example, let the key contain 3 equivalence classes as shown in Figure 1 . Suppose Figure 2 shows a response. From Figure 3I , the three equivalence classes in the truth, S 1 , S 2 , and S 3 , are f1, 2, 3, 4, 5g, f6, 7g, and f8, 9, A, B, Cg respectively. And the partitions pS 1 , pS 2 , and pS 3 , with respect to the response, shown in Figure 3II , are f1, 2, 3, 4, 5g, f6, 7g, and f8, 9, A, B, Cg respectively. Using equation 2, the recall can now be calculated in the following way: Recall = 5 , 1 + 2 , 1 + 5 , 1 5 , 1 + 2 , 1 + 5 , 1 = 9 = 9 = 100 :",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 67,
                        "end": 75,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 86,
                        "end": 94,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 118,
                        "end": 127,
                        "text": "Figure 3I",
                        "ref_id": "FIGREF2"
                    },
                    {
                        "start": 342,
                        "end": 352,
                        "text": "Figure 3II",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Example",
                "sec_num": null
            },
            {
                "text": "Similarly, if the roles of the key and the response are reversed, then the equivalence classes in the truth, S 1 , and S 2 , are f1, 2, 3, 4, 5g and f6, 7, 8, 9, A, B, Cg , and the partitions, pS 1 , and pS 2 , are f1, 2, 3, 4, 5g and f6, 7g f 8, 9, A, B, Cg respectively Figure 3III . The precision can now be calculated as: Precision = 5 , 1 + 7 , 2 5 , 1 + 7 , 1 = 9 = 10 = 90 :",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 272,
                        "end": 283,
                        "text": "Figure 3III",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Example",
                "sec_num": null
            },
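To make the link-based computation concrete, the following is a minimal Python sketch of the scorer as described above. It is our own illustration rather than the official MUC scorer, and the names (muc_score, key, response_1) are ours; on the example just worked it reproduces the 100% recall and 90% precision figures.

# Minimal sketch of the Vilain et al. link-based scorer (illustrative only).
def muc_score(key, response):
    """key, response: lists of sets of mention ids (equivalence classes)."""
    def directional(gold, system):
        num = den = 0
        for s in gold:
            # p(S): intersect S with overlapping system classes, then add
            # implicit singletons for mentions the system did not place anywhere.
            parts = [s & r for r in system if s & r]
            covered = set().union(*parts)
            parts += [{e} for e in s - covered]
            num += len(s) - len(parts)      # |S| - |p(S)|
            den += len(s) - 1               # |S| - 1
        return num / den if den else 1.0

    # recall uses key classes; precision swaps the roles of key and response
    return directional(key, response), directional(response, key)

key = [{1, 2, 3, 4, 5}, {6, 7}, {8, 9, 'A', 'B', 'C'}]       # Figure 1 (truth)
response_1 = [{1, 2, 3, 4, 5}, {6, 7, 8, 9, 'A', 'B', 'C'}]  # Figure 2 (response)
recall, precision = muc_score(key, response_1)
print(f"recall={recall:.0%} precision={precision:.0%}")      # recall=100% precision=90%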
            {
                "text": "Despite the advances of the model-theoretic scorer, it yields unintuitive results for some tasks. There are two main reasons.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "1. The algorithm does not give any credit for separating out singletons entities that occur in chains consisting only of one element, the entity itself from other chains which h a v e been identi ed. This follows from the convention in coreference annotation of not identifying those entities that are markable as possibly coreferent with other entities in the text. Rather, entities are only marked as being coreferent if they actually are coreferent with other entities in the text. This potential shortcoming could be easily enough overcome with di erent annotation conventions and with minor changes to the algorithm, but the decision to annotate singletons is a bit of a philosophical issue. On the one hand singletons do form equivalence classes, and those equivalence classes are signi cant in that they are NOT coreferent with another phrase in the text and they may play an important role in other equivalence classes out side the immediate text as in cross document coreference. On the other hand, if coreference is viewed as being about the relations between entities, then perhaps is makes little sense to annotate and score singletons. 2. All errors are considered to be equal. The MUC scoring algorithm penalizes the precision numbers equally for all types of errors. It is our position that, for certain tasks, some coreference errors do more damage than others. Consider the following examples: suppose the truth contains two large coreference chains and one small one Figure 1 , and suppose Figures 2 and 4 show t w o di erent responses. We will explore two di erent precision errors. The rst error will connect one of the large coreference chains with the small one Figure 2 . The second error occurs when the two large coreference chains are related by the errant coreferent link Figure 4 . It is our position that the second error is more damaging because, compared to the rst error, the second error makes more entities coreferent that should not be. This distinction is not re ected in the Vilain, 95 scorer which scores both responses as having a precision score of 90 Figure 6 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1485,
                        "end": 1493,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 1684,
                        "end": 1692,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 1799,
                        "end": 1807,
                        "text": "Figure 4",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 2092,
                        "end": 2100,
                        "text": "Figure 6",
                        "ref_id": "FIGREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "Revisions to the Algorithm: Our B-CUBED Algorithm 2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "Our B-CUBED algorithm was designed to overcome the two shortcomings of the Vilain et. al algorithm. Instead of looking at the links produced by a system, our algorithm looks at the presence absence of entities relative to each of the other entities in the equivalence classes produced. Therefore, we compute the precision and recall numbers for each e n tity in the document, which are then combined to produce nal precision and recall numbers for the entire output. The formal model-theoretic version of our algorithm is discussed in the next section. Precision i = number of correct elements in the output chain containing entity i number of elements in the output chain containing entity i",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "Recall i = number of correct elements in the output chain containing entity i number of elements in the truth chain containing entity i For an entity, i, w e de ne the precision and recall with respect to that entity in Figure 5 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 220,
                        "end": 228,
                        "text": "Figure 5",
                        "ref_id": "FIGREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "The nal precision and recall numbers are computed by the following two formulae:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "Final Precision = N X i=1 w i Precision i Final Recall = N X i=1 w i Recall i",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "where N is the numberofentities in the document, and w i is the weight assigned to entity i in the document.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "It should be noted that the B-CUBED algorithm implicitly overcomes the rst shortcoming of the Vilain et. al algorithm by calculating the precision and recall numbers for each e n tity in the document irrespective o f whether an entity is part of a coreference chain. Di erent weighting schemes produce di erent versions of the algorithm. The choice of the weighting scheme is determined by the task for which the algorithm is going to be used.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
            {
                "text": "When coreference or cross-document coreference is used for an information extraction task, where information about every entity in an equivalence class is important, the weighting scheme assigns equal weights for every entity i. For example, the weight assigned to each e n tity in Figure 1 is 1 12. As shown in Figure 6 , the precision scores for responses in Figures 2 and 4 are 16 21 76 and 7 12 58 respectively, using equal weights for all entities. Recall for both responses is 100. It should be noted that the algorithm penalizes the precision numbers more for the error made in Figure 4 than the one made in Figure 2 . As evident from the two examples, this version of the B-CUBED algorithm using equal weights for each e n tity is a precision oriented algorithm i.e. it is sensitive to precision errors.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 282,
                        "end": 290,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 312,
                        "end": 320,
                        "text": "Figure 6",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 361,
                        "end": 376,
                        "text": "Figures 2 and 4",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 585,
                        "end": 593,
                        "text": "Figure 4",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 615,
                        "end": 623,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
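The per-entity computation and the equal-entity weighting just described can be sketched in a few lines of Python; this is our own illustration (the names b_cubed, chain_of, and the example variables are ours, not part of the CAMP system or an official scorer). With w_i = 1/12 it reproduces the 16/21 (76%) and 7/12 (58%) precision figures and the 100% recall for the two example responses.

# Minimal sketch of B-CUBED with a pluggable per-entity weight function (illustrative).
def chain_of(mention, chains):
    """Return the chain containing the mention, or an implicit singleton."""
    for c in chains:
        if mention in c:
            return c
    return {mention}

def b_cubed(key, response, weight):
    """weight(m, chains) -> w_i, computed against the chains on the relevant side
    (response chains for precision, key chains for recall)."""
    mentions = set().union(*key) | set().union(*response)
    precision = recall = 0.0
    for m in mentions:
        out_chain, true_chain = chain_of(m, response), chain_of(m, key)
        correct = len(out_chain & true_chain)
        precision += weight(m, response) * correct / len(out_chain)   # w_i * Precision_i
        recall += weight(m, key) * correct / len(true_chain)          # w_i * Recall_i
    return precision, recall

key = [{1, 2, 3, 4, 5}, {6, 7}, {8, 9, 'A', 'B', 'C'}]               # Figure 1
example_1 = [{1, 2, 3, 4, 5}, {6, 7, 8, 9, 'A', 'B', 'C'}]           # Figure 2
example_2 = [{1, 2, 3, 4, 5, 8, 9, 'A', 'B', 'C'}, {6, 7}]           # Figure 4

equal = lambda m, chains: 1.0 / 12                                    # w_i = 1/N, N = 12
for resp in (example_1, example_2):
    p, r = b_cubed(key, resp, equal)
    print(f"P={p:.0%} R={r:.0%}")        # P=76% R=100%, then P=58% R=100%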
            {
                "text": "But, for an information retrieval IR task, or a web search task, where an user is presented with classes of documents that pertain to the same entity, the weighting scheme assigns equal weights to each equivalence class. The weight for each entity within an equivalence class is computed by dividing the weight of the equivalence class by the number of entities in that class. Recall is calculated by assigning equal weights to each equivalence class in the truth while precision is calculated by assigning equal weights to each equivalence class in the response. For example, in Figure 2 , the weighting scheme assigns a weight of 1 10 to each e n tity in the rst equivalence class, and a weight of 1 14 to each entity in the second equivalence class, when calculating precision. Using this weighting scheme, the precision scores for responses in Figures 2 and 4 are 39 49 79.6 and 3 4 75 respectively. Recall for both responses is 100.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 580,
                        "end": 588,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 848,
                        "end": 863,
                        "text": "Figures 2 and 4",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Shortcomings of the Vilain et. al Algorithm",
                "sec_num": null
            },
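Continuing the b_cubed sketch above (again our own illustration), this class-based weighting corresponds to a weight function that gives each equivalence class on the relevant side an equal share and splits that share evenly among the class's entities; it reproduces the 39/49 (79.6%) and 3/4 (75%) precision figures just given, with 100% recall for both responses.

# Class-based weighting for the b_cubed sketch above: response classes for precision,
# key classes for recall, each class weighted equally and split among its entities.
by_class = lambda m, chains: 1.0 / (len(chains) * len(chain_of(m, chains)))

for resp in (example_1, example_2):
    p, r = b_cubed(key, resp, by_class)
    print(f"P={p:.1%} R={r:.0%}")        # P=79.6% R=100%, then P=75.0% R=100%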
            {
                "text": "MUC Algorithm B-CUBED Algorithm equal weights for every entity P: 9 10 90 P: 1 12 * 5 5 + 5 5 + 5 5 + 5 5 + 5 5 + 2 7 + 2 7 + 5 7 + 5 7 + 5 7 + 5 7 + 5 7 = 16 21 76 Example 1 R: 9 9 100 R: 1 12 * 5 5 + 5 5 + 5 5 + 5 5 + 5 5 + 2 2 + 2 2 + 5 5 + 5 5 + 5 5 + 5 5 + 5 5 = 100 P: 9 10 90 P: 1 12 * 5 10 + 5 10 + 5 10 + 5 10 + 5 10 + 2 2 + 2 2 + 5 10 + 5 10 + 5 10 + 5 10 + 5 10 = 7 12 58 Example 2 R: 9 9 100 R: 1 12 * 5 5 + 5 5 + 5 5 + 5 5 + 5 5 + 2 2 + 2 2 + 5 5 + 5 5 + 5 5 + 5 5 + 5 5 = 100 Comparing these numbers to the ones obtained by using the version of the algorithm which assigns equal weights to each e n tity, one can see that the current v ersion is much less sensitive to precision errors. Although the current v ersion of the algorithm does penalize the precision numbers for the error in Figure 4 more than the error made in Figure 2 , it is less severe than the earlier version.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 801,
                        "end": 809,
                        "text": "Figure 4",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 838,
                        "end": 846,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Output",
                "sec_num": null
            },
            {
                "text": "Let S be an equivalence set generated by the key, and let R 1 : : : R m be equivalence classes generated by the response. Then we de ne the following functions over S: pS is a partition of S with respect to the response, i.e. pS is a set of subsets of S formed by intersecting S with those response sets R i that overlap S. Let pS = fP 1 ; P 2 ; : : : ; P m g where each P j is a subset of S. m j S is the number of elements that are missing from each P j relative to the key set S. Therefore, m j S = jSj , j P j j :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Model Theoretic Approach To The B-CUBED Algorithm",
                "sec_num": null
            },
            {
                "text": "Since the B-CUBED algorithm looks at the presence absence of entities relative to each of the other entities, the number of missing entities in an entire equivalence set is calculated by adding the number of missing entities with respect to each e n tity in that equivalence set. Therefore, the number of missing entities for the entire set S is m X j=1 X for each e 2 P j m j S :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Model Theoretic Approach To The B-CUBED Algorithm",
                "sec_num": null
            },
            {
                "text": "The recall error is simply the number of missing entities divided by the numberofentities in the equivalence set, i.e., m j S jSj :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Model Theoretic Approach To The B-CUBED Algorithm",
                "sec_num": null
            },
            {
                "text": "Since the algorithm looks at each e n tity in an equivalence set, the recall error for that entire set is 1 jSj m ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Model Theoretic Approach To The B-CUBED Algorithm",
                "sec_num": null
            },
            {
                "text": "The recall for the set is one minus this error; the whole expression can now be simplified to 1 - Σ_{j=1}^{m} Σ_{e ∈ P_j} (|S| - |P_j|) / |S|^2.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "Moreover, the measure can be extended from a single key equivalence class to a set T = fS 1 ; S 2 ; : : : ; S n g of equivalence classes. Therefore, the recall R i for an equivalence class S i equals",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "R_i = 1 - Σ_{j=1}^{m} Σ_{e ∈ P_ij} (|S_i| - |P_ij|) / |S_i|^2,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "where P ij is the jth element of the partition pS i , and, hence, is a subset of S i .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "The recall numbers calculated for each class can now be combined in various ways to produce the nal recall. Di erent versions of the algorithm are obtained by using di erent combination strategies. If equal weights are assigned to each class, the version of the algorithm produced is exactly the same as the version of the informal algorithm which assigns equal weights to each class, as described in the previous section. In other words, the nal recall is an average of the recall numbers for each equivalence class, i.e.,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "R T = 1 n n X i=1 R i :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "To obtain the version of the informal algorithm which assigns equal weights to each e n tity, the nal recall is computed by calculating the weighted average of the recall numbers for each equivalence class where the weights are decided by the numberofentities in each class, i.e.,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "R T = n X i=1 jS i j P n j=1 jS j j R i :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "Finally, as in the case of the Vilain et. al algorithm, the precision numbers are calculated by reversing the roles of the key and the response in the above formulation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": ":",
                "sec_num": null
            },
            {
                "text": "The Vilain et. al algorithm is useful for applications tasks that use single coreference relations at a time rather than resulting equivalence classes. For our development in the coreference task, the two algorithms provide distinct perspectives on system performance. Vilain et. al provide a strong diagnostic for errors that re ect pairwise decisions done by the system. Our visual display techniques emphasize just this sort of processing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Task Relative Strengths and Weaknesses of the Two Algorithms",
                "sec_num": null
            },
            {
                "text": "Our total score under the Vilain algorithm, with a somewhat fuzzier extent requirement and stricter requirement for links is 81 precision and 45 recall.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Task Relative Strengths and Weaknesses of the Two Algorithms",
                "sec_num": null
            },
            {
                "text": "The same les using the B3 algorithm resulted in 78 precision and 31 recall. The precision numbers are comparable which indicates that our goal of high precision is supported under both views of the data. The 14 drop in recall was however unexpected. The reason is fairly straight forward our system is not doing a good job of relating large equivalence classes. This is the converse of penalizing the system for positing incorrect links that result in larger equivalent classes than smaller ones.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Task Relative Strengths and Weaknesses of the Two Algorithms",
                "sec_num": null
            },
            {
                "text": "The drop in recall in the B3 scorer also suggests a distinct class of coreference resolution procedure that we could investigate growing of large equivalence classes via an entity merging model which eschewed the standard left-to-right processing strategy of most coreference resolution systems. If such a procedure can reliably grow medium sized equivalence classes into large ones, then the recall gures will improve under the B3 scorer. The Vilain et. al scorer notes no di erence between correctly relating two singleton equivalence classes and correctly relating two large equivalence classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Task Relative Strengths and Weaknesses of the Two Algorithms",
                "sec_num": null
            },
            {
                "text": "Since large equivalence classes tend to include topically signi cant e n tities for documents, correctly identifying them is perhaps crucial to applications like summarization and information extraction.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Task Relative Strengths and Weaknesses of the Two Algorithms",
                "sec_num": null
            },
            {
                "text": "The below analysis re ects how w e assesed the individual contributions of the components during development. Since the B3 algorithm was not yet implemented, we did not use it for development.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Developing with the Vilain et al algorithm",
                "sec_num": null
            },
            {
                "text": "Our explicit goal was to maximize recall at a precision level of 80. We feel that this level of precision provides enough accuracy to drive a range of coreference dependent applications most important for us was query sensitive text summarization. Our overall approach was to break down coreference resolution into concrete subprograms that resolved a limited class of coreference well. Each component could be scored separately by either running it in isolation, or by blocking coreference from subsequent processes. Below w e discuss each component in the order of execution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Developing with the Vilain et al algorithm",
                "sec_num": null
            },
            {
                "text": "A problematic aspect of any new genre of data is the existence of idiosyncratic classes of coreference and the MUC-7 data was particularly troubling since very oddly formatted text was fair game for coreference. For example, the strings`HUGHES' and`FCC' in` SLUG fv=tia-z BC-HUGHES-FCC-BLOOM SLUG ' are coreferent with the same strings in` PREAMBLE BC-HUGHES-FCC-BLOOM...' which w as outside the scope of our linguistic tools. Simple programs were written to recognize this sort of coreference. The performance by the Vilain scorer is 4.2 recall 67.5 precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Genre Speci c Coreference",
                "sec_num": null
            },
            {
                "text": "This performance is well below what we observed in training data the precision was 85-90 for similarly sized collections. Perhaps part of the problem was that we never quite grasped why some but not all these all CAPS strings were not coreferent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Genre Speci c Coreference",
                "sec_num": null
            },
            {
                "text": "La Hack is a carry over from our original MUC-6 system, and it is responsible for identi cation of proper noun coreference. This component is indirectly helped by IBM's named entity tool 'Textract' which nd extents of named entities in addition to assigning them properties like 'is person', 'is company'. It is the foundation upon which our coreference annotation is built mistakes here can be devastating for the rest of the system. In MUC-6, La Hack performed at 29 Recall and 86 precision, but it faired somewhat worse in MUC-7 with, 24.0 precision 80.0 recall.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "La Hack 3",
                "sec_num": null
            },
            {
                "text": "We observed that the New York Times data had far less regular honori c use and corporate designator use than the MUC-6 corpus based on Wall Street Journal. As a result, there were fewer reliable indicators of proper names.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "La Hack 3",
                "sec_num": null
            },
            {
                "text": "This component asserts coreference between phrases that are in appositive relations or that are in predicate nominal relations. We w ere quite surprised at how poorly this component performed since we expected performance to be above the 80 precision cuto . Our actual performance is 3.3 precision 64.0 recall.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Highly Syntactic Coreference",
                "sec_num": null
            },
            {
                "text": "Quoted speech has idiosyncratic patterns of use that are better solved out side the scope of our standard coreference resolution module. We expected performance to be above 90 precision and were pleased with 2.6 recall and 86.8 precision. This module is a good example of how the coreference problem can be fruitfully broken up into sub-parts of individually high precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Quoted Speech",
                "sec_num": null
            },
            {
                "text": "CogNIAC is the most general purpose coreference resolution component of the system. It features a fairly sophisticated salience model and property con dence model to preorder order the set of candidate antecedents. The importance of the preorder is that it allows ties between equally salient a n tecedents and in the case of ties the anaphor is not resolved.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CogNIAC Proper Noun Resolution",
                "sec_num": null
            },
            {
                "text": "When de ciencies were noted with the output of LaHack, the simplest solution was to add a proper noun resolution component to CogNIAC. In the end this addition added a bit of recall but with fairly low precision with 1.2 recall and 65.2 precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CogNIAC Proper Noun Resolution",
                "sec_num": null
            },
            {
                "text": "Common noun coreference is an important part of coreference, but it is very di cult to accurately resolve. Our MUC-6 system had fairly poor performance with 10 recall and a precision of 48. We w ere surprised with an increase in performance over training data 78 precision with 7.1 recall 90.7 precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CogNIAC Common Noun Resolution",
                "sec_num": null
            },
            {
                "text": "Common noun anaphora is probably one of the most trying classes of coreference to annotate as a human. This is due to many di cult judgment calls required on the part of the human judges, and this was re ected in the consistency of annotation in the training data. We found it challenging to develop on the training data because the system would nd what we considered to be reasonable instances of coreference that the annotator had not made coreferent. We believe that common noun anaphora is a large source of inter-annotator disagreement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CogNIAC Common Noun Resolution",
                "sec_num": null
            },
            {
                "text": "The pronominal system performed under our goal of 80 precision. In training, we found that we w ere constantly balancing the ability of pronouns to i refer uniquely, and ii have all entities have the correct property. We adopted a property con dence model that encouraged recall over precision. This meant that a proper noun like 'Mrs. Fields' would be both potentially an antecedent to feminine pronouns, and pronouns that referred to companies. A salience model was then applied to these overloaded entities and pronominal resolution served to be a word-sense disambiguation problem in addition to a coreference resolution problem. Our performance was 4.5 recall and 70.0 precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CogNIAC Pronouns",
                "sec_num": null
            },
            {
                "text": "One of the stronger conclusions that we h a v e come to regarding coreference is that there is an apparent linear trade-o between precision and recall given the performance of other systems with the coreference task. Our suspicion is that the same can be said with the B3 scorer but that will have t o a w ait experimentation. This is a positive result in its self because we now can choose from multiple types of coreference systems depending on our task. We consider high precision systems to be more useful for the types of systems that we build, but, it has not been clear that high precision systems were possible.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
            {
                "text": "We also believe that the space of high precision 'contributors' to coreference is not exhausted. We doubt that there are any 10 recall 80 precision subcomponents that we h a v e not already explored, but there are certainly 1-5 recall opportunities. How w ell they will sum to the recall of the entire system is unknown, but there is room for improvement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            },
            {
                "text": "The main idea of this algorithm was initially put forth by Alan W. Biermann of Duke University.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "How Much Processing Is Required for Cross-Document Coreference?",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Bagga",
                        "suffix": ""
                    },
                    {
                        "first": "Amit",
                        "middle": [],
                        "last": "Bagga",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bagga, 98a Bagga, Amit. How Much Processing Is Required for Cross-Document Coreference?, this volume.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A Model-Theoretic Coreference Scoring Scheme",
                "authors": [
                    {
                        "first": "Marc",
                        "middle": [],
                        "last": "Vilain",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the Sixth Message Understanding Conference MUC-6",
                "volume": "",
                "issue": "",
                "pages": "45--52",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vilain, Marc, et al. A Model-Theoretic Coreference Scoring Scheme, Proceedings of the Sixth Message Understanding Conference MUC-6, pp. 45-52, November 1995.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "95 Proceedings of the Sixth Message Understanding Conference MUC-6, N o v ember 1995",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Muc-6",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "MUC-6, 95 Proceedings of the Sixth Message Understanding Conference MUC-6, N o v ember 1995, San Mateo: Morgan Kaufmann.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "type_str": "figure",
                "text": "Figure 1: Truth",
                "num": null
            },
            "FIGREF1": {
                "uris": null,
                "type_str": "figure",
                "text": "Figure I",
                "num": null
            },
            "FIGREF2": {
                "uris": null,
                "type_str": "figure",
                "text": "Equivalence Classes and Their Partitions For Example 1",
                "num": null
            },
            "FIGREF3": {
                "uris": null,
                "type_str": "figure",
                "text": "Response: Example 2",
                "num": null
            },
            "FIGREF4": {
                "uris": null,
                "type_str": "figure",
                "text": "De nitions for Precision and Recall for an Entity i",
                "num": null
            },
            "FIGREF5": {
                "uris": null,
                "type_str": "figure",
                "text": "Scores of Both Algorithms on the Examples",
                "num": null
            }
        }
    }
}