File size: 67,294 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
{
    "paper_id": "A00-1039",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T01:12:12.645894Z"
    },
    "title": "Unsupervised Discovery of Scenario-Level Patterns for Information Extraction",
    "authors": [
        {
            "first": "Roman",
            "middle": [],
            "last": "Yangarber",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Ralph",
            "middle": [],
            "last": "Grishman",
            "suffix": "",
            "affiliation": {},
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Information Extraction (IE) systems are commonly based on pattern matching. Adapting an IE system to a new scenario entails the construction of a new pattern base-a timeconsuming and expensive process. We have implemented a system for finding patterns automatically from un-annotated text. Starting with a small initial set of seed patterns proposed by the user, the system applies an incremental discovery procedure to identify new patterns. We present experiments with evaluations which show that the resulting patterns exhibit high precision and recall.",
    "pdf_parse": {
        "paper_id": "A00-1039",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Information Extraction (IE) systems are commonly based on pattern matching. Adapting an IE system to a new scenario entails the construction of a new pattern base-a timeconsuming and expensive process. We have implemented a system for finding patterns automatically from un-annotated text. Starting with a small initial set of seed patterns proposed by the user, the system applies an incremental discovery procedure to identify new patterns. We present experiments with evaluations which show that the resulting patterns exhibit high precision and recall.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The task of Information Extraction (I-E) is the selective extraction of meaning from free natural language text. I \"Meaning\" is understood here in terms of a fixed set of semantic objects--entities, relationships among entities, and events in which entities participate. The semantic objects belong to a small number of types, all having fixed regular structure, within a fixed and closely circumscribed subject domain. The extracted objects are then stored in a relational database. In this paper, we use the nomenclature accepted in current IE literature; the term subject domain denotes a class of textual documents to be processed, e.g., \"business news,\" and scenario denotes the specific topic of interest within the domain, i.e., the set of facts to be extracted. One example of a scenario is \"management succession,\" the topic of MUC-6 (the Sixth Message Understanding Conference); in this scenario the system seeks to identify events in which corporate managers left 1For general references on IE, cf., e.g., (Pazienza, 1997; muc, 1995; muc, 1993) . their posts or assumed new ones. We will consider this scenario in detail in a later section describing experiments. IE systems today are commonly based on pattern matching. The patterns are regular expressions, stored in a \"pattern base\" containing a general-purpose component and a substantial domain-and scenario-specific component.",
                "cite_spans": [
                    {
                        "start": 1017,
                        "end": 1033,
                        "text": "(Pazienza, 1997;",
                        "ref_id": null
                    },
                    {
                        "start": 1034,
                        "end": 1044,
                        "text": "muc, 1995;",
                        "ref_id": null
                    },
                    {
                        "start": 1045,
                        "end": 1055,
                        "text": "muc, 1993)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Portability and performance are two major problem areas which are recognized as impeding widespread use of IE. This paper presents a novel approach, which addresses both of these problems by automatically discovering good patterns for a new scenario. The viability of our approach is tested and evaluated with an actual IE system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "In the next section we describe the problem in more detail in the context of our IE system; sections 2 and 3 describe our algorithm for pattern discovery; section 4 describes our experimental results, followed by comparison with prior work and discussion, in section 5.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "1 The IE System",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Our IE system, among others, contains a a backend core engine, at the heart of which is a regular-e~xpression pattern matcher. The engine draws on attendant knowledge bases (KBs) of varying degrees of domain-specificity. The KB components are commonly factored out to make the systems portable to new scenarios. There are four customizable knowledge bases in our IE system: the Lexicon contains general dictionaries and scenario-specific terms; the concept base groups terms into classes; the predicate base describes the logical structure of events to be extracted, and the pattern base contains patterns that catch the events in text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Each KB has a. substantial domain-specific component, which must be modified when mov-ing to new domains and scenarios. The system allows the user (i.e. scenario developer) to start with example sentences in text which contain events of interest, the candidates, and generalize them into patterns. However, the user is ultimately responsible for finding all the candidates, which amounts to manually processing example sentences in a very large training corpus. Should s/he fail to provide an example of a particular class of syntactic/semantic construction, the system has no hope of recovering the corresponding events. Our experience has shown that (1) the process of discovering candidates is highly expensive, and (2) gaps in patterns directly translate into gaps in coverage. How can the system help automate the process of discovering new good candidates? The system should find examples of all common linguistic constructs relevant to a scenario. While there has been prior research on identifying the primary lexical patterns of a sub-language or corpus (Grishman et al., 1986; Riloff, 1996) , the task here is more complex, since we are typically not provided in advance with a sub-corpus of relevant passages; these passages must themselves be found as part of the discovery process.",
                "cite_spans": [
                    {
                        "start": 1063,
                        "end": 1086,
                        "text": "(Grishman et al., 1986;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 1087,
                        "end": 1100,
                        "text": "Riloff, 1996)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "The difficulty is that one of the best indications of the relevance of the passages is precisely the presence of these constructs. Because of this circularity, we propose to acquire the constructs and passages in tandem.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "We outline our procedure for automatic acquisition of patterns; details are elaborated in later sections. The procedure is unsupervised in that it does not require the training corpus to be manually annotated with events of interest, nor a pro-classified corpus with relevance judgements, nor any feedback or intervention from the user 2. The idea is to combine IR-style document selection with an iterative relaxation process; this is similar to techniques used elsewhere in NLP, and is inspired in large part, if remotely, by the work of (Kay and RSscheisen, 1993) on automatic alignment of sentences and words in a bilingual corpus. There, the reasoning was: sentences that are translations of each 2however, it may be supervised after each iteration, where the user can answer yes/no questions to improve the quality of the results other are good indicators that words they contain are translation pairs; conversely, words that are translation pairs indicate that the sentences which contain them correspond to one another.",
                "cite_spans": [
                    {
                        "start": 540,
                        "end": 566,
                        "text": "(Kay and RSscheisen, 1993)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "In our context, we observe that documents that are relevant to the scenario will necessarily contain good patterns; conversely, good patterns are strong indicators of relevant documents. The outline of our approach is as follows. (2) an initial set of trusted scenario patterns, as chosen ad hoc by the user--the seed; as will be seen, the seed can be quite small--two or three patterns seem to suffice. (3) an initial (possibly empty) set of concept classes",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "The pattern set induces a binary partition (a split) on the corpus: on any document, either zero or more than zero patterns will match. Thus the universe of documents, U, is partitioned into the relevant sub-corpus, R, vs. the non-relevant sub-corpus, R = U -R, with respect to the given pattern set. Actually, the documents are assigned weights which are 1 for documents matched by the trusted seed, and 0 otherwise. 3 2. Search for new candidate patterns: (a) Automatically convert each sentence in the corpus,into a set of candidate patterns, 4",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "(b) Generalize each pattern by replacing each lexical item which is a member of a concept class by the class name.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "(c) Working from the relevant documents, select those patterns whose distribution is strongly correlated with other relevant documents (i.e., much more 3R represents the trusted truth through the discovery iterations, since it was induced by the manually-selected seed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "4Here, for each clause in the sentence we extract a tuple of its major roles: the head of the subject, the verb group, the object, object complement, as described below. This tuple is considered to be a pattern for the present purposes of discovery; it is a skeleton for the rich, syntactically transformed patterns our system uses in the extraction phase. densely distributed among the relevant documents than among the nonrelevant ones). The idea is to consider those candidate patterns, p, which meet the density, criterion:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "IHnRI IRI -->> IHnUI IUI",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "where H = H(p) is the set of documents where p hits.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "(d) Based on co-occurrence with the chosen patterns, extend the concept classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Solution",
                "sec_num": "2"
            },
            {
                "text": "classes to the user for review, retaining those relevant to the scenario.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Optional: Present the new candidates and",
                "sec_num": "3."
            },
            {
                "text": "The new pattern set induces a new partition on the corpus. With this pattern set, return to step 1. Repeat the procedure until no more patterns can be added.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4.",
                "sec_num": null
            },
            {
                "text": "3 Methodology 3.1 Pre-proeessing: Normalization Before applying the discovery procedure, we subject the corpus to several stages of preprocessing. First, we apply a name recognition module, and replace each name with a token describing its class, e.g. C-Person, C-Company, etc. We collapse together all numeric expressions, currency values, dates, etc., using a single token to designate each of these classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4.",
                "sec_num": null
            },
            {
                "text": "We then apply a parser to perform syntactic normalization to transform each clause into a common predicate-argument structure. We use the general-purpose dependency parser of English, based on the FDG formalism (Tapanainen and J~rvinen, 1997) and developed by the Research Unit for Multilingual Language Technology at the University of Helsinki, and Conexor Oy. The parser (modified to understand the name labels attached in the previous step) is used for reducing such variants as passive and relative clauses to a tuple, consisting of several elements.",
                "cite_spans": [
                    {
                        "start": 211,
                        "end": 242,
                        "text": "(Tapanainen and J~rvinen, 1997)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Syntactic Analysis",
                "sec_num": "3.2"
            },
            {
                "text": "1. For each claus, the first element is the subject, a \"semantic\" subject of a non-finite sentence or agent of the passive. 5",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Syntactic Analysis",
                "sec_num": "3.2"
            },
            {
                "text": "2. The second element is the verb.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Syntactic Analysis",
                "sec_num": "3.2"
            },
            {
                "text": "3. The third element is the object, certain object-like adverbs, subject of the passive or subject complement 6 4. The fourth element is a phrase which refers to the object or the subject. A typical example of such an argument is an object complement, such as Company named John Smith president. Another instance is the so-called copredicatire (Nichols, 1978) , in the parsing system (J~irvinen and . A copredicative refers to a subject or an object, though this distinction is typically difficult to resolve automatically/ Clausal tuples also contain a locative modifier, and a temporal modifier. We used a corpus of 5,963 articles from the Wall Street Journal, randomly chosen. The parsed articles yielded a total of 250,000 clausal tuples, of which 135,000 were distinct.",
                "cite_spans": [
                    {
                        "start": 344,
                        "end": 359,
                        "text": "(Nichols, 1978)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Syntactic Analysis",
                "sec_num": "3.2"
            },
            {
                "text": "Because tuples may not repeat with sufficient frequency to obtain reliable statistics, each tuple is reduced to a set of pairs: e.g., a verbobject pair, a subject-object pair, etc. Each pair is used as a generalized pattern during the candidate selection stage. Once we have identified pairs which are relevant to the scenario, we use them to construct or augment concept classes, by grouping together the missing roles, (for example, a class of verbs which occur with a relevant subject-object pair: \"company (hire/fire/expel...} person\"). This is similar to work by several other groups which aims to induce semantic classes through syntactic co-occurrence analysis (Riloff and Jones, 1999; Pereira et al., 1993; Dagan et al., 1993; Hirschman et al., 1975) , although in .our case the contexts are limited to selected patterns, relevant to the scenario. SE.g., \"John sleeps\", \"John is appointed by Company\", \"I saw a dog which sleeps\", \"She asked John to buy a car\". 6E.g., \"John is appointed by Company\", \"John is the president of Company\", \"I saw a dog which sleeps\", The dog which I saw sleeps. 7For example, \"She gave us our coffee black\", \"Company appointed John Smith as president\".",
                "cite_spans": [
                    {
                        "start": 668,
                        "end": 692,
                        "text": "(Riloff and Jones, 1999;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 693,
                        "end": 714,
                        "text": "Pereira et al., 1993;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 715,
                        "end": 734,
                        "text": "Dagan et al., 1993;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 735,
                        "end": 758,
                        "text": "Hirschman et al., 1975)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Generalization and Concept Classes",
                "sec_num": "3.3"
            },
            {
                "text": "Here we present the results from experiments we conducted on the MUC-6 scenario, \"management succession\". The discovery procedure was seeded with a small pattern set, namely:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Pattern Discovery",
                "sec_num": "3.4"
            },
            {
                "text": "Subject Verb Direct Object C-Company C-Appoint C-Person C-Person C-Resign",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Pattern Discovery",
                "sec_num": "3.4"
            },
            {
                "text": "Documents are assigned relevance scores on a scale between 0 and 1. The seed patterns are accepted as ground truth; thus the documents they match have relevance 1. On subsequent iterations, the newly accepted patterns are not trusted as absolutely. On iteration number i q-1, each pattern p is assigned a precision measure, based on the relevance of the documents it matches: Pc(P) --Igl is the conditional probability of relevance. We further impose two support criteria: we distrust such frequent patterns where [HA U{ > a[U[ as uninformative, and rare patterns for which [H A R[ </3 as noise. \u00b0 At the end of each iteration, the system selects the pattern with the highest score, L(p), and adds it to the seed set. The documents which the winning pattern hits are added to the relevant set. The pattern search is then restarted.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Pattern Discovery",
                "sec_num": "3.4"
            },
            {
                "text": "Relevance The above is a simplification of the actual procedure, in several important respects.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Only generalized patterns are considered for candidacy, with one or more slots filled with wild-cards. In computing the score of the generalized pattern, we do not take into consideration all possible values of the wild-card role. We instead constrain the wild-card to those values which themselves in turn produce patterns with high scores. These values then become members of a new class, which is output in tandem with the winning pattern 1\u00b0 Ssimilarly to (Riloff, 1996) \u00b0U denotes the universe of documents. We used c~ = 0.i and ~-----2.",
                "cite_spans": [
                    {
                        "start": 459,
                        "end": 473,
                        "text": "(Riloff, 1996)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "1\u00b0The classes are currently unused by subsequent iterations; this important issue is considered in future work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Preci+l(p) = 1 {H(p){ ~ Reli(d) (2) dEH(p)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "where Reli(d) is the relevance of the document from the previous iteration, and H(p) is the set of documents where p matched. More generally, if K is a classifier consisting of a set of patterns, we can define H(K) as the set of documents where all of patterns p E K match, and the \"cumulative\" precision 11 of K as",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Prec_{i+1}(K) = \\frac{1}{|H(K)|} \\sum_{d \\in H(K)} Rel_i(d) \\quad (3)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Once the new winning pattern is accepted, the relevance scores of the documents are readjusted as follows. For each document d which is matched by some (non-empty) subset of the currently accepted patterns, we can view that subset of patterns as a classifier K d = {py}. These patterns determine the new relevance score of the document",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Rel_{i+1}(d) = \\max(Rel_i(d), Prec_{i+1}(K_d)) \\quad (4)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "This ensures that the relevance score grows monotonically, and only when there is sufficient positive evidence, as the patterns in effect vote \"conjunctively\" on the documents. The results which follow use this measure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "Thus in the formulas above, R is not simply the count of the relevant documents, but is rather their cumulative relevance. The two formulas, (3) and (4), capture the mutual dependency of patterns and documents; this recomputation and growing of precision and relevance scores is at the heart of the procedure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "11Of course, this measure is defined only when H(K) \u2260 \u2205.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-computatlon of Document",
                "sec_num": "3.5"
            },
            {
                "text": "An objective measure of goodness of a pattern is not trivial to establish since the patterns cannot be used for extraction directly, without being properly incorporated into the knowledge base. Thus, the discovery procedure does not lend itself easily to MUC-style evaluations, since a pattern lacks information about which events it induces and which slots its arguments should fill.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "1",
                "sec_num": null
            },
            {
                "text": "However, it is possible to apply some objective measures of performance. One way we evaluated the system is by noting that in addition to growing the pattern set, the procedure also grows the relevance of documents. The latter can be objectively evaluated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "1",
                "sec_num": null
            },
            {
                "text": "We used a test corpus of 100 MUC-6 formaltraining documents (which were included in the o main development corpus of about 6000 documents) plus another 150 documents picked at random from the main corpus and judged by hand. These judgements constituted the ground truth and were used only for evaluation, (not in the discovery procedure). Figure 1 shows the recall/precision measures with respect to the test corpus of 250 documents, over a span of 60 generations, starting with the seed set in table 3.4. The Seed patterns matched 184 of the 5963 documents, yielding an initial recall of .11 and precision of .93; by the last generation it searched through 982 documents with non-zero relevance, and ended with .80 precision and .78 recall. This facet of the discovery procedure is closely related to the MUC \"text-filtering\" sub-task, where the systems are judged at the level of documents rather than event slots. It is interesting to compare the results with other MUC-6 participants, shown anonymously in figure 2. Considering recall and precision separately, the discovery procedure attains values comparable to those achieved by some of the participants, all of which were either heavily-supervised or manually coded systems. It is important to bear in mind that the discovery procedure had no benefit of training material, or any information beyond the seed pattern set. Figure 2 shows two evaluations of our discovery procedure, tested against the original MUC-6 corpus of 100 documents, and against our test corpus, which consists of an additional 150 documents judged manually. The two plots in the figure show a slight difference in results, indicating that in some sense, the MUC corpus was more \"random\", or that our expanded corpus was somewhat skewed in favor of more common patterns that the system is able to find more easily.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 339,
                        "end": 347,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 1379,
                        "end": 1387,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "0.1",
                "sec_num": null
            },
            {
                "text": "The graphs shown in Figures 1 and 2 are based on an \"objective\" measure we adopted during the experiments. This is the same measure of relevance used internally by the discovery procedure on each iteration (relative to the \"truth\" of relevance scores of the previous iteration), and is not quite the standard measure used for text filtering in IR. According to this measure, the system gets a score for each document based on the relevance which it assigned to the document. Thus if the system .assigned relevance of X percent to a relevant document, it only received X Figure 3 : Results on the MUC corpus percent on the recall score for classifying that document correctly. Similarly, if the system assigned relevance Y to an irrelevant document, it was penalized only for the mis-classified Y percent on the precision score. To make our results more comparable to those of other MUC competitors, we chose a cut-off point and force the system to make a binary relevance decision on each document. The cut-off of 0.5 seemed optimal from empirical observations. Figure 3 shows a noticeable improvement in scores, when using our continuous, \"objective\" measure, vs. the cut-off measure, with the entire graph essentially translated to the right for a gain of almost 10 percentage points of recall.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 20,
                        "end": 35,
                        "text": "Figures 1 and 2",
                        "ref_id": null
                    },
                    {
                        "start": 570,
                        "end": 578,
                        "text": "Figure 3",
                        "ref_id": null
                    },
                    {
                        "start": 1062,
                        "end": 1070,
                        "text": "Figure 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Choice of Evaluation Metric",
                "sec_num": "4.3"
            },
            {
                "text": "i i i i i im~ ! ! ! i i DiE c ! i i i i i i i i 0 . '",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Choice of Evaluation Metric",
                "sec_num": "4.3"
            },
            {
                "text": "Another effective, if simple, measure of performanceis how many of the patterns the procedure found, and comparing them with those used by an extraction engine which was manually constructed for the same task. Our MUC-6 system used approximately 75 clause level patterns, with 30 distinct verbal heads. In one conservative experiment, we observed that the discovery procedure found 17 of these verbs, or 57%. However, it also found at least 8 verbs the manual system lacked, which seemed relevant to the scenario: company-bring-person- [as\u00f7officer] 12",
                "cite_spans": [
                    {
                        "start": 536,
                        "end": 548,
                        "text": "[as\u00f7officer]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluating Patterns",
                "sec_num": "4.4"
            },
            {
                "text": "person-rejoin-company- [as + o25cer] person- { ret , conti, e, remai, ,stay}-[as + o25cer] person-pursue-interest At the risk of igniting a philosophical debate over what is or is not relevant to a scenario, we note that the first four of these verbs are evidently essential to the scenario in the strictest definition, since they imply changes of post. The next three are \"staying\" verbs, and are actually also needed, since higher-level inferences required in tracking events for long-range merging over documents, require knowledge of persons occupying posts, rather than only assuming or leaving them. The most curious one is \"person-pursue-interests\"; surprisingly, it too is useful, even in the strictest MUC sense, cf., (muc, 1995) . Systems are judged on filling a slot called \"other-organization\", indicating from or to which company the person came or went. This pattern is consistently used in text to indicate that the person left to pursue other, undisclosed interests, the knowledge of which would relieve the system from seeking other information in order to fill this slot. This is to say that here strict evaluation is elusive.",
                "cite_spans": [
                    {
                        "start": 23,
                        "end": 36,
                        "text": "[as + o25cer]",
                        "ref_id": null
                    },
                    {
                        "start": 45,
                        "end": 90,
                        "text": "{ ret , conti, e, remai, ,stay}-[as + o25cer]",
                        "ref_id": null
                    },
                    {
                        "start": 726,
                        "end": 737,
                        "text": "(muc, 1995)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "person-come-[to+eompanv]-[as+oZScer]",
                "sec_num": null
            },
            {
                "text": "Some of the prior research has emphasized interactive tools to convert examples to extraction patterns, cf. (Yangarber and Grishman, 1997) , while others have focused on methods for automatically converting a corpus annotated with extraction examples into such patterns (Lehnert et al., 1992; Fisher et al., 1995; Miller et al., 1998) . These methods, however, do not reduce the burden of finding the examples to annotate. With either approach, the portability bottleneck is shifted from the problem of building patterns to that of finding good candidates. The prior work most closely related to this study is (Riloff, 1996) , which, along with (Riloff, 1993) , seeks automatic methods for filling slots in event templates. However, the prior work differs from that presented here in several crucial respects; firstly, the prior work does not attempt to find entire events, after the fashion of MUC's highest-level scenario-template task. Rather the patterns produced by those systems identify NPs that fill individual slots, without specifying how these slots may be combined at a later stage into complete event templates. The present work focuses on directly discovering event-level, multi-slot relational patterns. Secondly, the prior work either relies on a set of documents with relevance judgements to find slot fillers where they are relevant to events, (Riloff, 1996) , or utilizes an un-classified corpus containing a very high proportion of relevant documents to find all instances of a semantic class, (Riloff and Jones, 1999) . By contrast, our procedure requires no relevance judgements, and works on the assumption that the corpus is balanced and the proportion of relevant documents is small. Classifying documents by hand, although admittedly easier than tagging event instances in text for automatic training, is still a formidable task. When we prepared the test corpus, it took 5 hours to mark 150 short documents.",
                "cite_spans": [
                    {
                        "start": 108,
                        "end": 138,
                        "text": "(Yangarber and Grishman, 1997)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 270,
                        "end": 292,
                        "text": "(Lehnert et al., 1992;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 293,
                        "end": 313,
                        "text": "Fisher et al., 1995;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 314,
                        "end": 334,
                        "text": "Miller et al., 1998)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 610,
                        "end": 624,
                        "text": "(Riloff, 1996)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 645,
                        "end": 659,
                        "text": "(Riloff, 1993)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 1362,
                        "end": 1376,
                        "text": "(Riloff, 1996)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 1514,
                        "end": 1538,
                        "text": "(Riloff and Jones, 1999)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion and Current Work",
                "sec_num": "5"
            },
            {
                "text": "The presented results indicate that our method of corpus analysis can be used to rapidly identify a large number of relevant patterns without pre-classifying a large training corpus. We are at the early stages of understanding how to optimally tune these techniques, and there are a number of areas that need refinement. We are working on capturing the rich information about concept classes which is currently returned as part of our pattern discovery procedure, to build up a concept dictionary in tandem with the pattern base. We are also considering the proper selection of weights and thresholds for controlling the rankings of patterns and documents, criteria for terminating the iteration process, and for dynamic adjustments of these weights. We feel that the generalization technique in pattern discovery offers a great opportunity for combating sparseness of data, though this requires further research. Lastly, we are studying these algorithms under several unrelated scenarios to determine to what extent scenario-specific phenomena affect their performance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion and Current Work",
                "sec_num": "5"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Contextual word similarity and estimation from sparse data",
                "authors": [
                    {
                        "first": "Shaul",
                        "middle": [],
                        "last": "Ido Dagan",
                        "suffix": ""
                    },
                    {
                        "first": "Shaul",
                        "middle": [],
                        "last": "Marcus",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Markovitch",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "31--37",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ido Dagan, Shaul Marcus, and Shaul Markovitch. 1993. Contextual word simi- larity and estimation from sparse data. In Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics, pages 31-37, Columbus, OH, June.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Description of the UMass system as used for MUC-6",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Fisher",
                        "suffix": ""
                    },
                    {
                        "first": "Stephen",
                        "middle": [],
                        "last": "Soderland",
                        "suffix": ""
                    },
                    {
                        "first": "Joseph",
                        "middle": [],
                        "last": "Mc-Carthy",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proc. Sixth Message Understanding Conf. (MUC-6)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Fisher, Stephen Soderland, Joseph Mc- Carthy, Fang-fang Feng, and Wendy Lehnert. 1995. Description of the UMass system as used for MUC-6. In Proc. Si;zth Message Un- derstanding Conf. (MUC-6), Columbia, MD, November. Morgan Kaufmann.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Discovery procedures for sublanguage selectional patterns: Initial experiments",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Grishman",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Hirschman",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [
                            "T"
                        ],
                        "last": "Nhan",
                        "suffix": ""
                    }
                ],
                "year": 1986,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "R. Grishman, L. Hirschman, and N.T. Nhan. 1986. Discovery procedures for sublanguage selectional patterns: Initial experiments.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Grammatically-based automatic word class formation",
                "authors": [
                    {
                        "first": "Lynette",
                        "middle": [],
                        "last": "Hirschman",
                        "suffix": ""
                    },
                    {
                        "first": "Ralph",
                        "middle": [],
                        "last": "Grishman",
                        "suffix": ""
                    },
                    {
                        "first": "Naomi",
                        "middle": [],
                        "last": "Sager",
                        "suffix": ""
                    }
                ],
                "year": 1975,
                "venue": "Information Processing and Management",
                "volume": "11",
                "issue": "1/2",
                "pages": "39--57",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lynette Hirschman, Ralph Grishman, and Naomi Sager. 1975. Grammatically-based automatic word class formation. Information Processing and Management, 11(1/2):39-57.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A dependency parser for English. Technical Report TR-1, Department of General Linguistics",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Timo",
                        "suffix": ""
                    },
                    {
                        "first": "Pasi",
                        "middle": [],
                        "last": "/Irvinen",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Tapanainen",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Timo J/irvinen and Pasi Tapanainen. 1997. A dependency parser for English. Technical Re- port TR-1, Department of General Linguis- tics, University of Helsinki, Finland, Febru- ary.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Text-translation alignment",
                "authors": [
                    {
                        "first": "Martin",
                        "middle": [],
                        "last": "Kay",
                        "suffix": ""
                    },
                    {
                        "first": "Martin",
                        "middle": [],
                        "last": "R\u00f6scheisen",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "",
                "issue": "1",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Martin Kay and Martin RSscheisen. 1993. Text-translation alignment. Computational Linguistics, 19(1).",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "University of massachusetts: MUC-4 test results and analysis",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Lehnert",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Cardie",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Fisher",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Mccarthy",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Soderland ; Mclean",
                        "suffix": ""
                    },
                    {
                        "first": "June",
                        "middle": [
                            "Morgan"
                        ],
                        "last": "Va",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Kaufmann",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Proc. Fourth Message Understanding Conf",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "W. Lehnert, C. Cardie, D. Fisher, J. McCarthy, E. Riloff, and S. Soderland. 1992. Univer- sity of massachusetts: MUC-4 test results and analysis. In Proc. Fourth Message Un- derstanding Conf., McLean, VA, June. Mor- gan Kaufmann.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Algorithms that learn to extract information; BBN: Description of the SIFT system as used for MUC-7",
                "authors": [
                    {
                        "first": "Scott",
                        "middle": [],
                        "last": "Miller",
                        "suffix": ""
                    },
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Crystal",
                        "suffix": ""
                    },
                    {
                        "first": "Heidi",
                        "middle": [],
                        "last": "Fox",
                        "suffix": ""
                    },
                    {
                        "first": "Lance",
                        "middle": [],
                        "last": "Ramshaw",
                        "suffix": ""
                    },
                    {
                        "first": "Richard",
                        "middle": [],
                        "last": "Schwartz",
                        "suffix": ""
                    },
                    {
                        "first": "Rebecca",
                        "middle": [],
                        "last": "Stone",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the Fifth Message Understanding Conference (MUC-5)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Scott Miller, Michael Crystal, Heidi Fox, Lance Ramshaw, Richard Schwartz, Rebecca Stone, Ralph Weischedel, and the Annota- tion Group. 1998. Algorithms that learn to extract information; BBN: Description of the SIFT system as used for MUC-7. In Proc. of the Seventh Message Understanding Confer- ence, Fairfax, VA. 1993. Proceedings of the Fifth Message Un- derstanding Conference (MUC-5), Baltimore, MD, August. Morgan Kaufmann.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Proceedings of the Sixth Message Understanding Conference (MUC-6)",
                "authors": [],
                "year": 1995,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "1995. Proceedings of the Sixth Message Un- derstanding Conference (MUC-6), Columbia, MD, November. Morgan Kaufmann.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Secondary predicates. Proceedings of the 4th Annual Meeting of Berkeley Linguistics Society",
                "authors": [
                    {
                        "first": "Johanna",
                        "middle": [],
                        "last": "Nichols",
                        "suffix": ""
                    }
                ],
                "year": 1978,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "114--127",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Johanna Nichols. 1978. Secondary predicates. Proceedings of the 4th Annual Meeting of Berkeley Linguistics Society, pages 114-127.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Information Extraction",
                "authors": [],
                "year": 1997,
                "venue": "Lecture Notes in Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Maria Teresa Pazienza, editor. 1997. Infor- mation Extraction. Springer-Verlag, Lecture Notes in Artificial Intelligence, Rome.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Distributional clustering of English words",
                "authors": [
                    {
                        "first": "Fernando",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    },
                    {
                        "first": "Naftali",
                        "middle": [],
                        "last": "Tishby",
                        "suffix": ""
                    },
                    {
                        "first": "Lillian",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "183--190",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Fernando Pereira, Naftali Tishby, and Lillian Lee. 1993. Distributional clustering of En- glish words. In Proceedings of the 31st An- nual Meeting of the Assn. for Computational Linguistics, pages 183-190, Columbus, OH, June.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Learning dictionaries for information extraction by multi-level bootstrapping",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    },
                    {
                        "first": "Rosie",
                        "middle": [],
                        "last": "Jones",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proceedings of Sixteenth National Conference on Artificial Intelligence (AAAI-99)",
                "volume": "",
                "issue": "",
                "pages": "811--816",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff and Rosie Jones. 1999. Learn- ing dictionaries for information extraction by multi-level bootstrapping. In Proceedings of Sixteenth National Conference on Artificial Intelligence (AAAI-99), Orlando, Florida, Ellen Riloff. 1993. Automatically construct- ing a dictionary for information extraction tasks. In Proceedings of Eleventh National Conference on Artificial Intelligence (AAAI- 93), pages 811-816. The AAAI Press/MIT Press.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Automatically generating extraction patterns from untagged text",
                "authors": [
                    {
                        "first": "Ellen",
                        "middle": [],
                        "last": "Riloff",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of Thirteenth National Conference on Artificial Intelligence (AAAI-96)",
                "volume": "",
                "issue": "",
                "pages": "1044--1049",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ellen Riloff. 1996. Automatically generating extraction patterns from untagged text. In Proceedings of Thirteenth National Confer- ence on Artificial Intelligence (AAAI-96), pages 1044-1049. The AAAI Press/MIT Press.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "A non-projective dependency parser",
                "authors": [
                    {
                        "first": "Pasi",
                        "middle": [],
                        "last": "Tapanainen",
                        "suffix": ""
                    },
                    {
                        "first": "Timo",
                        "middle": [],
                        "last": "Järvinen",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the 5th Conference on Applied Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "64--71",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pasi Tapanainen and Timo Järvinen. 1997. A non-projective dependency parser. In Pro- ceedings of the 5th Conference on Applied Natural Language Processing, pages 64-71, Washington, D.C., April. ACL.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Customization of information extraction systems",
                "authors": [
                    {
                        "first": "Roman",
                        "middle": [],
                        "last": "Yangarber",
                        "suffix": ""
                    },
                    {
                        "first": "Ralph",
                        "middle": [],
                        "last": "Grishman",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "International Workshop on Lexically Driven Information Extraction",
                "volume": "",
                "issue": "",
                "pages": "1--11",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Roman Yangarber and Ralph Grishman. 1997. Customization of information extraction sys- tems. In Paola Velardi, editor, International Workshop on Lexically Driven Information Extraction, pages 1-11, Frascati, Italy, July. Università di Roma.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "a large corpus of un-annotated and un-classified documents in the domain;",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF1": {
                "text": "Here C-Company and C-Person denote semantic classes containing named entities of the corresponding semantic types. C-Appoint denotes a class of verbs, containing four verbs { appoint, elect, promote, name}; C-Resign = { resign, depart, quit, step-down }. During a single iteration, we compute the score L(p) for each candidate pattern p: L(p) = Pr(p) · log |H ∩ R| (1) where R denotes the relevant subset, and H = H(p) the documents matching p, as above, and |H ∩ R|",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF2": {
                "text": "....... ~ .......... ~ ......... ~ ........... ~ .......... ~ .......... ~ ......... ...... iiiiiiiiiiiiiilEi ........... ........ /... .......... ~ .......... '.\" ........ \": .......... ~ ........... r .......... ~ .......... ! .........",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF3": {
                "text": "Figure 1: Recall/Precision curves for Management Succession",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF4": {
                "text": "7 ................................................................... ~\" ............... ... ~ ........ , ........ ~ ....... ~ ........ ~ ....... ~ ........ ! ........ : ........ 4 .......",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "TABREF0": {
                "text": "....... ........ ............... ---! ........ ....... ...... i ........ i ........ [ ....... ? ........ f ............ T ....... ...... J ........ i ........ i. ....... i ........ ~ ....... .; ........... ..: ........ i....: 0 6 ........................ .' 7 ....... ' ........ ~ ....... ~ ........ ~ ....................... ..... '. ........ , ........ ~\" ....... , ........ ~ ....... ~ ........ * ....... \"=. ........ , ....... ...... e ........ i ........ ! ........ ~ ........ ~... ' ........ ! ........ i .......",
                "html": null,
                "content": "<table><tr><td/><td/><td>II :</td><td>I :</td><td>I .</td><td>I ,</td><td/><td>I .</td><td/><td>I :</td><td>I \u2022</td><td>I</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>i</td><td>.</td><td>i</td><td>~</td><td>iB</td><td>i</td></tr><tr><td colspan=\"2\">0.9</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>0.9</td></tr><tr><td colspan=\"2\">0.8</td><td>i</td><td>i</td><td>i</td><td>::</td><td>i</td><td>i</td><td>i</td><td>i</td><td>i</td><td>~</td></tr><tr><td>0</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">0.7</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>(I)</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>i</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>i i</td><td/><td/><td/><td/><td/><td/></tr><tr><td>0</td><td>. 5</td><td>..i</td><td/><td>z</td><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">0.4</td><td>i</td><td/><td>I</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>0</td><td colspan=\"8\">0.10.20.30.40.50.60.70.80.9</td><td/><td>1</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">Recall</td><td/><td/><td/><td/></tr><tr><td/><td/><td colspan=\"7\">Figure 2: Precision vs. Recall</td><td/><td/></tr></table>",
                "num": null,
                "type_str": "table"
            }
        }
    }
}