{
    "paper_id": "M98-1009",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T03:16:04.906921Z"
    },
    "title": "ALGORITHMS THAT LEARN TO EXTRACT INFORMATION BBN: DESCRIPTION OF THE SIFT SYSTEM AS USED FOR MUC-7",
    "authors": [
        {
            "first": "Scott",
            "middle": [],
            "last": "Miller",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Michael Crystal",
                "location": {
                    "addrLine": "Heidi Fox, Lance Ramshaw, Richard Schwartz, Rebecca Stone, Ralph Weischedel"
                }
            },
            "email": "weischedel@bbn.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "For MUC-7, BBN has for the first time fielded a fully-trained system for NE, TE, and TR; results are all the output of statistical language models trained on annotated data, rather than programs executing handwritten rules. Such trained systems have some significant advantages: \u2022 They can be easily ported to new domains by simply annotating data with semantic answers. \u2022 The complex interactions that make rule-based systems difficult to develop and maintain can here be learned automatically from the training data. We believe that the results in this evaluation are evidence that such trained systems, even at their current level of development, can perform roughly on a par with rules hand-tailored by experts. Since MUC-3, BBN has been steadily increasing the proportion of the information extraction process that is statistically trained. Already in MET-1, our name-finding results were the output of a fully statistical, HMM-based model, and that statistical Identifinder\u2122 model was also used for the NE task in MUC-7. For the MUC-7 TE and TR tasks, BBN developed SIFT, a new model that represents a significant further step along this path, replacing PLUM, a system requiring handwritten patterns, with SIFT, a single integrated trained model.",
    "pdf_parse": {
        "paper_id": "M98-1009",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "For MUC-7, BBN has for the first time fielded a fully-trained system for NE, TE, and TR; results are all the output of statistical language models trained on annotated data, rather than programs executing handwritten rules. Such trained systems have some significant advantages: \u2022 They can be easily ported to new domains by simply annotating data with semantic answers. \u2022 The complex interactions that make rule-based systems difficult to develop and maintain can here be learned automatically from the training data. We believe that the results in this evaluation are evidence that such trained systems, even at their current level of development, can perform roughly on a par with rules hand-tailored by experts. Since MUC-3, BBN has been steadily increasing the proportion of the information extraction process that is statistically trained. Already in MET-1, our name-finding results were the output of a fully statistical, HMM-based model, and that statistical Identifinder\u2122 model was also used for the NE task in MUC-7. For the MUC-7 TE and TR tasks, BBN developed SIFT, a new model that represents a significant further step along this path, replacing PLUM, a system requiring handwritten patterns, with SIFT, a single integrated trained model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "At the sentence level, the SIFT system (\"Statistics for Information From Text\") employs a unified statistical process to map from words to semantic structures. That is, part-of-speech determination, namefinding, parsing, and relationship-finding all happen as part of the same process. This allows each element of the model to influence the others, and avoids the assembly-line trap of having to commit to a particular part-of-speech choice, say, early on in the process, when only local information is available to inform the choice.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "The SIFT sentence-level model was trained from two sources:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "\u2022 General knowledge of English sentence structure was learned from the Penn Treebank corpus of one million words of Wall Street Journal text. \u2022 Specific knowledge about how the target entities and relations are expressed in English was learned from about 500 K words of on-domain text annotated with named entities, descriptors, and semantic relations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "For extraction in a new domain, the names and descriptors of relevant items (persons, organizations, locations, and artifacts) are marked, as well as the target relationships between them that are signaled syntactically. For example, in the phrase \"GTE Corp. of Stamford\", the annotation would record a \"location-of\" connection between the company and the city. The model can thus learn the structures that are typically used in English to convey the target relationships.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
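            {
                "text": "A minimal sketch (not part of the original paper) of how such an annotation could be represented in Python; the Entity and Relation classes and their field names are illustrative assumptions, not SIFT's actual annotation format.

from dataclasses import dataclass
from typing import Tuple

@dataclass
class Entity:
    span: Tuple[int, int]   # character offsets of the mention
    kind: str               # 'name' or 'descriptor'
    etype: str              # 'person', 'organization', 'location', 'artifact'

@dataclass
class Relation:
    rtype: str              # e.g. 'location-of' or 'employee-of'
    arg1: Entity
    arg2: Entity

# 'GTE Corp. of Stamford' yields a location-of relation between company and city
text = 'GTE Corp. of Stamford'
gte = Entity(span=(0, 9), kind='name', etype='organization')
stamford = Entity(span=(13, 21), kind='name', etype='location')
annotation = [Relation(rtype='location-of', arg1=gte, arg2=stamford)]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },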
            {
                "text": "While the bulk of the TE/TR task happens within the sentence-level decoder, some further processing was still required to produce TE and TR answer templates. After the names, descriptors, and local relationships had been extracted from the decoder output, a merging process had to be applied to link multiple occurrences of the same name or of alternative forms of the name from different sentences. A second, cross-sentence model was then invoked to try to identify non-local relationships that were missed by the decoder, as when the two entities do not occur in the same sentence. Finally, type and country information was filled in using heuristic tests and a gazetteer database, and output filters were applied to select which of the proposed internal structures should be included in the output. We are actively exploring ways to integrate these post-processing steps more closely with the main model, since an integrated statistical model is the only way to make every choice in a nuanced way, based on all the available information. Figure 1 is a block diagram of the sentence-level model showing the main components and data paths. Two types of annotations are used to train the model: semantic annotations for learning about the target entities and relations, and syntactic annotations for learning about the general structure of English. From these annotations, the training program estimates the parameters of a unified statistical model that accounts for both syntax and semantics. Later, when presented with a new sentence, the search program explores the statistical model to find the most likely combined semantic and syntactic interpretation. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1041,
                        "end": 1049,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Our source for syntactically annotated training data was the Penn Treebank (Marcus et al., 1993) . Significantly, we do not require that syntactic annotations be from the same source, or cover the same domain, as the target task. For example, while the Penn Treebank consists of Wall Street Journal text, the target source for this evaluation was New York Times newswire. Similarly, although the Penn Treebank domain covers general and financial news, the target domain for this evaluation was space technology. The ability to use syntactic training from a different source and domain than the target is an important feature of our model.",
                "cite_spans": [
                    {
                        "start": 75,
                        "end": 96,
                        "text": "(Marcus et al., 1993)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": null
            },
            {
                "text": "Since the Penn Treebank serves as our syntactically annotated training corpus, we need only create a semantically annotated corpus. Stated generally, semantic annotations serve to denote the entities and relations of interest in the target domain. More specifically, entities are marked as either names or descriptors, with co-reference between entities marked as well. Figure 2 shows a semantically annotated fragment of a typical sentence.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 370,
                        "end": 378,
                        "text": "Figure 2",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": null
            },
            {
                "text": "From only these simple semantic annotations, the system can be trained to work in a new domain. To train SIFT for MUC-7, we annotated approximately 500,000 words of New York Times newswire text, covering the domains of air disasters and space technology. (We have not yet run experiments to see how performance varies with more/less training data.)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": null
            },
            {
                "text": "Nance , who a consultant is to News ABC paid also , said ... ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training Data",
                "sec_num": null
            },
            {
                "text": "While our semantic annotations are quite simple, the internal model of sentence structure is substantially more complicated, since it must account for syntactic structure as well as entities and semantic relations. Our underlying training algorithm requires examples of these internal structures in order to estimate the parameters of the unified semantic/syntactic model. However, we do not wish to incur the high cost of annotating parse trees. Instead, we use the following multi-step training procedure, exploiting the Penn Treebank:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "1) Train the sentence-level model on the purely syntactic parse trees in the Treebank. Once this step is complete, the model will function as a state-of-the-art statistical parser.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "2) For each sentence in the semantically annotated corpus: a) Apply the sentence level model to syntactically parse the sentence, constraining the model to produce only parses that are consistent with the semantic annotation. b) Augment the resulting parse tree to reflect semantic structure as well as syntactic structure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "3) Retrain the sentence-level model on the augmented parse trees produced in step 2. Once this step is complete, we have an integrated model of semantics and syntax.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
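            {
                "text": "A schematic sketch (not from the paper) of the three-step procedure above; train_parser, constrained_parse, and augment_tree stand in for the components the paper describes and are passed in as callables rather than being real SIFT APIs.

def train_sift(treebank, semantic_corpus, train_parser, constrained_parse, augment_tree):
    # Step 1: learn general English structure from purely syntactic trees.
    model = train_parser(treebank)

    # Step 2: parse each semantically annotated sentence, constrained by the
    # annotated names/descriptors, then overlay the semantic labels onto the
    # resulting syntactic tree.
    augmented_trees = []
    for sentence, annotation in semantic_corpus:
        tree = constrained_parse(model, sentence, annotation)
        augmented_trees.append(augment_tree(tree, annotation))

    # Step 3: retrain the same generative model on the augmented trees,
    # yielding an integrated model of syntax and semantics.
    return train_parser(augmented_trees)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },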
            {
                "text": "Details of the statistical model will be discussed later. For now, we turn our attention to (a) constraining the decoder and (b) augmenting the parse trees with semantic structure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "Constraints are simply bracketing boundaries that may not be crossed by any parse constituent. There are two types of constraints: hard constraints that cannot be violated under any conditions, and soft constraints, that may be violated only if enforcing them would result in no plausible parse. All named entities and descriptors are treated as hard constraints; the model is prohibited from producing any structures that would break up these elements. In addition, we attempt to keep possible appositives together through soft constraints. Whenever there is a co-referential relation between two entities that are either adjacent or separated by only a comma, we posit an appositive and introduce a soft constraint to encourage the parser to keep the elements together.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
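            {
                "text": "A minimal sketch (not from the paper) of the bracket-crossing test that such constraints imply; representing each constraint as a (start, end, hard) span is an assumption made for illustration.

def crosses(span, constraint):
    # a constituent crosses a constraint if the two spans overlap
    # without either one containing the other
    (s1, e1), (s2, e2) = span, constraint
    return s1 < s2 < e1 < e2 or s2 < s1 < e2 < e1

def constituent_allowed(span, constraints, other_parse_exists):
    # hard constraints may never be crossed; soft constraints may be
    # crossed only when enforcing them would leave no plausible parse
    for start, end, hard in constraints:
        if crosses(span, (start, end)) and (hard or other_parse_exists):
            return False
    return True",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },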
            {
                "text": "Once a constrained parse is found, it must be augmented to reflect the semantic structure. Augmentation is a five step process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "1) Nodes are inserted into the parse tree to distinguish names and descriptors that are not bracketed in the parse. For example, the parser produces a single noun phrase with no internal structure for \"Lt. Cmdr. David Edwin Lewis\". Additional nodes must be inserted to distinguish the descriptor, \"Lt. Cmdr.,\" and the name, \"David Edwin Lewis.\"",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "2) Semantic labels are attached to all nodes that correspond to names or descriptors. These labels reflect the entity type, such as person, organization or location, as well as whether the node is a proper name or a descriptor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "3) For relations between entities, where one entity is not a syntactic modifier of the other, the lowermost parse node that spans both entities is identified. A semantic tag is then added to that node denoting the relationship. For example, in the sentence \"Mary Fackler Schiavo is the inspector general of the U.S. Department of Transportation,\" a co-reference semantic label is added to the S node spanning the name, \"Mary Fackler Schiavo,\" and the descriptor, \"the inspector general of the U.S. Department of Transportation.\" 4) Nodes are inserted into the parse tree to distinguish the arguments to each relation. In cases where there is a relation between two entities, and one of the entities is a syntactic modifier of the other, the inserted node serves to indicate the relation as well as the argument. For example, in the phrase \"Lt. Cmdr. David Edwin Lewis,\" a node is inserted to indicate that \"Lt. Cmdr.\" is a descriptor for \"David Edwin Lewis.\" 5) Whenever a relation involves an entity that is not a direct descendant of that relation in the parse tree, semantic pointer labels are attached to all of the intermediate nodes. These labels serve to form a continuous chain between the relation and its argument. Figure 3 shows an augmented parse tree corresponding to the semantic annotation in Figure 2 . Note that nodes with semantic labels ending in \"-r\" are MUC reportable names and descriptors.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1225,
                        "end": 1233,
                        "text": "Figure 3",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 1308,
                        "end": 1316,
                        "text": "Figure 2",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Semantic/Syntactic Structure",
                "sec_num": null
            },
            {
                "text": "In SIFT's statistical model, augmented parse trees are generated according to a process similar to that described in Collins (1996 Collins ( , 1997 . For each constituent, the head is generated first, followed by the modifiers, which are generated from the head outward. Head words, along with their part-of-speech tags and features, are generated for each modifier as soon as the modifier is created. Word features are introduced primarily to help with unknown words, as in Weischedel et al. (1993) .",
                "cite_spans": [
                    {
                        "start": 117,
                        "end": 130,
                        "text": "Collins (1996",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 131,
                        "end": 147,
                        "text": "Collins ( , 1997",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 475,
                        "end": 499,
                        "text": "Weischedel et al. (1993)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "We illustrate the generation process by walking through a few of the steps of the parse shown in Figure 3 . At each step in the process, a choice is made from a statistical distribution, with the probability of each possible selection dependent on particular features of previously-generated elements. We pick up the derivation just after the topmost S and its head word, said, have been produced. The next steps are to generate in order:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 97,
                        "end": 105,
                        "text": "Figure 3",
                        "ref_id": "FIGREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "1. A head constituent for the S, in this case a VP.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "2. Pre-modifier constituents for the S. In this case, there is only one: a PER/NP. 8. Post-modifier constituents for the PER/NP. First a comma, then an SBAR structure, and then a second comma are each generated in turn.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "This generation process is continued until the entire tree has been produced. We now briefly summarize the probability structure of the model. The categories for head constituents, c h , are predicted based solely on the category of the parent node, c p :",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P c c h p ( | ) , e.g. P vp s ( | )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "Modifier constituent cateogries, c m , are predicted based on their parent node, c p , the head constituent of their parent node, c hp , the previously generated modifier, c m-1 , and the head word of their parent, w p . Separate probabilities are maintained for left (pre) and right (post) modifiers:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P_L(c_m | c_p, c_hp, c_{m-1}, w_p), e.g. P_L(per/np | s, vp, null, said), and P_R(c_m | c_p, c_hp, c_{m-1}, w_p), e.g. P_R(null | s, vp, null, said)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "Part-of-speech tags, t m , for modifiers are predicted based on the modifier, c m , the part-of-speech tag of the head word , t h , and the head word itself, w h : Head words, w m , for modifiers are predicted based on the modifier, c m , the part-of-speech tag of the modifier word , t m , the part-of-speech tag of the head word , t h , and the head word itself, w h : ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P(t_m | c_m, t_h, w_h), and P(w_m | c_m, t_m, t_h, w_h), e.g. P(nance | per/np, per/nnp, vbd, said)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "Finally, word features, f m , for modifiers are predicted based on the modifier, c m , the part-of-speech tag of the modifier word , t m , the part-of-speech tag of the head word , t h , the head word itself, w h , and whether or not the modifier head word, w m , is known or unknown.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P(f_m | c_m, t_m, t_h, w_h, known(w_m)), e.g. P(cap | per/np, per/nnp, vbd, said, true)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "The probability of a complete tree is the product of the probabilities of generating each element in the tree. If we generalize the tree components (constituent labels, words, tags, etc.) and treat them all as simply elements, e, and treat all the conditioning factors as the history, h, we can write: Maximum likelihood estimates for all model probabilities are obtained by observing frequencies in the training corpus. However, because these estimates are too sparse to be relied upon, they must be smoothed by mixing in lower-dimensional estimates. We determine the mixture weights using the Witten-Bell smoothing method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "For modifier constituents, the mixture components are: ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "P'(c_m | c_p, c_hp, c_{m-1}, w_p) = λ_1 P(c_m | c_p, c_hp, c_{m-1}, w_p) + (1 - λ_1) P(c_m | c_p, c_hp, c_{m-1}). The head-word and word-feature distributions are smoothed analogously, with weights λ_1 ... λ_4 over successively smaller conditioning contexts, e.g. P'(w_m | c_m, t_m, t_h, w_h, known(w_m)) = λ_1 P(w_m | c_m, t_m, t_h, w_h, known(w_m)) + λ_2 P(w_m | c_m, t_m, t_h, known(w_m)) + λ_3 P(w_m | c_m, t_m, known(w_m)) + λ_4 P(w_m | t_m, known(w_m))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
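            {
                "text": "A small sketch (not from the paper) of Witten-Bell interpolation as it is commonly formulated; the tuple histories and the particular back-off chain are illustrative assumptions rather than SIFT's exact parameterization.

from collections import defaultdict

class WittenBellEstimator:
    # Backs off from a history tuple to the same history with its last
    # element dropped, using weight lam(h) = n(h) / (n(h) + d(h)), where
    # n(h) is the count of history h and d(h) is the number of distinct
    # events observed after h.
    def __init__(self, backoff=None):
        self.counts = defaultdict(lambda: defaultdict(int))
        self.backoff = backoff

    def observe(self, history, event):
        self.counts[history][event] += 1
        if self.backoff is not None:
            self.backoff.observe(history[:-1], event)

    def prob(self, history, event):
        events = self.counts.get(history, {})
        n = sum(events.values())
        d = len(events)
        lower = self.backoff.prob(history[:-1], event) if self.backoff else 0.0
        if n == 0:
            return lower
        lam = n / (n + d)
        return lam * events.get(event, 0) / n + (1.0 - lam) * lower

# usage: a two-level mixture, e.g. modifier category given a full and a reduced context
reduced = WittenBellEstimator()
full = WittenBellEstimator(backoff=reduced)
full.observe(('s', 'vp', 'null', 'said'), 'per/np')
p = full.prob(('s', 'vp', 'null', 'said'), 'per/np')",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },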
            {
                "text": "Searching the Model Given a sentence to be analyzed, the search program must find the most likely semantic and syntactic interpretation. More concretely, it must find the most likely augmented parse tree. Although mathematically the model predicts tree elements in a top-down fashion, we search the space bottom-up using a chart based search. The search is kept tractable through a combination of CKY-style dynamic programming and pruning of low probability elements.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "Dynamic Programming: Whenever two or more constituents are equivalent relative to all possible later parsing decisions, we apply dynamic programming, keeping only the most likely constituent in the chart. Two constituents are considered equivalent if:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "1. They have identical category labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "2. Their head constituents have identical labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "3. They have the same head word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "4. Their leftmost modifiers have identical labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "5. Their rightmost modifiers have identical labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
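            {
                "text": "A minimal sketch (not from the paper) of the five-part equivalence signature used for dynamic programming; the constituent attribute names are assumed for illustration.

def dp_signature(c):
    # chart entries with the same signature are interchangeable with respect
    # to all later parsing decisions, so only the most likely one is kept
    return (
        c.label,                                          # 1. category label
        c.head.label,                                     # 2. head constituent label
        c.head_word,                                      # 3. head word
        c.left_mods[0].label if c.left_mods else None,    # 4. leftmost modifier label
        c.right_mods[0].label if c.right_mods else None,  # 5. rightmost modifier label
    )

def add_to_chart(chart, constituent):
    key = (constituent.span, dp_signature(constituent))
    best = chart.get(key)
    if best is None or constituent.logprob > best.logprob:
        chart[key] = constituent",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },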
            {
                "text": "Pruning: Given multiple constituents that cover identical spans in the chart, only those constituents with probabilities within a threshold of the highest scoring constituent are maintained; all others are pruned. For purposes of pruning, and only for purposes of pruning, the prior probability of each constituent category is multiplied by the generative probability of that constituent (Goodman, 1997) . We can think of this prior probability as an estimate of the probability of generating a subtree with the constituent category, starting at the topmost node. Thus, the scores used in pruning can be considered as the product of:",
                "cite_spans": [
                    {
                        "start": 388,
                        "end": 403,
                        "text": "(Goodman, 1997)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "1. The probability of generating a constituent of the specified category, starting at the topmost node.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "2. The probability of generating the structure beneath that constituent, having already generated a constituent of that category.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
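            {
                "text": "A sketch (not from the paper) of the pruning step; the prior table and the relative threshold are illustrative assumptions.

import math

def prune_cell(constituents, category_log_prior, log_threshold=math.log(1e-4)):
    # score = prior probability of the constituent category multiplied by the
    # generative probability of the structure beneath it; anything more than
    # a fixed factor below the best-scoring constituent in the cell is dropped
    def score(c):
        return category_log_prior[c.label] + c.logprob
    best = max(score(c) for c in constituents)
    return [c for c in constituents if score(c) >= best + log_threshold]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },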
            {
                "text": "The outcome of the search process is a tree structure that encodes both the syntactic and semantic structure of the sentence, so that the TE entities and local TR relations can be directly extracted from these sentential trees.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Statistical Model",
                "sec_num": null
            },
            {
                "text": "The cross-sentence model uses structural and contextual clues to hypothesize template relations between two elements that are not mentioned within the same sentence. Since 80-90% of the relations found in the answer keys connect two elements that are mentioned in the same sentence, the cross sentence model has a narrow target to shoot for. Very few of the pairs of entities seen in different sentences turn out to be actually related. This model uses features extracted from related pairs in training data to try to identify those cases.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": "It is a classifier model that considers all pairs of entities in a message whose types are compatible with a given relation; for example, a Person and an Organization would suggest a possible Employee_Of. For the three Muc-7 relations, it turned out to be somewhat advantageous to build in a functional constraint, so that the model would not consider, for example, a possible Employee_Of relation for a person already known from the sentence-level model to be employed elsewhere.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": "Given the measured features for a possible relation, the probability of a relation holding or not holding can be computed as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": ") ( ) ( ) | ( ) | ( feats p rel p rel feats p feats rel p = ) ( ) ( ) | ( ) | ( feats p rel p rel feats p feats rel p =",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": "If the ratio of those two probabilities, computed as follows, is greater than 1, the model predicts a relation: We approximate this ratio by assuming feature independence and taking the product of the contributions for each feature.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": "p(rel | feats) / p(¬rel | feats) = [p(rel) p(feats | rel)] / [p(¬rel) p(feats | ¬rel)] ≈ [p(rel) ∏_i p(feat_i | rel)] / [p(¬rel) ∏_i p(feat_i | ¬rel)]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
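            {
                "text": "A minimal sketch (not from the paper) of the independence-based ratio test above; the probability tables are placeholders to be filled in from training counts.

def predicts_relation(features, p_rel, p_feat_given_rel, p_feat_given_norel):
    # features: mapping from feature name to its observed value
    # p_rel: prior probability that a type-compatible pair is actually related
    # p_feat_given_rel / p_feat_given_norel: per-feature conditional tables
    ratio = p_rel / (1.0 - p_rel)
    for name, value in features.items():
        ratio *= p_feat_given_rel[name][value] / p_feat_given_norel[name][value]
    # a ratio greater than 1 means the relation is predicted to hold
    return ratio > 1.0",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },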
            {
                "text": "The cross-sentence feature model applies to entities found by the sentence-level model, which is run over all of the sentence-like portions of the text. An initial heuristic procedure checks for sections of the preamble or trailer that look like sentential material, that should be treated like the body text. There is also a separate handwritten procedure that searches the preamble text for any byline, and, if one is found, instantiates an appropriate employee relationship.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cross-Sentence Model",
                "sec_num": null
            },
            {
                "text": "Two classes of features were used in this model: structural features that reflect properties of the text surrounding references to the entities involved in the suggested relation, and content features based on the actual entities and relations encountered in the training data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Model Features",
                "sec_num": null
            },
            {
                "text": "The structural features exploit simple characteristics of the text surrounding references to the possiblyrelated entities. The most powerful structural feature, not surprisingly, was distance, reflecting the fact that related elements tend to be mentioned in close proximity, even when they are not mentioned in the same sentence. Given a pair of entity references in the text, the distance between them was quantized into one of three possible values:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Features",
                "sec_num": null
            },
            {
                "text": "The quantized distance values are: 0 = within the same sentence; 1 = in neighboring sentences; 2 = more remote than neighboring sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Features",
                "sec_num": null
            },
            {
                "text": "For each pair of possibly-related elements, the distance feature value was defined as the minimum distance between some reference in the text to the first element and some reference to the second.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Code",
                "sec_num": null
            },
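            {
                "text": "A small sketch (not from the paper) of the quantized distance feature; representing each entity by the sentence indices of its references is an assumed encoding.

def distance_feature(sentences_a, sentences_b):
    # minimum sentence distance over all pairs of references, quantized
    best = min(abs(i - j) for i in sentences_a for j in sentences_b)
    if best == 0:
        return 0  # within the same sentence
    if best == 1:
        return 1  # neighboring sentences
    return 2      # more remote than neighboring sentences",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Structural Features",
                "sec_num": null
            },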
            {
                "text": "A second structural feature grew out of the intuition that entities mentioned in the first sentence of an article often play a special topical role throughout the article. The \"Topic Sentence\" feature was defined to be true if some reference to one of the two entities involved in the suggested relation occurred in the first sentence of the text-field body of the article.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Code",
                "sec_num": null
            },
            {
                "text": "Other structural features that were considered but not implemented included the count of the number of references to each entity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Code",
                "sec_num": null
            },
            {
                "text": "While the structural features learn general facts about the patterns in which related references occur and the text that surrounds them, the content features learn about the actual names and descriptors of entities seen to be related in the training data. The three content features in current use test for a similar relationship in training by name or by descriptor or for a conflicting relationship in training by name.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },
            {
                "text": "The simplest content feature tests using names whether the entities in the proposed relationship have ever been seen before to be related. To test this feature, the model maintains a database of all the entities seen to be related in training, and of the names used to refer to them. The \"by name\" content feature is true if, for example, a person in some training message who shared at least one name string with the person in the proposed relationship was employed in that training message by an organization that shared at least one name string with the organization in the proposed relationship.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },
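            {
                "text": "A sketch (not from the paper) of the 'by name' test; modeling the training database as a set of (relation type, name, name) triples is an assumption about its structure.

def by_name_feature(rel_type, names_1, names_2, training_relations):
    # training_relations: set of (rel_type, arg1_name, arg2_name) triples
    # collected from entities seen to be related in training messages
    # true if some name of argument 1 was related, under the same relation
    # type, to some name of argument 2 in training
    return any(
        (rel_type, n1, n2) in training_relations
        for n1 in names_1
        for n2 in names_2
    )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },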
            {
                "text": "A somewhat weaker feature makes the same kind of test for a previously seen relationship using descriptor strings. This feature fires when an entity that shares a descriptor string with the first argument of the suggested relation was related in training to an entity that shares a name with the second argument. Since titles like \"General\" count as descriptor strings, one effect of this feature is to increase the likelihood of generals being employed by armies. Observing such examples, but noting that the training didn't include all the reasonable combinations of titles and organizations, the training for this feature was seeded by adding a virtual message constructed from a list of such titles and organizations, so that any reasonable such pair would turn up in training.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },
            {
                "text": "The third content feature was a kind of inverse of the first \"by name\" feature which was true if some entity sharing a name with the first argument of the proposed relation was related to an entity that did not share a name with the second argument. Using Employee_Of again as an example, it is less likely (though still possible) that a person who was known in another message to be employed by a different organization should be reported here as employed by the suggested one.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },
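            {
                "text": "The three content-feature tests above can be pictured as lookups against a database of training relations keyed by name and descriptor strings. The following Python fragment is a minimal sketch under that assumption, not the actual SIFT code; the database layout and the function names (related_by_name, related_by_descriptor, conflicting_by_name) are invented for illustration.

# Sketch of the three content-feature tests, assuming each training relation
# is stored as (rel_type, (names1, descs1), (names2, descs2)), where the inner
# entries are sets of name and descriptor strings seen for that entity.

def related_by_name(db, rel_type, names1, names2):
    # True if some training relation of this type linked an entity sharing a
    # name with arg1 to an entity sharing a name with arg2.
    return any(r == rel_type and (n1 & names1) and (n2 & names2)
               for r, (n1, _), (n2, _) in db)

def related_by_descriptor(db, rel_type, descs1, names2):
    # Weaker test: arg1 matches only through a shared descriptor string
    # (e.g. a title such as 'General'); arg2 still matches by name.
    return any(r == rel_type and (d1 & descs1) and (n2 & names2)
               for r, (_, d1), (n2, _) in db)

def conflicting_by_name(db, rel_type, names1, names2):
    # Inverse test: arg1 was related in training to some entity that does
    # NOT share a name with the proposed arg2.
    return any(r == rel_type and (n1 & names1) and not (n2 & names2)
               for r, (n1, _), (n2, _) in db)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Features",
                "sec_num": null
            },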
            {
                "text": "Given enough fully annotated data, with both sentence-level semantic annotation and message-level answer keys recorded along with the connections between them, training the features would be quite straightforward. For each possibly-related pair of entities mentioned in a document, one would just count up the 2x2 table showing how many of them exhibited the given structural feature and how many of them were actually related. The training issues that did arise stemmed from the limited supply of answer keys and that the keys were not connected to the sentence-level annotations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },
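            {
                "text": "Under the fully annotated scenario just described, training a binary structural feature reduces to counting that 2x2 table and normalizing. A minimal sketch follows; the function name and data layout are assumptions for illustration, not the SIFT implementation.

from collections import Counter

def train_structural_feature(examples):
    # examples: iterable of (feature_fired, actually_related) boolean pairs,
    # one per candidate entity pair in the training documents.
    table = Counter(examples)                 # the 2x2 contingency table
    def p_fire(related):
        fired = table[(True, related)]
        total = fired + table[(False, related)]
        return fired / total if total else 0.0
    # probability of the feature firing for related vs. non-related pairs
    return p_fire(True), p_fire(False)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },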
            {
                "text": "The government training and dry run data provided 200 messages' worth of TE and TR answer keys. Those answer keys, however, contained strings without recording where in the text they were found. In order to train structural features from that data, we needed the locations of references within the text. A heuristic string matching process was used to make that connection, with a special check to ensure for names that the shorter version of a name did not match a string in the text that also matched a longer version of the same name.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },
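            {
                "text": "The heuristic matching step might look roughly like the sketch below, which tries longer name variants first so that a shorter form of a name cannot claim a span that is part of a match for a longer form; the function name and the exact matching policy are assumptions for illustration.

import re

def locate_name_strings(text, name_variants):
    # Map each answer-key name variant to its character spans in the text,
    # making sure a shorter variant does not claim a span that is already
    # part of a match for a longer variant of the same name.
    claimed = []                              # spans already taken
    locations = {}
    for name in sorted(name_variants, key=len, reverse=True):
        spans = []
        for m in re.finditer(re.escape(name), text):
            span = (m.start(), m.end())
            if not any(s < span[1] and span[0] < e for s, e in claimed):
                spans.append(span)
                claimed.append(span)
        locations[name] = spans
    return locations

# locate_name_strings('John Smith met Smith there.', ['John Smith', 'Smith'])
# matches 'Smith' only at its second occurrence; the first lies inside the
# longer match for 'John Smith'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },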
            {
                "text": "Training the content features, on the other hand, did not require positional information about the references. The plain answer keys could be used in combination with a database of the name and descriptor strings for entities related in training to count up the feature probabilities for actually related and non-related pairs. The string database was collected first, and one-out training was then used, so that the rest of the training corpus provided the string database for training the feature counts on each particular message. The additional training data that was semantically annotated for training the sentence-level model but for which answer keys were not available could still also be used in building up the string database for the content features.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },
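            {
                "text": "The leave-one-out arrangement for the content features can be sketched as follows. The two function arguments, build_string_db and count_features, are hypothetical hooks standing in for the database construction and feature counting described above, and a real implementation would subtract one message from a global database rather than rebuild it each time.

from collections import Counter

def leave_one_out_counts(messages, build_string_db, count_features):
    # For each message, build the name/descriptor string database from all
    # of the OTHER training messages, then accumulate its feature counts.
    totals = Counter()
    for i, msg in enumerate(messages):
        db = build_string_db(messages[:i] + messages[i + 1:])
        totals.update(count_features(msg, db))
    return totals",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },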
            {
                "text": "The probabilities based on the final feature counts were smoothed by mixing them with 0.01% of a uniform model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },
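            {
                "text": "Concretely, this smoothing mixes each estimated probability with a uniform distribution at a weight of 0.01%, so that a zero count never yields a zero probability. A small sketch:

def smooth(p, n_outcomes, mix=0.0001):
    # Mix an estimated probability with 0.01% of a uniform model.
    return (1.0 - mix) * p + mix * (1.0 / n_outcomes)

# For a binary feature that never fired for related pairs:
# smooth(0.0, 2) -> 5e-05",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },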
            {
                "text": "When measured on 10 randomly-selected messages from the airplane crash training, the cross sentence model improved TR scores by 5 points. It proved a bit less effective on the 100 messages of the formal test set, improving scores there by only 2 points. (The F score on the formal test set with the cross-sentence model component disabled was 69.33%.)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Contribution of the Cross Sentence Model",
                "sec_num": null
            },
            {
                "text": "The SIFT system worked by first applying the sentence-level model to each sentence in the message and then extracting entities, descriptors, and relations from the resulting trees, heuristically merging TE elements, applying the cross-sentence model to identify non-local relations, and finally filtering and formatting TE and TR templates for output. The system's score on the TE task was 83% recall with 84% precision, for an F of 83.49%. Its score on TR was 64% recall with 81% precision, for an F of 71.23%.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "TE/TR Results",
                "sec_num": null
            },
            {
                "text": "For the Named Entity task, we used the IdentiFinder\u2122 trained named entity extraction system (Bikel, et. al., 1997) , which utilizes an HMM to recognize the entities present in the text.",
                "cite_spans": [
                    {
                        "start": 92,
                        "end": 114,
                        "text": "(Bikel, et. al., 1997)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "The HMM labels each word either with one of the desired classes (e.g., person, organization, etc.) or with the label NOT-A-NAME (to represent \"none of the desired classes\"). The states of the HMM fall into regions, one region for each desired class plus one for NOT-A-NAME. (See Figure 4 .) The HMM thus has a model of each desired class and of the other text. Note that the implementation is not confined to the seven name classes used in the NE task; the particular classes to be recognized can be easily changed via a parameter. Within each of the regions, we use a statistical bigram language model, and emit exactly one word upon entering each state. Therefore, the number of states in each of the name-class regions is equal to the vocabulary size, V . Additionally, there are two special states, the START-OF-SENTENCE and END-OF-SENTENCE states. In addition to generating the word, states may also generate features of that word. Features used in the MUC-7 version of the system include several features pertaining to numeric expressions, capitalization, and membership in lists of important words (e.g. known corporate designators).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 279,
                        "end": 287,
                        "text": "Figure 4",
                        "ref_id": "FIGREF8"
                    }
                ],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
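            {
                "text": "The word features mentioned above can be pictured as a simple deterministic classification of each token. The sketch below is illustrative only: the categories and the corporate-designator list are assumptions made for this example, not the actual IdentiFinder\u2122 feature set.

CORPORATE_DESIGNATORS = {'inc.', 'ltd.', 'corp.', 'co.'}

def word_feature(token):
    # Assign one word-feature class per token, in the spirit of the features
    # described above: numeric expressions, capitalization, and membership
    # in lists of important words.
    lower = token.lower()
    if lower in CORPORATE_DESIGNATORS:
        return 'corporate-designator'
    if token.isdigit():
        return 'four-digit-number' if len(token) == 4 else 'other-number'
    if any(ch.isdigit() for ch in token):
        return 'contains-digit'
    if token.isupper():
        return 'all-capitals'
    if token[:1].isupper():
        return 'initial-capital'
    return 'lowercase-or-other'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },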
            {
                "text": "The generation of words and name-classes proceeds in the following steps:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "1. Select a name-class NC, conditioning on the previous name-class and the previous word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "2. Generate the first word inside that name-class, conditioning on the current and previous name-classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "3. Generate all subsequent words inside the current name-class, where each subsequent word is conditioned on its immediate predecessor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "4. If not at the end of a sentence, go to 1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
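            {
                "text": "The sketch below scores one labeled word sequence under the probability factors implied by the steps above: a name-class transition conditioned on the previous class and word, a first-word emission conditioned on the current and previous classes, and a within-class bigram emission. The lookup functions p_class, p_first and p_bigram are hypothetical; word features and back-off are omitted, and adjacent tokens with the same label are treated as a single name, all simplifications of the real model.

import math

def sequence_log_prob(words, classes, p_class, p_first, p_bigram):
    # Score one (word, name-class) sequence under the generative story:
    # step 1 picks a name-class given the previous class and word, step 2
    # generates the first word of the class, step 3 generates each further
    # word from its predecessor within the class.
    logp = 0.0
    prev_word, prev_class = 'START-OF-SENTENCE', 'START-OF-SENTENCE'
    for word, cls in zip(words, classes):
        if cls != prev_class:                 # entering a new name-class
            logp += math.log(p_class(cls, prev_class, prev_word))
            logp += math.log(p_first(word, cls, prev_class))
        else:                                 # continuing inside the class
            logp += math.log(p_bigram(word, prev_word, cls))
        prev_word, prev_class = word, cls
    return logp",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },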
            {
                "text": "Whenever a person or organization name is recognized, the vocabulary of the system is dynamically updated to include possible aliases for that name. Using the Viterbi algorithm, we search the entire space of all possible name-class assignments, maximizing Pr(W,F,NC), the joint probability of words, features, and name classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "This model allows each type of \"name\" to have its own language, with separate bigram probabilities for generating its words. This reflects our intuition that:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "\u2022 There is generally predictive internal evidence regarding the class of a desired entity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "Consider the following evidence: Organization names tend to be stereotypical for airlines, utilities, law firms, insurance companies, other corporations, and government organizations. Organizations tend to select names to suggest the purpose or type of the organization. For person names, first person names are stereotypical in many cultures; in Chinese, family names are stereotypical. In Chinese and Japanese, special characters are used to transliterate foreign names. Monetary amounts typically include a unit term, e.g., Taiwan dollars, yen, German marks, etc. \u2022 Local evidence often suggests the boundaries and class of one of the desired expressions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "Titles signal beginnings of person names. Closed class words, such as determiners, pronouns, and prepositions often signal a boundary. Corporate designators (Inc., Ltd., Corp., etc.) often end a corporation name.",
                "cite_spans": [
                    {
                        "start": 157,
                        "end": 182,
                        "text": "(Inc., Ltd., Corp., etc.)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
            {
                "text": "While the number of word-states within each name-class is equal to V , this \"interior\" bigram language model is ergodic, i.e., there is a non-zero probability associated with every one of the V 2 transitions. As a parameterized, trained model, for transitions that were never observed, the model \"backs off\" to a lesspowerful model which allows for the possibility of unknown words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },
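            {
                "text": "A backed-off bigram estimate of the kind described here can be sketched as follows; the fall-back weights and the unknown-word treatment are illustrative assumptions, not the actual IdentiFinder\u2122 formula.

def class_bigram_prob(word, prev_word, bigram_counts, unigram_counts,
                      vocab_size, unk_mass=0.5):
    # When the (prev_word, word) transition was never observed in training,
    # fall back to a unigram estimate that itself reserves some probability
    # mass for unknown words.
    seen = bigram_counts.get((prev_word, word), 0)
    context = unigram_counts.get(prev_word, 0)
    if seen > 0 and context > 0:
        return seen / context
    total = sum(unigram_counts.values())
    unigram = unigram_counts.get(word, 0) / total if total else 0.0
    return (1.0 - unk_mass) * unigram + unk_mass / (vocab_size + 1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of the IdentiFinder\u2122 HMM Model",
                "sec_num": null
            },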
            {
                "text": "The model as used for the MUC-7 NE evaluation was trained on a total of approximately 790,000 words of NYT newswire data, annotated with approximately 65,500 named entities. In order to increase the size of our training set beyond the 90,000 words of training of airline crash documents provided by the Government, we selected additional training data from the North American News Text corpus. We annotated full articles before discovering a more effective annotation strategy. Since the test domain would be similar to the dry-run domain of air crashes, we used the University of Massachusetts INQUERY system to select 2000 articles which were similar to the 200 dry run training and test documents. About half of our training data consisted of full messages; this portion included the 200 messages provided by the Government as well as 319 messages from the 2000 retrieved by INQUERY. The second half of the data consisted of sample sentences selected from the remainder of the 2000 messages with the hope of increasing the variety of training data. This sampling strategy proved more effective than annotating full messages. Improvement in performance on the (dry run) airline crash test set is shown in Figure 6 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1207,
                        "end": 1215,
                        "text": "Figure 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Training",
                "sec_num": null
            },
            {
                "text": "Our F-measure for the official evaluation condition, 90.44, is shown as \"Text Baseline\" in Figure 5 . In addition to the baseline condition, we performed some unofficial experiments to measure the accuracy of the system under more difficult conditions. Specifically, we evaluated the system on the test data modified to remove all case information (\"Upper Case\" in Figure 5 ), and also on the test data in SNOR (Speech Normalized Orthographic Representation) format (\"SNOR\" in Figure 5 ). By converting the text to all upper case characters, information useful for recognizing names in English is removed. Automatically transcribed speech, even with no recognition errors, is harder due to the lack of punctuation, spelling numbers out as words, and upper case in SNOR format.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 91,
                        "end": 99,
                        "text": "Figure 5",
                        "ref_id": null
                    },
                    {
                        "start": 365,
                        "end": 373,
                        "text": "Figure 5",
                        "ref_id": null
                    },
                    {
                        "start": 477,
                        "end": 485,
                        "text": "Figure 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "NE Results",
                "sec_num": null
            },
            {
                "text": "The degradation in performance from mixed case to all upper case is somewhat greater than that previously observed in similar tests run on generic newswire data (about 2 points). One possible explanation is that case information is more useful in instances where the test domain is different than the domain of the training set. The degradation from all upper case to SNOR is similar to that previously observed. We also measured the effect of the training set size on the performance of the system in the air crash domain of the dry run. As is to be expected, increasing the amount of training data results in improved system performance. Figure 6 shows an almost two point increase in F-measure as the training set size was doubled from 91,000 words to 176,000 words. However, the next doubling of the number of words in the training set only resulted in a one point increase in F-measure. This is most likely due to the fact that as training set size increases, the likelihood of seeing a unique name or construction decreases. Though performance might not have peaked, adding more training data will have a progressively smaller effect since the system will not be seeing many constructions which it has not already seen in previous training. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 640,
                        "end": 648,
                        "text": "Figure 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "NE Results",
                "sec_num": null
            },
            {
                "text": "BBN's Identifinder\u2122 HMM-based approach to named entity recognition did well overall, and it scored 94% on the NE walkthrough article. Of the 7 errors, some can be related directly to choices made in marking our training data. For example, two cases were TV network names, which our annotators typically marked in training as organizations, but which the answer keys did not mark as such in the context where they occurred in the walkthrough article. One error can be attributed to the bigram nature of the current HMM model; while the phrase \"Thursday morning\" is to be tagged as a date and time, \"early Thursday Here the decoder correctly identified both the artifact descriptors \"A Chinese rocket\" and \"an Intelsat satellite\", but the output filter chose not to include them. That choice was made because of frequent cases where an indefinite artifact descriptor not linked to any named artifact should not be output; an example is \"the last rocket I'd recommend\" in paragraph 16. But this example shows that this decision not to output such cases cost us some points.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NE Walkthrough",
                "sec_num": null
            },
            {
                "text": "BBN's results on the three tasks are summarized in the following These results, close to those of the best system, demonstrate the power of trained systems for information extraction.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "The NE result demonstrates the robustness of IdentiFinder\u2122, the learning algorithm used for NE, to an unknown but similar domain. Further tests also showed its robustness to all upper case input, and input with no punctuation. Our future plans for IdentiFinder\u2122 include \u2022 evaluation in the broadcast news domain, which requires speech input in a much broader domain,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "\u2022 applying IdentiFinder\u2122 to unsegmented languages, and",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "\u2022 working on performance improvements and improvements in the training process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "The SIFT model, used for TE and TR, employs the Penn Treebank for syntactic information, and thus requires for its training data only the semantic annotation of entities, descriptors, and relationships. Its sentence-level model determines parts of speech, parses, finds names, and identifies semantic relationships in a single, integrated process, with a separate merging model then used to connect information between sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            },
            {
                "text": "Time was a limiting factor in SIFT's performance, since the decision to field an integrated, fully-trained model was made only in late January, so that SIFT first existed in end-to-end form only as of March 11. That left little time for experiments, or for addressing all issues, such as the handling of nationalities and unnamed TE entities. Given the early stage of development of the SIFT system, we believe that significant performance improvements are still possible. We are also interested in measuring performance as a function of training set size, and in applying SIFT to the broadcast news domain.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUSIONS",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The work reported here was supported in part by the Defense Advanced Research Projects Agency. Technical agents for part of this work were Fort Huachucha and AFRL under contract numbers DABT63-94-C-0062, F30602-97-C-0096, and 4132-BBN-001. The views and conclusions contained in this document are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of the Defense Advanced Research Projects Agency or the United States Government.We appreciate the contributions of the Annotation Group at BBN: Ann Albrect, Elizabeth Arentzen, Rachel Bers, Ada Brunstein, Georgina Garcia, Maia Mesnil, and Hugh Walsh.We thank Michael Collins of the University of Pennsylvania for his valuable suggestions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "ACKNOWLEDGEMENTS",
                "sec_num": null
            },
            {
                "text": "In an integrated system of the sort we used for the TE and TR tasks, the main determinant of performance is the sentence-level model, and the semantic structures that it produces. Secondary but still significant effects on performance come from the post-processing steps that derive TE and TR output from the sentence-level decoder tree:\u2022 Extracting elements and relations\u2022 Merging TE elements\u2022 Searching for additional relations with the cross-sentence model\u2022 Filtering candidate entities and relations for output This section will follow through selected portions of the walkthrough message, giving examples of the different effects that applied.Example 1 from paragraph 16 shows a case where everything worked as planned.Example 1: From Walkthrough Article, Paragraph 16Here the decoder correctly recognized a person name (PER/NPA) bound to a person descriptor (PER-DESC/NP-R). That descriptor contains an organization (ORG/NP) which in turn is linked to a location. The LINK and PTR nodes connect the descriptor with the person, the organization with the person descriptor (and thus indirectly with the person), and the location with the organization. In the post-processing, the person name is extracted, with the descriptor text is linked to it, the organization name is extracted, and the employment relationship noted. The organization is also linked to the nested location; of the two location elements in the LOC phrase, the first is taken as the LOCALE field filler, while the second is looked up in the gazetteer to identify a country in which the locale value is then looked up.Example 2 from the last paragraph of the message shows the effect of a decoder error. Here the sentence-level decoder linked both organization descriptors back to the top-level named organization, while the correct reading would have attached the second descriptor to the nested \"Bloomberg L.P.\". The post-processing also therefore links both descriptor phrases to \"Bloomberg Information Television\" internally. Only the longest descriptor, however, is actually output, which in this case results in output of only the mistaken value.Not surprisingly, a number of the decoder errors that affected output stemmed from conjunctions. In paragraph 19, for example, the manufacturer organization name \"Lockheed Space and Strategic Missiles\" was incorrectly broken at the conjunction, causing the location relation with Bethesda to be missed. The cross sentence model is the system component that tries to find further relations beyond those identified by the sentence-level model. In the walk-through article, that component did not happen to succeed in finding any such relations. Example 3 shows the sort of relation that we would like that model to be able to get. There the sentence-level decoder did link Rubenstein to the organization descriptor \"company\", but since that descriptor was never linked to \"News Corporation\", the employee relation was missed. However, since News Corporation is mentioned both in that sentence and the following sentence, an improved cross sentence model would be one way of attacking such examples. The last step in processing is the output filter, which heuristically determines whether a proposed constituent should be included in the output. 
Example 4 from paragraph 1 shows two examples where this filter overrode correct decoder structure.(S (ART-DESC/NP-R (ART-DESC/NPA (DT A) (JJ Chinese) (NN rocket)) (ART-PTR/VP (VBG carrying) (ART-DESC/NPA-R (DT an) (ORG/NPP (NNP Intelsat)) (NN satellite)))) (VP (VBD exploded) ...",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "TE and TR Walkthrough",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "NYMBLE: A High-Performance Leraning Name-finder",
                "authors": [
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Bikel",
                        "suffix": ""
                    },
                    {
                        "first": ";",
                        "middle": [
                            "S"
                        ],
                        "last": "Miller",
                        "suffix": ""
                    },
                    {
                        "first": "; R",
                        "middle": [],
                        "last": "Schwartz",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Weischedel",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "194--201",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bikel, Dan; S. Miller; R. Schwartz; and R. Weischedel. (1997) \"NYMBLE: A High-Performance Leraning Name-finder.\" In Proceedings of the Fifth Conference on Applied Natural Language Processing , Association for Computational Linguistics, pp. 194-201.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "A New Statistical Parser Based on Bigram Lexical Dependencies",
                "authors": [
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "184--191",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Collins, Michael. (1996) \"A New Statistical Parser Based on Bigram Lexical Dependencies.\" In Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics , pp. 184-191.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Three Generative, Lexicalised Models for Statistical Parsing",
                "authors": [
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "16--23",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Collins, Michael. (1997) \"Three Generative, Lexicalised Models for Statistical Parsing.\" In Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics , pp. 16-23.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Building a Large Annotated Corpus of English: the Penn Treebank",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "; B"
                        ],
                        "last": "Marcus",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Santorini",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Marcinkiewicz",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "2",
                "pages": "313--330",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Marcus, M.; B. Santorini; and M. Marcinkiewicz. (1993) \"Building a Large Annotated Corpus of English: the Penn Treebank.\" Computational Linguistics, 19(2):313-330.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Global Thresholding and Multiple-Pass Parsing",
                "authors": [
                    {
                        "first": "Joshua",
                        "middle": [],
                        "last": "Goodman",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the Second Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "11--25",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Goodman, Joshua. (1997) \"Global Thresholding and Multiple-Pass Parsing.\" In Proceedings of the Second Conference on Empirical Methods in Natural Language Processing , Association for Computational Linguistics, pp. 11-25.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Coping with Ambiguity and Unknown Words through Probabilistic Models",
                "authors": [
                    {
                        "first": "Richard",
                        "middle": [],
                        "last": "Schwartz; Lance Ramshaw;",
                        "suffix": ""
                    },
                    {
                        "first": "Jeff",
                        "middle": [],
                        "last": "Palmucci",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Computational Linguistics",
                "volume": "19",
                "issue": "2",
                "pages": "359--382",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Richard Schwartz; Lance Ramshaw; and Jeff Palmucci. (1993) \"Coping with Ambiguity and Unknown Words through Probabilistic Models.\" Computational Linguistics, 19(2):359-382.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Block diagram of sentence-level model.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF1": {
                "text": "An example of semantic annotation.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF2": {
                "text": "3. A head part-of-speech tag for the PER/NP, in this case PER/NNP.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF3": {
                "text": "4.A head word for the PER/NP, in this case nance.5. Word features for the head word of the PER/NP, in this case capitalized.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF4": {
                "text": "6. A head constituent for the PER/NP, in this case a PER-R/NP. 7. Pre-modifier constituents for the PER/NP. In this case, there are none.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF5": {
                "text": "An augmented parse tree.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF7": {
                "text": "For part-of-speech tags, the mixture components are:",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF8": {
                "text": "Pictorial representation of conceptual model",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF9": {
                "text": "Figure 5: IdentiFinder\u2122 Named Entity Results",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF10": {
                "text": "Figure 6: F-Measure Increases With Size of Training Set",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "TABREF4": {
                "content": "<table><tr><td>Task</td><td>Recall</td><td>Precision</td><td>F-Score</td></tr><tr><td>NE</td><td>89%</td><td>92%</td><td>90.44%</td></tr><tr><td>TE</td><td>83%</td><td>84%</td><td>83.49%</td></tr><tr><td>TR</td><td>64%</td><td>81%</td><td>71.23%</td></tr></table>",
                "num": null,
                "text": "",
                "html": null,
                "type_str": "table"
            }
        }
    }
}