{
    "paper_id": "M92-1030",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T03:13:04.202136Z"
    },
    "title": "MITRE-Bedford: Description of the ALEMBIC System as Used for MUC-4",
    "authors": [
        {
            "first": "John",
            "middle": [],
            "last": "Aberdeen",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "John",
            "middle": [],
            "last": "Burger",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Dennis",
            "middle": [],
            "last": "Connolly",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Susan",
            "middle": [],
            "last": "Roberts",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Marc",
            "middle": [],
            "last": "Vilain",
            "suffix": "",
            "affiliation": {},
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "The ALEMBIC text understanding system fielded at MUC-4 by MITRE-Bedford is primarily based on natural language techniques. ALEMBIC is a research prototype that is intended to explore several major areas of investigation: \u2022 Error recovery, involving primarily issues of semi-parsing and recovery of plausible attachments. \u2022 Robustness, involving primarily issues of uncertain reasoning and tractable inference. \u2022 Self-extensibility, focusing primarily on machine learning of natural language and user-configurable semantics. \u2022 System integration, through SGML (the Standard Generalized Markup Language), both at the level of meaning analysis and at the overall application level. This investigation is part of an internally-funded research initiative towards processing open source texts (i.e., free natural language texts drawn from broadcast transcripts, news wires, etc.). This initiative has been underway for just over half a year, prior to which our group was focusing nearly exclusively on natural language interfaces to expert systems. We are thus newcomers to the MUC data extraction task, and our system is still very much in early phases of development. The system details we present here should thus be taken as preliminary.",
    "pdf_parse": {
        "paper_id": "M92-1030",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "The ALEMBIC text understanding system fielded at MUC-4 by MITRE-Bedford is primarily based on natural language techniques. ALEMBIC is a research prototype that is intended to explore several major areas of investigation: \u2022 Error recovery, involving primarily issues of semi-parsing and recovery of plausible attachments. \u2022 Robustness, involving primarily issues of uncertain reasoning and tractable inference. \u2022 Self-extensibility, focusing primarily on machine learning of natural language and user-configurable semantics. \u2022 System integration, through SGML (the Standard Generalized Markup Language), both at the level of meaning analysis and at the overall application level. This investigation is part of an internally-funded research initiative towards processing open source texts (i.e., free natural language texts drawn from broadcast transcripts, news wires, etc.). This initiative has been underway for just over half a year, prior to which our group was focusing nearly exclusively on natural language interfaces to expert systems. We are thus newcomers to the MUC data extraction task, and our system is still very much in early phases of development. The system details we present here should thus be taken as preliminary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The system's underlying architecture, shown in Figure 1, follows a task breakdown used in several other systems that have recently participated in MUC (e.g., PLUM [10] or NLTOOLSET [4]). Processing occurs in three distinct phases: preprocessing, natural language analysis, and application-specific output generation. One of the ways ALEMBIC differs from other MUC systems, however, is in exploiting SGML as the interchange lingua franca between these three processing phases. The intention is to allow system modules whose invocation occurs early in the analysis of a document to record processing results directly in the document through SGML markup. This information then becomes available to subsequent modules as meta-data.",
                "cite_spans": [
                    {
                        "start": 169,
                        "end": 173,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 187,
                        "end": 190,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 49,
                        "end": 57,
                        "text": "Figure 1",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "OVERALL ARCHITECTURE",
                "sec_num": null
            },
            {
                "text": "As a result of this SGML-based architecture, the system's overall flow of control is governed from an object-oriented document manager built on top of a Common Lisp port of Goldfarb's public domain SGML parser. For MUC-4, the pre-processing phase thus takes an FBIS message file and normalizes it by recoding it in SGML. The document manager then builds an internal document object by parsing the resulting SGML. The actual content (footnote 1: alembic 1: an alchemical apparatus used for distillation; 2: something that refines or transmutes as if by distillation.)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "OVERALL ARCHITECTURE",
                "sec_num": null
            },
            {
                "text": "analysis of the document is performed by invoking the natural language analysis modules on the internal document object, and the results of these analyses are stored as attributes of the document. The system's output is normally just another SGML file, in which the content analysis is simply encoded as additional (semantic) markup. For MUC-4, we also provided selective output that consisted solely of filled templates.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NL parser & interpreter",
                "sec_num": null
            },
            {
                "text": "As an example of this overall flow of control, and its corresponding encoding in SGML, consider the first paragraph of message TST2-MUC4-0048:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NL parser & interpreter",
                "sec_num": null
            },
            {
                "text": "The SGML normalization of this paragraph produced by the pre-processor begins as follows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING OF ATTORNEY GENERAL ROBERTO GARCIA ALVARADO AND ACCUSED THE FARABUNDO MARTI NATIONAL LIBERATION FRONT (FMLN) OF THE CRIME,",
                "sec_num": null
            },
            {
                "text": "<p><s>SALVADORAN PRESIDENT<punct loc=\"midword\" type=\"hyphen\"> -</punct>ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING ...",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING OF ATTORNEY GENERAL ROBERTO GARCIA ALVARADO AND ACCUSED THE FARABUNDO MARTI NATIONAL LIBERATION FRONT (FMLN) OF THE CRIME,",
                "sec_num": null
            },
            {
                "text": "The p and s tags stand respectively for paragraph and sentence delimiters, and the punct tag encodes normalized punctuation. In SGML parlance, the text bracketed by the <punct ...> and </punct> delimiters is a punct element, and the equated terms in the punct tag are attributes of the overall element. For other details on SGML, see, e.g., [8].",
                "cite_spans": [
                    {
                        "start": 345,
                        "end": 348,
                        "text": "[8]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING OF ATTORNEY GENERAL ROBERTO GARCIA ALVARADO AND ACCUSED THE FARABUNDO MARTI NATIONAL LIBERATION FRONT (FMLN) OF THE CRIME,",
                "sec_num": null
            },
            {
                "text": "Turning to the natural language phase, the structural markup for sentences, paragraphs, and quotes is exploited straightforwardly to dispatch text chunks to the linguistic parser. More interestingly, punctuation markup can also appear as part of the actual definitions of lexical items, e.g., the possessive marker ('s) or hyphenated words. The lexicon entry for the title modifier -elect, for example, is the sequence (*mw-hyphen* elect), in which *mw-hyphen* matches any SGML punct element with loc and type attributes respectively set to midword and hyphen.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING OF ATTORNEY GENERAL ROBERTO GARCIA ALVARADO AND ACCUSED THE FARABUNDO MARTI NATIONAL LIBERATION FRONT (FMLN) OF THE CRIME,",
                "sec_num": null
            },
            {
                "text": "As mentioned, when the natural language phase has been completed, ALEMBIC records its analysis of the document as further annotation. In the case of the MUC-4 version of the system, this markup simply encodes the templates that the system has produced, e.g., <p><template> <slotname>0. MESSAGE: ID</slotname> <slotval>TST2-MUC4-0048</slotval> <slotname>1. MESSAGE: TEMPLATE</slotname> <slotval>1</slotval> </template> <s>SALVADORAN PRESIDENT<punct loc=\"midword\" type=\"hyphen\"> -</punct>ELECT ...",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI CONDEMNED THE TERRORIST KILLING OF ATTORNEY GENERAL ROBERTO GARCIA ALVARADO AND ACCUSED THE FARABUNDO MARTI NATIONAL LIBERATION FRONT (FMLN) OF THE CRIME,",
                "sec_num": null
            },
            {
                "text": "As we alluded to above, this phase of processing is intended to normalize aspects of a document that are simply too messy to be dealt with during linguistic parsing. In the version of ALEMBIC used in MUC-4, this includes document structure, especially header structure, punctuation, and numeral strings. By handling the document structure in this preliminary phase, we gain all the usual advantages of separating tokenization from parsing, and additionally can introduce special-purpose error recovery strategies. These strategies address such problems as missing quote marks, missing periods at the end of paragraphs, and so forth. One advantage of using SGML is that it actually simplifies implementing these error recovery strategies. SGML allows the preprocessor to omit issuing many structural tags, in particular some that are keyed directly off of punctuation. The document manager treats the missing markup as implicit, and fills it in from a document grammar instead.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "INDIVIDUAL PROCESSING MODULES Pre-processing phase",
                "sec_num": null
            },
            {
                "text": "A further motivation for using SGML is that it readily allows us to extend the scope of pre-processing through incremental addition of further modules. Among the modules that we have considered adding to the pre-processor are an SGML-native part-of-speech tagger, and a coarse subject identifier (based on Amsler's FORCE4 algorithm). Both of these have been implemented by our colleagues in MITRE Washington.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "INDIVIDUAL PROCESSING MODULES Pre-processing phase",
                "sec_num": null
            },
            {
                "text": "The document manager provides an object-oriented framework for working with SGML documents. The manager is entirely CLOS-based, and SGML elements are thus made available as instances of CLOS objects. A sentence element (corresponding to the string bracketed by matching <s> and </s> tags) is mapped into an instance of the S object, and any S-specific code (e.g., the linguistic parser) is thus made applicable to the element.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Manager",
                "sec_num": null
            },
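            {
                "text": "To make this element-to-class dispatch concrete, the following sketch shows one way markup element types can be mapped to classes so that type-specific code (such as a sentence-level parser) applies automatically. It is a minimal illustration in Python rather than the system's Common Lisp/CLOS, and the class and tag names are hypothetical: \n\nimport xml.etree.ElementTree as ET\n\nclass DocElement:\n    # Generic element: structural markup that needs no linguistic analysis.\n    def __init__(self, node):\n        self.node = node\n    def process(self):\n        pass\n\nclass Sentence(DocElement):\n    # Sentence elements get sentence-specific processing, e.g. the linguistic parser.\n    def process(self):\n        print('parsing sentence:', ''.join(self.node.itertext()))\n\nTAG_TO_CLASS = {'s': Sentence, 'p': DocElement}\n\ndef build_document(markup):\n    # Map each element of the parsed document onto an instance of its class.\n    root = ET.fromstring(markup)\n    return [TAG_TO_CLASS.get(node.tag, DocElement)(node) for node in root.iter()]\n\nfor element in build_document('<doc><p><s>SALVADORAN PRESIDENT-ELECT ALFREDO CRISTIANI ...</s></p></doc>'):\n    element.process()",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Manager",
                "sec_num": null
            },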
            {
                "text": "As mentioned, the document manager is built around a public domain SGML parser/tokenizer written by Goldfarb, the godfather of SGML. The parser consists of C language routines that were made available through the Consortium for Lexical Research. On the Lisp side, there are several ways to use the parser. At the lowest level, one can simply get token types and character strings out of an SGML document. At the highest level, one can get a stream of CLOS objects representing SGML tokens. The parser takes care of canonicalizing a document by, e.g., inserting any tags left implicit by the preprocessor, or filling in the default values of attributes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Document Manager",
                "sec_num": null
            },
            {
                "text": "The design of our lexicon is hierarchical, with specific lexical classes (e.g., auxiliary verbs or mono-transitive verbs) being subsumed in the hierarchy by more abstract lexical classes (e.g., verbs in general). This allows for significant sharing of lexical information between lexical classes, and reduces the complexity of the lexicon.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
            {
                "text": "Lexicon entries correspond to base stems, which are automatically expanded into the lexical forms that are actually used by the system. Our syntactic framework closely follows Steedman's combinatory categorial grammars (CCGs), and as a result the expansion of a stem occurs along two dimensions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
            {
                "text": "\u2022 Lexical word forms, that is, the surface forms of the stem. For count nouns this is just the singular and plural form of the noun; for main verbs, this includes the usual participial forms, tensed forms, and infinitive, as well as adjectival forms, and various nominalizations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
            {
                "text": "\u2022 Lexical category types, that is, the underlying syntactic categories that are sanctioned by a given word form. In the case of a mono-transitive verb's past tense/participle (e.g., \"condemned,\" the first verb in TST2-MUC4-0048), this includes the active voice (e.g., \"Cristiani ... condemned the terrorist killing\"), the passive voice, and ancillary forms such as the detransitivized active voice and verbal adjectives.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
            {
                "text": "In our variant of CCGs, lexical categories are treated either as complex vectors of features, or as mathematical functions over such vectors. For example, stripping away all syntactic and semantic features, the syntactic category corresponding to a transitive verb is the function S\\NP/NP, i.e., a function that will produce an S given an object NP on its right (the /NP term) and a subject NP on its left (the \\NP term). To accommodate syntactic and semantic features, categories are actually implemented in a standard unification framework (as in [11]). Some features can themselves be category-valued, and so the overall category structure is actually a reentrant graph that can become fairly involved, as attested to by a partial expansion of \"condemned\" in the active voice. This encoding is based on Pareschi and Steedman's category notation [6], wherein the res, arg, and dir features are used to encode a syntactic function's result, argument, and direction of application. To reduce the complexity of defining tens of thousands of such lexical entries, we associate to each category type (such as the active voice of a transitive verb) a lexical skeleton, that is, a partial lexical entry that is fleshed out during the expansion of a stem. The fleshing out of skeletons does not actually occur until run time, when a word form is actually found in a document. Since category data structures are fairly substantial, this yields tremendous memory savings.",
                "cite_spans": [
                    {
                        "start": 553,
                        "end": 557,
                        "text": "[11]",
                        "ref_id": null
                    },
                    {
                        "start": 856,
                        "end": 859,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
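            {
                "text": "The function-application behaviour of such a category can be illustrated with a minimal sketch. The Python below assumes simple atomic categories and ignores the feature structures and unification machinery described above; it illustrates CCG application in general, not the system's CLOS implementation: \n\nfrom dataclasses import dataclass\n\n@dataclass(frozen=True)\nclass Func:\n    res: object   # result category\n    arg: object   # argument category\n    dir: str      # 'right' = argument expected on the right, 'left' = on the left\n\nNP, S = 'NP', 'S'\n\n# The transitive-verb category S\\NP/NP: consume an object NP on the right,\n# yielding S\\NP, which then consumes a subject NP on the left.\nCONDEMNED = Func(res=Func(res=S, arg=NP, dir='left'), arg=NP, dir='right')\n\ndef apply_right(fn, right):\n    # Forward application: X/Y  Y  =>  X\n    return fn.res if isinstance(fn, Func) and fn.dir == 'right' and fn.arg == right else None\n\ndef apply_left(left, fn):\n    # Backward application: Y  X\\Y  =>  X\n    return fn.res if isinstance(fn, Func) and fn.dir == 'left' and fn.arg == left else None\n\nvp = apply_right(CONDEMNED, NP)   # 'condemned' + object NP\nsent = apply_left(NP, vp)         # subject NP + verb phrase\nprint(sent)                       # S",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },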
            {
                "text": "The lexicon was populated in part by adapting knowledge bases provided to us by Richard Tong of ADS.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic Lexicon",
                "sec_num": null
            },
            {
                "text": "For MUC-4, we used a number of strategies for handling open classes of proper names. For geographical names, we relied primarily on a listing of such names that had been compiled by previous MUC participants, and which was forwarded to us by ADS. As a backup, we also encoded a small grammar of Spanish geographical names-for example, \"san\" has a reading as a functor that produces geographical names given a personal name on its right.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Geographical Names, Personal Names, and Unknown Words",
                "sec_num": null
            },
            {
                "text": "For personal names, we relied primarily on a cross-cultural database of 15,000 names obtained from various public domain databases. Most of these are first names, with only about 2,000 family names covered by the database. In order to fill inevitable gaps in the database, we allowed unknown words to be given, among others, a definition as a personal proper name. Separately, we provided a grammatical type-shifting rule that turns personal names into functors that combine with any personal name on their right. In non-CCG terms, this amounts to a grammar rule of form:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Geographical Names, Personal Names, and Unknown Words",
                "sec_num": null
            },
            {
                "text": "All the names in TST2-MUC4-0048 turned out to be in our database, in part because we had already extended it with a list of VIP names provided by ADS.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERS-NAME -> PERS-NAME PERS-NAME",
                "sec_num": null
            },
            {
                "text": "We chose to use categorial grammars in ALEMBIC for a number of reasons. First and foremost, we expected our syntactic coverage to be necessarily incomplete, and knew that we would have to rely heavily on bottom-up parsing. In this light, categorial grammars are effectively the ur-bottom-up linguistic framework, as one cannot meaningfully speak of top-down parsing in this framework. We also wanted a framework that was strongly lexically governed, as in CCGs, in order to reduce the parsing search space. Finally, in anticipation of eventually wanting to provide fairly comprehensive coverage of syntax, we chose one of the recent mildly context-sensitive frameworks, in the hope that we could exploit the linguistic strengths of the framework at some future point.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CCG Parser",
                "sec_num": null
            },
            {
                "text": "Our current CCG parser is based upon Pareschi and Steedman's algorithm for left-to-right parsing of combinatory categorial grammars [6]. Their approach is nominally only intended to produce full chart-based parses. Because we anticipated our syntactic coverage to be incomplete, we extended the basic algorithm into a heuristic semi-parser. The semi-parser heuristics are used to provide a spanning segmentation of the input string.",
                "cite_spans": [
                    {
                        "start": 134,
                        "end": 137,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CCG Parser",
                "sec_num": null
            },
            {
                "text": "In addition, we extended the CCG framework per se in order to cover optional and displaced arguments, which are typically weaknesses of traditional categorial grammar frameworks. The approach we've taken involves introducing a pair of features for each optional argument, one feature to encode the type of argument that is expected, and the second to encode the disposition of the argument's semantics. For instance, consider the passive voice of a transitive verb-kill is a canonical example. A partial encoding would be as follows: The by-pp-prep feature indicates that the category can be modified by a PP headed by by; the by-pp-obj feature indicates that the embedded semantics of the PP is then indirectly unified with the semantic agent of the sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CCG Parser",
                "sec_num": null
            },
            {
                "text": "At the time we fielded our system for MUC-4, both our syntactic coverage and semi-parsing heuristics were still very preliminary, and our overall parses were thus extremely fragmentary. For example, the first sentence in TST2-MUC4-0048 ended up being bracketed roughly as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CCG Parser",
                "sec_num": null
            },
            {
                "text": "This particular bracketing illustrates several early shortcomings of our grammar, many of which have been addressed in the months since the MUC-4 evaluation. First, the MUC-4 version of the title sub-grammar was weak: title modifiers such as -elect or general were simply absent from the grammar. Second, prepositional phrase coverage was incomplete: PP's that appeared as optional arguments of categories would parse, but those that should be treated as modifiers failed to do so. In addition, many verbs simply lacked the appropriate subcategorization frame for PP arguments, as in this case with accused. Finally, as with many semi-parsers, ALEMBIC currently punts on coordination.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "[SALVADORAN PRESIDENT] [-] [ELECT] [[ALFREDO CRISTIANI] CONDEMNED [THE TERRORIST KILLING OF ATTORNEY]] [GENERAL ROBERTO GARCIA ALVARADO] [AND] [ACCUSED [THE FARABUNDO MARTI NATIONAL LIBERATION FRONT]] [(] [FMLN] [)] [OF] [THE CRIME] [.]",
                "sec_num": null
            },
            {
                "text": "As might be gleaned from the category definitions given above, ALEMBIC produces semantic interpretations concurrently with parsing. The meaning representation language that we use is directly descended from our earlier work on the King Kong interface [2], whose underlying approach is similar to that in the core language engine [1]. Meaning representations are given at the so-called interpretation level, where quantifiers are not scoped with respect to each other, but are simply left \"in place,\" i.e., attached to their associated noun phrases. For example, the interpretation of the fragment \"the terrorist killing\" in message TST2-MUC4-0048 is:",
                "cite_spans": [
                    {
                        "start": 253,
                        "end": 256,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 332,
                        "end": 335,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Semantic Interpretations",
                "sec_num": null
            },
            {
                "text": "The approach we have taken towards reference resolution [3] attempts to integrate several disparate approaches towards the problem. The literature on reference resolution identifies a number of sources of linguistic evidence that can be applied towards resolving anaphoric references, but few attempts have been made at combining these evidence sources in a principled way (for an exception, see [5]). The approach embodied in our system attempts to perform the integration by exploiting a Bayesian belief network.",
                "cite_spans": [
                    {
                        "start": 56,
                        "end": 59,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 400,
                        "end": 403,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reference Resolution",
                "sec_num": null
            },
            {
                "text": "The network combines a number of evidence sources that bear upon whether an anaphor (either a definite reference or a pronoun) can be resolved to a particular candidate referent. Because of the fragmentary nature of our parses, the reference resolution network only considered non-grammatical features of the anaphor and candidate. In particular, these included: We experimented with a number of such networks prior to the MUC-4 evaluation run, including hand-built networks and networks derived by machine learning algorithms. We ended up selecting a simple flat network in which all evidence sources were directly allowed to bear upon the root node (which stood for coreference of the anaphor and candidate).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reference Resolution",
                "sec_num": null
            },
            {
                "text": "To apply the network, our system first collects a set of mentions from the parsed document: these amount roughly to noun phrases and to event verbs with any arguments that might have been attached by the parser. Anaphoric mentions are then compared to mentions preceding them in the document. The comparison is performed by populating the evidence nodes of the network according to the characteristics of the anaphor and candidate. Mentions that are found to co-refer are grouped together and assigned to a unique discourse entity (called a peg).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reference Resolution",
                "sec_num": null
            },
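            {
                "text": "The flat network amounts to letting each evidence source bear independently on the coreference decision, which can be evaluated in naive-Bayes fashion. The sketch below is a simplified Python illustration with made-up feature names and probabilities, not the system's actual network or parameters: \n\nfrom math import prod\n\n# P(feature is true | coref) and P(feature is true | not coref); values are hypothetical.\nCPT = {\n    'number_agree':  (0.95, 0.40),\n    'kr_compatible': (0.90, 0.30),\n    'recent':        (0.70, 0.45),\n    'definite_np':   (0.60, 0.50),\n}\nPRIOR_COREF = 0.1   # prior that an arbitrary anaphor/candidate pair co-refers\n\ndef coref_probability(evidence):\n    # Flat network: every evidence node bears directly on the coreference root node.\n    p_true = PRIOR_COREF * prod(CPT[f][0] if v else 1 - CPT[f][0] for f, v in evidence.items())\n    p_false = (1 - PRIOR_COREF) * prod(CPT[f][1] if v else 1 - CPT[f][1] for f, v in evidence.items())\n    return p_true / (p_true + p_false)\n\n# Comparing a definite anaphor against a nearby, KR-compatible candidate mention.\nprint(coref_probability({'number_agree': True, 'kr_compatible': True,\n                         'recent': True, 'definite_np': True}))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reference Resolution",
                "sec_num": null
            },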
            {
                "text": "In the case of our actual run on TST2-MUC4-0048, for example, the bracketing of the first sentence produced by the semi-parser led to identifying as mentions (among others) Alfredo Cristiani, and the murder event introduced by the nominalization of \"kill.\" The second of these phrases was then taken as potentially anaphoric and compared to earlier mentions in the sentence, including that for Cristiani. In the case of Cristiani, the mentions were found not to co-refer, reflecting the importance of KR compatibility. Nevertheless, the fragmentary nature of the parses, coupled with the relative lack of grammatical features in the Bayesian network, led to disappointing reference resolution performance overall. As we describe below and elsewhere in these proceedings, this led indirectly to our relatively low precision scores.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reference Resolution",
                "sec_num": null
            },
            {
                "text": "Once reference resolution has been performed, the system enters a MUC-4-specific processing phase. The first step towards producing templates consists of identifying significant violent events, which is performed by searching the document for event pegs whose semantic heads are subsumed by the KR node for violence. In our actual run on TST2-MUC4-0048, two such pegs were found in the first paragraph: one for \"killing\" and one for \"the crime.\" The fact that two separate pegs were found for these phrases reflects a failure on the part of our reference resolution mechanism, as these two phrases should properly have been determined to be co-referential.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
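            {
                "text": "This search can be pictured as a subsumption test over a knowledge-representation hierarchy. The sketch below uses a toy hierarchy and toy pegs; the names are hypothetical stand-ins for the system's actual KR: \n\n# Toy KR hierarchy: child -> parent.\nKR_PARENT = {'KILL': 'VIOLENCE', 'ATTACK': 'VIOLENCE', 'VIOLENCE': 'EVENT', 'MEETING': 'EVENT'}\n\ndef subsumed_by(node, ancestor):\n    # Walk up the hierarchy to test whether ancestor subsumes node.\n    while node is not None:\n        if node == ancestor:\n            return True\n        node = KR_PARENT.get(node)\n    return False\n\n# Discourse pegs produced by reference resolution: (peg id, semantic head).\npegs = [('PEG-1', 'KILL'), ('PEG-2', 'ATTACK'), ('PEG-3', 'MEETING')]\n\n# A template is generated for each peg whose head falls under the VIOLENCE node.\nviolent_pegs = [p for p, head in pegs if subsumed_by(head, 'VIOLENCE')]\nprint(violent_pegs)   # ['PEG-1', 'PEG-2']",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },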
            {
                "text": "In the MUC-4 version of ALEMBIC, the actual generation of templates is keyed off of the pegs for violent events.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
            {
                "text": "Each such peg is taken to indicate a separate act of violence for which a template should be generated. This strategy is very straightforward, but it places a tremendous burden on the system's ability to identify coreferential events. As reference resolution is actually a weak point in the MUC-4 version of the system, this leads us to generate multiple templates for what is really the same event. As a result, we paid a significant toll in our precision scores.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
            {
                "text": "Turning to the slot-filling mechanism for a particular template, once a significant event peg has been identified, ALEMBIC then attempts to locate the participants in the event. Preferentially, the system attempts to use the syntactic arguments of the event phrase, if the parser succeeded in identifying them. In those cases where the parser failed to provide arguments to a verb or a nominalized event, the system assumes that the parse must have been fragmented, and attempts to locate potential arguments elsewhere in the sentence. This search is clearly heuristic and application-specific. In the case of the actual perpetrator of the event, the system attempts to find phrases with the appropriate agentive heads-this includes military organizations, terrorist organizations, and known terrorists. A similar process is performed to identify entities that might bear on other template slots. For example, targets and instruments are identified by searching for phrases headed by KR relations denoting damage, injury, or weapons.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
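            {
                "text": "The fallback behaviour described above can be sketched as follows; the head categories and phrase records are hypothetical stand-ins for the system's KR-driven search: \n\nAGENTIVE_HEADS = {'TERRORIST-ORGANIZATION', 'MILITARY-ORGANIZATION', 'KNOWN-TERRORIST'}\nTARGET_HEADS = {'DAMAGE', 'INJURY'}\n\ndef fill_slots(event, sentence_phrases):\n    # Prefer arguments the parser actually attached to the event phrase.\n    perpetrator = event.get('agent')\n    if perpetrator is None:\n        # Fragmented parse: fall back to any phrase in the sentence with an agentive head.\n        perpetrator = next((p['text'] for p in sentence_phrases if p['head'] in AGENTIVE_HEADS), None)\n    targets = [p['text'] for p in sentence_phrases if p['head'] in TARGET_HEADS]\n    return {'PERP': perpetrator, 'TARGETS': targets}\n\nphrases = [{'head': 'TERRORIST-ORGANIZATION', 'text': 'THE FMLN'},\n           {'head': 'INJURY', 'text': 'ROBERTO GARCIA ALVARADO'}]\nprint(fill_slots({'head': 'KILL', 'agent': None}, phrases))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },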
            {
                "text": "The heuristic nature of this process yields mixed results. At times it reunites arguments that had been separated from their verbs due to fragmentary parsing, and at times it simply results in unprincipled filling of slots (with results that can be incorrect, and even humorous).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
            {
                "text": "The final step taken by the system towards analyzing a message is also the most uninteresting. Once the message has been fully analyzed, it is simply dumped back out to a file, along with any relevant markup of meaning analysis. In the case of the MUC-4 task, this amounts to associating SGML template tags to relevant paragraphs of text, or more simply, to ignoring the production of an SGML output file, and just printing the templates on their own.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extraction of Significant Events and Template Generation",
                "sec_num": null
            },
            {
                "text": "As we mentioned at the beginning of this note, ALEMBIC is still in a very early stage of development. Although we are satisfied with the system's (fairly humble) performance given its relative youth, we are also painfully aware of the compromises performed in fielding a message processing system on such a tight development schedule. Many of the shortcuts we took are unsatisfyingly heuristic. In addition, many interesting ideas that seemed promising in paper studies were never included in the fielded system. It was our original intent, for example, to use a completely different parsing algorithm that supports nearly-semantic parse rules; this class of rules is related to the nearly-syntactic extraction rules of some recent MUC-class systems, e.g., FASTUS and CIRCUS. We had also intended to extend the semantic interpretation process with a terminological inference mechanism based on [9]. These modules were not implemented in time for MUC, nor were a host of other improvements detailed in our companion results and analysis paper. It is to these ideas that we now turn, in the expectation that the next version of ALEMBIC that we apply to the MUC data extraction task will dramatically outperform the version presented here.",
                "cite_spans": [
                    {
                        "start": 902,
                        "end": 905,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "CONCLUDING THOUGHTS",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "Principal funding for this work was provided by the MITRE Corporation. Funding for our participation in MUC-4 was provided by the Defense Advanced Research Projects Agency through the MUC conference committee. We would also like to express our gratitude to Richard Tong and his colleagues at ADS for providing us with their helpful lexicon and taxonomies. Finally, we would like to thank Beth Sundheim for her ongoing support.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Logical forms in the core language engine",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Alshawi",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Van Eijck",
                        "suffix": ""
                    }
                ],
                "year": 1989,
                "venue": "Proceedings of the 27th Annual Meeting of the ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alshawi, H and van Eijck, J, \"Logical forms in the core language engine,\" in Proceedings of the 27th Annual Meeting of the ACL, Vancouver, British Columbia. 1989.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "The relation-based knowledge representation of King Kong",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Bayer",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Vilain",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "SIGART Bulletin",
                "volume": "2",
                "issue": "3",
                "pages": "15--21",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bayer, S, and Vilain, M, \"The relation-based knowledge representation of King Kong,\" SIGART Bulletin 2(3) , 15-21 . 1991 .",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Probabilistic resolution of anaphoric reference",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Burger",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Connolly",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "To appear in Proceedings of the 1992 AAAI Fall Symposium on Probabilistic Approaches to Natural Language",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Burger, J and Connolly, D, \"Probabilistic resolution of anaphoric reference,\" to appear in Proceedings of the 1992 AAAI Fall Symposium on Probabilistic Approaches to Natural Language, Boston, MA. 1992.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "GE : Description of the NLTOOLSET system as used in MUC -3",
                "authors": [
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Krupka",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Jacobs",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Rau",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Iwanska",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Krupka, G, Jacobs, P, Rau, L, and Iwanska, L, \"GE : Description of the NLTOOLSET system as used in MUC - 3,\" in [7] . 1991 .",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Discourse Pegs: A Computational Analysis of Context-dependent Referring Expressions, doctoral dissertation",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Luperfoy",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Luperfoy, S, Discourse Pegs : A Computational Analysis of Context-dependent Referring Expressions , doctoral dissertation, Dept. of Linguistics, University of Texas at Austin . 1991 .",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A lazy way to chart-parse with categorial grammars",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Pareschi",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Steedman",
                        "suffix": ""
                    }
                ],
                "year": 1987,
                "venue": "Proceedings of the 25th Annual Meeting of the ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pareschi R and Steedman, M, \"A lazy way to chart-parse with categorial grammars,\" in Proceedings of the 25th Annual Meeting of the ACL, Stanford, CA . 1987.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Proceedings of the Third Message Understanding Conference",
                "authors": [
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Sundheim",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sundheim, B, ed, Proceedings of the Third Message Understanding Conference, Morgan Kaufmann. 1991.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Deduction as parsing: tractable classification in the KL-ONE framework",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Vilain",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "Proceedings of the Ninth National Conference on Artificial Intelligence (AAAI-91)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vilain, M, \"Deduction as parsing: tractable classification in the KL-ONE framework,\" in Proceedings of the Ninth National Conference on Artificial Intelligence (AAAI-91), Anaheim, CA. 1991.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "BBN : Description of the PLUM system as used in MUC-3",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Weischedel",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Ayuso",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Boisen",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Ingria",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Palmucci",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Weischedel, R, Ayuso, D, Boisen, S, Ingria, R, and Palmucci J, \"BBN : Description of the PLUM system as used in MUC-3,\" in [7] . 1991 .",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF1": {
                "type_str": "figure",
                "text": "Overall System Architecture",
                "uris": null,
                "num": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "text": "\u2022 Agreement on number, person, and gender \u2022 Compatibility vis-a-vis the semantic hierarchies \u2022 Recency \u2022 Reflexivity \u2022 Phrase type (pronominal, definite, or otherwise)",
                "uris": null,
                "num": null
            },
            "TABREF2": {
                "type_str": "table",
                "html": null,
                "num": null,
                "content": "<table><tr><td colspan=\"2\">head :KILL ]</td><td/></tr><tr><td colspan=\"4\">[args #( [[head :TERRORISM-AGENT ]</td></tr><tr><td/><td colspan=\"2\">[quant NIL] ]</td></tr><tr><td/><td colspan=\"2\">OBJ-VAR ) ]</td></tr><tr><td colspan=\"3\">[quant :DEFINITE] ]</td></tr><tr><td colspan=\"2\">[[head :ATTACK ]</td><td/></tr><tr><td>[arg s</td><td>#([[hea d</td><td colspan=\"2\">:TERRORISM-AGENT]] ... ) ]</td></tr><tr><td>[proxy</td><td colspan=\"2\">PROX179]</td></tr><tr><td colspan=\"2\">[mods ([[hea d</td><td>:TIME-OF ]</td></tr><tr><td/><td>[args</td><td>#(PROX17 9</td></tr><tr><td/><td/><td>[[head</td><td>:DATE ]</td></tr><tr><td/><td/><td>[proxy</td><td>PROX180 ]</td></tr><tr><td/><td/><td>[mods</td><td>([head :BEFORE ]</td></tr><tr><td/><td/><td/><td>[args #(PROX:180 *NOW* [[head :DAY ]</td></tr><tr><td/><td/><td/><td>[quant 5]])]}]])]])]]</td></tr></table>",
                "text": "In addition, the representation maintains an implicitly Davidsonian representation of events and other relations . That is, aside from their underlying arguments, the relations may be modified through a proxy variable, as in th e following encoding of a later sentence in the message, \"guerillas attacked . . . five days ago . \""
            }
        }
    }
}