{
    "paper_id": "1994",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T02:07:12.750638Z"
    },
    "title": "INTEGRATING TRANSLATIONS FROM MULTIPLE SOURCES WITHIN THE PANGLOSS MARK III MACHINE TRANSLATION SYSTEM",
    "authors": [
        {
            "first": "Robert",
            "middle": [],
            "last": "Frederking",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Sergei",
            "middle": [],
            "last": "Nirenburg",
            "suffix": "",
            "affiliation": {
                "laboratory": "Computing Research Laboratory",
                "institution": "Carnegie Mellon University New Mexico State University",
                "location": {
                    "addrLine": "Schenley Park Box",
                    "postCode": "30001 / 3CRL, 15213-3890, 88003",
                    "settlement": "Pittsburgh, Las Cruces",
                    "region": "PA, NM"
                }
            },
            "email": ""
        },
        {
            "first": "David",
            "middle": [],
            "last": "Farwell",
            "suffix": "",
            "affiliation": {
                "laboratory": "Computing Research Laboratory",
                "institution": "Carnegie Mellon University New Mexico State University",
                "location": {
                    "addrLine": "Schenley Park Box",
                    "postCode": "30001 / 3CRL, 15213-3890, 88003",
                    "settlement": "Pittsburgh, Las Cruces",
                    "region": "PA, NM"
                }
            },
            "email": ""
        },
        {
            "first": "Stephen",
            "middle": [],
            "last": "Helmreich",
            "suffix": "",
            "affiliation": {
                "laboratory": "Computing Research Laboratory",
                "institution": "Carnegie Mellon University New Mexico State University",
                "location": {
                    "addrLine": "Schenley Park Box",
                    "postCode": "30001 / 3CRL, 15213-3890, 88003",
                    "settlement": "Pittsburgh, Las Cruces",
                    "region": "PA, NM"
                }
            },
            "email": ""
        },
        {
            "first": "Eduard",
            "middle": [],
            "last": "Hovy",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of S",
                "location": {
                    "addrLine": "California 4676 Admiralty Way Marina del Rey",
                    "postCode": "90292",
                    "region": "CA"
                }
            },
            "email": ""
        },
        {
            "first": "Kevin",
            "middle": [],
            "last": "Knight",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of S",
                "location": {
                    "addrLine": "California 4676 Admiralty Way Marina del Rey",
                    "postCode": "90292",
                    "region": "CA"
                }
            },
            "email": ""
        },
        {
            "first": "Stephen",
            "middle": [],
            "last": "Beale",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of S",
                "location": {
                    "addrLine": "California 4676 Admiralty Way Marina del Rey",
                    "postCode": "90292",
                    "region": "CA"
                }
            },
            "email": ""
        },
        {
            "first": "Constantine",
            "middle": [],
            "last": "Domashnev",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Donalee",
            "middle": [],
            "last": "Attardo",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Dean",
            "middle": [],
            "last": "Grannes",
            "suffix": "",
            "affiliation": {},
            "email": ""
        },
        {
            "first": "Ralf",
            "middle": [],
            "last": "Brown",
            "suffix": "",
            "affiliation": {},
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Since MT systems, whatever translation method they employ, do not reach an optimum output on free text; each method handles some problems better than others. The PANGLOSS Mark III system is an MT environment that uses the best results from a variety of independent MT systems or engines working simultaneously within a single framework on the same text. This paper describes the method used to combine the outputs of the engines into a single text.",
    "pdf_parse": {
        "paper_id": "1994",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Since MT systems, whatever translation method they employ, do not reach an optimum output on free text; each method handles some problems better than others. The PANGLOSS Mark III system is an MT environment that uses the best results from a variety of independent MT systems or engines working simultaneously within a single framework on the same text. This paper describes the method used to combine the outputs of the engines into a single text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "PANGLOSS Mark III is a multi-engine Spanish-to-English MT system. The PANGLOSS team is distributed at three sites -the Computing Research Laboratory of New Mexico State University, the Information Sciences Institute of the University of Southern California and the Center for Machine Translation of Carnegie Mellon University. Originally, PANGLOSS was supposed to be a pure knowledge-based machine translation (KBMT) system implemented in a version of the interlingua architecture. The project, however, evolved toward a more eclectic approach, mostly due to the necessity to perform well during periodical external evaluations whose timing and frequency was established after the project started. As KBMT systems rely on extensive knowledge bases, their gestation periods are typically longer than those of other kinds of systems. The first system configuration, the PANGLOSS Mark I system, was pure KBMT and did not perform well on the first evaluation in Summer 1992. The project team then decided to channel some of the resources into developing an interim evaluation system which would show an immediate improvement in output quality, while in parallel continuing to develop the \"mainline\" KBMT system. The idea was that at a certain stage the KBMT system would supplant the interim system. Thus, PANGLOSS Mark II was a simple lexical transfer system based on phrasal bilingual glossaries and machinereadable dictionaries. The evaluation results were better than during the first evaluation (White and O'Connell, 1994) .",
                "cite_spans": [
                    {
                        "start": 1496,
                        "end": 1523,
                        "text": "(White and O'Connell, 1994)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "As our understanding of the MT task continued to evolve, it was decided not to discard the \"interim\" system in favor of the \"mainline\" one but rather to make these systems coexist and even include additional MT systems. In fact, our current system, PANGLOSS Mark III, differs from other MT systems because it employs not a single translation engine but a set of several engines whose results are integrated for the best overall output. PANGLOSS Mark III contains three distinct MT engines:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "\u2022 a KBMT system, the mainline Pangloss engine;",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "\u2022 an example-based MT (EBMT) system ; the original idea is due to Nagao, 1984) ; and",
                "cite_spans": [
                    {
                        "start": 66,
                        "end": 78,
                        "text": "Nagao, 1984)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "\u2022 a lexical transfer system, fortified with morphological analysis and synthesis modules and relying on a number of databases -a machine-readable dictionary (the Collins Spanish/English), the lexicons used by the KBMT modules, a large set of user-generated bilingual glossaries as well as a gazetteer and a list of proper and organization names.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In what follows, we first describe the multi-engine architecture and the nature of engine integration in PANGLOSS Mark III. Then we describe the individual engines, focusing primarily on KBMT. Finally we discuss two extensions to the project, the handling of Japanese to English translation and a project to the quality of language analysis via the development of computationallinguistic microtheories.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "A number of proposals have come up in recent years for hybridization of MT. Current MT projects -both \"pure\" and hybrid, both predominantly technology-oriented and research-oriented -are single-engine projects, capable of one particular type of source text analysis, one particular method of finding target language correspondences for source language elements and one prescribed method of generating the target language text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Obtaining and Scoring the Outputs of MT Engines",
                "sec_num": "2.1"
            },
            {
                "text": "It is common knowledge that MT systems, whatever translation method they at present employ, do not reach an optimum output on free text. In part, this is due to the inherent problems of a particular method -for instance, the inability of statistics-based MT to take into account long-distance dependencies or the reliance of most transfer-oriented MT systems on similarities in syntactic structures of the source and the target languages. Another crucial source of deficiencies is the size and quality of the static knowledge sources underlying the various MT systems -particular grammars, lexicons and world models. Thus, in knowledge-based MT the size of the underlying world model is typically smaller than necessary for secure coverage of free text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Obtaining and Scoring the Outputs of MT Engines",
                "sec_num": "2.1"
            },
            {
                "text": "The PANGLOSS Mark III system of January 1994 is an MT environment which uses the best results from a variety of MT systems working simultaneously on the same text. In this novel multiengine MT approach, we submit an input text to a battery of independent machine translation systems (engines). The results obtained from the individual engines (target language words and phrases) are then recorded, jointly, in a chart whose initial edges correspond to words in the source language input. New edges are added to the chart and labeled with the translation of a segment of the input string and indexed by this segment's beginning and end positions. In addition, a quality score is added to each edge. Using the normalized quality score, the chart manager selects the overall best cover from a collection of candidate partial translations by the \"chart-walk\" algorithm.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Obtaining and Scoring the Outputs of MT Engines",
                "sec_num": "2.1"
            },
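As a minimal sketch of this chart, an edge can be modeled as below. The field names, engine labels, and example scores are invented for illustration; the paper does not specify a concrete data layout.

```python
# Illustrative sketch of a chart edge as described above: each edge covers a
# span of source words, carries a candidate translation, and records the
# quality score assigned by the engine that produced it. Names are invented.
from dataclasses import dataclass

@dataclass
class Edge:
    start: int         # index of the first source word covered
    end: int           # index one past the last source word covered
    translation: str   # candidate target-language words or phrase
    engine: str        # producing engine, e.g. "KBMT", "EBMT", "transfer"
    score: float       # normalized quality score from that engine

# A chart over a sentence is then a collection of such edges, possibly
# overlapping, contributed by all engines at once:
chart = [
    Edge(0, 1, "the", "transfer", 8.0),
    Edge(1, 3, "stock exchange", "EBMT", 18.0),
    Edge(0, 3, "the stock market", "KBMT", 15.0),
]
```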
            {
                "text": "The KBMT and EBMT engines provide a score for each output element, based on their internal confidence in the quality of its translation. For the Lexical Transfer Engine, the score for each glossary is a constant based on the reliability of the glossary. The scores are also normalized so as to be comparable. Finally, during chart construction, the base score produced by the scoring functions is multiplied by the length of the candidate in words, on the assumption that longer items are better.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Obtaining and Scoring the Outputs of MT Engines",
                "sec_num": "2.1"
            },
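A minimal sketch of this length weighting, assuming the base score has already been normalized across engines (the function name and example values are illustrative):

```python
# Sketch of the scoring step above: the engine's normalized base score is
# multiplied by the candidate's length in words, on the stated assumption
# that longer items are better. Function name is invented for illustration.

def weighted_edge_score(base_score: float, translation: str) -> float:
    return base_score * len(translation.split())

print(weighted_edge_score(6.0, "board of directors"))  # 18.0
print(weighted_edge_score(9.0, "takeover"))            # 9.0
```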
            {
                "text": "Once the edges are scored, the cover is produced using a simple dynamic programming algorithm. The figure below presents the chart-walk algorithm used to produce a single, best, non-overlapping, contiguous combination of the available component translations. The algorithm uses dynamic programming to find the optimal cover (a cover with the best cumulative score), assuming correct component quality scores. The code is organized as a recursive divide-and-conquer procedure: for each position within a segment, the sentence is split into two parts, the best possible cover for each part is recursively found and the two scores are combined to give a score for the chart-walk containing the two best subwalks. This primitive step is repeated for each possible top-level split of the input sentence, compared with each other and with any simple edges (from the chart) spanning the segment, and the overall best result is used.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chart-Walk Algorithm",
                "sec_num": "2.2"
            },
            {
                "text": "To find best walk on a segment: if there is a stored result for this segment then return it else begin get all primitive edges for this segment for each position p within this segment begin split segment into two parts at p find best walk for first part find best walk for second part combine into an edge end find maximum score over all primitive and combined edges store and return it end Without dynamic programming, this algorithm would have a combinatorial time complexity. Dynamic programming utilizes a large array to store partial results, so that the best cover of any given subsequence is only computed once; the second time that a recursive call would compute the same result, it is retrieved from the array instead. This reduces the time complexity to 0(n 3 ), and in practice it uses an insignificant part of total processing time.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chart-Walk Algorithm",
                "sec_num": "2.2"
            },
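The pseudocode can be rendered as a short memoized function. This is a hedged sketch, not the PANGLOSS source: edges are assumed to be (start, end, score) tuples over half-open word spans, and combination uses the length-weighted average described in the next paragraph.

```python
# Runnable sketch of the chart-walk pseudocode above (not the original code).
# Assumptions: an edge is a (start, end, score) tuple over a half-open span
# [start, end); memoization via lru_cache plays the role of the stored-result
# array; combined scores are length-weighted averages, as the paper requires.
from functools import lru_cache

def best_walk(edges, n):
    by_span = {}
    for start, end, score in edges:
        by_span.setdefault((start, end), []).append(score)

    @lru_cache(maxsize=None)
    def walk(i, j):
        # primitive edges spanning [i, j) exactly
        candidates = [(s, [(i, j, s)]) for s in by_span.get((i, j), [])]
        # every two-way split, each part solved recursively (and cached)
        for p in range(i + 1, j):
            (ls, lw), (rs, rw) = walk(i, p), walk(p, j)
            if lw and rw:
                # length-weighted average keeps scores grouping-independent
                combined = (ls * (p - i) + rs * (j - p)) / (j - i)
                candidates.append((combined, lw + rw))
        # maximum over all primitive and combined edges (or a dead end)
        return max(candidates, default=(float("-inf"), []))

    return walk(0, n)

# Example: two short edges beat one long low-scoring edge over words 0..3.
print(best_walk([(0, 1, 10.0), (1, 3, 9.0), (0, 3, 5.0)], 3))
```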
            {
                "text": "The combined score for a sequence of edges is the weighted average of their individual scores. Weighting by length is necessary so that the same edges, when combined in a different order, produce the same combined scores. In other words, whether edges a, b and c are combined as ((a b) c) or (a (b c)), the combined edge must have the same score, or the algorithm can produce inconsistent results.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chart-Walk Algorithm",
                "sec_num": "2.2"
            },
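A quick worked check of this grouping-independence claim, with invented scores and lengths:

```python
# Worked check of the claim above: with length weighting, ((a b) c) and
# (a (b c)) combine to the same score. Scores and lengths are invented.

def combine(s1, l1, s2, l2):
    """Length-weighted average of two adjacent (score, length) edges."""
    return ((s1 * l1 + s2 * l2) / (l1 + l2), l1 + l2)

a, b, c = (10.0, 2), (4.0, 2), (7.0, 4)    # (score, length in words)
left  = combine(*combine(*a, *b), *c)       # ((a b) c)
right = combine(*a, *combine(*b, *c))       # (a (b c))
assert left == right == (7.0, 8)
```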
            {
                "text": "The chart-walk algorithm can also be visualized as a task of filling a two-dimensional array. The array for our example sentence is shown in the figure below. Element (i,j) of the array is the best score for any set of edges covering the input from word i to word j. (The associated list of edges is not shown, for readability.) For any position, the score is calculated as a weighted average of the scores in the row to its left, in the column below it and the previous contents of the array cell for its position. So to calculate element (1,4), we compare the combined scores of the best walks over (1,1) and (2,4), (1,2) and (3,4), and (1,3) and (4,4) with the scores of any chart edges going from 1 to 4, and take the maximum. When the score in the top-right corner is produced, the algorithm is finished and the associated set of edges is the final chart-walk result.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chart-Walk Algorithm",
                "sec_num": "2.2"
            },
            {
                "text": "It may seem that the scores should increase towards the top-right corner. In our experience, however, this has not generally been the case. Indeed, the system suggested a number of highscoring short edges, but many low-scoring edges had to be included to span the entire input. Since the score is a weighted AVERAGE, these low-scoring edges pull it down. A clear example can be seen at position (18,18), which has a score of 15. The scores above and to its right each average this 15 with a 5, for total values of 10.0 and the score continues to decrease with distance from this point as one moves towards the final score, which does include (18,18) in the cover.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Chart-Walk Algorithm",
                "sec_num": "2.2"
            },
            {
                "text": "The chart-oriented integration of MT engines does not easily support deviations from the linear order of the source text elements, as when discontinuous constituents translate contiguous strings or in the case of cross-segmental substring order differences. Following a venerable tradition in MT, we used a target language-dependent set of postprocessing rules to alleviate this problem (e.g., by switching the order of adjectives and nouns in a noun phrase if it was produced by the word-for-word engine).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reordering Components",
                "sec_num": "2.3"
            },
            {
                "text": "In normal operation, results of multi-engine MT are fed in PANGLOSS into a translator's workstation (TWS) (Cohen et al., 1993) through which a translator either approves the system's output or modifies it. The main option for human interaction in TWS currently is the Component Machine-Aided Translation (CMAT) editor , in which the user can use menus, function keys, and mouse clicks to change the system's initially chosen candidate translation string, as well as perform both regular and enhanced editing actions.",
                "cite_spans": [
                    {
                        "start": 106,
                        "end": 126,
                        "text": "(Cohen et al., 1993)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Translation Delivery System",
                "sec_num": "2.4"
            },
            {
                "text": "The mainline engine of PANGLOSS is the knowledge-based engine. It consists of an analyzer, called the PANGLYZER and a generation module centered on the generator called PENMAN. Unfortunately, given space limitations, we can provide only a brief overview here; please see (PANGLOSS 1994) for a more comprehensive account.",
                "cite_spans": [
                    {
                        "start": 271,
                        "end": 286,
                        "text": "(PANGLOSS 1994)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Knowledge-Based MT Engine",
                "sec_num": "3"
            },
            {
                "text": "The function of the Spanish analysis component of the PANGLOSS system, or Panglyzer, is to provide for each clause in the input text a set of possible meaning representations ranked on the basis of likelihood. The system is described in more detail in (Farwell et al. 1994) . For this effort the focus is placed on Spanish language newspaper articles in the area of finance, specifically mergers and acquisitions. Currently, the output is a sequence of sets of ranked partial meaning representations corresponding at times to the words, at times to the phrases and at times to the clauses of the input text. The output is passed to a mapper which converts Panglyzer representations to representation appropriate for generation.",
                "cite_spans": [
                    {
                        "start": 252,
                        "end": 273,
                        "text": "(Farwell et al. 1994)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Panglyzer: Spanish Language Analysis System",
                "sec_num": "3.1"
            },
            {
                "text": "The approach has been to develop the system in a bottom up manner, providing layer after layer of increasingly abstract analysis in a multi-pass process. Each level of analysis is based on a focused type of knowledge and, to the extent possible, exploits proven techniques. Since a high premium has been placed on robustness, that is, producing some throughput even if it is partially correct, an iterative approach to design has been used which relies on rapidly producing an initial prototype and then following a short test and revise cycle. Thus, at this point, all but the deepest level of analysis produces throughput and the on-going objective is to improve the accuracy of that throughput from one test cycle to the next.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Panglyzer: Spanish Language Analysis System",
                "sec_num": "3.1"
            },
            {
                "text": "In the eventual PANGLOSS, semantic analysis will constitute a significant module in the pathway from source text to Interlingua form, involving several tasks, including mapping of syntactic and semantic information as produced by the Panglyzer into basic TMR propositions, reference resolution, metonymy, discourse structure building at the paragraph level, and so on. At present, however, mainly the basic proposition construction portion has been addressed, using a unification-based bottom-up chart parser with approx. 500 rules.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Panglyzer-to-Penman Mapper",
                "sec_num": "3.2"
            },
            {
                "text": "PENMAN is of the largest English sentence generator programs available. A detailed overview of language generation in Penman can be found in Matthiessen and Bateman (1991) .",
                "cite_spans": [
                    {
                        "start": 141,
                        "end": 171,
                        "text": "Matthiessen and Bateman (1991)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Penman: English Sentence and Phrase Generation",
                "sec_num": "3.3"
            },
            {
                "text": "Penman consists of a number of components. Nigel, the English grammar, is the heart of the system. Based on the theory of Systemic Functional Linguistics (a theory of language and communication, and used in various AI applications, such as in SHRDLU, Nigel is a network of over 700 nodes called \"systems\", each node representing a single minimal grammatical alternation. In order to generate a sentence, Penman traverses the network guided by its inputs and default settings. At each system node, Penman selects a feature until it has assembled enough features to fully specify a sentence. After constructing a syntax tree and choosing words to satisfy the features selected, Penman generates the English sentence. The Nigel grammar is described in, among others, Matthiessen (1984) .",
                "cite_spans": [
                    {
                        "start": 764,
                        "end": 782,
                        "text": "Matthiessen (1984)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Penman: English Sentence and Phrase Generation",
                "sec_num": "3.3"
            },
            {
                "text": "The taxonomy of generalizations is called the Penman Upper Model and can be seen as a very general taxonomic model of the entities, objects, qualities and relations in the world (Bateman et al., 1989) . This taxonomy acts to link the terms in a user's application domain to the terms used and decision made within Penman. Within PANGLOSS, the Upper Model is embedded in the Ontology Base and ensures that any Ontology term used in an input to Penman will be handled correctly in creating an appropriate English sentence or phrase.",
                "cite_spans": [
                    {
                        "start": 178,
                        "end": 200,
                        "text": "(Bateman et al., 1989)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Penman: English Sentence and Phrase Generation",
                "sec_num": "3.3"
            },
            {
                "text": "The basic idea of EBMT is simple (Nagao, 1984) : given an input passage S in a source language and a bilingual text archive, where text passages S' in the source language are stored, aligned with their translations into a target language, passages T', S is compared with the source-language \"side\" of the archive. The \"closest\" match for passage S' is selected and the translation of this closest match, the passage T' is accepted as the translation of S.",
                "cite_spans": [
                    {
                        "start": 33,
                        "end": 46,
                        "text": "(Nagao, 1984)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example-Based MT Engine",
                "sec_num": "4"
            },
            {
                "text": "EBMT steps include the following:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example-Based MT Engine",
                "sec_num": "4"
            },
            {
                "text": "1. align corpus at sentence level; 2. find chunks from the source language part of corpus which are best candidates for matching an input chunk (intra-language matching);",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example-Based MT Engine",
                "sec_num": "4"
            },
            {
                "text": "3. find the target language chunk corresponding to the chunk from the source language part of the corpus (inter-language matching); 4. combine chunk-level results to obtain the \"cover\" for the entire text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Example-Based MT Engine",
                "sec_num": "4"
            },
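The matching steps can be illustrated with a toy sketch. This is not the PANGLOSS matcher: the corpus is invented, and difflib's similarity ratio stands in for whatever matching metric the real engine uses.

```python
# Toy rendering of steps 2 and 3 above: given a sentence-aligned corpus
# (step 1), find the closest source-language chunk (step 2) and return its
# aligned target-language side (step 3). All data here is illustrative.
import difflib

CORPUS = [  # (Spanish, English) pairs, invented for illustration
    ("la junta directiva", "the board of directors"),
    ("la bolsa de valores", "the stock exchange"),
]

def ebmt_lookup(chunk: str) -> str:
    # step 2: intra-language matching against the source side
    best = max(
        CORPUS,
        key=lambda pair: difflib.SequenceMatcher(None, chunk, pair[0]).ratio(),
    )
    # step 3: inter-language matching -- take the aligned target side
    return best[1]

print(ebmt_lookup("la bolsa"))  # -> "the stock exchange"
```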
            {
                "text": "PANGLOSS uses a simple and traditional lexical transfer MT engine as a safety net. Lexical transfer is carried out using a number of bilingual resources: the lexicons developed as an aid in the KBMT engine; a machine readable dictionary (Spanish-English Collins) and a set of manually produced glossaries. PANGLOSS Spanish-to-English glossaries were improved in 3 ways between the May 1993 and January 1994 evaluations:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Lexical Transfer MT Engine",
                "sec_num": "5"
            },
            {
                "text": "\u2022 Pure size: glossaries grew from 68,000 to 174,000 entries between October and December 1993;",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Lexical Transfer MT Engine",
                "sec_num": "5"
            },
            {
                "text": "\u2022 Cleaning: mass effort by the glossary acquisition team to rid all glossaries of useless grammatical information, correct inaccurate entries, etc.;",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Lexical Transfer MT Engine",
                "sec_num": "5"
            },
            {
                "text": "\u2022 Variables: mass effort by the glossary acquisition team to allow the glossary entries to use coindexed variables.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Lexical Transfer MT Engine",
                "sec_num": "5"
            },
            {
                "text": "To allow matching on \"open\" patterns, variables were introduced into the glossary entries for proper names, such as individual, company and place names; numbers; and various classes of pronouns.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Lexical Transfer MT Engine",
                "sec_num": "5"
            },
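A hedged sketch of such a variable-bearing entry follows. The pattern syntax, the entry itself, and the use of regular expressions are all assumptions for illustration; the actual glossary format is not specified here.

```python
# Sketch of the coindexed-variable idea above: a glossary pattern leaves a
# slot open (here, a capitalized company name) and the English template
# reuses the captured value. Entry and syntax are invented.
import re

GLOSSARY = [
    (r"la compañía (?P<x1>[A-Z]\w+)", r"the company \g<x1>"),
]

def lexical_transfer(text: str) -> str:
    for pattern, template in GLOSSARY:
        text = re.sub(pattern, template, text)
    return text

print(lexical_transfer("la compañía Telmex"))  # -> "the company Telmex"
```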
            {
                "text": "Ultimately, a multi-engine system depends on the basic quality of each particular engine. We expect the performance of some of the individual engines (especially, KBMT and EBMT) to grow. Consequently, the multi-engine environment will improve, as larger static knowledge sources are added and the scoring mechanism is further adjusted. In addition, we are currently working on adding a statistical English language model, to allow PANGLOSS to operate as a fully-automatic MT system. PANGLOSS is being extended not only from the standpoint of system architecture. Starting in December 1993, considerable effort has been devoted at ISI on the development of Japanese parsing capabilities for PANGLOSS. The overall goal is to test and strengthen the interlingual nature of the Ontology and to demonstrate the utility of the PANGLOSS framework for the incorporation of new languages. The work on Japanese falls into two major areas: KBMT system module development and resource construction. A mixture of statistical and symbolic methods are used to attain coverage, robustness, and quality in a short amount of time. This work is described in (Knight et al., 1994) .",
                "cite_spans": [
                    {
                        "start": 1139,
                        "end": 1160,
                        "text": "(Knight et al., 1994)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Additional Activities",
                "sec_num": "6"
            },
            {
                "text": "The Mikrokosmos project is an extension of PANGLOSS devoted to a study of semantic and pragmatic aspects of texts. It is not realistic to hope for the development of a single allencompassing theory of computational linguistics. However, high-quality applications require knowledge about a large number of language and language use phenomena. A natural way of combining the diverse knowledge into a single entity is to allow for the various phenomena to be treated by separate computational-linguistic \"microtheories\" united through a system's control architecture and knowledge representation conventions. We perceive the following microtheories as central for the support of knowledge-based machine translation (and other high-demand applications): lexical-semantic dependency, aspect, time, modality and other speaker attitudes, discourse relations, reference, style, and spatial description.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Additional Activities",
                "sec_num": "6"
            },
            {
                "text": "[White & O'Connel 94] White, J.S. and T. O'Connell. Evaluation in the ARPA Machine Translation Program: 1993 Methodology. Proceedings of the ARPA HLT Workshop. Plainsboro, NJ.March, 130-133,1994.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A General Organization of Knowledge for Natural Language Processing: The Penman Upper Model",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "A"
                        ],
                        "last": "Bateman",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [
                            "T"
                        ],
                        "last": "Kasper",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "D"
                        ],
                        "last": "Moore",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [
                            "A"
                        ],
                        "last": "Whitney",
                        "suffix": ""
                    }
                ],
                "year": 1989,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "References [Bateman et al. 89] Bateman, J.A., R.T. Kasper, J.D. Moore and R.A. Whitney. A General Organi- zation of Knowledge for Natural Language Processing: The Penman Upper Model. Unpublished research report, USC/Information Sciences Institute, Marina del Rey, 1989.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Translator's Workstation User Document",
                "authors": [
                    {
                        "first": "[",
                        "middle": [],
                        "last": "Cohen",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Cohen et al. 93] Cohen, A., P. Cousseau, R. Frederking, D. Grannes, S. Khanna, C. McNeilly, S. Nirenburg, P. Shell and D. Waeltermann. Translator's Workstation User Document, Center for Machine Translation, Carnegie Mellon University, 1993.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "An MAT Tool and Its Effectiveness",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Frederking",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Grannes",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Cousseau",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Nirenburg",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the DARPA Human Language Technology Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Frederking et al. 93] Frederking, R., D. Grannes, P. Cousseau, and S. Nirenburg. An MAT Tool and Its Effectiveness. Proceedings of the DARPA Human Language Technology Workshop, Prince- ton, NJ, 1993.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Integrating Knowledge Bases and Statistics in MT",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Chander",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Haines",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Hatzivassiloglou",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Iida",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Luk",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Okumura",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Whitney",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Yamada",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the 1st AMTA Conference",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Knight et al. 94] Knight, K., I. Chander, M. Haines, V. Hatzivassiloglou, E. Hovy, M. Iida, S. Luk, A. Okumura, R. Whitney, K. Yamada. Integrating Knowledge Bases and Statistics in MT. Proceedings of the 1st AMTA Conference, Columbia MD, October 1994.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Systemic Grammar in Computation: The Nigel Case",
                "authors": [
                    {
                        "first": "C",
                        "middle": [
                            "M I M"
                        ],
                        "last": "Matthiessen",
                        "suffix": ""
                    }
                ],
                "year": 1984,
                "venue": "Proceedings of 1st Conference of the European Association for Computational Linguistics, Pisa. Also available as USC/ISI Research Report",
                "volume": "84",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Matthiessen 84] Matthiessen, C.M.I.M. Systemic Grammar in Computation: The Nigel Case. Pro- ceedings of 1st Conference of the European Association for Computational Linguistics, Pisa. Also available as USC/ISI Research Report RR-84-121, 1984.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Systemic-Functional Linguistics in Language Generation: Penman",
                "authors": [
                    {
                        "first": "C",
                        "middle": [
                            "M I M"
                        ],
                        "last": "Matthiessen & Bateman 91] Matthiessen",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "A"
                        ],
                        "last": "Bateman",
                        "suffix": ""
                    }
                ],
                "year": 1991,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Matthiessen & Bateman 91] Matthiessen, C.M.I.M. and J.A. Bateman. Systemic-Functional Lin- guistics in Language Generation: Penman. 1991.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A framework of a mechanical translation between Japanese and English by analogy principle",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Nagao",
                        "suffix": ""
                    }
                ],
                "year": 1984,
                "venue": "Artificial and Human Intelligence",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Nagao 84] Nagao, M. A framework of a mechanical translation between Japanese and English by analogy principle. In: A. Elithorn and R. Banerji (eds.) Artificial and Human Intelligence. NATO Publications, 1984.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Two Approaches to Matching in Example-Based Machine Translation",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Nirenburg",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Domashnev",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [
                            "J"
                        ],
                        "last": "Grannes",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of TMI-93",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Nirenburg et al. 93] Nirenburg, S. C. Domashnev and D.J. Grannes. Two Approaches to Matching in Example-Based Machine Translation. Proceedings of TMI-93, Kyoto, 1993.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "The Pangloss Machine Translation System",
                "authors": [],
                "year": 1994,
                "venue": "Center for Machine Translation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[PANGLOSS 94] The Pangloss Machine Translation System. Joint Technical Report, Computing Research Laboratory (New Mexico State University), Center for Machine Translation (Carnegie Mellon University), Information Sciences Institute (University of Southern California). 1994.",
                "links": null
            }
        },
        "ref_entries": {}
    }
}