{
    "paper_id": "I05-1018",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:25:05.861217Z"
    },
    "title": "Adapting a Probabilistic Disambiguation Model of an HPSG Parser to a New Domain",
    "authors": [
        {
            "first": "Tadayoshi",
            "middle": [],
            "last": "Hara",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Tokyo",
                "location": {
                    "addrLine": "Hongo 7-3-1, Bunkyo-ku",
                    "postCode": "113-0033",
                    "settlement": "Tokyo",
                    "country": "Japan"
                }
            },
            "email": ""
        },
        {
            "first": "Yusuke",
            "middle": [],
            "last": "Miyao",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Tokyo",
                "location": {
                    "addrLine": "Hongo 7-3-1, Bunkyo-ku",
                    "postCode": "113-0033",
                    "settlement": "Tokyo",
                    "country": "Japan"
                }
            },
            "email": ""
        },
        {
            "first": "Jun",
            "middle": [
                "'"
            ],
            "last": "Ichi Tsujii",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Tokyo",
                "location": {
                    "addrLine": "Hongo 7-3-1, Bunkyo-ku",
                    "postCode": "113-0033",
                    "settlement": "Tokyo",
                    "country": "Japan"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper describes a method of adapting a domain-independent HPSG parser to a biomedical domain. Without modifying the grammar and the probabilistic model of the original HPSG parser, we develop a log-linear model with additional features on a treebank of the biomedical domain. Since the treebank of the target domain is limited, we need to exploit an original disambiguation model that was trained on a larger treebank. Our model incorporates the original model as a reference probabilistic distribution. The experimental results for our model trained with a small amount of a treebank demonstrated an improvement in parsing accuracy.",
    "pdf_parse": {
        "paper_id": "I05-1018",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper describes a method of adapting a domain-independent HPSG parser to a biomedical domain. Without modifying the grammar and the probabilistic model of the original HPSG parser, we develop a log-linear model with additional features on a treebank of the biomedical domain. Since the treebank of the target domain is limited, we need to exploit an original disambiguation model that was trained on a larger treebank. Our model incorporates the original model as a reference probabilistic distribution. The experimental results for our model trained with a small amount of a treebank demonstrated an improvement in parsing accuracy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Natural language processing (NLP) is being demanded in various fields, such as biomedical research, patent application, and WWW, because an unmanageable amount of information is being published in unstructured data, i.e., natural language texts. To exploit latent information in these, the assistance of NLP technologies is highly required. However, an obstacle is the lack of portability of NLP tools. In general, NLP tools specialized to each domain were developed from scratch, or adapted by considerable human effort. This is because linguistic resources for each domain, such as a treebank, have not been sufficiently developed yet. Since dealing with various kinds of domains is an almost intractable job, sufficient resources can not be expected.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The method presented in this paper is the development of disambiguation models of an HPSG parser by combining a disambiguation model of an original parser with a new model adapting to a new domain. Although the training of a disambiguation model of a parser requires a sufficient amount of a treebank, its construction requires a considerable human effort. Hence, we exploit the original disambiguation model that was trained with a larger, but domain-independent treebank. Since the original disambiguation model contains rich information of general grammatical constraints, we try to use its information in developing a disambiguation model for a new domain.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our disambiguation model is a log-linear model into which the original disambiguation model is incorporated as a reference distribution. However, we cannot simply estimate this model, because of the problem that has been discussed in studies of the probabilistic modeling of unification-based grammars [1, 2] . That is, the exponential explosion of parse candidates assigned by the grammar makes the estimation intractable. The previous studies solved the problem by applying a dynamic programming algorithm to a packed representation of parse trees. In this paper, we borrow their idea, and define reference distribution on a packed structure. With this method, the log-linear model with a reference distribution can be estimated by using dynamic programming.",
                "cite_spans": [
                    {
                        "start": 302,
                        "end": 305,
                        "text": "[1,",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 306,
                        "end": 308,
                        "text": "2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In the experiments, we used an HPSG parser originally trained with the Penn Treebank [3] , and evaluated a disambiguation model trained with the GE-NIA treebank [4] , which consisted of abstracts of biomedical papers. First, we measured the accuracy of parsing and the time required for parameter estimation. For comparison, we also examined other possible models other than our disambiguation model. Next, we varied the size of a training corpus in order to evaluate the size sufficient for domain adaptation. Then, we varied feature sets used for training and examined the parsing accuracy. Finally, we compared the errors in the parsing results of our model with those of the original parser.",
                "cite_spans": [
                    {
                        "start": 85,
                        "end": 88,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 161,
                        "end": 164,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In Section 2, we introduce the disambiguation model of an HPSG parser. In Section 3, we describe a method of adopting reference distribution for adapting a probabilistic disambiguation model to a new domain. In Section 4, we examine our method through experiments on the GENIA treebank.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The HPSG parser used in this study is Enju [5] . The grammar of Enju was extracted from the Penn Treebank [3] , which consisted of sentences collected from The Wall Street Journal [6] . The disambiguation model of Enju was trained on the same treebank. This means that the parser has been adapted to The Wall Street Journal, and would be difficult to apply to other domains such as biomedical papers that include different distribution of words and their constraints.",
                "cite_spans": [
                    {
                        "start": 43,
                        "end": 46,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 106,
                        "end": 109,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 180,
                        "end": 183,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
            {
                "text": "In this study, we attempted the adaptation of a probabilistic disambiguation model by fixing the grammar and the disambiguation model of the original parser. The disambiguation model of Enju is based on a feature forest model [2] , which is a maximum entropy model [7] on packed forest structure. The probability, p E (t|s), of producing the parse result t for a given sentence s is defined as To avoid an exponential explosion, Enju represented T (s) in a packed form of HPSG parse trees [5] . In chart parsing, partial parse candidates are stored in a chart, in which phrasal signs are identified and packed into an equivalence class if they are determined to be equivalent and dominate the same word sequence. A set of parse trees is then represented as a set of relations among equivalence classes. Figure 1 shows a chart for parsing \"he saw a girl with a telescope\", where the modifiee (\"saw\" or \"girl\") of \"with\" is ambiguous. Each feature structure expresses an equivalence class, and the arrows represent immediate-dominance relations. The phrase, \"saw a girl with a telescope\", has two ambiguous subtrees (A in the figure). Since the signs of the top-most nodes are equivalent, they are packed into the same equivalence class. The ambiguity is represented as two pairs of arrows that come out of the node.",
                "cite_spans": [
                    {
                        "start": 226,
                        "end": 229,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 265,
                        "end": 268,
                        "text": "[7]",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 489,
                        "end": 492,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 803,
                        "end": 811,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
            {
                "text": "p E (t|s) = 1 Z s exp i \u03bb i f i (t, s) Z s = t \u2208T (s) exp i \u03bb i f i (t , s) ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
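            {
                "text": "To make the definition concrete, the following is a minimal Python sketch of this log-linear probability over an explicitly enumerated candidate set; the candidate encoding, feature names, and weights are illustrative rather than Enju's actual ones, and real parsing avoids this enumeration, as shown further below.\n\nimport math\n\ndef p_log_linear(t, candidates, weights):\n    # score(u) = sum_i lambda_i * f_i(u, s); each candidate is given\n    # directly as a dict of feature counts, so the score is a dot product.\n    def score(u):\n        return sum(weights.get(name, 0.0) * v for name, v in u.items())\n    z = sum(math.exp(score(u)) for u in candidates)  # Z_s by brute-force enumeration\n    return math.exp(score(t)) / z\n\n# Toy T(s): two candidates that differ in one attachment feature.\nt1 = {'rule=head_mod': 1.0, 'word_h=saw': 1.0}\nt2 = {'rule=head_mod': 1.0, 'word_h=girl': 1.0}\nweights = {'word_h=saw': 0.7, 'word_h=girl': 0.2}\nprint(p_log_linear(t1, [t1, t2], weights))  # ~0.62",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },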
            {
                "text": "A packed chart can be interpreted as an instance of a feature forest [2] . A feature forest represents a set of exponentially-many trees in an \"and/or\" graph of a tractable size. A feature forest is formally defined as a tuple C, D, R, \u03b3, \u03b4 , where C is a set of conjunctive nodes, D is a set of disjunctive nodes, R \u2286 C is a set of root nodes 1 , \u03b3 : D \u2192 2 C is a conjunctive daughter function, and Based on the definition, parse tree t of sentence s can be represented as the set of conjunctive nodes in the feature forest. The probability p E (t|s) is then redefined as",
                "cite_spans": [
                    {
                        "start": 69,
                        "end": 72,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
            {
                "text": "p E (t|s) = 1 Z s exp c\u2208t i \u03bb i f i (c) Z s = t \u2208T (s) exp c\u2208t i \u03bb i f i (c) ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
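            {
                "text": "Because the score of a tree decomposes into per-conjunctive-node terms, $Z_s$ can be computed bottom-up over the and/or graph instead of by enumerating $T(s)$. A minimal sketch of that inside computation, assuming an acyclic forest and an illustrative dict encoding (not Enju's actual data structures):\n\nimport math\nfrom functools import lru_cache\n\ndef partition_function(conj, disj, roots):\n    # conj[c] = (node_score, [disjunctive daughter ids]), where node_score\n    # stands for sum_i lambda_i f_i(c); disj[d] = [conjunctive daughter ids].\n    @lru_cache(maxsize=None)\n    def inside_c(c):\n        score, daughters = conj[c]\n        prod = 1.0\n        for d in daughters:          # an 'and' node multiplies its children\n            prod *= inside_d(d)\n        return math.exp(score) * prod\n\n    @lru_cache(maxsize=None)\n    def inside_d(d):                 # an 'or' node sums over its alternatives\n        return sum(inside_c(c) for c in disj[d])\n\n    return sum(inside_c(r) for r in roots)\n\n# Tiny forest: root c1 has one disjunctive daughter choosing c3 or c4,\n# mirroring the packed PP-attachment ambiguity of Figure 1.\nconj = {'c1': (0.5, ['d1']), 'c3': (1.2, []), 'c4': (0.8, [])}\ndisj = {'d1': ['c3', 'c4']}\nprint(partition_function(conj, disj, ['c1']))  # Z_s without unpacking the chart",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },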
            {
                "text": "where f i (c) are alternative feature functions assigned to conjunctive nodes c \u2208 C. By using this redefined probability, a dynamic programming algorithm can be applied to estimate p(t|T (s)) without unpacking the packed chart [2] . Feature functions in feature forest models are designed to capture the characteristics of a conjunctive node. In HPSG parsing, it corresponds to a tuple of a mother and its daughters. Enju uses features that are combinations of the atomic features listed in Table 1 . The following combinations are used for representing the characteristics of the binary/unary rule applications. where suffix h and n means a head daughter and a non-head daughter, respectively. ",
                "cite_spans": [
                    {
                        "start": 227,
                        "end": 230,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 491,
                        "end": 498,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "An HPSG Parser",
                "sec_num": "2"
            },
            {
                "text": "In addition, the following feature is used for expressing the condition of the root node of the parse tree.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 3. Example features",
                "sec_num": null
            },
            {
                "text": "f root = sym,word,pos,le Figure 3 shows example features: f root is the feature for the root node, in which the phrase symbol is S and the surface form, part-of-speech, and lexical entry of the lexical head are \"saw\", VBD, and a transitive verb, respectively. The f binary is the feature for the binary rule application to \"saw a girl\" and \"with a telescope\", in which the applied schema is the Head-Modifier Schema, the head daughter is VP headed by \"saw\", and the non-head daughter is PP headed by \"with\", whose part-of-speech is IN and the lexical entry is a VP-modifying preposition.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 25,
                        "end": 33,
                        "text": "Figure 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Fig. 3. Example features",
                "sec_num": null
            },
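            {
                "text": "The sketch below spells out how these templates instantiate to concrete features for the running example; the tuple layout and the span values are our illustrative reading of Table 1, not Enju's internal encoding.\n\n# f_binary for the Head-Modifier application combining 'saw a girl'\n# (head daughter, a VP) with 'with a telescope' (non-head daughter, a PP)\nf_binary = ('head_mod',                                # rule\n            1, 0,                                      # dist, comma\n            3, 'VP', 'saw', 'VBD', 'transitive_verb',  # span/sym/word/pos/le of head\n            3, 'PP', 'with', 'IN', 'vp_mod_prep')      # the same for the non-head\n\n# f_root for the whole sentence: phrase symbol S, lexical head 'saw'\nf_root = ('S', 'saw', 'VBD', 'transitive_verb')\n\n# In the maximum entropy model each distinct tuple acts as one indicator\n# feature f_i(c), and its weight lambda_i is estimated from the treebank.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 3. Example features",
                "sec_num": null
            },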
            {
                "text": "The method of domain adaptation is to develop a new maximum entropy model with incorporating an original model as a reference probabilistic distribution. The idea of adaptation using a reference distribution has already been presented in several studies [8, 9] . When we have a reference probabilistic model p 0 (t|s) and are making a new model p M (t|s), the probability is defined as",
                "cite_spans": [
                    {
                        "start": 254,
                        "end": 257,
                        "text": "[8,",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 258,
                        "end": 260,
                        "text": "9]",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "p M (t|s) = 1 Z s p 0 (t|s) exp \u239b \u239d j \u03c1 j g j (t , s) \u239e \u23a0 where Z s = t \u2208T (s) p 0 (t |s) exp \u239b \u239d j \u03c1 j g j (t , s) \u239e \u23a0 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
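            {
                "text": "Equivalently, $\\log p_0(t|s)$ enters the score as a fixed offset, and only the new weights $\\rho_j$ are free. A minimal enumeration-based sketch, assuming $p_0$ is given as a callable over candidates (illustrative only; the packed-forest formulation derived below is what makes this tractable):\n\nimport math\n\ndef p_with_reference(t, candidates, log_p0, g, rho):\n    # log p_M(u|s) = log p_0(u|s) + sum_j rho_j g_j(u, s) - log Z_s\n    def score(u):\n        return log_p0(u) + sum(rho.get(k, 0.0) * v for k, v in g(u).items())\n    z = sum(math.exp(score(u)) for u in candidates)\n    return math.exp(score(t)) / z\n\nWith all $\\rho_j$ at zero, $p_M$ reduces to $p_0$; training moves $p_M$ away from the reference only as far as the domain data demands.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },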
            {
                "text": "Model parameters, \u03c1 j , are estimated so as to maximize the likelihood of the training data as in ordinary maximum entropy models. The maximization of the likelihood with the above model is equivalent to finding the model p M that is closest to the reference probability p 0 in terms of the Kullback-Leibler distance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "However, we cannot simply apply the above method to our task because the parameter estimation requires the computation of the above probability for all parse candidates T (s). As discussed in Section 2, the size of T (s) is exponentially related to the length of s. This imposes a new problem, that is, we need to enumerate p 0 (t|s) for all candidate parses. Obviously, this is intractable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "Since Enju represented a probabilistic disambiguation model in a packed forest structure, we exploit that structure to represent our probabilistic model. That is, we redefine p M with feature functions g j on conjunctive nodes as ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "p M (t|s) = 1 Z s p 0 (t|s) exp \u239b \u239d c\u2208t j \u03c1 j g j (c) \u239e \u23a0 where Z s = t \u2208T (s) p 0 (t|s) exp \u239b \u239d c\u2208t j \u03c1 j g j (c) \u239e \u23a0 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "t 1 selected t 2 selected \u2211 j c gj j ) ( 1 \u03c1 \u2211 i c fi i ) ( 1 \u03bb \u2211 j c gj j ) ( 4 \u03c1 \u2211 j c gj j ) ( 3 \u03c1 \u2211 j c gj j ) ( 2 \u03c1 \u2211 i c fi i ) ( 2 \u03bb \u2211 i c fi i ) ( 3 \u03bb \u2211 i c fi i ) ( 4 \u03bb",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Re-training of Disambiguation Models",
                "sec_num": "3"
            },
            {
                "text": "As described in Section 2, the original model, p E (t|s), is expressed in a packed structure as",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "p E (t|s) = 1 Z s exp c\u2208t i \u03bb i f i (c)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "where",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "Z s = t \u2208T (s) exp c\u2208t i \u03bb i f i (c) .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "Then, p 0 (t|s) is substituted by p E (t|s), and p M (t|s) is formulated as",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "p M (t|s) = 1 Z s 1 Z s exp c\u2208t i \u03bb i f i (c) exp \u239b \u239d c\u2208t j \u03c1 j g j (c) \u239e \u23a0 = 1 Z s \u2022 Z s exp \u239b \u239d c\u2208t i \u03bb i f i (c) + c\u2208t j \u03c1 j g j (c) \u239e \u23a0 = 1 Z s exp \u23a7 \u23a8 \u23a9 c\u2208t \u239b \u239d i \u03bb i f i (c) + j \u03c1 j g j (c) \u239e \u23a0 \u23ab \u23ac \u23ad where Z s = Z s \u2022 Z s = t\u2208T (s) exp \u23a7 \u23a8 \u23a9 c\u2208t \u239b \u239d i \u03bb i f i (c) + j \u03c1 j g j (c) \u239e \u23a0 \u23ab \u23ac \u23ad .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
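            {
                "text": "The practical consequence of this derivation is that $p_M$ is itself a feature forest model whose per-node score is $\\sum_i \\lambda_i f_i(c) + \\sum_j \\rho_j g_j(c)$, with the $\\lambda_i$ frozen at Enju's values and only the $\\rho_j$ trained. A sketch of this reduction, reusing the illustrative partition_function above:\n\ndef combine_node_scores(conj, f_score, g_score):\n    # f_score(c): frozen reference weight sum_i lambda_i f_i(c)\n    # g_score(c): trainable domain weight sum_j rho_j g_j(c)\n    return {c: (f_score(c) + g_score(c), daughters)\n            for c, (_, daughters) in conj.items()}\n\n# hat(Z)_s = partition_function(combine_node_scores(conj, f, g), disj, roots):\n# one inside pass per sentence per estimation step, with no enumeration of T(s).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },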
            {
                "text": "With this form of p M (t|s), a dynamic programing algorithm can be applied. For example, we show how to obtain probabilities of parse trees in the case of ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "p M (t 1 |s)= 1 Z s exp \u23a7 \u23a8 \u23a9 \u239b \u239d i \u03bb i f i (c 1 ) + j \u03c1 j g j (c 1 ) \u239e \u23a0 + \u239b \u239d i \u03bb i f i (c 2 ) + j \u03c1 j g j (c 2 ) \u239e \u23a0 + \u239b \u239d i \u03bb i f i (c 3 ) + j \u03c1 j g j (c 3 ) \u239e \u23a0 + \u2022 \u2022 \u2022 \u23ab \u23ac \u23ad p M (t 2 |s)= 1 Z s exp \u23a7 \u23a8 \u23a9 \u239b \u239d i \u03bb i f i (c 1 ) + j \u03c1 j g j (c 1 ) \u239e \u23a0 + \u239b \u239d i \u03bb i f i (c 2 ) + j \u03c1 j g j (c 2 ) \u239e \u23a0 + \u239b \u239d i \u03bb i f i (c 4 ) + j \u03c1 j g j (c 4 ) \u239e \u23a0 + \u2022 \u2022 \u2022 \u23ab \u23ac \u23ad .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Fig. 4. Example of importing a reference distribution into each conjunctive node",
                "sec_num": null
            },
            {
                "text": "We implemented the method described in Section 3. The original parser, Enju, was developed on Section 02-21 of the Penn Treebank (39,832 sentences) [5] . For the training of our model, we used the GENIA treebank [4] , which consisted of 500 abstracts (4,446 sentences) extracted from MEDLINE. We divided the GENIA treebank into three sets of 400, 50, and 50 abstracts (3, 524, 455 , and 467 sentences), and these sets were used respectively as training, development, and final evaluation data. The method of Gaussian MAP estimation [10] was used for smoothing. The meta parameter \u03c3 of the Gaussian distribution was determined so as to maximize the accuracy on the development set. In the following experiments, we measured the accuracy of predicate-argument dependencies on the evaluation set. The measure is labeled precision/recall (LP/LR), which is the same measure as previous work [11, 5] that evaluated the accuracy of lexicalized grammars on the Penn Treebank.",
                "cite_spans": [
                    {
                        "start": 148,
                        "end": 151,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 212,
                        "end": 215,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 532,
                        "end": 536,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 886,
                        "end": 890,
                        "text": "[11,",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 891,
                        "end": 893,
                        "text": "5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },
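            {
                "text": "For concreteness, labeled precision/recall over predicate-argument dependencies can be computed as in the sketch below, assuming each dependency is represented as a (predicate, argument, label) triple; this reflects our reading of the metric, not evaluation code from the paper.\n\ndef lp_lr(gold_sets, system_sets):\n    # gold_sets, system_sets: one set of (pred, arg, label) triples per\n    # sentence (sentence ids are omitted here for brevity)\n    gold = set().union(*gold_sets)\n    sys_ = set().union(*system_sets)\n    correct = len(gold & sys_)\n    lp = correct / len(sys_)            # labeled precision\n    lr = correct / len(gold)            # labeled recall\n    f1 = 2 * lp * lr / (lp + lr)\n    return lp, lr, f1\n\ng = [{('saw', 'he', 'ARG1'), ('saw', 'girl', 'ARG2')}]\ns = [{('saw', 'he', 'ARG1'), ('saw', 'telescope', 'ARG2')}]\nprint(lp_lr(g, s))  # (0.5, 0.5, 0.5)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },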
            {
                "text": "First, we measured the accuracy of parsing and the time required for parameter estimation. Table 2 compares the results of the following estimation methods. method gave significantly lower accuracy. We expect that the method clearly lacked the amount of the training corpus for obtaining generic grammatical information.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 91,
                        "end": 98,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },
            {
                "text": "The \"Combined\" method achieved the accuracy close to our method. However, it is notable that our method took much less time for the training of the model since ours did not need to handle the Penn Treebank. Instead, our method exploited the original model of Enju, which was trained on the Penn Treebank, and this resulted in much less cost of training.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },
            {
                "text": "Next, we changed the size of the GENIA treebank for training: 40, 80, 120, 160, 200, 240, 280, 320, 360, and 400 abstracts. Figure 5 shows the accuracy when the size of the training data was changed. We can say that, for those feature sets giving remarkable accuracy in the experiments, the accuracy edged upwards with the size of the training corpus, and the trend does not seem to converge even if more than 400 abstracts exist. If we choose more complex feature sets for higher accuracy, data sparseness will occur and an even larger corpus will be needed. These findings indicate that we can further improve the accuracy by using a larger treebank and a proper feature set. Table 3 shows the accuracy of models with only atomic feature templates. The bottom of the table gives the accuracy attained by the original parser. When we focus on the WORD features, we can see the combination of WORD h and WORD n improved the accuracy significantly, although each of the features by itself did not improve so much. DIST, SYMBOL, and POS feature templates lowered the accuracy. The other feature templates improved the accuracy, though not as well as the WORD templates. Table 4 shows that the RULE feature combined with one or more other features often gave a little higher accuracy than the RULE feature gave by itself, though not as well as the WORD features. Table 5 shows that the WORD features combined with one or more other features gave remarkable improvement to the accuracy as a whole. RULE and COMMA features gave even higher accuracy than with only the WORD features. Our results revealed that the WORD features were crucial for the adaptation to the biomedical domain. We expect that this was because the biomedical domain had a different distribution of words, while more generic grammatical constraints were not significantly different from other domains. Table 6 shows the comparison of the number of errors of our model with those of the original model in parsing the GENIA corpus. Though our model gave less errors than the original model, our model introduced a certain amount of new errors. In future work, we need to investigate manually those errors to find more suitable feature templates without losing the information in the original model.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 124,
                        "end": 132,
                        "text": "Figure 5",
                        "ref_id": "FIGREF5"
                    },
                    {
                        "start": 678,
                        "end": 685,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 1168,
                        "end": 1175,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 1360,
                        "end": 1367,
                        "text": "Table 5",
                        "ref_id": "TABREF4"
                    },
                    {
                        "start": 1869,
                        "end": 1876,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },
            {
                "text": "We have presented a method of adapting a domain-independent HPSG parser to a biomedical domain. Since the treebank of the new domain was limited, we exploited an original disambiguation model. The new model was trained on a biomedical treebank, and was combined with the original model by using it as a reference distribution of a log-linear model. The experimental results demonstrated our new model was adapted to the target domain, and was superior to other adaptation methods in accuracy and the cost of training time. With our model, the parsing accuracy for the target domain improved by 1.77 point with the treebank of 3,524 sentences. Since the accuracy did not seem to saturate, we will further improve the accuracy by increasing the size of the domain-dependent treebank. In addition, the experimental results showed that the WORD feature significantly contributed to the accuracy improvement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "5"
            },
            {
                "text": "We examined only a few feature templates, and we must search for further more feature templates. Not only the new combinations of the atomic features but also new types of features, which may be domain-dependent such as named entities, will be possible.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "5"
            },
            {
                "text": "For the ease of explanation, the definition of root node is slightly different from the original.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "Our method: training with our method Combined: training Enju model with the training corpus replaced by the combination of the GENIA corpus and the Penn Treebank GENIA only: training Enju model with the training corpus replaced by the GENIA corpus only Original Model: training an original Enju modelThe table shows the accuracy and the parsing time for the GENIA corpus and the Penn Treebank Section 23, and also shows the time required for the training of the model. The additional feature used in our method was RULE+WORD h + WORD n , which will be explained later. In the \"Combined\" method, we could not train the model with the original training parameters (n = 20, = 0.98 in [5] ) because the estimator ran out of memory. Hence, we reduced the parameters to n = 10, = 0.95.For the GENIA corpus, our model gave the higher accuracy than the original model and the other estimation methods, while for the Penn Treebank, our model gave a little lower accuracy than the original model. This result indicates that our model was more adapted to the specific domain. The \"GENIA only\"",
                "cite_spans": [
                    {
                        "start": 681,
                        "end": 684,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "annex",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Dynamic programming for parsing and estimation of stochastic unification-based grammars",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Geman",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Johnson",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proc. 40th ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Geman, S., Johnson, M.: Dynamic programming for parsing and estimation of stochastic unification-based grammars. In: Proc. 40th ACL. (2002)",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Maximum entropy estimation for feature forests",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Miyao",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tsujii",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proc. HLT 2002",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Miyao, Y., Tsujii, J.: Maximum entropy estimation for feature forests. In: Proc. HLT 2002. (2002)",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "The Penn Treebank: Annotating predicate argument structure",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Marcus",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Kim",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [
                            "A"
                        ],
                        "last": "Marcinkiewicz",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Macintyre",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Bies",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Ferguson",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Katz",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Schasberger",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "ARPA Human Language Technology Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Marcus, M., Kim, G., Marcinkiewicz, M.A., MacIntyre, R., Bies, A., Ferguson, M., Katz, K., Schasberger, B.: The Penn Treebank: Annotating predicate argument structure. In: ARPA Human Language Technology Workshop. (1994)",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Genia corpus -a semantically annotated corpus for bio-textmining",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "D"
                        ],
                        "last": "Kim",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Ohta",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Teteisi",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tsujii",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Bioinformatics",
                "volume": "19",
                "issue": "",
                "pages": "180--182",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kim, J.D., Ohta, T., Teteisi, Y., Tsujii, J.: Genia corpus -a semantically annotated corpus for bio-textmining. Bioinformatics 19 (2003) i180-i182",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Probabilistic disambiguation models for wide-coverage HPSG parsing",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Miyao",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tsujii",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proc. ACL 2005",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Miyao, Y., Tsujii, J.: Probabilistic disambiguation models for wide-coverage HPSG parsing. In: Proc. ACL 2005. (2005)",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Corpus-oriented grammar development for acquiring a Head-driven Phrase Structure Grammar from the Penn Treebank",
                "authors": [
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "Miyao",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Ninomiya",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tsujii",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. IJCNLP-04",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Miyao, Y., Ninomiya, T., Tsujii, J.: Corpus-oriented grammar development for acquiring a Head-driven Phrase Structure Grammar from the Penn Treebank. In: Proc. IJCNLP-04. (2004)",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A maximum entropy approach to natural language processing",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "L"
                        ],
                        "last": "Berger",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [
                            "A D"
                        ],
                        "last": "Pietra",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [
                            "J D"
                        ],
                        "last": "Pietra",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Computational Linguistics",
                "volume": "22",
                "issue": "",
                "pages": "39--71",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Berger, A.L., Pietra, S.A.D., Pietra, V.J.D.: A maximum entropy approach to natural language processing. Computational Linguistics 22 (1996) 39-71",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Statistical Methods for Speech Recognition",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Jelinek",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jelinek, F.: Statistical Methods for Speech Recognition. The MIT Press (1998)",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Exploiting auxiliary distributions in stochastic unificationbased grammars",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Johnson",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Riezler",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proc. 1st NAACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Johnson, M., Riezler, S.: Exploiting auxiliary distributions in stochastic unification- based grammars. In: Proc. 1st NAACL. (2000)",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "A gaussian prior for smoothing maximum entropy models",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Rosenfeld",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chen, S., Rosenfeld, R.: A gaussian prior for smoothing maximum entropy models. Technical Report CMUCS-99-108, Carnegie Mellon University (1999)",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Parsing the WSJ using CCG and log-linear models",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Clark",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "R"
                        ],
                        "last": "Curran",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. 42nd ACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Clark, S., Curran, J.R.: Parsing the WSJ using CCG and log-linear models. In: Proc. 42nd ACL. (2004)",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "text": "Chart for parsing \"he saw a girl with a telescope\"where T (s) is the set of parse candidates assigned to s. The feature function f i (t, s) represents the characteristics of t and s, while the corresponding model parameter \u03bb i is its weight. Model parameters were estimated so as to maximize the log-likelihood of the training data.Estimation of the above model requires a set of training pairs t s , T (s) , where t s is the correct parse for the sentence s. While t s is provided by a treebank, T (s) is computed by parsing each s in the treebank. However, the simple enumeration of T (s) is impractical because the size of T (s) is exponential to the length of s.",
                "num": null,
                "uris": null
            },
            "FIGREF1": {
                "type_str": "figure",
                "text": "Packed representation of HPSG parse trees inFigure 1",
                "num": null,
                "uris": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "text": "shows (a part of) the HPSG parse trees inFigure 1represented as a feature forest. Square boxes are conjunctive nodes, dotted lines express a disjunctive daughter function, and solid arrows represent a conjunctive daughter function.",
                "num": null,
                "uris": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "text": "f binary = rule,dist,comma, span h , sym h , word h , pos h , le h , spann, symn, wordn, posn, len funary = rule,sym,word,pos,le",
                "num": null,
                "uris": null
            },
            "FIGREF4": {
                "type_str": "figure",
                "text": "For ease, we assume that there are only two disjunctive daughters (dotted lines) that are of the top conjunctive node. The left disjunctive node introduces a parse tree t 1 that consists of conjunctive nodes {c 1 , c 2 , c 3 , . . . }, and the right one, t 2 that consists of {c 1 , c 2 , c 4 , . . . }. To each conjunctive node c k , a weight from the reference distribution i \u03bb i f i (c k ) is assigned. Probability p M (t 1 |s) and p M (t 2 |s) are then given as",
                "num": null,
                "uris": null
            },
            "FIGREF5": {
                "type_str": "figure",
                "text": "Corpus size vs. Accuracy",
                "num": null,
                "uris": null
            },
            "TABREF0": {
                "type_str": "table",
                "text": "a disjunctive daughter function.",
                "num": null,
                "html": null,
                "content": "<table><tr><td/><td/><td/><td>c 1</td><td>HEAD verb</td></tr><tr><td/><td/><td/><td/><td>SUBCAT &lt;&gt;</td></tr><tr><td/><td/><td/><td colspan=\"2\">HEAD noun</td><td>HEAD verb</td></tr><tr><td/><td/><td/><td colspan=\"2\">SUBCAT &lt;&gt;</td><td>SUBCAT &lt;NP&gt;</td></tr><tr><td/><td>c 3</td><td colspan=\"2\">HEAD verb SUBCAT &lt;NP&gt;</td><td>c 4</td><td>SUBCAT &lt;NP&gt; HEAD verb</td></tr><tr><td/><td colspan=\"2\">HEAD verb SUBCAT &lt;NP&gt;</td><td colspan=\"2\">HEAD prep MOD VP SUBCAT &lt;&gt;</td><td>HEAD verb SUBCAT &lt;NP,NP&gt;</td><td>HEAD noun SUBCAT &lt;&gt;</td></tr><tr><td colspan=\"2\">he SUBCAT &lt;&gt; HEAD noun c 2</td><td colspan=\"3\">HEAD prep MOD VP SUBCAT &lt;NP&gt; HEAD prep MOD VP HEAD noun SUBCAT &lt;&gt; SUBCAT &lt;&gt; c 5</td><td>HEAD prep MOD NP SUBCAT &lt;&gt; HEAD noun SUBCAT &lt;&gt; HEAD noun SUBCAT &lt;&gt; c 6</td></tr><tr><td>c 7</td><td colspan=\"2\">HEAD verb</td><td/><td>c 8</td></tr><tr><td colspan=\"4\">SUBCAT &lt;NP&gt; HEAD verb SUBCAT &lt;NP,NP&gt; HEAD noun SUBCAT &lt;&gt;</td><td>HEAD verb SUBCAT &lt;NP,NP&gt; saw</td></tr></table>"
            },
            "TABREF1": {
                "type_str": "table",
                "text": "Templates of atomic features rule the name of the applied schema dist the distance between the head words of the daughters comma whether a comma exists between daughters and/or inside of daughter phrases span the number of words dominated by the phrase sym the symbol of the phrasal category (e.g. NP, VP) word the surface form of the head word pos the part-of-speech of the head word le the lexical entry assigned to the head word",
                "num": null,
                "html": null,
                "content": "<table><tr><td/><td/><td/><td>f</td><td>root =</td><td>S,</td><td>saw,</td><td>VBD,</td><td>transitive</td></tr><tr><td/><td>c 1</td><td colspan=\"2\">HEAD verb</td><td/><td/></tr><tr><td/><td/><td colspan=\"3\">SUBCAT &lt;&gt;</td><td/></tr><tr><td/><td colspan=\"2\">HEAD noun</td><td colspan=\"3\">HEAD verb</td></tr><tr><td/><td colspan=\"2\">SUBCAT &lt;&gt;</td><td colspan=\"4\">SUBCAT &lt;NP&gt;</td></tr><tr><td>he SUBCAT &lt;&gt; HEAD noun c 2</td><td colspan=\"4\">HEAD verb SUBCAT &lt;NP&gt; HEAD verb SUBCAT &lt;&gt; SUBCAT &lt;NP&gt; HEAD prep MOD VP c 3</td><td colspan=\"2\">HEAD verb SUBCAT &lt;NP,NP&gt; HEAD verb HEAD noun SUBCAT &lt;&gt; SUBCAT &lt;NP&gt; c 4</td></tr><tr><td/><td>f</td><td>binary =</td><td colspan=\"4\">vp VBD, with,IN, saw, mod,3,0, -PP, VP, head 3, 3,</td><td>prep , transitive -mod -</td></tr></table>"
            },
            "TABREF3": {
                "type_str": "table",
                "text": "Accuracy and time cost for various estimation methods",
                "num": null,
                "html": null,
                "content": "<table><tr><td/><td/><td>F-score</td><td/><td>Training</td><td colspan=\"2\">Parsing time (sec.)</td></tr><tr><td/><td colspan=\"6\">GENIA Corpus Penn Treebank time (sec.) GENIA Corpus Penn Treebank</td></tr><tr><td>Our method</td><td colspan=\"2\">86.87</td><td>86.81</td><td>2,278</td><td>611</td><td>3,165</td></tr><tr><td>Combined</td><td colspan=\"2\">86.32</td><td>86.09</td><td>29,421</td><td>424</td><td>2,757</td></tr><tr><td>GENIA only</td><td colspan=\"2\">85.72</td><td>42.49</td><td>1,694</td><td>332</td><td>8,183</td></tr><tr><td>Original model</td><td colspan=\"2\">85.10</td><td>87.16</td><td>137,038</td><td>515</td><td>2,554</td></tr><tr><td/><td/><td>87</td><td/><td/><td/></tr><tr><td/><td/><td>86.8</td><td/><td/><td/></tr><tr><td/><td/><td>86.6</td><td/><td/><td/></tr><tr><td/><td/><td>86.4</td><td/><td/><td/></tr><tr><td/><td>F-score</td><td>85.8 86 86.2</td><td/><td/><td/></tr><tr><td/><td/><td>85.6</td><td/><td/><td/></tr><tr><td/><td/><td>85.4</td><td/><td/><td/></tr><tr><td/><td/><td>85.2</td><td/><td/><td/></tr><tr><td/><td/><td>85</td><td/><td/><td/></tr><tr><td/><td/><td colspan=\"4\">0 500 1000 1500 2000 2500 3000 3500</td></tr><tr><td/><td/><td/><td colspan=\"2\">training sentences</td><td/></tr></table>"
            },
            "TABREF4": {
                "type_str": "table",
                "text": "Accuracy with the combination of WORD and another feature Features LP LR F-score diff. WORD h +WORDn+RULE 87.12 86.62 86.87 +1.77 WORD h +WORDn+DIST 86.41 85.86 86.14 +1.04 WORD h +WORDn+COMMA 86.91 86.38 86.64 +1.54 WORD h +WORDn+SPAN h +SPANn 85.77 85.22 85.49 +0.39 WORD h +WORDn+SYMBOL h +SYMBOLn 86.58 85.70 86.14 +1.04 WORD",
                "num": null,
                "html": null,
                "content": "<table><tr><td colspan=\"3\">h +WORDn+POS h +POSn</td><td>86.53 85.99 86.26 +1.16</td></tr><tr><td colspan=\"2\">WORD h +WORDn+LE h +LEn</td><td/><td>86.16 85.68 85.92 +0.82</td></tr><tr><td colspan=\"2\">None</td><td/><td>85.39 84.82 85.10</td></tr><tr><td colspan=\"4\">Table 6. Errors in our model and Enju</td></tr><tr><td/><td colspan=\"3\">Total errors Common errors Errors not in</td></tr><tr><td/><td/><td/><td>the other model</td></tr><tr><td>Our model</td><td>1179</td><td>1050</td><td>129</td></tr><tr><td>Original model</td><td>1338</td><td>1050</td><td>288</td></tr></table>"
            }
        }
    }
}
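
A minimal sketch of the log-linear scoring that FIGREF0 and FIGREF4 describe: each parse tree t is scored by summing the per-node weights \u03a3_i \u03bb_i f_i(c_k) over its conjunctive nodes, and p_M(t|s) is obtained by normalizing exp(score) over the candidate set T(s). The feature names, weights, and trees below are hypothetical, invented only to illustrate the computation.

from math import exp

# Hypothetical feature weights lambda_i, keyed by feature name.
weights = {
    "rule:head-mod": 0.8,
    "word:saw": 0.3,
    "le:transitive": 0.5,
    "rule:head-comp": -0.4,
}

def node_score(features):
    # Sum_i lambda_i * f_i(c_k) for one conjunctive node c_k (binary features).
    return sum(weights.get(f, 0.0) for f in features)

def tree_score(tree):
    # A tree's score is the sum of its conjunctive-node scores.
    return sum(node_score(node) for node in tree)

# Two candidate trees sharing conjunctive nodes, as in FIGREF4:
# t1 = {c1, c2, c3, ...}, t2 = {c1, c2, c4, ...}.
t1 = [["rule:head-mod", "word:saw"], ["le:transitive"], ["rule:spec-head"]]
t2 = [["rule:head-mod", "word:saw"], ["le:transitive"], ["rule:head-comp"]]

# p_M(t|s) = exp(score(t)) / sum over t' in T(s) of exp(score(t'))
candidates = {"t1": t1, "t2": t2}
z = sum(exp(tree_score(t)) for t in candidates.values())
probs = {name: exp(tree_score(t)) / z for name, t in candidates.items()}
print(probs)  # p_M(t1|s) and p_M(t2|s) sum to 1

Because the shared nodes contribute the same weight to both trees, the ranking of t1 and t2 is decided entirely by the nodes where they differ, which is what makes the packed feature-forest representation sufficient for estimation.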