{
    "paper_id": "I11-1033",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:31:54.767215Z"
    },
    "title": "Automatic Labeling of Voiced Consonants for Morphological Analysis of Modern Japanese Literature",
    "authors": [
        {
            "first": "Teruaki",
            "middle": [],
            "last": "Oka",
            "suffix": "",
            "affiliation": {},
            "email": "teruaki-o@is.naist.jp"
        },
        {
            "first": "Mamoru",
            "middle": [],
            "last": "Komachi",
            "suffix": "",
            "affiliation": {},
            "email": "komachi@is.naist.jp"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Since the present-day Japanese use of voiced consonant mark had established in the Meiji Era, modern Japanese literary text written in the Meiji Era often lacks compulsory voiced consonant marks. This deteriorates the performance of morphological analyzers using ordinary dictionary. In this paper, we propose an approach for automatic labeling of voiced consonant marks for modern literary Japanese. We formulate the task into a binary classification problem. Our pointwise prediction method uses as its feature set only surface information about the surrounding character strings. As a consequence, training corpus is easy to obtain and maintain because we can exploit a partially annotated corpus for learning. We compared our proposed method as a preprocessing step for morphological analysis with a dictionary-based approach, and confirmed that pointwise prediction outperforms dictionary-based approach by a large margin.",
    "pdf_parse": {
        "paper_id": "I11-1033",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Since the present-day Japanese use of voiced consonant mark had established in the Meiji Era, modern Japanese literary text written in the Meiji Era often lacks compulsory voiced consonant marks. This deteriorates the performance of morphological analyzers using ordinary dictionary. In this paper, we propose an approach for automatic labeling of voiced consonant marks for modern literary Japanese. We formulate the task into a binary classification problem. Our pointwise prediction method uses as its feature set only surface information about the surrounding character strings. As a consequence, training corpus is easy to obtain and maintain because we can exploit a partially annotated corpus for learning. We compared our proposed method as a preprocessing step for morphological analysis with a dictionary-based approach, and confirmed that pointwise prediction outperforms dictionary-based approach by a large margin.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Recently, corpus-based approaches have been successfully adopted in the field of Japanese Linguistics. However, the central part of the fields has been occupied by historical research that uses ancient material, on which fundamental annotations are often not yet available.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Despite the limited annotated corpora, researchers have developed several morphological analysis dictionaries for past-day Japanese. National Institute for Japanese Language and Linguistics creates Kindai-bungo UniDic, 1 a morphological analysis dictionary for modern Japanese 1 http://www2.ninjal.ac.jp/lrc/index.php?UniDic literary text, 2 which achieves high performance on analysis for existing electronic text (e.g. Aozorabunko, an online digital library of freely available books and work mainly from out-of-copyright materials).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "However, the performance of morphological analyzers using the dictionary deteriorates if the text is not normalized, because these dictionaries often lack orthographic variations such as Okuri-gana, 3 accompanying characters following Kanji stems in Japanese written words. This is problematic because not all historical texts are manually corrected with orthography, and it is time-consuming to annotate by hand. It is one of the major issues in applying NLP tools to Japanese Linguistics because ancient materials often contain a wide variety of orthographic variations.",
                "cite_spans": [
                    {
                        "start": 199,
                        "end": 200,
                        "text": "3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "For example, there is an issue of voiced consonant marks. Any \"Hiragana\" character and \"Katakana\" character (called Kana character altogether) represent either consonant (k, s, t, n, h, m, y, r, w) onset with vowel (a, i, u, e, o) nucleus or only the vowel (except for nasal codas N). Furthermore, the characters alone can not represent syllables beginning with a voiced consonant (g, z, d, b) in current orthography. They are spelled with Kana and a voiced consonant mark ( ) to the upper right (see Figure 1 ). However, confusingly, it was not ungrammatical to put down the character without the mark to represent voiced syllable 2 In historical linguistics, the phrase \"modern Japanese\" refers to the language from 1600 on to the present in a broad sense. However, most Japanese people regard the phrase to the Meiji and Taisho Era; we also use the phrase to intend the narrower sense.",
                "cite_spans": [
                    {
                        "start": 170,
                        "end": 197,
                        "text": "(k, s, t, n, h, m, y, r, w)",
                        "ref_id": null
                    },
                    {
                        "start": 381,
                        "end": 393,
                        "text": "(g, z, d, b)",
                        "ref_id": null
                    },
                    {
                        "start": 632,
                        "end": 633,
                        "text": "2",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 501,
                        "end": 509,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "3 In Japanese Literature, both Kana (phonogramic characters) and Kanji (ideographic characters) are used together. Generally, conjugated form is ambiguous, given the preceding Kanji characters. However, the character's pronunciation can also be written using Kana characters. Thus, the pronunciation's tailing few syllables are hanged out (Okuri), using Kana (gana) characters for disambiguating the form. Although the number of Okuri-gana is fixed for each Kanji character now, it was not fixed in the Meiji Era. until the Meiji Era, because Japanese orthography dates back to the Meiji Era. Consequently, modern Japanese literary text written in the Meiji Era often lacks compulsory voiced consonant marks. The mark was used only when the author deems it necessary to disambiguate; and it was not often used if one can infer from the context that the pronunciation is voiced. Figure 2 shows characters which lack the voiced consonant mark even though we expect it to be marked in the text. Hereafter, we call such characters as \"unmarked characters.\" Also, we call the characters to which the voiced consonant mark can be attached as \"ambiguous characters.\" In Table 1 , we present the statistics of the voiced consonants in \"Kokumin-no-tomo\" corpus which we will use for our evaluation. As you can see, 12% of the ambiguous characters are actually voiced but not marked. In addition, 44% of the voiced characters have the voiced consonant mark, showing the variation of using the voiced consonant mark in the corpus.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 878,
                        "end": 886,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 1163,
                        "end": 1170,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In the modern Japanese literary text, orthographic variations are not only the unmarked. However, unmarked characters appear a lot in the text and can be annotated easily by hand. Thus, we can get natural texts for evaluation of our method at low cost (in fact, it cost only a few weeks to annotate our above-mentioned test corpus). Therefore, we decided to begin with attaching voiced consonant mark for unmarked characters as a starting point for normalizing orthographic variations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Basically, Kindai-bungo UniDic is created for a :: ::",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Today, the fame of \"Hiroshima\" has been broadly known in and outside Japan, and if you talk about current affairs, you want to know how the place has been established.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Figure 2: Example of sentences that includes unmarked characters. This text is an excerpt from \"The tide of Hiroshima\": Katsuichi Noguchi, Taiyo, No.2, p.64 (1925) . Wavy-underlined characters are ambiguous character, and gray-boxed characters are unmarked character.",
                "cite_spans": [
                    {
                        "start": 130,
                        "end": 163,
                        "text": "Noguchi, Taiyo, No.2, p.64 (1925)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "First, the dictionary-based approach creates a dictionary that has both original spellings and modified variants without the mark. For example, Kindai-bungo UniDic includes both entries \" (zu)\" and \" (zu)\" for frequent words such as \" (zu)\" in auxiliary verb. This allows morphological analysis algorithms to learn the weights of both entries all together from a corpus annotated with part-of-speech tags in order to select appropriate entries during decoding.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "Second, the classification-based approach employs a corpus annotated with unmarked characters to learn a classifier that labels the voiced consonant mark for unmarked characters. Unlike the dictionary-based approach, the classificationbased approach does not require part-of-speech tagged nor tokenized corpora. Since it is easier for human annotators to annotate unmarked characters than word boundaries and part-of-speech tags, we can obtain a large scale annotated corpus at low cost.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "Therefore, in this paper, we propose a classification-based approach to automatic labeling of voiced consonant marks as a preprocessing step for morphological analysis for modern Japanese literary language.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "We formulate the task of labeling voiced con-sonant marks into a binary classification problem. Our method uses as its feature set only surface information about the surrounding character strings with pointwise prediction, whose training data are available at low cost. We use an online learning method for learning large spelling variation from massive datasets rapidly and accurately. Thus, we can improve its performance easily by increasing amount of training data. In addition, we perform clustering of Kanji, which is abundant in the training data, and employ class n-grams for addressing the data sparseness problem. We compared our classification-based approach with the dictionarybased approach and showed that the classificationbased method outperforms the dictionary-based method, especially in an out-of-domain setting.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "We also conducted an experiment to demonstrate that automatic labeling of unmarked characters as a pre-processing step improves the performance of morphological analysis of historical texts without normalization by a large margin, taking advantage of large scale annotated corpus of unmarked characters. The rest of this paper is organized as follows: In section 2 we describe related work of automatic labeling of Japanese voiced consonant marks. Section 3 details our proposed classification-based method using pointwise prediction. We then explain experimental settings and results in section 4. Section 5 concludes our work and presents future work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "\uf8f6 \uf8f7 \uf8f8",
                "sec_num": null
            },
            {
                "text": "If we assume an unmarked character as substitution error of one voiced consonant to one voiceless consonant, the task of detecting an unmarked character can be considered as a kind of error correction. In English, we can perform error correction for the one character's error by word-based approach. However, in Japanese, we cannot simply apply word-based approach because sentences are not segmented into words. Nagata (1998) proposed a statistical method using dynamic programming for selecting the most likely word sequences from candidate word lattice estimated from observed characters in Japanese sentence. In this method, the product of the transition probability of words is used as a word segmentation model. However, most of the historical materials that we deal with are raw text, and there exist little, if any, annotated texts with words and part-of-speech tags. Thus, a word segmentation model learned from such a limited amount of data is unreliable. Unlike Nagata's method, our classification-based method does not rely on word segmentation and can exploit low-level annotation such as voiced consonant mark, which is available quite easily.",
                "cite_spans": [
                    {
                        "start": 413,
                        "end": 426,
                        "text": "Nagata (1998)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "In addition, Nagata performed clustering of characters for smoothing confusion probability among characters to narrow down correction candidates. We also perform clustering on Kanji for addressing the data sparseness problem. Though Nagata uses character's shape for clustering, we instead use neighboring characters of the Kanji character. The intuition behind this is that whether to attach voiced consonant mark is affected by surrounding contexts, like sequential voicing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "On contrary, Shinnou (1999) proposed an error detection and correction method that does not perform word segmentation. He restricts the target to Hiragana characters and uses Hiragana ngram that is a substring of the characters. In his method, error detection is determined by the Hiragana n-gram frequency. One counts each Hiragana n-gram frequency in training corpus and judges whether the string includes error by checking if the smallest frequency among them (minimum frequency of n-gram) is larger than a threshold value. After error detection, one enumerates candidate strings and corrects the input string to the string that has the largest minimum frequency of n-gram compared to other candidates.",
                "cite_spans": [
                    {
                        "start": 13,
                        "end": 27,
                        "text": "Shinnou (1999)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
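            {
                "text": "A minimal sketch of this detection rule, assuming a precomputed table freq of Hiragana n-gram counts from a training corpus (the function names and the threshold are illustrative, not from Shinnou's paper):\n\ndef min_ngram_freq(s, freq, n=2):\n    # Smallest frequency among the n-grams of string s.\n    return min((freq.get(s[i:i + n], 0) for i in range(len(s) - n + 1)), default=0)\n\ndef has_error(s, freq, n=2, threshold=1):\n    # Flag the string if its rarest n-gram is too poorly attested.\n    return min_ngram_freq(s, freq, n) <= threshold\n\nCorrection then enumerates candidate strings and keeps the candidate whose minimum n-gram frequency is largest.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },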
            {
                "text": "The reason why Shinnou restricts targets to Hiragana characters is that it narrows down candidates of error correction. He used the fact that the number of Hiragana characters is 50 at most while the total number of distinct characters is more than 6,000 in Japanese. This method works well for present-day Japanese literary texts that contain relatively long Hiragana character strings. However, modern Japanese texts contain many Kanji characters and relatively short Hiragana character strings because modern Japanese texts are similar to Kanbun-kundokubun, or the Japanese reading of a Chinese text. Therefore, Hiragana ngrams fail to model error detection well for modern Japanese texts. Moreover, error correction of unmarked characters is much simpler than error correction of all the Hiragana. Our method differs from Shinnou's method in that we focus on automatic labeling of voiced consonant marks and em-ploy a discriminative character n-gram model using a classification-based method. Although Shinnou's generative model is not capable of using overlapping features, our classification-based approach allows flexible feature design such as including character types that may help classification on unmarked characters. In addition, Shinnou's method requires a fully annotated corpus with unmarked characters even though there is a large amount of raw text in modern literary Japanese.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "We formulate the task of automatic labeling of unmarked character into a binary-classification problem. More precisely, we build a binary classifier for detecting whether the target character is unmarked or not.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Detecting Unmarked Character with Pointwise Prediction",
                "sec_num": "3"
            },
            {
                "text": "In our classifier, we use only surface information about one target character and its surrounding characters, and the classifier output is either unmarked (+1) or not (-1). Since proposed method does not require a corpus annotated with word boundaries or part-of-speech tags for learning, we take advantage of a large modern a Japanese corpus, Taiyo-Corpus, 4 which is based on Japanese magazines from the Meiji Era. This corpus is not annotated with neither word boundaries nor partof-speech tags but is manually annotated with unmarked characters.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Detecting Unmarked Character with Pointwise Prediction",
                "sec_num": "3"
            },
            {
                "text": "We employed pointwise prediction which makes a single independent decision at each point: ambiguous Hiragana character or Kunoji-ten 5 . 6 Therefore, our method can learn from partially annotated corpora (Neubig and Mori, 2010) including raw corpora of modern Japanese literary text, and thus it is easy to obtain training data. Neubig et al. (2011) extend the word segmentation method proposed by Sassano (2002) to Japanese morphological analysis using pointwise prediction. In our method, we adopt the binary features from (Sassano, 2002) to this task. Unlike Sassano and Neubig et al. who use an SVM, we use an online Passive-Aggressive algorithm for exploiting large datasets while achieving high accuracy.",
                "cite_spans": [
                    {
                        "start": 204,
                        "end": 227,
                        "text": "(Neubig and Mori, 2010)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 329,
                        "end": 349,
                        "text": "Neubig et al. (2011)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 398,
                        "end": 412,
                        "text": "Sassano (2002)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 525,
                        "end": 540,
                        "text": "(Sassano, 2002)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Detecting Unmarked Character with Pointwise Prediction",
                "sec_num": "3"
            },
            {
                "text": "Our approach builds a binary classifier that uses binary features indicating whether the following ngrams exist or not (shown in Figure 3 ).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 129,
                        "end": 137,
                        "text": "Figure 3",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Features for Classification",
                "sec_num": "3.1"
            },
            {
                "text": "These features correspond to character n-grams that surround the target character. Only characters within a window of three characters are used in classification (n \u2264 3). These n-grams are referred with relative position from the target character.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character n-grams",
                "sec_num": "3.1.1"
            },
            {
                "text": "If given sentence is",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character n-grams",
                "sec_num": "3.1.1"
            },
            {
                "text": "c 1 c 2 \u2022 \u2022 \u2022 c m and tar- get character is c i , character n-grams are (\u22123/c i\u22123 c i\u22122 c i\u22121 , \u22122/c i\u22122 c i\u22121 c i , \u22121/c i\u22121 c i c i+1 , 0/c i c i+1 c i+2 , 1/c i+1 c i+2 c i+3 , \u22123/c i\u22123 c i\u22122 , \u22122/c i\u22122 c i\u22121 , \u22121/c i\u22121 c i , 0/c i c i+1 , 1/c i+1 c i+2 , 2/c i+2 c i+3 , \u22123/c i\u22123 , \u22122/c i\u22122 , \u22121/c i\u22121 , 0/c i , 1/c i+1 , 2/c i+2 , 3/c i+3 ).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character n-grams",
                "sec_num": "3.1.1"
            },
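            {
                "text": "A minimal sketch of this feature extraction in Python (ours, not the authors' code): position-tagged 1-, 2-, and 3-grams within a window of three characters, with \u27e8s\u27e9/\u27e8/s\u27e9 padding at the sentence boundaries (the padding convention is our assumption):\n\ndef char_ngram_features(chars, i, max_n=3, window=3):\n    # Pad so that positions outside the sentence yield boundary symbols.\n    padded = ['<s>'] * window + list(chars) + ['</s>'] * window\n    j = i + window  # index of the target character in the padded list\n    feats = []\n    for n in range(1, max_n + 1):\n        # n-grams lying fully inside the window, tagged with the\n        # relative position of their first character\n        for rel in range(-window, window - n + 2):\n            feats.append('%d/%s' % (rel, ''.join(padded[j + rel:j + rel + n])))\n    return feats\n\nFor n = 3 this yields exactly the five position-tagged 3-grams listed above (relative positions -3 through 1), plus six 2-grams and seven 1-grams.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character n-grams",
                "sec_num": "3.1.1"
            },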
            {
                "text": "These features are similar to previously mentioned character n-grams with only the modification of replacing the character itself with the character type. We deal with eleven character types, Hiragana/H, Katakana/K, Kanji/C, Odoriji/O, Latin/L, Digit/D, dash/d, stop and comma/S, BOS (\u27e8s\u27e9)/B, EOS (\u27e8/s\u27e9)/E and others/o as the character types.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Character type n-grams",
                "sec_num": "3.1.2"
            },
            {
                "text": "These features are also similar to character ngrams with only the modification of replacing the character itself with 0 (voiced consonant mark cannot be attached), 1 (the mark can be attached) and 2 (it already has the mark).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Markedness n-grams",
                "sec_num": "3.1.3"
            },
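            {
                "text": "Both mappings can reuse the position-tagged n-gram loop sketched above, substituting each character by its class before extraction. A simplified sketch of the two mappings (the function names are ours; Odori-ji, Latin, and dash are omitted for brevity, and the voiceable/voiced Kana sets are abbreviated):\n\nimport unicodedata\n\ndef char_type(c):\n    if c == '<s>': return 'B'   # BOS\n    if c == '</s>': return 'E'  # EOS\n    name = unicodedata.name(c, '')\n    if name.startswith('HIRAGANA'): return 'H'\n    if name.startswith('KATAKANA'): return 'K'\n    if name.startswith('CJK UNIFIED'): return 'C'  # Kanji\n    if c.isdigit(): return 'D'\n    if c in '\u3001\u3002': return 'S'  # comma and stop\n    return 'o'\n\nVOICEABLE = set('かきくけこさしすせそたちつてとはひふへほ')  # mark can attach\nVOICED = set('がぎぐげござじずぜぞだぢづでどばびぶべぼ')      # already marked\n\ndef markedness(c):\n    return '2' if c in VOICED else '1' if c in VOICEABLE else '0'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Markedness n-grams",
                "sec_num": "3.1.3"
            },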
            {
                "text": "In modern Japanese literary text, various Kanji characters were found commonly even in a sentence compared to nowadays. However, the frequency of each Kanji character varies. Learning tends to be sparse around a Kanji character that appears only several times in training corpus. For example, if \" \" (deep) appeared only once in training corpus as in a word \" \" (is deep), then we will not be able to use the information \" \" in a phrase \" \" (if it is deep) when we classify a character \" \" in \"",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
            {
                "text": "target character position \u2193 -3 -2 -1 0 1 2 3 \u27e8s\u27e9 \u27e8/s\u27e9 ( Though we planned to publish a big magazine that compares favorably with the one in that country, ) Therefore, we carry out clustering on Kanji characters and add character class n-gramin feature sets. For example, if \" \" and \" \" (cold) belong to the same class X, and \" \" appears in training corpus as in a phrase \" \" (if it is cold), then features corresponding to a phrase \"X \" (if it is X) will be learned from \"",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
            {
                "text": "Character 1-gram: -3/ -2/ -2/ -1/ 0/ 1/ 2/ 2/\u27e8B90\u27e9 3/ 3/\u27e8B74\u27e9 Character 2-gram: -3/ -3/ -2/ -2/ -1/ 0/ 1/ 1/ \u27e8B90\u27e9 2/ 2/\u27e8B90\u27e9\u27e8B74\u27e9 Character 3-gram: -3/ -3/ -2/ -2/ -1/ 0/ 0/ \u27e8B90\u27e9 1/ 1/ \u27e8B90\u27e9\u27e8B74\u27e9",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
            {
                "text": ".\" As a result, we will be able to exploit \" \" as evidence of detecting \" \" in \" \" as unmarked character.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
            {
                "text": "Clustering was performed on Kanji characters with the subsequent and the previous two characters individually based on (Pereira et al. 1993) .",
                "cite_spans": [
                    {
                        "start": 119,
                        "end": 140,
                        "text": "(Pereira et al. 1993)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
            {
                "text": "A Kanji character that appears left of the target character is replaced with the class of the formerclusters and that appears right is replaced with the class of the latter-clusters.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },
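            {
                "text": "A sketch of how the learned classes enter the feature set; left_class and right_class are hypothetical lookup tables produced by the two clusterings (by following context and by preceding context), and char_ngram_features/char_type are the sketches above:\n\ndef class_ngram_features(chars, i, left_class, right_class):\n    # Replace each Kanji by its cluster label before n-gram extraction.\n    # Kanji to the left of the target use one clustering and Kanji to\n    # the right the other, mirroring the former-/latter-cluster rule.\n    replaced = []\n    for j, c in enumerate(chars):\n        if char_type(c) == 'C':\n            table = left_class if j < i else right_class\n            replaced.append(table.get(c, c))\n        else:\n            replaced.append(c)\n    return char_ngram_features(replaced, i)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Clustering on Kanji",
                "sec_num": "3.2"
            },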
            {
                "text": "We conducted two experiments for evaluating our method as follows.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "4"
            },
            {
                "text": "We compare three approaches for automatic labeling of unmarked character as a pre-processing to morphological analysis on historical texts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Settings",
                "sec_num": "4.1"
            },
            {
                "text": "First, we built a naive generative model as baseline for labeling voiced consonant mark. This method labels voiced consonant marks that maximize the likelihood of a sentence by using a character 3-gram model. One deficiency of the baseline method is that it requires a fully annotated corpus with the marks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Settings",
                "sec_num": "4.1"
            },
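            {
                "text": "An exhaustive sketch of such a baseline, assuming a trained character 3-gram model with a log-likelihood interface lm.score(sentence) and a mapping voiced_of from each voiceable Kana to its voiced counterpart (both illustrative; a real implementation would search the lattice with dynamic programming instead of enumerating):\n\nfrom itertools import product\n\ndef label_marks(sentence, lm, voiced_of):\n    # Try every way of voicing the ambiguous characters and keep the\n    # variant that the character 3-gram model scores highest.\n    slots = [i for i, c in enumerate(sentence) if c in voiced_of]\n    best, best_score = sentence, lm.score(sentence)\n    for choice in product([False, True], repeat=len(slots)):\n        chars = list(sentence)\n        for i, voiced in zip(slots, choice):\n            if voiced:\n                chars[i] = voiced_of[chars[i]]\n        cand = ''.join(chars)\n        score = lm.score(cand)\n        if score > best_score:\n            best, best_score = cand, score\n    return best",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Settings",
                "sec_num": "4.1"
            },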
            {
                "text": "Second, for the dictionary-based approach, we created a dictionary and corpus from the same training corpus used by the Kindai-bungo Uni-Dic (U-Train) with all the marks removed. We preserved the original orthography in the field of each entry. We then trained a morphological analyzer 7 using the dictionary and corpus. Finally, we added to the dictionary entries with which we partially (or completely) replaced voiced consonant marks. This method assigns voiced consonant marks and performs morphological analysis jointly. However, it requires an annotated corpus with both the marks, word segmentation and partof-speech tags, which are scarce to obtain.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Settings",
                "sec_num": "4.1"
            },
            {
                "text": "Third, we constructed a proposed classifier from an annotated corpus with the voiced consonant marks. Our method does not need the information of word segmentation and part-of-speech. There- fore we can take advantage of Taiyo-Corpus. We use only articles written in a literary style in the corpus (398,077 sentences). We use 10% of this corpus for evaluation (T-Eval, including 33,847 sentences), and the rest for training including 364, 230 sentences) . For evaluation, we prepared a modern Japanese magazine \"Kokumin-no-Tomo\" corpus (85,291 sentences). It is not annotated with word boundaries nor part-of-speech tags. From the corpus, we use four numbers for testing, No.10, 20, 30 and 36, which we had finished annotating voiced consonant mark at the time (K-Eval, including 10,587 sentences), and the rest for training (K-Train, including 74,704 sentences).",
                "cite_spans": [
                    {
                        "start": 424,
                        "end": 438,
                        "text": "including 364,",
                        "ref_id": null
                    },
                    {
                        "start": 439,
                        "end": 453,
                        "text": "230 sentences)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Settings",
                "sec_num": "4.1"
            },
            {
                "text": "We extract training instances from all ambiguous characters. We regard instances with the mark as positive instances and instances without the mark as negative instances. Note that we detach voiced consonant mark from target character when extracting training instances. Although we extract test instances in a similar manner, we do not count characters originally with the mark at testing. In other words, we evaluate the accuracy only on unmarked characters present in real world setting. We show per instance breakdown of training and evaluation instances in Tables 2 and 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preparing Training and Test Corpus",
                "sec_num": "4.2"
            },
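            {
                "text": "A sketch of this instance extraction; strip_mark (a hypothetical helper) detaches the voiced consonant mark via Unicode decomposition, and VOICEABLE/char_ngram_features come from the sketches above:\n\nimport unicodedata\n\ndef strip_mark(c):\n    # NFD splits a voiced Kana into its base character plus the\n    # combining voiced sound mark U+3099, which we drop.\n    return unicodedata.normalize('NFC', unicodedata.normalize('NFD', c).replace('\\u3099', ''))\n\ndef extract_instances(sentence):\n    # Ambiguous characters carrying the mark become positive (+1)\n    # instances and those without it negative (-1); the mark is\n    # detached before feature extraction so that training inputs\n    # look like the unmarked test inputs.\n    stripped = [strip_mark(c) for c in sentence]\n    return [(char_ngram_features(stripped, i), +1 if orig != base else -1)\n            for i, (orig, base) in enumerate(zip(sentence, stripped))\n            if base in VOICEABLE]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preparing Training and Test Corpus",
                "sec_num": "4.2"
            },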
            {
                "text": "In this paper, we use an online Passive Aggressive algorithm, specifically PA-I for learning a binary classifier with (Yoshinaga et al. 2010) . 8 We use a linear kernel and set the iteration number to 20. Also, we optimized the regularization parameter C by performing 10-fold cross-validation on the training corpus.",
                "cite_spans": [
                    {
                        "start": 118,
                        "end": 141,
                        "text": "(Yoshinaga et al. 2010)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 144,
                        "end": 145,
                        "text": "8",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tools",
                "sec_num": "4.3"
            },
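            {
                "text": "A minimal sketch of the PA-I update for this binary task (binary indicator features and a linear kernel; w is a sparse weight map):\n\ndef pa1_update(w, feats, y, C):\n    # Passive-Aggressive I: update only on a margin violation, with\n    # the step size tau capped by the regularization parameter C.\n    score = sum(w.get(f, 0.0) for f in feats)\n    loss = max(0.0, 1.0 - y * score)\n    if loss > 0.0:\n        tau = min(C, loss / len(feats))  # ||x||^2 = number of active features\n        for f in feats:\n            w[f] = w.get(f, 0.0) + tau * y\n\nTraining simply makes 20 passes over the instances, calling pa1_update on each (features, label) pair.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tools",
                "sec_num": "4.3"
            },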
            {
                "text": "We performed clustering on Kanji with narrative sentences in training corpus. We used a clustering tool bayon 9 that implements the Repeated Bisection algorithm, which is a variant of the kmeans algorithm. We use the product of probability of character bigram P (char 1 |char kanji ) and trigram P (char 2 |char kanji char 1 ) as distributions of two characters connecting to Kanji P (char 1 char 2 |char kanji ). Probabilities of character bigram and trigram are calculated by using the language modeling toolkit Palmkit. 10 We use Witten Bell smoothing. For computational efficiency, we replaced characters that are not Hiragana or Odori-ji with character type when creating the language model.",
                "cite_spans": [
                    {
                        "start": 523,
                        "end": 525,
                        "text": "10",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tools",
                "sec_num": "4.3"
            },
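            {
                "text": "A sketch of the connection distribution used when clustering a Kanji by its following context; p2 and p3 stand for the smoothed bigram and trigram conditional probabilities (hypothetical interfaces over the Palmkit models):\n\ndef following_dist(kanji, contexts, p2, p3):\n    # P(c1 c2 | kanji) = P(c1 | kanji) * P(c2 | kanji c1); each Kanji is\n    # represented by this distribution over two-character contexts and\n    # clustered with repeated bisection (a k-means variant).\n    return {(c1, c2): p2(c1, kanji) * p3(c2, kanji, c1) for c1, c2 in contexts}\n\nThe preceding-context distribution is built symmetrically from the two characters to the left of the Kanji.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tools",
                "sec_num": "4.3"
            },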
            {
                "text": "In our first intrinsic experiment, we compared the precision, recall and F-measure of labeling voiced consonant mark with three approaches. Table 4 presents the results of the intrinsic evaluation. The proposed method outperforms other methods in terms of precision and F-measure using the same training corpus. Moreover, by adding T-Train, the proposed method achieves the best performance in all evaluation metrics including recall. This is because our proposed method can benefit from a large-scale annotated corpus with voiced consonant marks, which is not possible for the dictionary-based method since it requires fully annotated corpus with words and part-of-speech tags. Although the baseline method can use corpora annotated with voiced consonant marks and achieves comparable performance to the proposed method regarding recall, its precision is inferior to the proposed method by a large margin. We suppose that this improvement comes from discriminative learning of the language model, which enables us to design flexible features. Generally, precisions are lower in T-Eval than in K-Eval over all methods. This is because T-Eval has relatively few positive instances and most of the instances are difficult to judge whether they are unmarked or not even for human.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 140,
                        "end": 147,
                        "text": "Table 4",
                        "ref_id": "TABREF7"
                    }
                ],
                "eq_spans": [],
                "section": "Experiment 1: intrinsic",
                "sec_num": "4.4"
            },
            {
                "text": "In the baseline and the proposed method, performance is improved further by increasing amount of training data. By adding T-Train for U-Train, F-measure increases more than 10-points in T-Eval. We show in Figure 4 the change in recall when adding training instances from T-Train to U-Train in T-Eval (k=100). We confirmed that with just 1,000 instances added, recall increased 0.05 with the proposed method. Moreover, the proposed method's recall exceeded that of the dictionary-based approach after 100,000 instances were added. Although the F-measure was degraded by adding positive instances from K-Train, recall improved in K-Eval since positive instances add evidence for decision on voiced consonant marks. Apparently, it is effective to add instances from the same domain. However, the baseline and dictionary-based methods are not capable of using partially annotated corpora like K-Train. Our method employs pointwise prediction to make use of partially annotated corpus. Thus, we confirmed the effectiveness of using partially annotated corpora.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 205,
                        "end": 213,
                        "text": "Figure 4",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Experiment 1: intrinsic",
                "sec_num": "4.4"
            },
            {
                "text": "In addition, the proposed method shows the highest performance in k=1,000 for T-Eval and k=100 for K-Eval, respectively, when learned on T-Train and U-Train. In all settings, clustering improves precision while recall sometimes deteriorates. The performance gain is even larger when training data is scarce (See the results of U-Train). From this fact, we confirmed the effectiveness of clustering on Kanji for addressing the data sparseness problem. Table 5 lists our features and their performance. Because the performance of detection degrades drastically when we subtract Character n-gram from All, this feature is crucial for determining unmarked characters. This is another piece of evidence that discriminative language model works quit well for this task. On the other hand, both Character type n-gram and Markedness n-gram contribute to improvement of precision. As a result, F-measure increases using those features.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 451,
                        "end": 458,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experiment 1: intrinsic",
                "sec_num": "4.4"
            },
            {
                "text": "We also investigated errors of the classification on our method. Although we found some errors which due to lack of training data, we found errors which are difficult to determine without discourse context, like \" \"(ka) of binding particle or auxiliary verb and \" \"(ga) of case-marking particle or auxiliary verb. However, these instances are difficult even for human to determine whether unmarked or not. Since the basic policy is to use the mark when there is ambiguity, the absence of the mark in an ambiguous case can be considered as evidence of non-unmarked character. Moreover, Table 5 : Performance of each feature and their combination.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 585,
                        "end": 592,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experiment 1: intrinsic",
                "sec_num": "4.4"
            },
            {
                "text": "our method can not refer the discourse information since we only employed local context of character n-grams. Therefore, our method excessively tend to classify characters into unmarked. On the other hand, we found instances for which both unmarked and marked form are acceptable, like \" \"(tie) and \" \"(tie). Note that \" \" and \" \" are pronounced differently as \"musubi\" and \"yui,\" respectively. These instances seem to be the cause of degradation of precisions in T-Eval. For Odori-ji, it tends to fail classification because they not only depend on information of previous consonants but also on common practice such as \" ( )\"(again and again).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment 1: intrinsic",
                "sec_num": "4.4"
            },
            {
                "text": "As a second extrinsic experiment, we investigated how effective these approaches are at improving accuracy of morphological analysis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment 2: extrinsic",
                "sec_num": "4.5"
            },
            {
                "text": "To create gold-standard annotation for morphological analysis, we take the result of morphological analysis for the corpus annotated with voiced consonant marks using the standard version of Kindai-bungo UniDic. Since the word and partof-speech information are not available in Taiyo and Kokumin-no-Tomo corpus, this constitutes the upper bound of the morphological analysis performance on these data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment 2: extrinsic",
                "sec_num": "4.5"
            },
            {
                "text": "We evaluated the result of morphological analysis for two methods.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment 2: extrinsic",
                "sec_num": "4.5"
            },
            {
                "text": "First, we tested the dictionary-based method by performing morphological analysis using the same Kindai-bungo Unidic with additional entries that partially (or all) without voiced consonant marks as we described in section 4.1. Second, we evaluated the proposed method by pre-processing the unlabeled test corpus with the proposed method and performing morphological analysis using the standard version of Kindai-bungo Unidic. Then, we calculated the agreement rate between each method and the gold standard by counting how many sentences are identical to the gold standard. We compared each word's parts-of-speech tags and lexemes for the comparison. Table 6 shows the results of the extrinsic evaluation. As you can see, the proposed method gives higher agreement with the gold standard in morphological analysis results than the dictionarybased approach, thanks to the large scale Taiyo corpus annotated with voiced consonant marks. In these experiments, we confirmed that preprocessing with the proposed method is effective for improving morphological analysis of unnormalized modern Japanese literary text.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 652,
                        "end": 659,
                        "text": "Table 6",
                        "ref_id": "TABREF10"
                    }
                ],
                "eq_spans": [],
                "section": "Experiment 2: extrinsic",
                "sec_num": "4.5"
            },
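            {
                "text": "A sketch of the agreement computation, assuming each analysis is a per-sentence list of (lexeme, part-of-speech) pairs:\n\ndef sentence_agreement(system, gold):\n    # Fraction of sentences whose full analyses exactly match the gold.\n    same = sum(1 for s, g in zip(system, gold) if s == g)\n    return same / len(gold)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment 2: extrinsic",
                "sec_num": "4.5"
            },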
            {
                "text": "In this paper, we proposed a pointwise approach to label voiced consonant marks for modern Japanese literary text. We confirmed that pointwise prediction outperforms the dictionary-based approach by a large margin. By using the proposed method as pre-processing, morphological analysis results become much closer to the gold standard than using the dictionary-based approach.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "Also, we are using the method for annotating the modern Japanese literature. Thanks to the method, we are able to accelerate manual annotation with considerably small effort.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "One limitation is that we only deal with unmarked characters in this work. In modern Japanese literary text, there are other orthographic variations such as Okuri-gana and Kana-usage as well. As our future work, we will work on normalizing these variations for improving accuracy of morphological analysis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "We hope this work will encourage further investigation into historical work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "http://www2.ninjal.ac.jp/lrc/index.php?%C2%C0%CD% DB%A5%B3%A1%BC%A5%D1%A5%B95 Kunoji-ten is a iteration mark, either \" \" or \" \".6 Katakana characters had been used for specific words like adopted words and proper nouns. Thus, we excluded Katakana characters in this paper.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://mecab.sourceforge.net/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://www.tkl.iis.u-tokyo.ac.jp/\u02dcynaga/opal/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://code.google.com/p/bayon/ 10 http://palmkit.sourceforge.net/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Distributional Clustering of English Words",
                "authors": [
                    {
                        "first": "Fernando",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    },
                    {
                        "first": "Naftali",
                        "middle": [],
                        "last": "Tishby",
                        "suffix": ""
                    },
                    {
                        "first": "Lillian",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the 31th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "183--190",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Fernando Pereira, Naftali Tishby, and Lillian Lee. 1993. Distributional Clustering of English Words. In Proceedings of the 31th Annual Meeting of the Association for Computational Linguistics (ACL- 93):183-190.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Word-based Partial Annotation for Efficient Corpus Construction",
                "authors": [
                    {
                        "first": "Graham",
                        "middle": [],
                        "last": "Neubig",
                        "suffix": ""
                    },
                    {
                        "first": "Shinsuke",
                        "middle": [],
                        "last": "Mori",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the 7th international conference on Language Resources and Evaluation",
                "volume": "",
                "issue": "",
                "pages": "2723--2727",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Graham Neubig, Shinsuke Mori. 2010. Word-based Partial Annotation for Efficient Corpus Construc- tion. In Proceedings of the 7th international confer- ence on Language Resources and Evaluation (LREC 2010):2723-2727.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Pointwise Predication for Robust, Adaptable Japanese Morphological Analysis",
                "authors": [
                    {
                        "first": "Graham",
                        "middle": [],
                        "last": "Neubig",
                        "suffix": ""
                    },
                    {
                        "first": "Yosuke",
                        "middle": [],
                        "last": "Nakata",
                        "suffix": ""
                    },
                    {
                        "first": "Shinsuke",
                        "middle": [],
                        "last": "Mori",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (ACL HLT",
                "volume": "",
                "issue": "",
                "pages": "529--533",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Graham Neubig, Yosuke Nakata, Shinsuke Mori. 2011. Pointwise Predication for Robust, Adaptable Japanese Morphological Analysis. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Tech- nologies (ACL HLT 2011):529-533.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Detecting and Correction for Errors in Hiragana Sequences by a Hiragana Character N-gram",
                "authors": [
                    {
                        "first": "Hiroyuki",
                        "middle": [],
                        "last": "Shinnou",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "40",
                "issue": "",
                "pages": "2690--2698",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hiroyuki Shinnou. 1999. Detecting and Correction for Errors in Hiragana Sequences by a Hiragana Char- acter N-gram. Journal of Information Processing Society of Japan,40(6):2690-2698.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Shai Shalev-Shwartz, and Yoram Singer",
                "authors": [
                    {
                        "first": "Koby",
                        "middle": [],
                        "last": "Crammer",
                        "suffix": ""
                    },
                    {
                        "first": "Ofer",
                        "middle": [],
                        "last": "Dekel",
                        "suffix": ""
                    },
                    {
                        "first": "Joseph",
                        "middle": [],
                        "last": "Keshet",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Journal of Machine Learning Research",
                "volume": "7",
                "issue": "",
                "pages": "551--585",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Koby Crammer, Ofer Dekel, Joseph Keshet, Shai Shalev-Shwartz, and Yoram Singer. 2006. Online Passive-Aggressive Algorithms. Journal of Machine Learning Research 7:551-585.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "An Empirical Study of Active Learning with Support Vector Machines for Japanese Word Segmentation",
                "authors": [
                    {
                        "first": "Manabu",
                        "middle": [],
                        "last": "Sassano",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "505--512",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Manabu Sassano. 2002. An Empirical Study of Ac- tive Learning with Support Vector Machines for Japanese Word Segmentation. In Proceedings of the 40th Annual Meeting of the Association for Compu- tational Linguistics (ACL 2002):505-512.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "A Japanese OCR Error Correction Method Using Character Shape Similarity and Statistical Language Model",
                "authors": [
                    {
                        "first": "Masaaki",
                        "middle": [],
                        "last": "Nagata",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistic (COLING-ACL '98",
                "volume": "",
                "issue": "",
                "pages": "922--928",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Masaaki Nagata. 1998. A Japanese OCR Error Correc- tion Method Using Character Shape Similarity and Statistical Language Model. In Proceedings of the 36th Annual Meeting of the Association for Com- putational Linguistics and 17th International Con- ference on Computational Linguistic (COLING-ACL '98):922-928.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Kernel Slicing: Scalable Online Training with Conjunctive Features",
                "authors": [
                    {
                        "first": "Naoki",
                        "middle": [],
                        "last": "Yoshinaga",
                        "suffix": ""
                    },
                    {
                        "first": "Masaru",
                        "middle": [],
                        "last": "Kitsuregawa",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the 23th International Conference on Computational Linguistic",
                "volume": "",
                "issue": "",
                "pages": "1245--1253",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Naoki Yoshinaga and Masaru Kitsuregawa. 2010. Ker- nel Slicing: Scalable Online Training with Con- junctive Features. In Proceedings of the 23th In- ternational Conference on Computational Linguistic (COLING 2010):1245-1253.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Feature for classification of unmarked characters."
            },
            "FIGREF1": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Improvement of recall with adding training instances."
            },
            "TABREF1": {
                "text": "The contingency table of observed frequencies of characters and voiceness.",
                "html": null,
                "type_str": "table",
                "content": "<table/>",
                "num": null
            },
            "TABREF4": {
                "text": "Number of instances in each training corpus.",
                "html": null,
                "type_str": "table",
                "content": "<table><tr><td>Test corpus positive negative T-Eval 899 93,022 93,921 all K-Eval 3,843 25,461 29,304</td></tr></table>",
                "num": null
            },
            "TABREF5": {
                "text": "Number of instances in each test corpus.",
                "html": null,
                "type_str": "table",
                "content": "<table/>",
                "num": null
            },
            "TABREF7": {
                "text": "Performance of intrinsic evaluation: labeling voiced consonant mark.",
                "html": null,
                "type_str": "table",
                "content": "<table/>",
                "num": null
            },
            "TABREF8": {
                "text": "Character Type n-gram 70.651/96.028 95.328/98.126 81.115/97.066 All \u2212 Markedness n-gram 69.764/95.884 95.217/98.205 80.527/97.031 All 72.472/96.146 94.883/98.022 82.177/97.075",
                "html": null,
                "type_str": "table",
                "content": "<table><tr><td>Feature Character n-gram only All \u2212 Character n-gram All \u2212</td><td>T-Eval/K-Eval Rec.[%] 70.041/95.882 95.439/98.152 80.791/97.004 Prec.[%] F 2.521/20.000 1.001/ 0.156 1.433/0.310</td></tr></table>",
                "num": null
            },
            "TABREF10": {
                "text": "Performance of extrinsic evaluation: agreement rate of morphological analysis result.",
                "html": null,
                "type_str": "table",
                "content": "<table/>",
                "num": null
            }
        }
    }
}