File size: 70,812 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
{
    "paper_id": "A00-1024",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T01:12:01.745614Z"
    },
    "title": "Categorizing Unknown Words: Using Decision Trees to Identify Names and Misspellings",
    "authors": [
        {
            "first": "Janine",
            "middle": [],
            "last": "Toole",
            "suffix": "",
            "affiliation": {
                "laboratory": "Natural Language Laboratory",
                "institution": "Simon Fraser University Burnaby",
                "location": {
                    "region": "BC",
                    "country": "Canada VSA IS6"
                }
            },
            "email": "toole@cs.sfu.ca"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper introduces a system for categorizing unknown words. The system is based on a multicomponent architecture where each component is responsible for identifying one class of unknown words. The focus of this paper is the components that identify names and spelling errors. Each component uses a decision tree architecture to combine multiple types of evidence about the unknown word. The system is evaluated using data from live closed captions a genre replete with a wide variety of unknown words.",
    "pdf_parse": {
        "paper_id": "A00-1024",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper introduces a system for categorizing unknown words. The system is based on a multicomponent architecture where each component is responsible for identifying one class of unknown words. The focus of this paper is the components that identify names and spelling errors. Each component uses a decision tree architecture to combine multiple types of evidence about the unknown word. The system is evaluated using data from live closed captions a genre replete with a wide variety of unknown words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "In any real world use, a Natural Language Processing (NLP) system will encounter words that are not in its lexicon, what we term 'unknown words'. Unknown words are problematic because a NLP system will perform well only if it recognizes the words that it is meant to analyze or translate: the more words a system does not recognize the more the system's performance will degrade. Even when unknown words are infrequent, they can have a disproportionate effect on system quality. For example, Min (1996) found that while only 0.6% of words in 300 e-mails were misspelled, this meant that 12% of the sentences contained an error (discussed in (Min and Wilson, 1998) ).",
                "cite_spans": [
                    {
                        "start": 492,
                        "end": 502,
                        "text": "Min (1996)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 641,
                        "end": 663,
                        "text": "(Min and Wilson, 1998)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Words may be unknown for many reasons: the word may be a proper name, a misspelling, an abbreviation, a number, a morphological variant of a known word (e.g. recleared), or missing from the dictionary. The first step in dealing with unknown words is to identify the class of the unknown word; whether it is a misspelling, a proper name, an abbreviation etc. Once this is known, the proper action can be taken, misspellings can be corrected, abbreviations can be expanded and so on, as deemed necessary by the particular text processing application. In this paper we introduce a system for categorizing unknown words. The system is based on a multi-component architecture where each component is responsible for identifying one category of unknown words. The main focus of this paper is the components that identify names and spelling errors. Both components use a decision tree architecture to combine multiple types of evidence about the unknown word. Results from the two components are combined using a weighted voting procedure. The system is evaluated using data from live closed captions -a genre replete with a wide variety of unknown words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "This paper is organized as follows. In section 2 we outline the overall architecture of the unknown word categorizer. The name identifier and the misspelling identifier are introduced in section 3. Performance and evaluation issues are discussed in section 4. Section 5 considers portability issues. Section 6 compares the current system with relevant preceding research. Concluding comments can be found in section 6.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "The goal of our research is to develop a system that automatically categorizes unknown words. According to our definition, an unknown word is a word that is not contained in the lexicon of an NLP system. As defined, 'unknown-ness' is a relative concept: a word that is known to one system may be unknown to another system. Our research is motivated by the problems that we have experienced in translating live closed captions: live captions are produced under tight time constraints and contain many unknown words. Typically, the caption transcriber has a five second window to transcribe the broadcast dialogue. Because of the live nature of the broadcast, there is no opportunity to post-edit the transcript in any way. Although motivated by our specific requirements, the unknown word categorizer would benefit any NLP system that encounters unknown words of differing categories. Some immediately obvious domains where unknown words are frequent include e-mail messages, internet chat rooms, data typed in by call centre operators, etc.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System Architecture",
                "sec_num": "2"
            },
            {
                "text": "To deal with these issues we propose a multicomponent architecture where individual components specialize in identifying one particular type of unknown word. For example, the misspelling identifier will specialize in identifying misspellings, the abbreviation component will specialize in identifying abbreviations, etc. Each component will return a confidence measure of the reliability of its prediction, c.f. (Elworthy, 1998) . The results from each component are evaluated to determine the final category of the word.",
                "cite_spans": [
                    {
                        "start": 412,
                        "end": 428,
                        "text": "(Elworthy, 1998)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System Architecture",
                "sec_num": "2"
            },
            {
                "text": "There are several advantages to this approach. Firstly, the system can take advantage of existing research. For example, the name recognition module can make use of the considerable research that exists on name recognition, e.g. (McDonald, 1996) , (Mani et al., 1996) . Secondly, individual components can be replaced when improved models are available, without affecting other parts of the system. Thirdly, this approach is compatible with incorporating multiple components of the same type to improve performance (cf. (van Halteren et al., 1998) who found that combining the results of several part of speech taggers increased performance).",
                "cite_spans": [
                    {
                        "start": 229,
                        "end": 245,
                        "text": "(McDonald, 1996)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 248,
                        "end": 267,
                        "text": "(Mani et al., 1996)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 515,
                        "end": 547,
                        "text": "(cf. (van Halteren et al., 1998)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System Architecture",
                "sec_num": "2"
            },
            {
                "text": "The Current System",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3",
                "sec_num": null
            },
            {
                "text": "In this paper we introduce a simplified version of the unknown word categorizer: one that contains just two components: misspelling identification and name identification. In this section we introduce these components and the 'decision: component which combines the results from the individual modules.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3",
                "sec_num": null
            },
            {
                "text": "The goal of the name identifier is to differentiate between those unknown words which are proper names, and those which are not. We define a name as word identifying a person, place, or concept that would typically require capitalization in English. One of the motivations for the modular architecture introduced above, was to be able to leverage existing research. For example, ideally, we should be able to plug in an existing proper name recognizer and avoid the problem of creating our own. However, the domain in which we are currently operating -live closed captions -makes this approach difficult. Closed captions do not contain any case information, all captions are in upper case. Existing proper name recognizers rely heavily on case to identify names, hence they perform poorly on our data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "A second disadvantage of currently available name recognizers is that they do not generally return a confidence measure with their prediction. Some indication of confidence is required in the multicomponent architecture we have implemented. However, while currently existing name recognizers are inappropriate for the needs of our domain, future name recognizers may well meet these requirements and be able to be incorporated into the architecture we propose.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "For these reasons we develop our own name identifier. We utilize a decision tree to model the characteristics of proper names. The advantage of decision trees is that they are highly explainable: one can readily understand the features that are affecting the analysis (Weiss and Indurkhya, 1998) . Furthermore, decision trees are well-suited for combining a wide variety of information.",
                "cite_spans": [
                    {
                        "start": 268,
                        "end": 295,
                        "text": "(Weiss and Indurkhya, 1998)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "For this project, we made use of the decision tree that is part of IBM's Intelligent Miner suite for data mining. Since the point of this paper is to describe an application of decision trees rather than to argue for a particular decision tree algorithm, we omit further details of the decision tree software. Similar results should be obtained by using other decision tree software. Indeed, the results we obtain could perhaps be improved by using more sophisticated decision-tree approaches such as the adaptiveresampling described in (Weiss et al, 1999) .",
                "cite_spans": [
                    {
                        "start": 537,
                        "end": 556,
                        "text": "(Weiss et al, 1999)",
                        "ref_id": "BIBREF17"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "The features that we use to train the decision tree are intended to capture the characteristics of names. We specify a total of ten features for each unknown word. These identify two features of the unknown word itself as well as two features for each of the two preceding and two following words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "The first feature represents the part of speech of the word. Vv'e use an in-house statistical tagger (based on (Church, 1988) ) to tag the text in which the unknown word occurs. The tag set used is a simplified version of the tags used in the machinereadable version of the Oxford Advanced Learners Dictionary (OALD). The tag set contains just one tag to identify nouns.",
                "cite_spans": [
                    {
                        "start": 111,
                        "end": 125,
                        "text": "(Church, 1988)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "The second feature provides more informative tagging for specific parts of speech (these are referred to as 'detailed tags' (DETAG)). This tagset consists of the nine tags listed in Table 1 . All parts of speech apart from noun and punctuation tags are assigned the tag 'OTHER;. All punctuation tags are assigned the tag 'BOUNDARY'. Words identified as nouns are assigned one of the remaining tags depending on the information provided in the OALD (although the unknown word, by definition, will not appear in the OALD, the preceding and following words may well appear in the dictionary). If the word is identified in the OALD as a common noun it is assigned the tag 'COM'. If it is identified in the OALD as a proper name it is assigned the tag 'NAME'. If the word is specified as both a name and a common noun (e.g. 'bilF), then it is assigned the tag 'NCOM'. Pronouns are assigned the tag 'PRON'. If the word is in a list of titles that we have compiled, then the tag 'TITLE' is assigned. Similarly, if the word is a member of the class of words that can follow a name (e.g. 'jr'), then the tag 'POST ~ is assigned. A simple rule-based sys- tern is used to assign these tags. If we were dealing with data that contains case information, we would also include fields representing the existence/non-existence of initial upper case for the five words. However, since our current data does not include case information we do not include these features.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 182,
                        "end": 189,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The Name Identifier",
                "sec_num": "3.1"
            },
            {
                "text": "The goal of the misspelling identifier is to differentiate between those unknown words which are spelling errors and those which are not. We define a misspelling as an unintended, orthographically incorrect representation (with respect to the NLP system) of a word. A misspelling differs from the intended known word through one or more additions, deletions, substitutions, or reversals of letters, or the exclusion of punctuation such as hyphenation or spacing. Like the definition of 'unknown word', the definition of a misspelling is also relative to a particular NLP system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "Like the name identifier, we make use of a decision tree to capture the characteristics of misspellings. The features we use are derived from previous research, including our own previous research on misspelling identification. An abridged list of the features that are used in the training data is listed in Table 2 and discussed below.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 309,
                        "end": 316,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "Corpus frequency: (Vosse, 1992) differentiates between misspellings and neologisms (new words) in terms of their frequency. His algorithm classifies unknown words that appear infrequently as misspellings, and those that appear more frequently as neologisms. Our corpus frequency variable specifies the frequency of each unknown word in a 2.6 million word corpus of business news closed captions.",
                "cite_spans": [
                    {
                        "start": 18,
                        "end": 31,
                        "text": "(Vosse, 1992)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "I~'ord Length: (Agirre et al., 1998) note that their predictions for the correct spelling of misspelled words are more accurate for words longer than four characters, and much less accurate for shorter words. This observation can also be found in (Kukich, 1992) . Our word length variables measures the number of characters in each word.",
                "cite_spans": [
                    {
                        "start": 15,
                        "end": 36,
                        "text": "(Agirre et al., 1998)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 247,
                        "end": 261,
                        "text": "(Kukich, 1992)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "Edit distance: Edit-distance is a metric for identifying the orthographic similarity of two words. Typically, one edit-distance corresponds to one substitution, deletion, reversal or addition of a character. (Damerau, 1964) observed that 80% of spelling errors in his data were just one edit-distance from the intended word. Similarly, (Mitton, 1987) found that 70% of his data was within one edit-distance from the intended word. Our edit distance feature represents the edit distance from the unknown word to the closest suggestion produced by the unix spell checker, ispell. If ispell does not produce any suggestions, an edit distance of thirty is assigned. In previous work we have experimented with more sophisticated distance measures. However, simple edit distance proved to be the most effective (Toole, 1999) .",
                "cite_spans": [
                    {
                        "start": 208,
                        "end": 223,
                        "text": "(Damerau, 1964)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 336,
                        "end": 350,
                        "text": "(Mitton, 1987)",
                        "ref_id": null
                    },
                    {
                        "start": 805,
                        "end": 818,
                        "text": "(Toole, 1999)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "Character sequence frequency: A characteristic of some misspellings is that they contain character sequences which are not typical of the language, e.g.tlted, wful. Exploiting this information is a standard way of identifying spelling errors when using a dictionary is not desired or appropriate, e.g. (Hull and Srihari, 1982) , (Zamora et al., 1981) .",
                "cite_spans": [
                    {
                        "start": 302,
                        "end": 326,
                        "text": "(Hull and Srihari, 1982)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 329,
                        "end": 350,
                        "text": "(Zamora et al., 1981)",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "To calculate our character sequence feature, we firstly determine the frequencies of the two least frequent character tri-gram sequences in the word in each of a selection of corpora. In previous work we included each of these values as individual features. However, the resulting trees were quite unstable as one feature would be relevant to one tree, whereas a different character sequence feature would be relevant to another tree. To avoid this problem, we developed a composite feature that is the sum of all individual character sequence frequencies.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Misspelling Identifier",
                "sec_num": "3.2"
            },
            {
                "text": "This binary feature specifies whether a word contains a character that is not typical of English words, such as accented characters, etc. Such characters are indicative of foreign names or transmission noise (in the case of captions) rather than misspellings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Non-English characters:",
                "sec_num": null
            },
            {
                "text": "The misspelling identifier and the name identifier will each return a prediction for an unknown word. In cases where the predictions are compatible, e.g. where the name identifier predicts that it is a name and the spelling identifier predicts that it is not a misspelling, then the decision is straightforward. Similarly, if both decision trees make negative predictions, then we can assume that the unknown word is neither a misspelling nor a name, but some other category of unknown word. However, it is also possible that both the spelling identifier and the name identifier will make positive predictions. In these cases we need a mechanism to decide which assignment is upheld. For the purposes of this paper, we make use of a simple heuristic where in the case of two positive predictions the one with the highest confidence measure is accepted. The decision trees return a confidence measure for each leaf of the tree. The confidence measure for a particular leaf is calculated from the training data and corresponds to the proportion of correct predictions over the total number of predictions at this leaf.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Decision Making Component",
                "sec_num": "3.3"
            },
            {
                "text": "In this section we evaluate the unknown word categorizer introduced above. We begin by describing the training and test data. Following this, we evaluate the individual components and finally, we evaluate the decision making component.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "The training and test data for the decision tree consists of 7000 cases of unknown words extracted from a 2.6 million word corpus of live business news captions. Of the 7000 cases, 70.4% were manually identified as names and 21.3% were identified as misspellings. The remaining cases were other types of unknown words such as abbreviations, morphological variants, etc. Seventy percent of the data was randomly selected to serve as the training corpus. The remaining thirty percent, or 2100 records, was reserved as the test corpus. The test data consists of ten samples of 2100 records selected randomly with replacement from the test corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "We now consider the results of training a decision tree to identify misspellings using those features we introduced in the section on the misspelling identifier. The tree was trained on the training data described above. The tree was evaluated using each of the ten test data sets. The average precision and recall data for the ten test sets are given in Table 3, together with the base-line case of assuming that we categorize all unknown words as names (the most common category). With the baseline case we achieve 70.4% precision but with 0% recall. In contrast, the decision tree approach obtains 77.1% precision and 73.8% recall.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "We also trained a decision tree using not only the features identified in our discussion on misspellings but also those features that we introduced in our discussion of name identification. The results for this tree can be found in the second line of Table  3 . The inclusion of the additional features has increased precision by approximately 5%. However, it has also decreased recall by about the same amount. The overall F-score is quite similar. It appears that the name features are not predictive for identifying misspellings in this domain. This is not surprising considering that eight of the ten features specified for name identification are concerned with features of the two preceding and two following words. Such word-external information is of little use in identifying a misspelling.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 251,
                        "end": 259,
                        "text": "Table  3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "An analysis of the cases where the misspelling decision tree failed to identify a misspelling revealed two major classes of omissions. The first class contains a collection of words which have typical characteristics of English words, but differ from the intended word by the addition or deletion of a syllable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "Words in this class include creditability for credibility, coordmatored for coordinated, and representires for representatives. The second class contains misspellings that differ from known words by the deletion of a blank. Examples in this class include webpage, crewmembers, and rainshower. The second class of misspellings can be addressed by adding a feature that specifies whether the unknown word can be split up into two component known words. Such a feature should provide strong predictability for the second class of words. The first class of words are more of a challenge. These words have a close homophonic relationship with the intended word rather than a close homographic relationship (as captured by edit distance). Perhaps this class of words would benefit from a feature representing phonetic distance rather than edit distance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "Among those words which were incorrectly identified as misspellings, it is also possible to identify common causes for the misidentification. Among these words are many foreign words which have character sequences which are not common in English. Examples include khanehanalak, phytopla~2kton, brycee1~.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "The results for our name identifier are given in Table 4 . Again, the decision tree approach is a significant improvement over the baseline case. If we take the baseline approach and assume that all unknown words are names, then we would achieve a precision of 70.4%. However, using the decision tree approach, we obtain 86.5% precision and 92.9% recall.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 49,
                        "end": 56,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "We also trained a tree using both the name and misspelling features. The results can be found in the second line of Table 4 . Unlike the case when we trained the misspelling identifier on all the features, the extended tree for the name identifier provides increased recall as well as increased precision. Unlike the case with the misspelling decision-tree, the misspelling-identification features do provide predictive information for name identification. If we review the features, this result seems quite reasonable: features such as corpus frequency and non-English characters can provide evidence for/against name identification. A review of the errors made by the name identifier again provides insight for future development. Among those unknown words that are names but which were not identified as such are predominantly names that can (and did) appear with determiners.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 116,
                        "end": 123,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "Examples of this class include steelers in the steelers, and pathfinder in the pathfinder. Hence, the name identifier seems adept at finding the names of individual people and places, which typically cannot be combined with determiners. But, the name identifier has more problems with names that have similar distributions to common nouns.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "The cases where the name identifier incorrectly identifies unknown words as names also have identifiable characteristics. These examples mostly include words with unusual character sequences such as the misspellings sxetion and fwlamg. No doubt these have similar characteristics to foreign names. As the misidentified words are also correctly identified as misspellings by the misspelling identifier, these are less problematic. It is the task of the decisionmaking component to resolve issues such as these.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "The final results we include are for the unknown word categorizer itself using the voting procedure outlined in previous discussion. As introduced previously, confidence measure is used as a tie-breaker in cases where the two components make positive decision. We evaluate the categorizer using precision and recall metrics. The precision metric identifies the number of correct misspelling or name categorizations over the total number of times a word was identified as a misspelling or a name. The recall metric identifies the number of times the system correctly identifies a misspelling or name over the number of misspellings and names existing in the data. As illustrated in Table 5 , the unknown word categorizer achieves 86% precision and 89.9% recall on the task of identifying names and misspellings.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 681,
                        "end": 688,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "An examination of the confusion matrix of the tiebreaker decisions is also revealing. We include the confusion matrix for one test data set in Table 6 . Firstly, in only about 5% of the cases was it necessary to revert to confidence measure to determine the category of the unknown word. In all other cases the predictions were compatible. Secondly, in the majority of cases the decision-maker rules in favour of the name prediction. In hindsight this is not surprising since the name decision tree has higher results and hence is likely to have higher confidence measures.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 143,
                        "end": 150,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "A review of the largest error category in this confusion matrix is also insightful. These are cases where the decision-maker classifies the unknown word as a name when it should be a misspelling (37 cases). The words in this category are typically examples where the misspelled word has a phonetic relationship with the intended word. For example, temt for tempt, floyda for florida, and dimow part of the intended word democrat. Not surprisingly, it was these types of words which were identified as problematic for the current misspelling identifier. Augmenting the misspelling identifier with features to identify these types of misspellings should also lead to improvement in the decision-maker.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "We find these results encouraging: they indicate that the approach we are taking is productive. Our future work will focus on three fronts. Firstly, we will improve our existing components by developing further features which are sensitive to the distinction between names and misspellings. The discussion in this section has indicated several promising directions. Secondly, we will develop components to identify the remaining types of unknown words, such as abbreviations, morphological variants, etc. Thirdly, we will experiment with alternative decision-making processes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "In this paper we have introduced a means for identifying names and misspellings from among other types of unknown words and have illustrated the process using the domain of closed captions. Although not explicitly specified, one of the goals of the research has been to develop an approach that will be portable to new domains and languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Examining Portability",
                "sec_num": "5"
            },
            {
                "text": "We are optimistic that the approach we have developed is portable. The system that we have developed requires very little in terms of linguistic resources. Apart from a corpus of the new domain and language, the only other requirements are some means of generating spelling suggestions (ispell is available for many languages) and a part-of-speech tagger. Table 5 : Precision and recall for decision-making component more information sources are available, then these can be readily included in the information provided to the decision tree training algorithm. For many languages, the features used in the unknown word categorizer may well be sufficient. However, the features used do make some assumptions about the nature of the writing system used. For example, the edit distance feature in the misspelling identifier assumes that words consist of alphabetic characters which have undergone substitution/addition/deletion. However, this feature will be less useful in a language such as Japanese or Chinese which use ideographic characters. However, while the exact features used in this paper may be inappropriate for a given language, we believe the general approach is transferable. In the case of a language such as Japanese, one would consider the means by which misspellings differ from their intended word and identify features to capture these differences.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 356,
                        "end": 363,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Examining Portability",
                "sec_num": "5"
            },
            {
                "text": "There is little research that has focused on differentiating the different types of unknown words. For example, research on spelling error detection and correction for the most part assumes that all unknown words are misspellings and makes no attempt to identify other types of unknown words, e.g. (Elmi and Evens, 1998) . Naturally, these are not appropriate comparisons for the work reported here. However, as is evident from the discussion above, previous spelling research does provide an important role in suggesting productive features to include in the decision tree.",
                "cite_spans": [
                    {
                        "start": 298,
                        "end": 320,
                        "text": "(Elmi and Evens, 1998)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Research",
                "sec_num": "6"
            },
            {
                "text": "Research that is more similar in goal to that outlined in this paper is Vosse (Vosse, 1992) . Vosse uses a simple algorithm to identify three classes of unknown words: misspellings, neologisms, and names. Capitalization is his sole means of identifying names. However, capitalization information is not available in closed captions. Hence, his system would be ineffective on the closed caption domain with which we are working. (Granger, 1983) uses expectations generated by scripts to analyze unknown words. The drawback of his system is that it lacks portability since it incorporates scripts that make use of world knowledge of the situation being described; in this case, naval ship-to-shore messages.",
                "cite_spans": [
                    {
                        "start": 78,
                        "end": 91,
                        "text": "(Vosse, 1992)",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 428,
                        "end": 443,
                        "text": "(Granger, 1983)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Research",
                "sec_num": "6"
            },
            {
                "text": "Research that is similar in technique to that reported here is (Baluja et al., 1999) . Baluja and his colleagues use a decision tree classifier to identify proper names in text. They incorporate three types of features: word level (essentially utilizes case information), dictionary-level (comparable to our ispell feature), and POS information (comparable to our POS tagging). Their highest F-score for name identification is 95.2, slightly higher than our name identifier. However, it is difficult to compare the two sets of results since our tasks are slightly different. The goal of Baluja's research, and all other proper name identification research, is to identify all those words and phrases in the text which are proper names. Our research, on the other hand, is not concerned with all text, but only those words which are unknown. Also preventing comparison is the type of data that we deal with. Baluja's data contains case information whereas ours does not-the lack of case information makes name identification significantly more difficult. Indeed, Baluja's results when they exclude their word-level (case) features are significantly lower: a maximum F-score of 79.7.",
                "cite_spans": [
                    {
                        "start": 63,
                        "end": 84,
                        "text": "(Baluja et al., 1999)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Research",
                "sec_num": "6"
            },
            {
                "text": "In this paper we have introduced an unknown word categorizer that can identify misspellings and names. The unknown word categorizer consists of individual components, each of which specialize in identifying a particular class of unknown word. The two existing components are implemented as decision trees. The system provides encouraging results when evaluated against a particularly challenging domain: transcripts from live closed captions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            },
            {
                "text": "Predicted Spelling Predicted Name Neither name nor misspelling 0 6 Misspelling 10 37 Name 4 43 Table 6 : Confusion matrix for decision maker: includes only those examples where both components made a positive prediction.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 95,
                        "end": 102,
                        "text": "Table 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Towards a single proposal in spelling correction",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Agirre",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Gojenola",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Sarasola",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Voutilainen",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th Annual Meeting of the ACL and the 17th International",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Agirre, K. Gojenola, K. Sarasola, , and A. Vouti- lainen. 1998. Towards a single proposal in spelling correction. In Proceedings of the 36th Ammal Meeting of the ACL and the 17th International",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Applying machine learning for high performance named-entity extraction",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Baluja",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Mittal",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Sukthankar",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Proceedings of the Conference of the Pacific Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "365--378",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Baluja, V. Mittal, and R.. Sukthankar. 1999. Applying machine learning for high performance named-entity extraction. In Proceedings of the Colzference of the Pacific Association for Com- putational Linguistics , pages 365-378.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A stochastic parts program and noun phrase parser for unrestricted text",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Church",
                        "suffix": ""
                    }
                ],
                "year": 1988,
                "venue": "Proceedings of the Second Conference on Applied Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "136--143",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Church 1988. A stochastic parts program and noun phrase parser for unrestricted text. In Pro- ceedings of the Second Conference on Applied Nat- ural Language Processing, pages 136-143.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A technique for computer detection and correction of spelling errors",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Damerau",
                        "suffix": ""
                    }
                ],
                "year": 1964,
                "venue": "Communications of the ACM",
                "volume": "7",
                "issue": "",
                "pages": "171--176",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Damerau. 1964. A technique for computer detec- tion and correction of spelling errors. Communi- cations of the ACM, 7:171-176.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Spelling correction using context",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Elmi",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Evens",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th Annual Meeting of the ACL and the 17th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "360--364",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Elmi and M. Evens. 1998. Spelling correction using context. In Proceedings of the 36th Annual Meeting of the A CL and the 17th hlternational Collference on Computational Linguistics, pages 360-364.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Language identification with confidence limits",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Elworthy",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 6th Workshop on Very large Corpora",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Elworthy. 1998. Language identification with confidence limits. In Proceedings of the 6th Work- shop on Very large Corpora.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "The nomad system: expectationbased detection and correction of errors during understanding of syntactically and semantically illformed text",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Granger",
                        "suffix": ""
                    }
                ],
                "year": 1983,
                "venue": "American Journal of Computational Linguistics",
                "volume": "9",
                "issue": "",
                "pages": "188--198",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "R. Granger. 1983. The nomad system: expectation- based detection and correction of errors during un- derstanding of syntactically and semantically ill- formed text. American Journal of Computational Linguistics, 9:188-198.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Experiments in text recognition with binary n-gram and viterbi algorithms",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hull",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Srihari",
                        "suffix": ""
                    }
                ],
                "year": 1982,
                "venue": "IEEE Trans. Patt. Anal. Machine Intell. PAMI-4",
                "volume": "5",
                "issue": "",
                "pages": "520--530",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Hull and S. Srihari. 1982. Experiments in text recognition with binary n-gram and viterbi algo- rithms. IEEE Trans. Patt. Anal. Machine b~tell. PAMI-4, 5:520-530.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Techniques for automatically correcting words in text",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Kukich",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "ACM Computing Surveys",
                "volume": "24",
                "issue": "",
                "pages": "377--439",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K.. Kukich. 1992. Techniques for automatically cor- recting words in text. ACM Computing Surveys, 24:377-439.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Corpus Processing for Lexical Acquisition, chapter Identifying unknown proper names in newswire text",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Mani",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Mcmillan",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Luperfoy",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Lusher",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Laskowski",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "I. Mani, R. McMillan, S. Luperfoy, E. Lusher, and S. Laskowski, 1996. Corpus Processing for Lexical Acquisition, chapter Identifying unknown proper names in newswire text. MIT Press, Cambridge.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Corpus Processing for Lexical Acquisition, chapter Internal and external evidence in the identification and semantic categorization of proper names",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Mcdonald",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. McDonald, 1996. Corpus Processing for Lexi- cal Acquisition, chapter Internal and external ev- idence in the identification and semantic catego- rization of proper names. MIT Press, Cambridge.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Integrated control of chart items for error repair",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Min",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Wilson",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and the 17th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Min and W. Wilson. 1998. Integrated control of chart items for error repair. In Proceedings of the 36th Annual Meeting of the Association for Com- putational Linguistics and the 17th hlternational Conferet~ce on Computational Linguistics.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Spelling checkers, spelling correctors, and the misspellings of poor spellers",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Min",
                        "suffix": ""
                    }
                ],
                "year": 1987,
                "venue": "Inf. Process. Manage",
                "volume": "23",
                "issue": "",
                "pages": "495--505",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. Min. 1996. Hierarchical Error Re.covery Based on Bidirectional Chart Parsing Techniques. Ph.D. thesis, University of NSW, Sydney, Australia. R. Mitton. 1987. Spelling checkers, spelling coffee- tots, and the misspellings of poor spellers. Inf. Process. Manage, 23:495-505.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Categorizing Unknown Words: A decision tree-based misspelling identifier",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Toole",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Advanced Topics in Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "122--133",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Toole 1999 Categorizing Unknown Words: A de- cision tree-based misspelling identifier In Foo, N (ed.) Advanced Topics in Artificial h2telligence, pages 122-133.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Improving data driven word class tagging by system combination",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Van Halteren",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Zavrel",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th Annual Meeting of the ACL and the 17th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "491--497",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. van Halteren, J. Zavrel, and W. Daelemans. 1998. Improving data driven word class tagging by sys- tem combination. In Proceedings of the 36th An- nual Meeting of the ACL and the 17th Interna- tional Conference on Computational Linguistics, pages 491-497.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Detecting and correcting morphosyntactic errors in real texts",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Vosse",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Proceedings of the 3rd Conference on Applied Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "111--118",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "T. Vosse. 1992. Detecting and correcting morpho- syntactic errors in real texts. In Proceedin9s of the 3rd Conference o11 Applied Natural Language Processing, pages 111-118.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Predictive Data Mining",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Weiss",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Indurkhya",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Weiss and N. Indurkhya. 1998. Predictive Data Mining. Morgan Kauffman Publishers.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Maximizing text-mining performance",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Weiss",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Apte",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Damerau",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Johnson",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Oles",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Goetz",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Hampp",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "IEEE Intelligent Systems and their Applications",
                "volume": "14",
                "issue": "4",
                "pages": "63--69",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Weiss, and C. Apte, and F. Damerau, and D. Johnson, and F. Oles and T. Goetz, and T. Hampp. 1999 Maximizing text-mining per- formance. IEEE Intelligent Systems and their Applications, 14(4):63-69",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "The use of tri-gram analysis for spelling error detection",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Zamora",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Pollock",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Zamora",
                        "suffix": ""
                    }
                ],
                "year": 1981,
                "venue": "Inf. Process. Manage",
                "volume": "17",
                "issue": "",
                "pages": "305--316",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. Zamora, J. Pollock, and A. Zamora. 1981. The use of tri-gram analysis for spelling error detec- tion. he Process. Manage., 17:305-316.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF1": {
                "content": "<table/>",
                "text": "",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF3": {
                "content": "<table><tr><td/><td colspan=\"3\">Baseline Precision Precision Recall F-score</td></tr><tr><td>Name features only</td><td>70.4%</td><td>86.5% 92.9%</td><td>89.6</td></tr><tr><td>All Features</td><td/><td>91.8% 94.5%</td><td>93.1</td></tr><tr><td colspan=\"3\">Table 4: Precision and recall for name identification</td><td/></tr><tr><td/><td/><td colspan=\"2\">Precision Recall F-score</td></tr><tr><td colspan=\"2\">Predicting Names and Misspellings</td><td>86.6% 89.9%</td><td>88.2</td></tr></table>",
                "text": "For this reason, the unknown word categorizer should be portable to new languages, even where extensive language resources do not exist. If",
                "type_str": "table",
                "num": null,
                "html": null
            }
        }
    }
}