{
"paper_id": "A00-1035",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T01:12:07.309709Z"
},
"title": "Spelling and Grammar Correction for Danish in SCARRIE",
"authors": [
{
"first": "Patrizia",
"middle": [],
"last": "Paggio",
"suffix": "",
"affiliation": {},
"email": "patrizia@cst.ku.dk"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper reports on work carried out to develop a spelling and grammar corrector for Danish, addressing in particular the issue of how a form of shallow parsing is combined with error detection and correction for the treatment of context-dependent spelling errors. The syntactic grammar for Danish used by the system has been developed with the aim of dealing with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with contextdependent errors.",
"pdf_parse": {
"paper_id": "A00-1035",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper reports on work carried out to develop a spelling and grammar corrector for Danish, addressing in particular the issue of how a form of shallow parsing is combined with error detection and correction for the treatment of context-dependent spelling errors. The syntactic grammar for Danish used by the system has been developed with the aim of dealing with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with contextdependent errors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "In her much-quoted and still relevant review of technologies for automatic word correction (Kukich, 1992) , Kukich observes that \"research in context-dependent spelling correction is in its infancy\" (p. 429), and that the task of treating context-dependent errors is still an elusive one due to the complexity of the linguistic knowledge often necessary to analyse the context in sufficient depth to find and correct such errors. But progress in parsing technology and the growing speed of computers seem to have made the task less of a chimera. The '90s have in fact seen a renewed interest in grammar checking, and proposals have been made for systems covering English (Bernth, 1997) and other languages such as Italian (Bolioli et al., 1992) , Spanish and Greek (Bustamante and Ldon, 1996) , Czech (Holan et al., 1997) and Swedish (Hein, 1998) .",
"cite_spans": [
{
"start": 91,
"end": 105,
"text": "(Kukich, 1992)",
"ref_id": "BIBREF6"
},
{
"start": 671,
"end": 685,
"text": "(Bernth, 1997)",
"ref_id": "BIBREF0"
},
{
"start": 722,
"end": 744,
"text": "(Bolioli et al., 1992)",
"ref_id": "BIBREF1"
},
{
"start": 747,
"end": 792,
"text": "Spanish and Greek (Bustamante and Ldon, 1996)",
"ref_id": null
},
{
"start": 801,
"end": 821,
"text": "(Holan et al., 1997)",
"ref_id": "BIBREF5"
},
{
"start": 826,
"end": 846,
"text": "Swedish (Hein, 1998)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This paper describes the prototype of a spelling and grammar corrector for Danish which combines traditional spelling checking functionalities with the ability to carry out compound analysis and to detect and correct certain types of context-dependent spelling errors (hereafter simply \"grammar errors\"). Grammar correction is carried out by parsing the text, making use of feature overriding and error weights to accommodate the errors. Although a full parse of each sentence is attempted, the grammar has been developed with the aim of dealing only with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with certain types of grammar errors.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We begin by giving an overview of the system's components in Section 2. In Section 3 we describe the error types we want to deal with: Section 4 gives an overview of the grammar: in particular, the methods adopted for treating feature mismatches and structural errors are explained. Finally, in Section 5 evaluation results are presented and a conclusion is drawn.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The prototype is a system for high-quality proofreading for Danish which has been developed in the context of a collaborative EUproject 1. Together with the Danish prototype, 1Main contractors in the consortium were: WordFinder Software AB (Sweden), Center for the project has also produced similar systems for Swedish and Norwegian, all of them tailored to meet the specific needs of the Scandinavian publishing industry. They all provide writing support in the form of word and grammar checking.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "The Danish version of the system 2 constitutes a further development of the CORRie prototype (Vosse, 1992) (Vosse, 1994) , adapted to deal with the Danish language, and to the needs of the project's end users. The system processes text in batch mode and produces an annotated output text where errors are flagged and replacements suggested where possible. Text correction is performed in two steps: first the system deals with spelling errors and typos resulting in invalid words, and then with grammar errors.",
"cite_spans": [
{
"start": 93,
"end": 106,
"text": "(Vosse, 1992)",
"ref_id": null
},
{
"start": 107,
"end": 120,
"text": "(Vosse, 1994)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Invalid words are identified on the basis of dictionary lookup. The dictionary presently consists of 251,000 domain-relevant word forms extracted from a collection of 68,000 newspaper articles. A separate idiom list allowing for the identification of multi-word expressions is also available. Among the words not found in the dictionary or the idiom list, those occurring most frequently in the text (where frequency is assessed relative to the length of the text) are taken to be new words or proper names 3. The remaining unknown words are passed on to the compound analysis grammar, which is a set of regular expressions covering the most common types of compound nominals in Danish. This is an important feature, as in Danish compounding is very productive, and compounds are written as single words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Words still unknown at this point are taken to be spelling errors. The System flags them as Sprogteknologi (Denmark), Department of Linguistics at Uppsala University (Sweden), Institutt for lingvistikk og litteraturvitenskab at the University of Bergen (Norway), and Svenska Dagbladet (Sweden). A number of subcontractors also contributed to the project. Subcontractors in Denmark were: Munksgaard International Publishers, Berlingske Tidende, Det Danske Sprog-og Litteraturselskab, and Institut for Almen og Anvendt Sprogvidenskab at the University of Copenhagen.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "2In addition to the author of the present paper, the Danish SCARRIE team at CST consisted of Claus Povlsen, Bart Jongejan and Bradley Music.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "3The system also checks whether a closely matching alternative can be found in the dictionary, to avoid mistaking a consistently misspelt word for a new word. such and tries to suggest a replacement. The algorithm used is based on trigram and triphone analysis (van Berkel and Smedt, 1988) , and takes into account the orthographic strings corresponding to the invalid word under consideration and its possible replacement, as well as the phonetic representations of the same two words. Phonetic representations are generated by a set of grapheme-to-phoneme rules (Hansen, 1999) the aim of which is to assign phonetically motivated misspellings and their correct counterparts identical or similar phonetic representations.",
"cite_spans": [
{
"start": 261,
"end": 289,
"text": "(van Berkel and Smedt, 1988)",
"ref_id": "BIBREF13"
},
{
"start": 564,
"end": 578,
"text": "(Hansen, 1999)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Then the system tries to identify contextdependent spelling errors. This is done by parsing the text. Parsing results are passed on to a corrector to find replacements for the errors found. The parser is an implementation of the Tomita algorithm with a component for error recognition whose job is to keep track of error weights and feature mismatches as described in (Vosse, 1991) . Each input sentence is assigned the analysis with the lowest error weight. If the error is due to a feature mismatch, the offending feature is overridden, and if a dictionary entry satisfying the grammar constraints expressed by the context is found in the dictionary, it is offered as a replacement. If the structure is incomplete, on the other hand, an error message is generated. Finally, if the system identifies an error as a split-up or a run-on, it will suggest either a possible concatenation, or a sequence of valid words into which the misspelt word can be split up. Figure 1: Error distribution in the Danish corpus grammar development was then to enable the system to identify and analyse the grammatical constructions in which errors typically occur, whilst to some extent disregarding the remainder of the text.",
"cite_spans": [
{
"start": 368,
"end": 381,
"text": "(Vosse, 1991)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "The errors occurring in the corlbus have been analysed according to the taxonomy in (Rambell, 1997) . Figure 1 shows the distribution of the various error types into the five top-level categories of the taxonomy. As can be seen, grammar errors account for 30~0 of the errors. Of these, 70% fall into one of the following categories (Povlsen, 1998) Another way of grouping the errors is by the kind of parsing failure they generate: they can then be viewed as either feature mismatches, or as structural errors. Agreement errors are typical examples of feature mismatches. In the following nominal phrase, for example:",
"cite_spans": [
{
"start": 84,
"end": 99,
"text": "(Rambell, 1997)",
"ref_id": "BIBREF12"
},
{
"start": 332,
"end": 347,
"text": "(Povlsen, 1998)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 102,
"end": 110,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "(1) de *interessant projekter (the interesting projects) _the error can be formalised as a mismatch between the definiteness of the determiner de (the) and the indefiniteness of the adjective interessant (interesting). Adjectives have in fact both an indefinite and a definite form in Danish.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "The sentence below, on the other hand, is an example of structural error.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "(2) i sin tid *skabet han skulpturer over atomkraften (during his time wardrobe/created he sculptures about nuclear power)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Since the finite verb skabte (created) has been misspelt as skabet (the wardrobe), the syntactic structure corresponding to the sentence is missing a verbal head.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Run-ons and split-ups are structural errors of a particular kind, having to do with leaves in the syntactic tree. In some cases they can only be detected on the basis of the context, because the misspelt word has the wrong category or bears some other grammatical feature that is incorrect in the context. Examples are given in (3) and (4) below, which like the preceding examples are taken from the project's corpus. In both cases, the error would be a valid word in a different context. More specifically, rigtignok (indeed) is an adverb, whilst rigtig nok (actually correct) is a modified adjective; and inden .for (inside) is a preposition, whilst indenfor (indoors) is an adverb. In both examples the correct alternative is indicated in parentheses.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "(3) ... studerede min gruppe *rigtig nok (rigtignok) under temaoverskrifter (studied my group indeed on the basis of topic headings) (4) *indenfor (inden for) de gule mure (inside the yellow walls)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "Although the system has a facility for identifying and correcting split-ups and run-ons based on a complex interaction between the dictionary, the idiom list, the compound grammar and the syntactic grammar, this facility has not been fully developed yet, and will therefore not be described any further here. More details can be found in (Paggio, 1999) .",
"cite_spans": [
{
"start": 338,
"end": 352,
"text": "(Paggio, 1999)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The prototype",
"sec_num": "2"
},
{
"text": "The grammar is an augmented context-free grammar consisting of rewrite rules where symbols are associated with features. Error weights and error messages can also be attached to either rules or single features. The rules are applied by unification, but in cases where one or more features do not unify, the offending features will be overridden.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "In the current version of the grammar~ only the structures relevant to the error types we want the system to deal with -in other words nominal phrases and verbal groups -are accounted for in detail. The analysis produced is thus a kind of shallow syntactic analysis where the various sentence constituents are attached under the topmost S node as fragments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "For example, adjective phrases can be analysed as fragments, as shown in the following rule:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "Fragment -> AP \"?Fragment AP rule\":2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "To indicate that the fragment analysis is not optimal, it is associated with an error weight, as well as an error message to be used for debugging purposes (the message is not visible to the end user). The weight penalises parse trees built by applying the rule. The rule is used e.g. to analyse an AP following a copula verb as in:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "(5) De projekter er ikke interessante.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "(Those projects are not interesting)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "The main motivation for implementing a grammar based on the idea of fragments was efficiency. Furthermore, the fragment strategy could be implemented very quickly. However, as will be clarified in Section 5, this strategy is sometimes responsible for bad flags.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The grammar",
"sec_num": "4"
},
{
"text": "As an alternative to the fragment analysis, APs can be attached as daughters in NPs. This is of course necessary for the treatment of agreement in NPs, one of the error types targeted in our application. This is shown in the following rule:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "NP(def Gender PersNumber) -> Det (def Gender PersNumber) AP(def _ _) N(indef Gender:9-PersNumber)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "The rule will parse a correct definite NP such as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "but also (7) (S) de interessante projekter (the interesting projects)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "de *interessant projekter de interessante *projekterne",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "The feature overriding mechanism makes it possible for the system to suggest interessante as the correct replacement in (7), and projekter in (8). Let us see how this is done in more detail for example (7). The parser tries to apply the NP rule to the input string. The rule states that the adjective phrase must be definite (AP (def _ _)). But the dictionary entry corresponding to interessant bears the feature 'indef'. The parser will override this feature and build an NP according to the constraints expressed by the rule. At this point, a new dictionary lookup is performed, and the definite form of the adjective can be suggested as a replacement.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "Weights are used to control rule interaction as well as to establish priorities among features that may have to be overridden. For example in our NP rule, a weight has been attached to the Gender feature in the N node. The weight expresses the fact that it costs more to override gender on the head noun than on the determiner or adjective. The rationale behind this is the fact that if there is a gender mismatch, the parser should not try to find an alternative \u2022 form of the noun (which does not exist), but if necessary override the gender feature either on the adjective or the determiner.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Feature mismatches",
"sec_num": "4.1"
},
{
"text": "To capture structural errors, the formalism allows the grammar writer to write so-called error rules. The syntax of error rules is very similar to that used in 'normal' rules, the only difference being that an error rule must have an er-\u2022 ror weight and an error message attached to it. The purpose of the weight is to ensure that error rules are applied only if 'normal' rules are not applicable. The error message can serve two purposes. Depending on whether it is stated as an implicit or an explicit message (i.e. whether it is preceded by a question mark or not), it will appear in the log file where it can be used for debugging purposes, or in the output text as a message to the end user.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "The following is an error rule example.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "VGroup(_ finite Tense) -> V(_ finite:4 Tense) V(_ finite:4 _) \"Sequence of two finite verbs\":4 A weight of 4 is attached to the rule as a whole, but there are also weights attached to the 'finiteness' feature on the daughters: their function is to make it costly for the system to apply the rule to non-finite forms. In other words, the feature specification 'finite' is made difficult to override to ensure that it is indeed a sequence of finite verbal forms the rule applies to and flags.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "The rule will for example parse the verbal sequence in the following sentence:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "(9) Jeg vil *bevarer (bevare) min frihed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "(*I want keep my freedom)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "As a result of parsing, the system in this case will not attempt to correct the wrong verbal form, but issue the error message \"Sequence of two finite verbs\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "Error rules can thus be used to explicitly describe an error and to issue error messages. However, so far we have made very limited use of them, as controlling their interaction with 'normal' rules and with the feature overriding mechanism is not entirely easy. In fact, they are consistently used only to identify incorrect sequences of finite verbal forms or sentences missing a finite verb. To this sparse use of error rules corresponds, on the other hand, an extensive exploitation of the feature overriding mechanism. This strategy allows us to keep the number of rules in the grammar relatively low, but relies on a careful manual adjustment of the weights attached to the various features in the rules.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Capturing structural errors in grammar rules",
"sec_num": "4.2."
},
{
"text": "The project's access to a set of parallel unedited and proofread texts has made it possible to automate the evaluation of the system's linguistic functionality. A tool has been implemented to compare the results obtained by the system with the corrections suggested by the publisher's human proofreaders in order to derive measures telling us how well the system performed on recall (lexical coverage as well as coverage of errors), precision (percentage of correct flaggings), as well as suggestion adequacy (hits, misses and no suggestions offered). The reader is referred to (Paggio and Music, 1998) for more details on the evaluation methodology. The automatic procedure was used to evaluate the system during development, and in connection with the user validation. Testing was done on constructed test suites displaying examples of the errors targeted in the project and with text excerpts from the parallel corpora~ Figure 2 shows error recall and suggestion adequacy figures for the various error types represented in the test suites. These figures are very positive, especially with regard to the treatment of grammar errors. To make a comparison with a commercial product, the Danish version of the spelling and grammar checker provided by Microsoft Word does not flag any of the grammar errors. Figure 3 shows how the system performed on one of the test corpora. The corpus was assembled by mixing short excerpts containing relevant grammar errors and randomly chosen text. Since unlike test suites, the corpus also contains correct text, the figure this time also shows lexical coverage and precision figures. The corpus consists of 278 sentences, with an average length of 15.18 words per sentence. It may be surprising to see that it contains a limited number of errors, but it must be remembered that the texts targeted in the project are written by experienced journalists.",
"cite_spans": [
{
"start": 578,
"end": 602,
"text": "(Paggio and Music, 1998)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [
{
"start": 923,
"end": 931,
"text": "Figure 2",
"ref_id": null
},
{
"start": 1306,
"end": 1314,
"text": "Figure 3",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "The corpus was processed in 58 cpu-seconds on an HP 9000/B160. As expected, the system performs less well than on the test suites, and in general precision is clearly too low. However, we still consider these results encouraging given the relatively small resources the project has been able to spend on grammar development, and we We regard error coverage as quite satisfactory for a research prototype. In a comparative test made on a similar (slightly smaller) corpus, SCARR/E obtained 58.1% error coverage, and Word 53.5%. To quote a figure from another recently published test (Martins et al., 1998), the ReGra system is reported to miss 48.1% real errors. It is worth noting that ReGra has much more extensive linguistic resources available than SCARRIE, i.e. a dictionary of 1.5 million words and a grammar of 600 production rules. Most of the errors not found by SCAR-RIE in the test have to do with punctuation and other stylistic matters not treated in the project. There are also, however, agreement errors which go unnoticed. These failures are due to one of two reasons: either that no parse has been produced for the sentence in question, or that the grammar has produced a wrong analysis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "The precision obtained is at least at first sight much too low. On the same test corpus, however, Word only reached 15.9% precision. On closer inspection, 72 of the bad flags produced by SCARRIE turned out to be due to unrecognised proper names. Disregarding those, precision goes up to 34.9%. As was mentioned early, SCARRIE has a facility for guessing unknown proper names on the basis of their frequency of occurrence in the text. But since the test corpus consists of Short unrelated excerpts, a large number of proper names only occur once or twice. To get an impression of how the system would perform in a situation where the same proper names and unknown words had a higher frequency of occurrence, we doubled the test corpus by simply repeating the same text twice. As expected, precision increased. The system produced 178 flags, 60 of which were correct (39.7%). This compares well with the 40% precision reported for instance for ReGra.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "In addition to the problem of unknown proper names, false flags are related to unrecognised acronyms and compounds (typically forms containing acronyms or dashes), and a not very precise treatment of capitalisation. Only 13 false flags are due to wrong grammar analyses caused either by the fragment approach or by the grammar's limited coverage. In particular, genitive phrases, which are not treated at the moment, are responsible for most of these false alarms.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "In conclusion, we consider the results obtained so far promising, and the problems revealed by the evaluation tractable within the current system design. In particular, future development should focus on treating stylistic matters such as capitalisation and punctuation which have not been in focus in the current prototype. The coverage of the grammar, in particular the treatment of genitive phrases, should also be further developed. The data provided by the evaluation reported on in this paper, however, are much too limited to base further development on. Therefore, more extensive testing and debugging should also be carried out.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "In addition, two aspects of the system that have only be touched on in this paper would be worth further attention: one is the mechanism for the treatment of split-ups and run-ons, which as mentioned earlier is not well-integrated at the moment; the other is the weight adjustment process, which is done manually at the moment, and for which the adoption of a semiautomatic tool could be considered.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation and Conclusion",
"sec_num": "5"
},
{
"text": "The errorsTo ensure the coverage of relevant error types, a set of parallel unedited and proofread texts provided by the Danish end users has been collected. This text collection consists of newspaper and magazine articles published in 1997 for a total of 270,805 running words. The articles have been collected in their raw version, as well as in the edited version provided by the publisher's own proofreaders. Although not very large in number of words, the corpus consists of excerpts from 450 different articles to ensure a good spread of lexical domains and error types. The corpus has been used to construct test suites for progress evaluation, and also to guide grammar development. The aim set for",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": ",'jr-O",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "EasyEnglish: a tool for improving document quality",
"authors": [
{
"first": "A",
"middle": [],
"last": "Bernth",
"suffix": ""
}
],
"year": 1997,
"venue": "Proceedings of \u2022 the Fifth Conference on Applied Natural Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Bernth. 1997. EasyEnglish: a tool for im- proving document quality. In Proceedings of \u2022 the Fifth Conference on Applied Natural Lan- guage Processing.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "JDII: Parsing Italian with a robust constraint grammar",
"authors": [
{
"first": "A",
"middle": [],
"last": "Bolioli",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Dini",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Malnati",
"suffix": ""
}
],
"year": 1992,
"venue": "Proceedings of COLING:92",
"volume": "",
"issue": "",
"pages": "1003--1007",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. Bolioli, L. Dini, and G. Malnati. 1992. JDII: Parsing Italian with a robust constraint grammar. In Proceedings of COLING:92, pages 1003-1007.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "GramCheck: A grammar and style checker",
"authors": [
{
"first": "Ram~rez",
"middle": [],
"last": "Flora",
"suffix": ""
},
{
"first": "Fernando",
"middle": [],
"last": "Bustamante",
"suffix": ""
},
{
"first": "L@",
"middle": [],
"last": "S\u00a3nchez",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "On",
"suffix": ""
}
],
"year": 1996,
"venue": "Proceedings of COLING-96",
"volume": "",
"issue": "",
"pages": "175--181",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Flora Ram~rez Bustamante and Fer- nando S\u00a3nchez L@on. 1996. GramCheck: A grammar and style checker. In Proceedings of COLING-96, pages 175-181, Copenhagen, Denmark.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Grapheme-tophoneme rules for the Danish component of the SCARRIE project",
"authors": [
{
"first": "",
"middle": [],
"last": "Peter Molb~ek Hansen",
"suffix": ""
}
],
"year": 1999,
"venue": "Datalingvistisk Forenings drsmcde 1998 i Kcbehavn, Proceedings, number 25 in LAMBDA",
"volume": "",
"issue": "",
"pages": "79--91",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peter Molb~ek Hansen. 1999. Grapheme-to- phoneme rules for the Danish component of the SCARRIE project. In Hanne E. Thomsen and Sabine'Kirchmeier-Andersen, editors, Datalingvistisk Forenings drsmcde 1998 i Kcbehavn, Proceedings, number 25 in LAMBDA, pages 79-91. Institut for datal- ingvistik, Handelshcjskolen i Kcbenhaven.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "A chart-based framework for grammar checking: Initial studies",
"authors": [
{
"first": "Anna",
"middle": [],
"last": "S\u00a3gvall Hein",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of Nodalida-98",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anna S\u00a3gvall Hein. 1998. A chart-based frame- work for grammar checking: Initial studies. In Proceedings of Nodalida-98.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A prototype of a grammar checker for Czech",
"authors": [
{
"first": "Tom~",
"middle": [],
"last": "Holan",
"suffix": ""
},
{
"first": "Vladislav",
"middle": [],
"last": "Kubofi",
"suffix": ""
}
],
"year": 1997,
"venue": "Proceedings of ANLP'97",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tom~ Holan, Vladislav Kubofi, and Mar- tin Pl\u00a3tek. 1997. A prototype of a gram- mar checker for Czech. In Proceedings of ANLP'97.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Techniques for automatically correcting words in text",
"authors": [
{
"first": "Karen",
"middle": [],
"last": "Kukich",
"suffix": ""
}
],
"year": 1992,
"venue": "A CM Comput-_ ing Surveys",
"volume": "24",
"issue": "4",
"pages": "377--439",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karen Kukich. 1992. Techniques for automati- cally correcting words in text. A CM Comput- _ ing Surveys, 24(4):377-439.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Linguistic issues in the development of ReGra: a grammar checker for Brazilian Portuguese",
"authors": [
{
"first": "Ricardo",
"middle": [],
"last": "Ronaldo Teixeira Martins",
"suffix": ""
},
{
"first": "Maria",
"middle": [
"Volpe"
],
"last": "Hasegawa",
"suffix": ""
},
{
"first": "Gisele",
"middle": [],
"last": "Nunes",
"suffix": ""
},
{
"first": "Osvaldo Novais De",
"middle": [],
"last": "Monthila",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Oliveira",
"suffix": ""
}
],
"year": 1998,
"venue": "Natural Language Engineering",
"volume": "4",
"issue": "4",
"pages": "287--307",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ronaldo Teixeira Martins, Ricardo Hasegawa, Maria Volpe Nunes, Gisele Monthila, and Os- valdo Novais De Oliveira Jr. 1998. Linguistic issues in the development ofReGra: a gram- mar Checker for Brazilian Portuguese. Natu- ral Language Engineering, 4(4):287-307, De- cember.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Evaluation in the SCARRIE project",
"authors": [
{
"first": "'",
"middle": [],
"last": "Patrizia",
"suffix": ""
},
{
"first": "Bradley",
"middle": [],
"last": "Paggio",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Music",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of the First International Conference on Language Resources ~ Evaluation",
"volume": "",
"issue": "",
"pages": "277--282",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Patrizia' Paggio and Bradley Music. 1998. Eval- uation in the SCARRIE project. In Pro- ceedings of the First International Conference on Language Resources ~ Evaluation, pages 277-282. Granada, Spain.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Treatment of grammatical errors and evaluation in SCARRIE",
"authors": [
{
"first": "Patrizia",
"middle": [],
"last": "Paggio",
"suffix": ""
}
],
"year": 1999,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Patrizia Paggio. 1999. Treatment of grammat- ical errors and evaluation in SCARRIE. In Hanne E. Thomsen and Sabine Kirchmeier-",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Datalingvistisk Forenings drsmCde 1998 i KCbehavn, Proceedings, number 25 in LAMBDA",
"authors": [
{
"first": "",
"middle": [],
"last": "Andersen",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "65--78",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Andersen, editors, Datalingvistisk Forenings drsmCde 1998 i KCbehavn, Proceedings, num- ber 25 in LAMBDA, pages 65-78. Insti- tut for datalingvistik, Handelshcjskolen i Kcbenhaven.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Three types of grammatical errors in Danish",
"authors": [
{
"first": "Claus",
"middle": [],
"last": "Povlsen",
"suffix": ""
}
],
"year": 1998,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Claus Povlsen. 1998. Three types of gram- matical errors in Danish. Technical report, Copenhagen: Center for Sprogteknologi.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Error typology for automatic proof-reading purposes",
"authors": [
{
"first": "Olga",
"middle": [],
"last": "Rambell",
"suffix": ""
}
],
"year": 1997,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Olga Rambell. 1997. Error typology for auto- matic proof-reading purposes. Technical re- port, Uppsala: Uppsala University. :",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Triphone analysis: a combined method for the correction of orthographical and typographical errors",
"authors": [
{
"first": "Brigitte",
"middle": [],
"last": "Van Berkel",
"suffix": ""
},
{
"first": "Koenra~d De",
"middle": [],
"last": "Smedt",
"suffix": ""
}
],
"year": 1988,
"venue": "Proceedings of the 2nd conference on Applied Natural Language Processing",
"volume": "",
"issue": "",
"pages": "77--83",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Brigitte van Berkel and Koenra~d De Smedt. 1988. Triphone analysis: a combined method for the correction of orthographical and ty- pographical errors. In Proceedings of the 2nd conference on Applied Natural Language Pro- cessing, pages 77-83. ACL, Austin.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Detection and correction of morpho-syntactic errors in shift-reduce parsing",
"authors": [],
"year": 1991,
"venue": "Tomita's Algorithm: Extensions and Applications, number 91-68 in Memoranda Informatica",
"volume": "",
"issue": "",
"pages": "69--78",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Theo Vosse. 1991. Detection and correction of morpho-syntactic errors in shift-reduce parsing. In R. Heemels, A. Nijholt, and K. Sikkel, editors, Tomita's Algorithm: Ex- tensions and Applications, number 91-68 in Memoranda Informatica, pages 69-78. Uni- versity of Twente/",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Detecting and correcting morpho-syntactic errors in real texts",
"authors": [],
"year": 1992,
"venue": "Proceedings of the Third Conference on Applied Natural Language Processing",
"volume": "",
"issue": "",
"pages": "111--118",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Theo Vosse. 1992. Detecting and correcting morpho-syntactic errors in real texts. In Pro- ceedings of the Third Conference on Applied Natural Language Processing, pages 111-118, Trento, Italy.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "The Word Connection -Grammar-based Spelling Error Correction in Dutch",
"authors": [
{
"first": "G",
"middle": [],
"last": "Theo",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Vosse",
"suffix": ""
}
],
"year": 1994,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Theo G. Vosse. 1994. The Word Connection - Grammar-based Spelling Error Correction in Dutch. Ph.D. thesis, Rijksuniversiteit at Lei- den: the Netherlands. ISBN 90-75296-01-0.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"text": "and run-ons.",
"type_str": "figure",
"uris": null
},
"FIGREF1": {
"num": null,
"text": "Test corpus evaluation believe they can be improved.",
"type_str": "figure",
"uris": null
}
}
}
} |