{
"paper_id": "A00-1029",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T01:12:18.672629Z"
},
"title": "A Tool for Automated Revision of Grammars for NLP Systems",
"authors": [
{
"first": "Nanda",
"middle": [],
"last": "Kambhatla",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "IBM T.J. Watson Research Center",
"location": {
"addrLine": "30 Saw Mill River Road",
"postCode": "10532",
"settlement": "Hawthorne",
"region": "NY"
}
},
"email": ""
},
{
"first": "Wlodek",
"middle": [],
"last": "Zadrozny",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "IBM T.J. Watson Research Center",
"location": {
"addrLine": "30 Saw Mill River Road",
"postCode": "10532",
"settlement": "Hawthorne",
"region": "NY"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We present an algorithm and a tool for automatically revising grammars for natural language processing (NLP) systems to disallow specifically identified sentences or sets of sentences. We also outline an approach for automatically revising attribute value grammars using counterexamples. Developing grammars for NLP systems that are both general enough to accept most sentences about a domain, but constrained enough to disallow other sentences is very tedious. Our approach of revising grammars automatically using counterexamples greatly simplifies the development and revision of tightly constrained grammars. We have successfully used our tool to constrain over-generalizing grammars of speech understanding systems and obtained higher recognition accuracy.",
"pdf_parse": {
"paper_id": "A00-1029",
"_pdf_hash": "",
"abstract": [
{
"text": "We present an algorithm and a tool for automatically revising grammars for natural language processing (NLP) systems to disallow specifically identified sentences or sets of sentences. We also outline an approach for automatically revising attribute value grammars using counterexamples. Developing grammars for NLP systems that are both general enough to accept most sentences about a domain, but constrained enough to disallow other sentences is very tedious. Our approach of revising grammars automatically using counterexamples greatly simplifies the development and revision of tightly constrained grammars. We have successfully used our tool to constrain over-generalizing grammars of speech understanding systems and obtained higher recognition accuracy.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Natural language processing systems often constrain the set of \"utterances\" from a user (spoken, typed in, etc.) to narrow down the possible syntactic and semantic resolutions of the utterance and reduce the number of misrecognitions and/or misunderstandings by the system. Such constraints on the allowed syntax and the inferred semantics are often expressed in the form of a \"grammar \"l, a set of Throughout this document, by using the word \"grammar\", we refer to a Context-Free Grammar that consists of a finite set of non-terminals, a finite set of terminals, a unique non-terminal called the start symbol, and a set of production rules of the form A-> a, where A is a non-terminal and a is a string of terminal or non-terminal symbols. The 'language' rules specifying the set of allowed utterances and possibly also specifying the semantics associated with these utterances. For instance, grammars are commonly used in speech understanding systems to specify both the set of allowed sentences and to specify \"tags\" to extract semantic entities (e.g. the \"amount\" of money).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Constraining the number of sentences accepted by a grammar is essential for reducing misinterpretations of user queries by an NLP system. For instance, for speech understanding systems, if the grammar accepts a large number of sentences, then the likelihood of recognizing uttered sentences as random, irrelevant, or undesirable sentences is increased. For transaction processing systems, misrecognized words can lead to unintended transactions being processed. An effective constraining grammar can reduce transactional errors by limiting the number of sentence level errors. The problem of over-generalization of speech grammars and related issues is well discussed by Seneff (1992) .",
"cite_spans": [
{
"start": 671,
"end": 684,
"text": "Seneff (1992)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Thus, speech grammars must often balance the conflicting requirements of",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 accepting a wide variety of sentences to increase flexibility, and \u2022 accepting a small number of sentences to increase system accuracy and robustness. Developing tight grammars which trade-off these conflicting constraints is a tedious and accepted by a grammar is the set of all terminal strings that can be generated from the start symbol by successive application of the production rules. The grammar may optionally have semantic interpretation rules associated with each production rule (e.g. see (Allen 95) ). difficult process.",
"cite_spans": [
{
"start": 503,
"end": 513,
"text": "(Allen 95)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Typically, grammars overgeneralize and accept too many sentences that are irrelevant or undesirable for a given application. We call such sentences \"counterexamples\". The problem is usually handled by revising the grammar manually to disallow such counter-examples. For instance, the sentence \"give me my last eighteen transactions\" may need to be excluded from a grammar for a speech understanding system, since the words \"eighteen\" and \"ATM\" are easily confused by the speech recogniser. However, \"five\" and \"ten\" should remain as possible modifiers of \"transactions\". Counter-examples can also be sets of sentences that need to be excluded from a grammar (specified by allowing the inclusion of non-terminals in counter-examples). For example, for a banking application that disallows money transfers to online accounts, we might wish to exclude the set of sentences \"transfer <AMOUNT> dollars to my online account\" from the grammar, where <AMOUNT> is a non-terminal in the grammar that maps to all possible ways of specifying amounts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we are proposing techniques for automatically revising grammars using counterexamples. The grammar developer identifies counter-examples from among sentences (or sets of sentences) mis-recognized by the speech recognizer or from sentences randomly generated by a sentence generator using the original grammar. The grammar reviser modifies the original grammar to invalidate the counterexamples. The revised grammar can be fed back to the grammar reviser and whole process can be iterated several times until the resulting grammar is deemed satisfactory.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Figure I .....................................",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In the next sections, we first describe our algorithm for revising grammars to disallow counter-examples. We also discuss algorithms to make the revised grammar compact using minimum description length (MDL) based grammar compaction techniques and extensions to our basic algorithm to handle grammars with recursion. We then present some results of applying our grammar reviser tool to constrain speech grammars of speech understanding systems. Finally, we present an approach for revising attribute value grammars using our technique and present our conclusions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this section, we describe an algorithm (see Figure 1 ) for revising grammars that directly modifies the rules of the grammar to disallow counter-examples. For each counter-example 2, we generate the parse tree (representation of all the grammar rules needed to generate the sentence or set of sentences) and the grammar modifier modifies the production rules of the grammar to invalidate the counter-example. This process is repeated for each counter-example using the revised grammar from the previous iteration for generating the parse tree for the current counter-example. If a counter-example generates multiple parse trees, the above algorithm is repeated for each parse tree in turn.",
"cite_spans": [],
"ref_spans": [
{
"start": 47,
"end": 55,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Automated Grammar Revision by rule modification",
"sec_num": "2"
},
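The revision loop described in the preceding paragraph can be summarised in a short sketch. This is not the authors' tool: parse_trees and exclude are hypothetical helpers standing in for the parser and for the rule-modification step of section 2.1 (a sketch of the latter follows that section).

```python
# A sketch of the iterative revision loop described above (not the authors'
# implementation).  `parse_trees(grammar, ce)` is assumed to return the parse
# trees of a counter-example under the current grammar (an empty list once the
# counter-example is no longer accepted); `exclude(grammar, start, tree)` is
# assumed to rewrite the grammar so that one parse tree is no longer derivable.

def revise(grammar, start, counterexamples, parse_trees, exclude):
    for ce in counterexamples:
        # Re-parse after every modification: a counter-example may have several
        # parse trees, and each one must be invalidated in turn.
        trees = parse_trees(grammar, ce)
        while trees:
            grammar = exclude(grammar, start, trees[0])
            trees = parse_trees(grammar, ce)
    return grammar
```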
{
"text": "We present the grammar modification algorithm below. For, we assume that the parse-tree(s) of the counter-example contain no recursion (i.e. the same production rule does not occur twice in any of the parse trees). In section 2.4, we present an approach for using the algorithm even when the parse-trees contain recursion. Thus, the algorithm is applicable for any context-free grammar. The grammar modification algorithm a Note that a counter-example can be a sentence such as \"move to operator\" or a set of sentences such as \"transfer <AMOUNT> to online account\". The latter is specified using non-terminals interspersed with words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Grammar modification algorithm",
"sec_num": "2.1"
},
{
"text": "for modifying the rules of a grammar to disallow a counter-example c (identified by a grammar developer) using a parse-tree for e proceeds as follows : .............................................................................................................................. i We illustrate the algorithm with an example. Figure 2 (a) shows a simple grammar. Suppose the sentence \"move to operator\" is a counterexample for an application. Figure 2 (b) shows the parse-tree for \"move to operator\". Since the parse tree contains the rule: <V> ::= \"move\", new rules are added to define non-terminals <V'> and <Vo>, where <V'> does not generate \"move\" and <Vo> generates only \"move\". Similarly, since the parse tree contains the rule: <N>::= \"operator\", the new rules: <N'>::= \"checking\" I \"savings\" I \"money\"; and <No>::= \"operator\", are added. For the non-terminal <PP>, the new rules: <PP'>::= \"to\" <N'>; and <PPo>::= \"to\" <No>, are added. Note that since <No> only generates the phrase \"operator\" which is part of the counter-example, <PPo> only generates the phrase \"to operator\" which is part of the counter-example. Also, <PP'> generates all phrases that <PP> generates except for the phrase \"to operator\". Finally, the rule: <<START>>::= <V> <PP> is modified using the newly created non-terminals <V'>, <Vo>, <PP'> and <PPo> such that the only sentences which are accepted by the grammar and begin with the phrase \"move\" do not end with the phrase \"to operator\", and also, the only sentences which are accepted by the grammar and end with the phrase \"to operator\" do not begin with the phrase \"move\". Figure 3 shows the final modified grammar that accepts all the sentences that the grammar in Figure 2 (a) accepts except for the sentence \"move to operator\". In Figure 3 , all the grammar rules that are new or modified are shown in bold and italics.",
"cite_spans": [
{
"start": 152,
"end": 280,
"text": ".............................................................................................................................. i",
"ref_id": null
}
],
"ref_spans": [
{
"start": 326,
"end": 334,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 443,
"end": 451,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1609,
"end": 1617,
"text": "Figure 3",
"ref_id": null
},
{
"start": 1702,
"end": 1710,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1770,
"end": 1778,
"text": "Figure 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Grammar modification algorithm",
"sec_num": "2.1"
},
{
"text": "The above algorithm for grammar modification has a time complexity of O(m*2 k) rule creation (or modification) steps for removing a counterexample, where m is the number of production rules in the parse tree of the counter-example and k is the largest number of non-terminals on the right hand side of any of these production rules. Since grammars used for real applications rarely have more than a handful of non-terminals on the right hand side of production rules, this complexity is quite manageable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Grammar modification algorithm",
"sec_num": "2.1"
},
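The splitting step worked through above can be sketched concretely on a toy dict-based grammar representation (non-terminal -> list of alternatives, each a tuple of symbols). This is only an illustration of the behaviour described in section 2.1, not the authors' code; the "_x"/"_o" suffixes play the role of the primed and subscripted non-terminals of Figure 3, and the alternatives assumed for <V> beyond "move" are guesses made for the example.

```python
# A minimal sketch (not the authors' code) of the rule-splitting step of
# Section 2.1, for the non-recursive case.  A grammar is a dict mapping each
# non-terminal to a list of alternatives (tuples of symbols); a symbol is a
# non-terminal iff it is a key of the dict.
from itertools import product

def exclude(grammar, start, parse):
    """`parse` maps every non-terminal used in the counter-example's parse
    tree (including the start symbol) to the single alternative it used."""

    def split(nt):
        # Bottom-up: nt_o derives only the counter-example phrase, nt_x derives
        # everything else nt derives (<Vo> and <V'> of Figure 3, respectively).
        if nt + "_x" in grammar:
            return
        used = parse[nt]
        for child in used:
            if child in parse:
                split(child)
        only = tuple(s + "_o" if s in parse else s for s in used)
        rest = [alt for alt in grammar[nt] if alt != used]
        # Every _x/_o combination of the used alternative except "all _o",
        # which would derive exactly the counter-example phrase.
        choices = [((s + "_x", s + "_o") if s in parse else (s,)) for s in used]
        rest += [combo for combo in product(*choices) if combo != only]
        grammar[nt + "_o"] = [only]
        grammar[nt + "_x"] = rest

    split(start)
    grammar[start] = grammar.pop(start + "_x")  # keep everything but the counter-example
    grammar.pop(start + "_o")                   # the all-offending branch is dropped
    return grammar                              # old, now-unreachable rules are left in place

# Figure 2(a) as assumed here, and the counter-example "move to operator":
g = {"START": [("V", "PP")],
     "V": [("move",), ("transfer",)],
     "PP": [("to", "N")],
     "N": [("checking",), ("savings",), ("money",), ("operator",)]}
tree = {"START": ("V", "PP"), "V": ("move",), "PP": ("to", "N"), "N": ("operator",)}
exclude(g, "START", tree)   # g now rejects exactly "move to operator"
```

The combination step is where the O(m*2^k) factor mentioned above comes from: a used alternative with k non-terminal children expands into at most 2^k - 1 new alternatives.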
{
"text": "As seen in the example described above, the size of the grammar (number of production rules) can increase greatly by applying our algorithm successively for a number of counter-examples. However, we can remedy this by applying grammar induction algorithms based on minimum description length (MDL) (e.g. Grunwald (1996) and Zadrozny (1997) ) to combine rules and create a compact grammar that accepts the same language.",
"cite_spans": [
{
"start": 304,
"end": 319,
"text": "Grunwald (1996)",
"ref_id": null
},
{
"start": 324,
"end": 339,
"text": "Zadrozny (1997)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
{
"text": "The MDL principle (Rissanen (1982) ) selects that description (theory) of data, which minimizes the sum of the length, in bits, of the description of the theory, and the length, in bits, of data when encoded using the theory. In our case, the data is the set of possible word combinations and the theory is the grammar that specifies it. We are primarily interested in using the MDL principle to obtain (select) a compact grammar (the theory) from among a set of equivalent grammars. Since the set of possible word combinations (data) is the same for all grammars in consideration, we focus on the description length of the grammars itself, which we approximate by using a set of heuristics described in step 1 below.",
"cite_spans": [
{
"start": 18,
"end": 34,
"text": "(Rissanen (1982)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
{
"text": "We use the following modified version of Zadrozny's (1997) algorithm to generate a more compact grammar from the revised grammar using the MDL principle: 1. Compute the description length of the grammar, i.e. the total number of symbols needed to specify the grammar, where each non-terminal, \"::=\", and \"1\" are counted as one symbol. 2. Modify the current grammar by concatenating all possible pairs of nonterminals, and compute the description length of each such resultant grammar. For concatenating <NI> and <N2>, introduce the rule <N3>::= <NI> <N2>, search all other rules for consecutive occurrences of <NI> and <N2>, and replace such occurrences with <N3>. Note that this change results in an equivalent grammar (that accepts the same set of sentences as the original grammar). 3. Modify the current grammar by merging all possible pairs of non-terminals, and compute the description length of each such resultant grammar. For merging <N4> and <N5>, introduce the rule: <N6>::= <N4> [ <N5>, search for pairs of rules which differ only in one position such that for one of the rules, <N4> occurs in that position and the other rule, the <N5> occurs in the same position. Replace the pair of rules with a new rule that is exactly the same as either of the pairs of rules, except for the use of <N6> instead of <N3> or <N4>. Note that this change results in an equivalent grammar (that accepts the same set of sentences as the original grammar).",
"cite_spans": [
{
"start": 41,
"end": 58,
"text": "Zadrozny's (1997)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
{
"text": "4. Compute a table of description lengths of the grammars obtained by concatenating or merging all possible pairs of non-terminals of the initial grammar, as described above. Select the pair of non-terminals (if any) together with the action (concatenate or merge) that results in the least description length and execute the corresponding action. 5. Iterate steps 2, 3, and 4 until the description length does not decrease. No further modification is performed if the base description length of the grammar is lower than that resulting from merging or concatenating any pair of non-terminals.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
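Steps 1, 2, 4 and 5 can be sketched as follows over the same toy dict representation; the merge move of step 3 is analogous and omitted for brevity. This is a rough approximation of the description-length heuristic, not the authors' implementation, and the fresh names N0, N1, ... are assumed not to clash with existing non-terminals.

```python
# A rough sketch (not the authors' code) of the MDL-style compaction loop,
# restricted to the "concatenate" move.  A grammar is a dict mapping each
# non-terminal to a list of alternatives (tuples of symbols).
from itertools import count

def description_length(grammar):
    """Step 1 heuristic: the left-hand side, '::=', every '|' separator and
    every symbol on a right-hand side each count as one symbol."""
    return sum(2 + max(len(alts) - 1, 0) + sum(len(a) for a in alts)
               for alts in grammar.values())

def concatenate(grammar, a, b, fresh):
    """Step 2: introduce <fresh> ::= a b and replace consecutive 'a b'
    everywhere else; the resulting grammar accepts the same language."""
    new = {fresh: [(a, b)]}
    for nt, alts in grammar.items():
        rewritten = []
        for alt in alts:
            out, i = [], 0
            while i < len(alt):
                if i + 1 < len(alt) and alt[i] == a and alt[i + 1] == b:
                    out.append(fresh); i += 2
                else:
                    out.append(alt[i]); i += 1
            rewritten.append(tuple(out))
        new[nt] = rewritten
    return new

def compact(grammar):
    """Steps 4-5: greedily apply the best candidate rewrite until the
    description length no longer decreases."""
    fresh = (f"N{i}" for i in count())
    while True:
        base = description_length(grammar)
        nts = list(grammar)
        candidates = [concatenate(grammar, a, b, next(fresh))
                      for a in nts for b in nts]
        best = min(candidates, key=description_length, default=grammar)
        if description_length(best) >= base:
            return grammar
        grammar = best
```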
{
"text": "In variations of this algorithm, the selection of the pairs of non-terminals to concatenate or merge, can be based on; the syntactic categories of the corresponding terminals, the semantic categories of the corresponding terminals, and the frequency of occurrence of the nonterminals.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
{
"text": "Using the algorithm described above in conjunction with the algorithm in section 2.1, we can obtain a compact grammar that is guaranteed to disallow the counter-examples.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "MDL based grammar induction",
"sec_num": null
},
{
"text": "We have built a graphical tool for revising grammars for NLP systems based on the algorithm described in sections 2.1 and 2.2 above. The tool takes as input an existing grammar and can randomly generate sentences accepted by the grammar including non-terminal strings and strings containing terminals and nonterminals (e.g. both \"move to operator\" and \"transfer <AMOUNT> to online account\" would be generated if they were accepted by the grammar). A grammar developer (a human) interacts with the tool and either inputs counterexamples selected from speech recognition error logs or selects counter-examples like the ones listed above. The grammar developer can then revise the grammar to disallow the counterexamples by pressing a button and then reduce the size of the resulting grammar using the algorithm in section 2.2 by pressing another button to obtain a compact grammar that does not accept any of the identified counterexamples. Typically, the grammar developer repeats the above cycle several times to obtain a tightly constrained grammar.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results for grammar revision for speech understanding systems",
"sec_num": "2.3"
},
{
"text": "We have successfully used the tool described above to greatly constrain overgeneralizing grammars for speech understanding systems that we built for telephony banking, stock trading and directory assistance (Zadrozny et al, 1998) . The speech recognition grammars for these systems accepted around fifty million sentences each. We successfully used the reviser tool to constrain these grammars by eliminating thousands of sentences and obtained around 20-30% improvement in sentence recognition accuracy. We conducted two user studies of our telephony banking system at different stages of development. The user studies were conducted eight months apart. During these eight months, we used a multi-pronged strategy of constraining grammars using the grammar revision algorithms described in this paper, improving the pronunciation models of some words and redesigning the prompts of the system to enable fast and easy error recovery by users. The combination of all these techniques resulted in improving the 'successful transaction in first try '3 from 43% to 71\u00b0/0, an improvement of 65%. The average number of wrong tries (turns of conversation) to get a successful answer was reduced from 2.1 to 0.5 tries. We did not conduct experiments to isolate the contribution of each factor towards this improvement in system performance.",
"cite_spans": [
{
"start": 207,
"end": 229,
"text": "(Zadrozny et al, 1998)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Results for grammar revision for speech understanding systems",
"sec_num": "2.3"
},
{
"text": "It is important to note here that we would probably have obtained this improvement in recognition accuracy even with a manual revision of the grammars. However, the main advantage in using our tool is the tremendous simplification of the whole process of revision for a grammar developer who now selects counter-examples with an interactive tool instead of manually revising the grammars.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results for grammar revision for speech understanding systems",
"sec_num": "2.3"
},
{
"text": "We now describe an extension of the algorithm in section 2.1 that can modify grammars with recursion to disallow a finite set of counterexamples. The example grammars shown above are regular grammars (i.e. equivalent finite state automatons exist). For regular grammars (and only for regular grammars), an alternative approach for eliminating counter-examples using standard automata theory is\"",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "\u2022 Compute the finite state automaton (FSA) G corresponding to the original grammar. \u2022 Compute the FSA C corresponding to the set of counter-examples. \u2022 Compute C', the complement of C with respect to the given alphabet. \u2022 Compute G', the intersection of G and C'. The FSA G' is equivalent to a revised grammar which disallows the counterexamples.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
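For the regular case, the four steps above reduce to a complement followed by a product construction. The sketch below is self-contained rather than tied to any particular FSA toolkit; it assumes complete DFAs given as (states, alphabet, delta, start, accepting) tuples over a shared alphabet, with delta a dict keyed by (state, symbol).

```python
# A self-contained sketch of the automata route for regular grammars:
# intersect(G, complement(C)) accepts L(G) minus L(C), i.e. the revised
# language.  Both DFAs must be complete (every (state, symbol) pair mapped,
# adding a non-accepting sink state where necessary).

def complement(dfa):
    states, alphabet, delta, start, accepting = dfa
    return states, alphabet, delta, start, states - accepting

def intersect(d1, d2):
    s1, alphabet, t1, q1, f1 = d1
    s2, _, t2, q2, f2 = d2                    # a shared alphabet is assumed
    states = {(p, q) for p in s1 for q in s2}
    delta = {((p, q), a): (t1[p, a], t2[q, a])
             for (p, q) in states for a in alphabet}
    accepting = {(p, q) for (p, q) in states if p in f1 and q in f2}
    return states, alphabet, delta, (q1, q2), accepting

def accepts(dfa, sentence):
    _, _, delta, state, accepting = dfa
    for word in sentence:
        state = delta[state, word]
    return state in accepting
```

The product construction makes the O(n*m) state count discussed in the next paragraph explicit.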
{
"text": "The time complexity of the algorithm is O(n*m), where n and m are the number of states in the finite state automatons G and C respectively. This is comparable to the quadratic time complexity of our grammar revision algorithm presented in Section 3.1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "However, the above algorithm for eliminating counter-examples only works for regular grammars. This is because context-free grammars are not closed under complementation and intersection. However we can use our algorithm for grammar modification (section 2.1) to handle any context-free grammar as follows: 1) As before, generate parse tree p for counter-example c for an initial grammar G. 2) If p contains a recursion (two or more repetitions of any production rule in the same parse tree), rewrite the initial grammar G as the equivalent grammar G', where the recursion is \"unrolled\" sufficiently many times (at least one more time than the number of repetitions of the recursive production rule in the parse tree). We explain the unrolling of recursion in greater detail below. If p does not contain any recursion, go to step 4. 3) Generate parse tree p' for the counter-example c for the rewritten grammar G'. Note that p' will no longer contain a recursive application of any production rules, though G' itself will still have recursion. 4) Use the algorithm described in section 2.1 to modify the grammar G' to eliminate the counter-example c using the parse tree p'.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "We illustrate the above algorithm with an example. Figure 4(a) shows a context free grammar which accepts all strings of the form a\"b\", for any n greater than 0. Note that this is not a regular language. Suppose we wish to eliminate the counter-example aaabbb from the initial grammar. The parse treep for the counterexample aaabbb is shown in Figure 4(b) . The grammar in 4(a) can be rewritten as the equivalent grammar 4(c), where the recursion of (S->aSb) is unrolled three times. The parse tree p' for the counter-example aaabbb with respect to grammar in 4(c) is shown in Figure 4(d) . Note that p' does not contain any recursion, though the rewritten grammar does. We revised the FIGURE 4",
"cite_spans": [],
"ref_spans": [
{
"start": 51,
"end": 62,
"text": "Figure 4(a)",
"ref_id": null
},
{
"start": 344,
"end": 355,
"text": "Figure 4(b)",
"ref_id": null
},
{
"start": 577,
"end": 588,
"text": "Figure 4(d)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "(a) ORIGINAL GRAMMAR G <S> ::= \"a\" <S> \"b\" [ \"a n \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "(b) PARSE TREE p <S> ::= \"a n <S> \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "<S> ::= \"a\" <S> \"b\" . <S> ::= \"a n rib\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "(c) REWRITTEN GRAMMAR G' <S> ::= \"a\" <$1> \"b\" l \"a\" \"b\" . <Sl> ::= \"a\" <$2> \"b\" I \"a\" \"b\" . <$2> ::= \"a\" <$3>",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "\"b\" I \"a\" \"b\" . <$3> ::= \"a\" <$3> \"b\" [ \"a\" \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Handling recursion in grammars",
"sec_num": "2.4"
},
{
"text": "<S> ::= \"a\" <Sl> \"b\" . <$1> ::= \"a\" <$2> \"b\" . <$2> ::= \"a\" \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "(d) PARSE TREE p'",
"sec_num": null
},
{
"text": "<S> ::= \"a\" <Sl> \"b\" [ \"a\" \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "~) REVISED GRAMMAR Gr",
"sec_num": null
},
{
"text": "::= \"a\" <$2> \"b\" I \"a\" \"b\" . <82> ::= \"a\" <$3> \"b\" . <$3> ::= \"a\" <$3> \"b\" [ \"a\" \"b\" .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "<SI>",
"sec_num": null
},
{
"text": "grammar in 4(c) to eliminate the counterexample aaabbb using the parse tree in Figure 4 (d). The revised grammar is shown in Figure 4 (e). Note that here we are assuming that a mechanism exists for rewriting the rules of a grammar with recursion to unroll the recursion (if it exists) a finite number of times. Such an unrolling is readily accomplished by introducing a set of new non-terminars, one for each iteration of unrolling as shown in Figure 4 (c).",
"cite_spans": [],
"ref_spans": [
{
"start": 79,
"end": 89,
"text": "Figure 4",
"ref_id": null
},
{
"start": 127,
"end": 136,
"text": "Figure 4",
"ref_id": null
},
{
"start": 447,
"end": 455,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "<SI>",
"sec_num": null
},
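The unrolling used to obtain the grammar of Figure 4(c) can be sketched for directly recursive non-terminals over the same dict representation used earlier; this is an illustration only, not the authors' mechanism, and indirect recursion would need a more general rewrite. The fresh names S_1, S_2, ... correspond to <S1>, <S2>, ... in the figure.

```python
# A minimal sketch of unrolling a directly recursive non-terminal `depth`
# times, as in Figure 4(c).  Grammars are dicts: non-terminal -> list of
# alternatives (tuples of symbols).

def unroll(grammar, nt, depth):
    original = list(grammar[nt])                     # snapshot before rewriting
    names = [nt] + [f"{nt}_{i}" for i in range(1, depth + 1)]
    for level, name in enumerate(names):
        # each copy refers to the next one; the last copy stays recursive,
        # so the accepted language is unchanged
        target = names[level + 1] if level + 1 < len(names) else name
        grammar[name] = [tuple(target if sym == nt else sym for sym in alt)
                         for alt in original]
    return grammar

# The a^n b^n grammar of Figure 4(a):
g = {"S": [("a", "S", "b"), ("a", "b")]}
unroll(g, "S", 3)
# g now mirrors Figure 4(c):
#   S   -> a S_1 b | a b      S_1 -> a S_2 b | a b
#   S_2 -> a S_3 b | a b      S_3 -> a S_3 b | a b
# Applying the Section 2.1 step to the now recursion-free parse of "aaabbb"
# then yields a grammar like Figure 4(e).
```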
{
"text": "In this section, we delineate an approach for automatically modifying attribute value grammars using counter-examples. We first convert an attribute value grammar into an equivalent non-attributed grammar by creating new non-terminals and encoding the attributes in the names of the new non-terminals (see Manaster Ramer and Zadrozny (1990) and Pollard and Sag (1994) ).",
"cite_spans": [
{
"start": 325,
"end": 340,
"text": "Zadrozny (1990)",
"ref_id": null
},
{
"start": 345,
"end": 367,
"text": "Pollard and Sag (1994)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "For example, suppose the grammar in Figure 2 (a) is an attribute value grammar with an",
"cite_spans": [],
"ref_spans": [
{
"start": 36,
"end": 45,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "We have presented a set of algorithms and an interactive tool for automatically revising grammars of NLP systems to disallow identified counter-examples (sentences or sets of sentences accepted by the current grammar but deemed to be irrelevant for a given application). We have successfully used the tool to constrain overgeneralizing grammars of speech understanding systems and obtained 20-30% higher recognition accuracy. However, we believe the primary benefit of using our tool is the tremendously reduced effort for the grammar developer. Our technique relieves the grammar developer from the burden of going through the tedious and time consuming task of revising grammars by manually modifying production rules one at a time. Instead, the grammar developer simply identifies counter-examples to an interactive tool that revises the grammar to invalidate the identified sentences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "We also discussed an MDL based algorithm for grammar compaction to reduce the size of the revised grammar. Thus, using a combination of the algorithms presented in this paper, one can obtain a compact grammar that is guaranteed to disallow the counter-examples.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "Although our discussion here was focussed on speech understanding applications, the algorithms and the tool described here are applicable for any domain where grammars are used. We are currently implementing an extension of the grammar modifier to handle attribute-value grammars. We outlined an approach for automated modification of attribute-value grammars in Section 3.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "We conclude that algorithms for automatically constraining grammars based on counterexamples can be highly effective in reducing the burden on grammar developers to develop constrained, domain specific grammars. Moreover, these algorithms can be used in any applications, which deal with grammars.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Automated revision of attribute-value grammars",
"sec_num": "3"
},
{
"text": "We measured the number of times the user's transactional intent (e.g. checking balance, last five transactions etc.) was recognized and acted upon correctly by the system in the first try, even when the actual utterance may not have been recognized correctly word for word.914.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Conclusions",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "We thank all of our colleagues in the conversation machines group at IBM T.J. Watson Research Center for several helpful comments and suggestions through the course of this work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
},
{
"text": "<N_account_savings> : := \"savings\". <N_account_unspecified> ::= \"money\" I \"operator\" .attribute 'account', which encodes information about the type of account specified, e.g. 'account' might have the values, SAVINGS, CHECKING and UNSPECIFIED. Figure 5 shows an equivalent non-attributed grammar, where the value of the attribute 'account' has been encoded in the names of the non-terminals. Note that such an encoding can potentially create a very large number of non-terminals. Also, the specific coding used needs to be such that the attributes can be easily recovered from the non-terminal names later on.We can now use our modification algorithms (Section 2.1 and 2.2) to eliminate counterexamples from the non-attributed grammar. For instance, suppose we wish to eliminate 'move to operator' from the attributed grammar based on Figure 2 (a), as discussed above. We apply our algorithm (Section 2.1) to the grammar in Figure 5 and obtain the grammar shown in Figure 6 . Note that we name any new non-terminals created during the grammar modification in such a way as to leave the encoding of the attribute values in the non-terminal names intact.After applying the grammar revision algorithm, we can extract the attribute values from the encoding in the non-terminal names. For instance, in the example outlined above, we might systematically check for suffixes of a certain type and recover the attributes and their values. Also, as described earlier, we can use the algorithm described in section 2.2 to make the resulting grammar compact again by using MDL based grammar induction algorithms.",
"cite_spans": [],
"ref_spans": [
{
"start": 243,
"end": 251,
"text": "Figure 5",
"ref_id": null
},
{
"start": 834,
"end": 842,
"text": "Figure 2",
"ref_id": null
},
{
"start": 923,
"end": 932,
"text": "Figure 5",
"ref_id": null
},
{
"start": 965,
"end": 973,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "annex",
"sec_num": null
}
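The encoding and recovery of attribute values sketched in this annex can be illustrated with a toy naming scheme; the scheme below is an assumption for illustration only (the paper fixes no particular coding beyond the examples in Figure 5).

```python
# A toy illustration of encoding one attribute value into a non-terminal name
# and recovering it from the suffix later, as in Figure 5.  The separator and
# naming convention are assumptions, not the tool's actual coding.

def encode(non_terminal, attribute, value):
    return f"{non_terminal}_{attribute}_{value}"     # e.g. "N_account_savings"

def decode(name):
    base, attribute, value = name.rsplit("_", 2)
    return base, {attribute: value}

assert encode("N", "account", "savings") == "N_account_savings"
assert decode("N_account_unspecified") == ("N", {"account": "unspecified"})
```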
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Conversation machines for transaction processing",
"authors": [
{
"first": "W",
"middle": [],
"last": "Zadrozny",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Wolf",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Kambhatla",
"suffix": ""
},
{
"first": "Ye",
"middle": [
"Y"
],
"last": "",
"suffix": ""
}
],
"year": 1998,
"venue": "proceedings of AAAI'98/IAAI'98",
"volume": "",
"issue": "",
"pages": "1160--1166",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zadrozny W., Wolf C., Kambhatla N., and Ye Y. (1998). Conversation machines for transaction processing. In proceedings of AAAI'98/IAAI'98, AAAI Press/MIT Press, pp 1160-1166.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Natural Language Understanding. The Benjamin/Cummings Publishing Company",
"authors": [
{
"first": "J",
"middle": [],
"last": "Allen",
"suffix": ""
}
],
"year": 1995,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Allen J. (1995). Natural Language Understanding. The Benjamin/Cummings Publishing Company, Redwood City, CA 94065.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "A minimum description length approach to grammar inference",
"authors": [
{
"first": "P",
"middle": [],
"last": "Gnmwald",
"suffix": ""
}
],
"year": 1996,
"venue": "Symbolic, Connectionist and Statistical Approach to Learning for Natural Language Processing",
"volume": "",
"issue": "",
"pages": "203--216",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gnmwald P. (1996). A minimum description length approach to grammar inference. In S. Wemter et al., editors, Symbolic, Connectionist and Statistical Approach to Learning for Natural Language Processing, Springer, Berlin, p 203-216.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Expressive Power of Grammatical Formalisms",
"authors": [],
"year": null,
"venue": "Proceedings of Coling-90. Universitas Helsingiensis. Helsinki, Finland",
"volume": "",
"issue": "",
"pages": "195--200",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Expressive Power of Grammatical Formalisms, Proceedings of Coling-90. Universitas Helsingiensis. Helsinki, Finland\", pp. 195-200.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Head-Driven Phrase Structure Grammar",
"authors": [
{
"first": "C",
"middle": [],
"last": "Pollard",
"suffix": ""
},
{
"first": "I",
"middle": [
"A"
],
"last": "Sag",
"suffix": ""
}
],
"year": 1994,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pollard, C. and Sag I A. (1994). Head-Driven Phrase Structure Grammar. The U. of Chicago Press.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A universal prior for integers and estimation by minimum description length",
"authors": [
{
"first": "J",
"middle": [],
"last": "Rissanen",
"suffix": ""
}
],
"year": 1982,
"venue": "Annals of Statistics",
"volume": "11",
"issue": "",
"pages": "416--431",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rissanen J. (1982). A universal prior for integers and estimation by minimum description length. Annals of Statistics, 11:416-431.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "TINA: A natural language system for spoken language applications, Computational Linguistics",
"authors": [
{
"first": "S",
"middle": [],
"last": "Seneff",
"suffix": ""
}
],
"year": 1992,
"venue": "",
"volume": "18",
"issue": "",
"pages": "61--86",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Seneff S. (1992). TINA: A natural language system for spoken language applications, Computational Linguistics, 18:p61-86.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Minimum description length and compositionality",
"authors": [
{
"first": "W",
"middle": [],
"last": "Zadrozny",
"suffix": ""
}
],
"year": 1997,
"venue": "Proceedings of Second International Workshop for Computational Semantics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zadrozny W. (1997). Minimum description length and compositionality. Proceedings of Second International Workshop for Computational Semantics, Tilburg. Recently re-published as a book chapter in: H.Bunt and R.Muskens (eds.) Computing Meaning. Kluwer Academic Publishers, Dordrecht/Boston, 1999.",
"links": null
}
},
"ref_entries": {
"FIGREF1": {
"type_str": "figure",
"num": null,
"text": "Figure 2",
"uris": null
},
"FIGREF2": {
"type_str": "figure",
"num": null,
"text": ".....................................................<%'> : == \"move\"",
"uris": null
},
"FIGREF3": {
"type_str": "figure",
"num": null,
"text": "Figure 3",
"uris": null
}
}
}
} |