|
{ |
|
"paper_id": "N15-1029", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:33:07.652821Z" |
|
}, |
|
"title": "Disfluency Detection with a Semi-Markov Model and Prosodic Features", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Ferguson", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "jferguson@berkeley.edu" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Durrett", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "gdurrett@berkeley.edu" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "klein@berkeley.edu" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present a discriminative model for detecting disfluencies in spoken language transcripts. Structurally, our model is a semi-Markov conditional random field with features targeting characteristics unique to speech repairs. This gives a significant performance improvement over standard chain-structured CRFs that have been employed in past work. We then incorporate prosodic features over silences and relative word duration into our semi-CRF model, resulting in further performance gains; moreover, these features are not easily replaced by discrete prosodic indicators such as ToBI breaks. Our final system, the semi-CRF with prosodic information, achieves an F-score of 85.4, which is 1.3 F 1 better than the best prior reported F-score on this dataset.", |
|
"pdf_parse": { |
|
"paper_id": "N15-1029", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present a discriminative model for detecting disfluencies in spoken language transcripts. Structurally, our model is a semi-Markov conditional random field with features targeting characteristics unique to speech repairs. This gives a significant performance improvement over standard chain-structured CRFs that have been employed in past work. We then incorporate prosodic features over silences and relative word duration into our semi-CRF model, resulting in further performance gains; moreover, these features are not easily replaced by discrete prosodic indicators such as ToBI breaks. Our final system, the semi-CRF with prosodic information, achieves an F-score of 85.4, which is 1.3 F 1 better than the best prior reported F-score on this dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Spoken language is fundamentally different from written language in that it contains frequent disfluencies, or parts of an utterance that are corrected by the speaker. Removing these disfluencies is desirable in order to clean the input for use in downstream NLP tasks. However, automatically identifying disfluencies is challenging for a number of reasons. First, disfluencies are a syntactic phenomenon, but defy standard context-free parsing models due to their parallel substructures (Johnson and Charniak, 2004) , causing researchers to employ other approaches such as pipelines of sequence models (Qian and Liu, 2013) or incremental syntactic systems (Honnibal and Johnson, 2014) . Second, human processing of spoken language is complex and mixes acoustic and syntactic indicators (Cutler et al., 1997) , so an automatic system must employ features targeting all levels of the perceptual stack to achieve high performance. In spite of this, the primary thread of work in the NLP community has focused on identifying disfluencies based only on lexicosyntactic cues (Heeman and Allen, 1994; Charniak and Johnson, 2001; Snover et al., 2004; Rasooli and Tetreault, 2013) . A separate line of work has therefore attempted to build systems that leverage prosody as well as lexical information (Shriberg et al., 1997; Liu et al., 2003; Kim et al., 2004; Liu et al., 2006) , though often with mixed success. In this work, we present a model for disfluency detection that improves upon model structures used in past work and leverages additional prosodic information. Our model is a semi-Markov conditional random field that distinguishes disfluent chunks (to be deleted) from fluent chunks (everything else), as shown in Figure 1 . By making chunk-level predictions, we can incorporate not only standard tokenlevel features but also features that can consider the entire reparandum and the start of the repair, enabling our model to easily capture parallelism between these two parts of the utterance. 1 This frame-work also enables novel prosodic features that compute pauses and word duration based on alignments to the speech signal itself, allowing the model to capture acoustic cues like pauses and hesitations that have proven useful for disfluency detection in earlier work (Shriberg et al., 1997) . Such information has been exploited by NLP systems in the past via ToBI break indices (Silverman et al., 1992) , a mid-level prosodic abstraction that might be indicative of disfluencies. These have been incorporated into syntactic parsers with some success (Kahn et al., 2005; Dreyer and Shafran, 2007; Huang and Harper, 2010 ), but we find that using features on predicted breaks is ineffective compared to directly using acoustic indicators.", |
|
"cite_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 516, |
|
"text": "(Johnson and Charniak, 2004)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 623, |
|
"text": "(Qian and Liu, 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 685, |
|
"text": "(Honnibal and Johnson, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 808, |
|
"text": "(Cutler et al., 1997)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1070, |
|
"end": 1094, |
|
"text": "(Heeman and Allen, 1994;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1095, |
|
"end": 1122, |
|
"text": "Charniak and Johnson, 2001;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1123, |
|
"end": 1143, |
|
"text": "Snover et al., 2004;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1144, |
|
"end": 1172, |
|
"text": "Rasooli and Tetreault, 2013)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1293, |
|
"end": 1316, |
|
"text": "(Shriberg et al., 1997;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1317, |
|
"end": 1334, |
|
"text": "Liu et al., 2003;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1335, |
|
"end": 1352, |
|
"text": "Kim et al., 2004;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1353, |
|
"end": 1370, |
|
"text": "Liu et al., 2006)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 2279, |
|
"end": 2302, |
|
"text": "(Shriberg et al., 1997)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 2391, |
|
"end": 2415, |
|
"text": "(Silverman et al., 1992)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 2563, |
|
"end": 2582, |
|
"text": "(Kahn et al., 2005;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2583, |
|
"end": 2608, |
|
"text": "Dreyer and Shafran, 2007;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 2609, |
|
"end": 2631, |
|
"text": "Huang and Harper, 2010", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1719, |
|
"end": 1727, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our implementation of a baseline CRF model already achieves results comparable to those of a highperformance system based on pipelined inference (Qian and Liu, 2013) . Our semi-CRF with span features improves on this, and adding prosodic indicators gives additional gains. Our final system gets an F-score of 85.4, which is 1.3 F 1 better than the best prior reported F-score on this dataset (Honnibal and Johnson, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 145, |
|
"end": 165, |
|
"text": "(Qian and Liu, 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 420, |
|
"text": "(Honnibal and Johnson, 2014)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Throughout this work, we make use of the Switchboard corpus using the train/test splits specified by Johnson and Charniak (2004) and used in other work. We use the provided transcripts and gold alignments between the text and the speech signal. We follow the same preprocessing regimen as past work: we remove partial words, punctuation, and capitalization to make the input more realistic. 2 Finally, we use predicted POS tags from the Berkeley parser (Petrov et al., 2006 ) trained on Switchboard.", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 128, |
|
"text": "Johnson and Charniak (2004)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 473, |
|
"text": "(Petrov et al., 2006", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setup", |
|
"sec_num": "2" |
|
}, |
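
{

"text": "To make the preprocessing concrete, the following minimal sketch (our own illustration, not the paper's code) normalizes a token sequence as described above; the token conventions and the regex are assumptions.\n\nimport re\n\ndef normalize(tokens):\n    # Drop partial words (transcribed with a trailing '-', e.g. 'wh-'),\n    # then strip punctuation and capitalization; details are illustrative.\n    out = []\n    for t in tokens:\n        if t.endswith('-'):\n            continue\n        t = re.sub(r'[^\\w\\']', '', t.lower())\n        if t:\n            out.append(t)\n    return out",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setup",

"sec_num": "2"

},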
|
{ |
|
"text": "Past work on disfluency detection has employed CRFs to predict disfluencies using a IOBES tag set (Qian and Liu, 2013) . An example of this is shown in Figure 2 . One major shortcoming of this model is that beginning and ending of a disfluency are not decided jointly: because features in the CRF are local to emissions and transitions, features in this model cannot recognize that a proposed disfluency begins with upper and ends before another occurrence of upper (see Figure 1 ). Identifying instances of this parallelism is key to accurately predicting disfluencies. Past work has captured information about repeats using token-level features (Qian and Liu, 2013) , but these still apply to either the beginning or ending of a disfluency in isolation. Such features are naturally less effective on longer disfluencies as well, and roughly 15% of tokens occurring in disfluencies are in disfluencies of length 5 or greater. The presence of these longer disfluencies suggests using a more powerful semi-CRF model as we describe in the next section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 118, |
|
"text": "(Qian and Liu, 2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 667, |
|
"text": "(Qian and Liu, 2013)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 160, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 471, |
|
"end": 479, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "3" |
|
}, |
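
{

"text": "For concreteness, the following sketch (ours, not the paper's code) converts gold disfluent spans into IOBES tags for such a chain CRF; on the example of Figure 2, the reparandum how you address receives B I E.\n\ndef spans_to_iobes(n_tokens, disfluent_spans):\n    # disfluent_spans are half-open token intervals [b, e)\n    tags = ['O'] * n_tokens\n    for b, e in disfluent_spans:\n        if e - b == 1:\n            tags[b] = 'S'\n        else:\n            tags[b], tags[e - 1] = 'B', 'E'\n            for i in range(b + 1, e - 1):\n                tags[i] = 'I'\n    return tags\n\n# 'to determine how you address how you weigh', reparandum = tokens 2-4:\nprint(spans_to_iobes(8, [(2, 5)]))  # ['O', 'O', 'B', 'I', 'E', 'O', 'O', 'O']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model",

"sec_num": "3"

},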
|
{ |
|
"text": "The model that we propose in this work is a semi-Markov conditional random field (Sarawagi and Cohen, 2004) . Given a sentence x = (x 1 , . . . , x n ) the model considers sequences of labeled spans", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 107, |
|
"text": "(Sarawagi and Cohen, 2004)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "s = (( 1 , b 1 , e 1 ), ( 2 , b 2 , e 2 ), . . . , ( k , b k , e k ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ", where i \u2208 {Fluent, Disfluent} is a label for each span and b i , e i \u2208 {0, 1 . . . n} are fenceposts for each span such that b i < e i and e i = b i+1 . The model places distributions over these sequences given the sentence as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (s|x) \u221d exp \u03b8 k i=1 f (x, ( i , b i , e i ))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
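
{

"text": "To illustrate inference under equation (1), the sketch below gives Viterbi decoding over labeled spans. It is our own sketch under stated assumptions, not the authors' implementation: score(lab, b, e) stands in for the dot product of \u03b8 with the span features f, and max_len is a hypothetical cap on disfluent span length.\n\nimport math\n\ndef viterbi(n, score, max_len=15):\n    # best[j][lab]: best score of a segmentation of tokens [0, j) whose\n    # last span has label lab (0 = fluent, 1 = disfluent).\n    best = [[-math.inf, -math.inf] for _ in range(n + 1)]\n    back = [[None, None] for _ in range(n + 1)]\n    best[0] = [0.0, 0.0]\n    for j in range(1, n + 1):\n        for b in range(j):\n            for lab in (0, 1):\n                if lab == 1 and j - b > max_len:\n                    continue  # cap the length of disfluent spans\n                for prev in (0, 1):\n                    if b > 0 and lab == 0 and prev == 0:\n                        continue  # a fluent span may only follow a disfluent one\n                    s = best[b][prev] + score(lab, b, j)\n                    if s > best[j][lab]:\n                        best[j][lab], back[j][lab] = s, (b, prev)\n    lab = 0 if best[n][0] >= best[n][1] else 1\n    spans, j = [], n\n    while j > 0:\n        b, prev = back[j][lab]\n        spans.append((lab, b, j))\n        j, lab = b, prev\n    return list(reversed(spans))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semi-CRF Model",

"sec_num": "3.1"

},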
|
{ |
|
"text": "where f is a feature function that computes features for a span given the input sentence. In our model we constrain the transitions so that fluent spans can only be followed by disfluent spans. For this task, the spans we are predicting correspond directly to the reparanda of disfluencies, since these are the parts of the input sentences that should be removed. Note that our feature function can jointly inspect both the beginning and ending of the disfluency; we will describe the features of this form more specifically in Section 3.2.2. To train our model, we maximize conditional log likelihood of the training data augmented with a loss function via softmax-margin (Gimpel and Smith, 2010) . Specifically, during training, we maximize L to be token-level asymmetric Hamming distance (where the output is viewed as binary edited/nonedited). We optimize with the AdaGrad algorithm of Duchi et al. (2011) with L 2 regularization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 673, |
|
"end": 697, |
|
"text": "(Gimpel and Smith, 2010)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 890, |
|
"end": 909, |
|
"text": "Duchi et al. (2011)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(\u03b8) = d i=1 log p \u03b8 (s|x), where p \u03b8 (s|x) = p \u03b8 (s|x) exp ( (s,s * )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-CRF Model", |
|
"sec_num": "3.1" |
|
}, |
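
{

"text": "Because the token-level Hamming loss decomposes over spans, the cost can be folded directly into the span potentials, so the same dynamic program that computes the log-likelihood gradient also handles the softmax-margin objective. A sketch follows (the cost values c_fp and c_fn are illustrative, not the tuned ones):\n\ndef cost_augmented(score, gold_edited, c_fp=1.0, c_fn=4.0):\n    # gold_edited[i] is True if token i is edited (disfluent) in the gold\n    # standard; c_fp / c_fn are asymmetric per-token Hamming costs.\n    def aug(lab, b, e):\n        cost = 0.0\n        for i in range(b, e):\n            if lab == 1 and not gold_edited[i]:\n                cost += c_fp  # span says disfluent, gold says fluent\n            elif lab == 0 and gold_edited[i]:\n                cost += c_fn  # span says fluent, gold says disfluent\n        return score(lab, b, e) + cost\n    return aug",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semi-CRF Model",

"sec_num": "3.1"

},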
|
{ |
|
"text": "Features in our semi-CRF factor over spans, which cover the reparandum of a proposed disfluency, and thus generally end at the beginning of the repair. This means that they can look at information throughout the reparandum as well as the repair by looking at content following the span. Many of our features are inspired by those in Qian and Liu (2013) and Honnibal and Johnson (2014) . We use a combination of features that are fired for each token within a span, and features that consider properties of the span as a whole. Figure 2 depicts the token-level word features we employ in both our basic CRF and our semi-CRF models. Similar to standard sequence modeling tasks, we fire word and predicted part-of-speech unigrams and bigrams in a window around the current token. In addition, we fire features on repeated words and part-of-speech tags in order to capture the fact that the repair is typically a partial copy of the reparandum, with possibly a word or two switched out. Specifically, we fire features on the distance to any duplicate words or parts-of-speech in a window around the current token, conjoined with the word identity itself or its POS tag (see the Duplicate box in Figure 2 ). We also fire similar features for POS tags since substituted words in the repair frequently have the same tag (compare address and weigh). Finally, we include a duplicate bigram feature that fires if the bigram formed from the current and next words is repeated later on. When this happens, we fire an indicator for the POS tags of the bigram. In Figure 2 , this feature is fired for the word how because how you is repeated later on, and contains the POS tag bigram (WRB, PRP). Table 1 shows the results for using these features in a CRF model run on the development set. 3", |
|
"cite_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 352, |
|
"text": "Qian and Liu (2013)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 384, |
|
"text": "Honnibal and Johnson (2014)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 527, |
|
"end": 535, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1191, |
|
"end": 1199, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1550, |
|
"end": 1558, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1682, |
|
"end": 1689, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "3.2" |
|
}, |
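
{

"text": "The duplicate features can be sketched as follows (window size and feature names are our own choices, not the paper's). On the example of Figure 2, the token how at position 2 fires ('dup-word', 3, 'how') and ('dup-bigram-pos', 'WRB', 'PRP').\n\ndef duplicate_features(words, pos, i, window=6):\n    feats = []\n    for d in range(1, window + 1):\n        j = i + d\n        if j < len(words):\n            if words[j] == words[i]:\n                feats.append(('dup-word', d, words[i]))\n            if pos[j] == pos[i]:\n                feats.append(('dup-pos', d, pos[i]))\n    # duplicate bigram: if (w_i, w_i+1) recurs later, fire its POS tags\n    if i + 1 < len(words):\n        later = list(zip(words[i + 2:], words[i + 3:]))\n        if (words[i], words[i + 1]) in later:\n            feats.append(('dup-bigram-pos', pos[i], pos[i + 1]))\n    return feats",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Features",

"sec_num": "3.2"

},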
|
{ |
|
"text": "In addition to features that fire for each individual token, the semi-CRF model allows for the inclusion of features that look at characteristics of the proposed span as a whole, allowing us to consider the repair directly by firing features targeting the words following the span. These are shown in Figure 3 . Critically, repeated sequences of words and partsof-speech are now featurized in a coordinated way, making it less likely that spurious repeated content will cause the model to falsely posit a disfluency.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 309, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Span Features", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "We first fire an indicator of whether or not the entire proposed span is later repeated, conjoined with the length of the span. Because many disfluencies are just repeated phrases, and longer phrases are generally not repeated verbatim in fluent language, this feature is a strong indicator of disfluencies when it fires on longer spans. For similar reasons, we fire features for the length of the longest repeated sequences of words and POS tags (the bottom box in Figure 3 ). In addition to general repeated words, we fire a separate feature for the number of uncommon words (appearing less than 50 times in the training data) contained in the span that are repeated later in the sentence; consider upper from Figure 1 , which would be unlikely to be repeated on its own as compared to stopwords. Lastly, we include features on the POS tag bigrams surrounding each span boundary (top of Figure 3) , as well as the bigram formed from the POS tags immediately before and after the span. These features aim to capture the idea that a disfluency is a mistake with a disjuncture before the repair, so the ending bigram will generally not be a commonly seen fluent pair, and the POS tags surrounding the reparandum should be fluent if the reparandum were removed. Table 1 shows that the additional features enabled by the CRF significantly improve performance on top of the basic CRF model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 474, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 720, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 889, |
|
"end": 898, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1260, |
|
"end": 1267, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Span Features", |
|
"sec_num": "3.2.2" |
|
}, |
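
{

"text": "A sketch of these span-level features follows (the naming and exact definitions are ours): whole-span repetition, longest repeated word and POS sequences, and the POS bigrams at and around the span boundaries.\n\ndef span_features(words, pos, b, e):\n    feats = []\n    n, length = len(words), e - b\n    rest = words[e:]\n    # is the entire reparandum repeated verbatim later in the sentence?\n    if any(rest[k:k + length] == words[b:e]\n           for k in range(len(rest) - length + 1)):\n        feats.append(('span-repeated', length))\n    # longest repeated word / POS sequence starting at the span boundary\n    k = 0\n    while b + k < e and e + k < n and words[b + k] == words[e + k]:\n        k += 1\n    feats.append(('longest-word-repeat', k))\n    k = 0\n    while b + k < e and e + k < n and pos[b + k] == pos[e + k]:\n        k += 1\n    feats.append(('longest-pos-repeat', k))\n    # POS bigrams at each boundary, plus the bigram around the whole span\n    P = ['<S>'] + pos + ['</S>']  # indices shift up by one\n    feats.append(('begin-pos-bigram', P[b], P[b + 1]))\n    feats.append(('end-pos-bigram', P[e], P[e + 1]))\n    feats.append(('around-pos-bigram', P[b], P[e + 1]))\n    return feats",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Span Features",

"sec_num": "3.2.2"

},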
|
{ |
|
"text": "Section 3 discussed a primarily structural improvement to disfluency detection. Henceforth, we will use the semi-CRF model exclusively and discuss two methods of incorporating acoustic duration information that might be predictive of disfluencies. Our results will show that features targeting raw acoustic properties of the signal (Section 4.1) are quite effective, while using ToBI breaks as a discrete indicator to import the same information does not give benefits (Section 4.2) Pause: 1313ms", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting Acoustic Information", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Long; 2.5x average duration for of that kind of to me it is more Figure 4 : Raw acoustic features. The combination of a long pause and considerably longer than average duration for of is a strong indicator of a disfluency.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 73, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Exploiting Acoustic Information", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The first way we implemented this information was in the form of raw prosodic features related to pauses between words and word duration. To compute these features, we make use of the alignment between the speech signal and the raw text. Pauses are then simply identified by looking for pairs of words whose alignments are not flush. The specific features used are indicators of the existence of a pause immediately before or after a span, and the total number of pauses contained within a span. Word duration is computed based on the deviation of a word's length from its average length averaged over all occurrences in the corpus. 4 We fire duration features similar to the pause features, namely indicators of whether the duration of the first and last words in a span deviate beyond some threshold from the average, and the total number of such deviations within a span. As displayed in Table 1 , adding these raw features results in improved performance on top of the gains from the semi-CRF model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 633, |
|
"end": 634, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 891, |
|
"end": 898, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Raw Acoustic Features", |
|
"sec_num": "4.1" |
|
}, |
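
{

"text": "Concretely, with word-level alignments available as (start, end) times in seconds, the indicators can be sketched as below; the gap and duration thresholds are illustrative values, not the tuned ones.\n\ndef pause_after(align, i, min_gap=0.05):\n    # a pause exists where consecutive word alignments are not flush\n    return i + 1 < len(align) and align[i + 1][0] - align[i][1] > min_gap\n\ndef overlong(words, align, avg_dur, i, ratio=1.5):\n    # does word i's duration deviate beyond a threshold from its average?\n    start, end = align[i]\n    return (end - start) > ratio * avg_dur[words[i]]\n\ndef acoustic_span_features(words, align, avg_dur, b, e):\n    feats = []\n    if b > 0 and pause_after(align, b - 1):\n        feats.append('pause-before-span')\n    if pause_after(align, e - 1):\n        feats.append('pause-after-span')\n    feats.append(('pauses-inside',\n                  sum(pause_after(align, i) for i in range(b, e - 1))))\n    feats.append(('first-word-long', overlong(words, align, avg_dur, b)))\n    feats.append(('last-word-long', overlong(words, align, avg_dur, e - 1)))\n    feats.append(('long-words-inside',\n                  sum(overlong(words, align, avg_dur, i) for i in range(b, e))))\n    return feats",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Raw Acoustic Features",

"sec_num": "4.1"

},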
|
{ |
|
"text": "In addition to the raw acoustic features, we also tried utilizing discrete indicators of acoustic information, specifically ToBI break indices (Silverman et al., 1992) . Previous work has shown performance improvements resulting from the use of such discrete information in other tasks, such as parsing (Kahn et al., 2005; Dreyer and Shafran, 2007; Huang and Harper, 2010) . We chose to focus specifically on ToBI breaks rather than on ToBI tones because tonal information has appeared relatively less Table 2 : Disfluency results with predicted ToBI features on the development set. We compare our baseline semi-CRF system (Baseline) with systems that incorporate prosody via predictions from the AuToBI system of Rosenberg (2010) and from our CRF ToBI predictor, as well as the full system using raw acoustic features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 167, |
|
"text": "(Silverman et al., 1992)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 322, |
|
"text": "(Kahn et al., 2005;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 348, |
|
"text": "Dreyer and Shafran, 2007;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 372, |
|
"text": "Huang and Harper, 2010)", |
|
"ref_id": "BIBREF8" |
|
}
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 502, |
|
"end": 509, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "useful for this task (Shriberg et al., 1997) . Moreover, the ToBI break specification stipulates a category for strong disjuncture with a pause (2) as well as a pause marker (p), both of which correlate well with disfluencies on gold-annotated ToBI data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 44, |
|
"text": "(Shriberg et al., 1997)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "To investigate whether this correlation translates into a performance improvement for a disfluency detection system like ours, we add features targeting ToBI annotations as follows: for each word in a proposed disfluent span, we fire a feature indicating the break index on the fencepost following that word, conjoined with where that word is in the span (beginning, middle, or end).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
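
{

"text": "As a sketch (the feature naming is ours), this is a simple loop over the proposed span:\n\ndef tobi_features(breaks, b, e):\n    # breaks[i] is the break index on the fencepost after word i,\n    # e.g. one of '1', '2', '3', '4', 'p'\n    feats = []\n    for i in range(b, e):\n        where = 'begin' if i == b else ('end' if i == e - 1 else 'middle')\n        feats.append(('tobi-break', breaks[i], where))\n    return feats",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ToBI Features",

"sec_num": "4.2"

},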
|
{ |
|
"text": "We try two different ways of generating the break indices used by these features. The first is using the AuToBI system of Rosenberg (2010), a state-ofthe-art automatic ToBI prediction systems based on acoustic information which focuses particularly on detecting occurrences of 3 and 4. Second, we use the subset of Switchboard labeled with ToBI breaks (Taylor et al., 2003) to train a CRF-based ToBI predictor. This model employs both acoustic and lexical features, which are both useful for ToBI prediction despite breaks being a seemingly more acoustic phenomenon (Rosenberg, 2010) . The acoustic indicators that we use are similar to the ones described in Section 4 and our lexical features consist of a set of standard surface features similar to those used in Section 3.2.1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 352, |
|
"end": 373, |
|
"text": "(Taylor et al., 2003)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 583, |
|
"text": "(Rosenberg, 2010)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In Table 2 we see that neither source of predicted ToBI breaks does much to improve performance. In particular, the gains from using raw acoustic features are substantially greater despite the fact that the pre- Table 3 : Disfluency prediction results on the test set; our base system outperforms that of Honnibal and Johnson (2014) , a state-of-the-art system on this dataset, and incorporating prosody further improves performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 332, |
|
"text": "Honnibal and Johnson (2014)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}
|
], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "dictions were made in part using similar raw acoustic features. This is somewhat surprising, since intuitively, ToBI should be capturing information very similar to what pauses and word durations capture, particularly when it is predicted based partially on these phenomena. However, our learned ToBI predictor only gets roughly 50 F 1 on break prediction, so ToBI prediction is clearly a hard task even with sophisticated features. The fact that ToBI cannot be derived from acoustic features also indicates that it may draw on information posterior to signal processing, such as syntactic and semantic cues. Finally, pauses are also simply more prevalent in the data than ToBI markers of interest: there are roughly 40,000 pauses on the ToBI-annotated subset of the dataset, yet there are fewer than 10,000 2 or p break indices. The ToBI predictor is therefore trained to ignore information that may be relevant for disfluency detection. Table 3 shows results on the Switchboard test set. Our final system substantially outperforms the results of prior work, and we see that this is a result of both incorporating span features via a semi-CRF as well as incorporating prosodic indicators.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 939, |
|
"end": 946, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "ToBI Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The reparandum and repair are important concepts that we will refer to in this paper, but the model does not distinguish the repair from other fluent text which follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As described inHonnibal and Johnson (2014), we computed features over sentences with filler words (um and uh) and the phrases I mean and you know removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We created our development set by randomly sampling documents from the training set. Compared to the development set ofJohnson and Charniak (2004), this more closely matches the disfluency distribution of the corpus: their development set has 0.53 disfluent tokens per sentence, while our set has 0.38 per sentence, and the training set has 0.37 per sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that this averages over multiple speakers as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was partially supported by BBN under DARPA contract HR0011-12-C-0014 and by a Facebook Fellowship for the second author. Thanks to the anonymous reviewers for their helpful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Edit Detection and Parsing for Transcribed Speech", |
|
"authors": [ |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the North American Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eugene Charniak and Mark Johnson. 2001. Edit Detec- tion and Parsing for Transcribed Speech. In Proceed- ings of the North American Chapter of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Prosody in the Comprehension of Spoken Language: A Literature Review", |
|
"authors": [ |
|
{ |
|
"first": "Anne", |
|
"middle": [], |
|
"last": "Cutler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delphine", |
|
"middle": [], |
|
"last": "Dahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wilma", |
|
"middle": [], |
|
"last": "Van Donselaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Language and Speech", |
|
"volume": "40", |
|
"issue": "2", |
|
"pages": "141--201", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anne Cutler, Delphine Dahan, and Wilma van Donselaar. 1997. Prosody in the Comprehension of Spoken Lan- guage: A Literature Review. Language and Speech, 40(2):141-201.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Exploiting Prosody for PCFGs with Latent Annotations", |
|
"authors": [ |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Dreyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Izhak", |
|
"middle": [], |
|
"last": "Shafran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus Dreyer and Izhak Shafran. 2007. Exploiting Prosody for PCFGs with Latent Annotations. In Pro- ceedings of Interspeech.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Adaptive Subgradient Methods for Online Learning and Stochastic Optimization", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Duchi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elad", |
|
"middle": [], |
|
"last": "Hazan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2121--2159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive Subgradient Methods for Online Learning and Stochastic Optimization. Journal of Machine Learning Research, 12:2121-2159, July.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Softmax-Margin CRFs: Training Log-Linear Models with Cost Functions", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{

"first": "Noah",

"middle": [

"A."

],

"last": "Smith",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the North American Chapter for the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Gimpel and Noah A. Smith. 2010. Softmax- Margin CRFs: Training Log-Linear Models with Cost Functions. In Proceedings of the North American Chapter for the Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Detecting and Correcting Speech Repairs", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Heeman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Allen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Heeman and James Allen. 1994. Detecting and Correcting Speech Repairs. In Proceedings of the As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The tobi annotation conventions", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Beckman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Hirschberg and Mary E. Beckman. 1992. The tobi annotation conventions. Online at http://www.cs.columbia.edu/ julia/files/conv.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Joint Incremental Disfluency Detection and Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "2", |
|
"issue": "1", |
|
"pages": "131--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Mark Johnson. 2014. Joint Incre- mental Disfluency Detection and Dependency Parsing. Transactions of the Association of Computational Lin- guistics -Volume 2, Issue 1, pages 131-142.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Appropriately Handled Prosodic Breaks Help PCFG Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Zhongqiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [], |
|
"last": "Harper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the North American Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhongqiang Huang and Mary Harper. 2010. Appropri- ately Handled Prosodic Breaks Help PCFG Parsing. In Proceedings of the North American Chapter of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A TAGbased Noisy-channel Model of Speech Repairs", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Johnson and Eugene Charniak. 2004. A TAG- based Noisy-channel Model of Speech Repairs. In Proceedings of the Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Effective Use of Prosody in Parsing Conversational Speech", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Kahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Lease", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy G. Kahn, Matthew Lease, Eugene Charniak, Mark Johnson, and Mari Ostendorf. 2005. Effective Use of Prosody in Parsing Conversational Speech. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Detecting Structural Metadata with Decision Trees and Transformation-Based Learning", |
|
"authors": [ |
|
{ |
|
"first": "Joungbum", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schwarm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joungbum Kim, Sarah E Schwarm, and Mari Ostendorf. 2004. Detecting Structural Metadata with Decision Trees and Transformation-Based Learning. In Pro- ceedings of the North American Chapter of the Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Automatic Disfluency Identification in Conversational Speech Using Multiple Knowledge Sources", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of Eurospeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, Elizabeth Shriberg, and Andreas Stolcke. 2003. Automatic Disfluency Identification in Conver- sational Speech Using Multiple Knowledge Sources. In Proceedings of Eurospeech.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Enriching Speech Recognition with Automatic Detection of Sentence Boundaries and Disfluencies", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hillard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Harper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Transactions of Audio, Speech and Language Processing", |
|
"volume": "14", |
|
"issue": "5", |
|
"pages": "1526--1540", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Liu, E. Shriberg, A. Stolcke, D. Hillard, M. Osten- dorf, and M. Harper. 2006. Enriching Speech Recog- nition with Automatic Detection of Sentence Bound- aries and Disfluencies. Transactions of Audio, Speech and Language Processing, 14(5):1526-1540, Septem- ber.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Learning Accurate, Compact, and Interpretable Tree Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Barrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Thibaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Conference on Computational Linguistics and the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Leon Barrett, Romain Thibaux, and Dan Klein. 2006. Learning Accurate, Compact, and Inter- pretable Tree Annotation. In Proceedings of the Con- ference on Computational Linguistics and the Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Disfluency Detection Using Multi-step Stacked Learning", |
|
"authors": [ |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the North American Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xian Qian and Yang Liu. 2013. Disfluency Detection Using Multi-step Stacked Learning. In Proceedings of the North American Chapter of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Joint Parsing and Disfluency Detection in Linear Time", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Sadegh Rasooli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Sadegh Rasooli and Joel Tetreault. 2013. Joint Parsing and Disfluency Detection in Linear Time. In Proceedings of the Conference on Empirical Meth- ods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "AuToBI -A Tool for Automatic ToBI Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Rosenberg. 2010. AuToBI -A Tool for Au- tomatic ToBI Annotation. In Proceedings of Inter- speech.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Semi-Markov Conditional Random Fields for Information Extraction", |
|
"authors": [ |
|
{ |
|
"first": "Sunita", |
|
"middle": [], |
|
"last": "Sarawagi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunita Sarawagi and William W. Cohen. 2004. Semi- Markov Conditional Random Fields for Information Extraction. In Proceedings of Advances in Neural In- formation Processing Systems.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A Prosody-only Decision-tree Model for Disfluency Detection", |
|
"authors": [ |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Shriberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Bates", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stolcke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of Eurospeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elizabeth Shriberg, Rebecca Bates, and Andreas Stolcke. 1997. A Prosody-only Decision-tree Model for Dis- fluency Detection. In Proceedings of Eurospeech.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "ToBI: A Standard for Labeling English Prosody", |
|
"authors": [ |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Silverman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [], |
|
"last": "Beckman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Pitrelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Wightman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patti", |
|
"middle": [], |
|
"last": "Price", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janet", |
|
"middle": [], |
|
"last": "Pierrehumbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of the International Conference on Spoken Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim Silverman, Mary Beckman, John Pitrelli, Mari Os- tendorf, Colin Wightman, Patti Price, Janet Pierrehum- bert, and Julia Hirschberg. 1992. ToBI: A Standard for Labeling English Prosody. In Proceedings of the International Conference on Spoken Language Pro- cessing.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A Lexically-Driven Algorithm for Disfluency Detection", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, and Richard Schwartz. 2004. A Lexically-Driven Algorithm for Disfluency Detection. In Proceedings of the North American Chapter of the Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The penn treebank: An overview", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Taylor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Taylor, Mitchell Marcus, and Beatrice Santorini. 2003. The penn treebank: An overview.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Example of a disfluency where the speaker corrected upper school. Our model considers both transcribed text and the acoustic signal and predicts disfluencies as complete chunks using a semi-Markov conditional random field.", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Span features for semi-CRF model.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td/><td/><td colspan=\"5\">Unigrams: determine, how, you</td></tr><tr><td/><td/><td colspan=\"6\">Bigrams: (determine, how), (how, you)</td></tr><tr><td/><td/><td colspan=\"5\">POS Unigrams: VB, WRB, PRP</td></tr><tr><td/><td/><td colspan=\"6\">POS Bigrams: (VB, WRB), (WRB, PRP)</td></tr><tr><td>O</td><td>O</td><td/><td>B</td><td>I</td><td>E</td><td>O O</td><td>O</td></tr><tr><td>TO</td><td>VB</td><td colspan=\"6\">WRB PRP VBP WRB PRP VBP</td></tr><tr><td/><td/><td/><td colspan=\"2\">Distance: 3</td><td/><td/></tr><tr><td/><td colspan=\"2\">Duplicate</td><td colspan=\"4\">Word+Distance: (3, how)</td></tr><tr><td/><td/><td/><td colspan=\"4\">POS Bigram: (WRB, PRP)</td></tr><tr><td colspan=\"8\">Figure 2: Token features for CRF and semi-CRF models.</td></tr></table>", |
|
"html": null, |
|
"text": "). We take the loss function to determine how you address how you weigh\u2026", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>Prec. Rec.</td><td>F 1</td></tr><tr><td colspan=\"2\">CRF 84.0 82.1 83.0</td></tr><tr><td colspan=\"2\">Semi-CRF 88.6 81.7 85.0</td></tr><tr><td colspan=\"2\">Semi-CRF + Prosody 89.5 82.7 86.0</td></tr></table>", |
|
"html": null, |
|
"text": "Disfluency results on the development set. Adding span features on top of a CRF baseline improves performance, and including raw acoustic information gives further performance gains.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td/><td>Disfluency</td><td/></tr><tr><td>Prec.</td><td>Rec.</td><td>F 1</td></tr></table>", |
|
"html": null, |
|
"text": "Baseline 88.61 81.69 85.01 AuToBI 3, 4 88.46 81.92 85.06 CRF ToBI 88.42 81.96 85.07 Raw acoustic 89.53 82.74 86.00", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td/><td colspan=\"2\">Prec. Rec.</td><td>F 1</td></tr><tr><td>Johnson and Charniak (2004)</td><td>\u2212</td><td>\u2212</td><td>79.7</td></tr><tr><td>Qian and Liu (2013)</td><td>\u2212</td><td>\u2212</td><td>83.7</td></tr><tr><td>Honnibal and Johnson (2014)</td><td>\u2212</td><td>\u2212</td><td>84.1</td></tr><tr><td colspan=\"4\">CRF 88.7 78.8 83.4</td></tr></table>", |
|
"html": null, |
|
"text": "Semi-CRF 90.1 80.0 84.8 Semi-CRF + Prosody 90.0 81.2 85.4", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |