Benjamin Aw commited on
Commit
bb777db
1 Parent(s): c151c7d

JSON files W to Y

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Full_text_JSON/prefixW/json/W00/W00-0100.json +102 -0
  2. Full_text_JSON/prefixW/json/W00/W00-0101.json +333 -0
  3. Full_text_JSON/prefixW/json/W00/W00-0102.json +649 -0
  4. Full_text_JSON/prefixW/json/W00/W00-0103.json +824 -0
  5. Full_text_JSON/prefixW/json/W00/W00-0104.json +1196 -0
  6. Full_text_JSON/prefixW/json/W00/W00-0105.json +684 -0
  7. Full_text_JSON/prefixW/json/W00/W00-0106.json +1007 -0
  8. Full_text_JSON/prefixW/json/W00/W00-0107.json +668 -0
  9. Full_text_JSON/prefixW/json/W00/W00-0108.json +458 -0
  10. Full_text_JSON/prefixW/json/W00/W00-0109.json +542 -0
  11. Full_text_JSON/prefixW/json/W00/W00-0110.json +854 -0
  12. Full_text_JSON/prefixW/json/W00/W00-0200.json +22 -0
  13. Full_text_JSON/prefixW/json/W00/W00-0201.json +1236 -0
  14. Full_text_JSON/prefixW/json/W00/W00-0202.json +1092 -0
  15. Full_text_JSON/prefixW/json/W00/W00-0203.json +644 -0
  16. Full_text_JSON/prefixW/json/W00/W00-0204.json +656 -0
  17. Full_text_JSON/prefixW/json/W00/W00-0205.json +1260 -0
  18. Full_text_JSON/prefixW/json/W00/W00-0206.json +1022 -0
  19. Full_text_JSON/prefixW/json/W00/W00-0207.json +1109 -0
  20. Full_text_JSON/prefixW/json/W00/W00-0300.json +449 -0
  21. Full_text_JSON/prefixW/json/W00/W00-0301.json +574 -0
  22. Full_text_JSON/prefixW/json/W00/W00-0302.json +514 -0
  23. Full_text_JSON/prefixW/json/W00/W00-0303.json +745 -0
  24. Full_text_JSON/prefixW/json/W00/W00-0304.json +611 -0
  25. Full_text_JSON/prefixW/json/W00/W00-0305.json +761 -0
  26. Full_text_JSON/prefixW/json/W00/W00-0306.json +560 -0
  27. Full_text_JSON/prefixW/json/W00/W00-0307.json +136 -0
  28. Full_text_JSON/prefixW/json/W00/W00-0308.json +643 -0
  29. Full_text_JSON/prefixW/json/W00/W00-0309.json +539 -0
  30. Full_text_JSON/prefixW/json/W00/W00-0310.json +1106 -0
  31. Full_text_JSON/prefixW/json/W00/W00-0311.json +1357 -0
  32. Full_text_JSON/prefixW/json/W00/W00-0312.json +683 -0
  33. Full_text_JSON/prefixW/json/W00/W00-0400.json +90 -0
  34. Full_text_JSON/prefixW/json/W00/W00-0401.json +1228 -0
  35. Full_text_JSON/prefixW/json/W00/W00-0402.json +0 -0
  36. Full_text_JSON/prefixW/json/W00/W00-0403.json +1439 -0
  37. Full_text_JSON/prefixW/json/W00/W00-0404.json +0 -0
  38. Full_text_JSON/prefixW/json/W00/W00-0405.json +1650 -0
  39. Full_text_JSON/prefixW/json/W00/W00-0406.json +926 -0
  40. Full_text_JSON/prefixW/json/W00/W00-0407.json +996 -0
  41. Full_text_JSON/prefixW/json/W00/W00-0408.json +640 -0
  42. Full_text_JSON/prefixW/json/W00/W00-0409.json +0 -0
  43. Full_text_JSON/prefixW/json/W00/W00-0410.json +979 -0
  44. Full_text_JSON/prefixW/json/W00/W00-0500.json +22 -0
  45. Full_text_JSON/prefixW/json/W00/W00-0501.json +818 -0
  46. Full_text_JSON/prefixW/json/W00/W00-0502.json +797 -0
  47. Full_text_JSON/prefixW/json/W00/W00-0503.json +415 -0
  48. Full_text_JSON/prefixW/json/W00/W00-0504.json +1338 -0
  49. Full_text_JSON/prefixW/json/W00/W00-0505.json +973 -0
  50. Full_text_JSON/prefixW/json/W00/W00-0506.json +973 -0
Full_text_JSON/prefixW/json/W00/W00-0100.json ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0100",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:31.327932Z"
6
+ },
7
+ "title": "",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0100",
15
+ "_pdf_hash": "",
16
+ "abstract": [],
17
+ "body_text": [
18
+ {
19
+ "text": "The last decade has seen an explosion in the work done in the development of robust natural language processing systems. A common methodology used in building these systems has been to analyze a sample of the data available (either manually, or automatically for training statistical systems), build statistical/heuristical schemas based upon the analysis, and test the system on a blind sample of the data. Due to this commonly used paradigm, an important area of research that has not been given the attention it deserves is the estimation of syntactic and semantic complexity faced by these systems in the tasks they perform.",
20
+ "cite_spans": [],
21
+ "ref_spans": [],
22
+ "eq_spans": [],
23
+ "section": "",
24
+ "sec_num": null
25
+ },
26
+ {
27
+ "text": "The Workshop on Syntactic and Semantic Complexity in Natural Language Processing Systems, held on April 30th, 2000 at the Language Technology Joint Conference on Applied Natural Language Processing and the North American Chapter of the Association of Computational Linguistics (ANLP-NAACL2000) was organized around the goals of discussing, promoting, and presenting new research results regarding the question of complexity as it pertains to the syntax and semantics of natural language. In particular, the goal of the workshop was to focus on:",
28
+ "cite_spans": [],
29
+ "ref_spans": [],
30
+ "eq_spans": [],
31
+ "section": "",
32
+ "sec_num": null
33
+ },
34
+ {
35
+ "text": "\u2022 estimation of the syntactic and semantic complexity of specific NLP tasks We would like to thank all authors who showed their interest by submitting papers to the workshop. We would also like to thank the members of the program committee: Branimir Boguraev (IBM Research), J-P Chanod (Xerox, Grenoble), Shalom Lappin (Kings College, London), Aravind Joshi (University of Pennsylvania), Larry Moss (Indiana), Rohit Parikh (CUNY), and Adam Pease (Teknowledge). and domain complexity ............................................................................................................................. 28 Paola Velardi and Alessandro Cucchiarelli Analyzing the Reading Comprehension Task ............................................................................................ ",
36
+ "cite_spans": [
37
+ {
38
+ "start": 461,
39
+ "end": 787,
40
+ "text": "and domain complexity ............................................................................................................................. 28 Paola Velardi and Alessandro Cucchiarelli Analyzing the Reading Comprehension Task ............................................................................................",
41
+ "ref_id": null
42
+ }
43
+ ],
44
+ "ref_spans": [],
45
+ "eq_spans": [],
46
+ "section": "",
47
+ "sec_num": null
48
+ },
49
+ {
50
+ "text": "A Measure of Semantic Complexity for Natural Adnominal Constituents ............................................................................................................................. 59 Kyoko Kanzaki, Qing Ma, and Hitoshi Isahara ",
51
+ "cite_spans": [
52
+ {
53
+ "start": 45,
54
+ "end": 211,
55
+ "text": "Adnominal Constituents ............................................................................................................................. 59 Kyoko Kanzaki,",
56
+ "ref_id": null
57
+ },
58
+ {
59
+ "start": 212,
60
+ "end": 220,
61
+ "text": "Qing Ma,",
62
+ "ref_id": null
63
+ },
64
+ {
65
+ "start": 221,
66
+ "end": 240,
67
+ "text": "and Hitoshi Isahara",
68
+ "ref_id": null
69
+ }
70
+ ],
71
+ "ref_spans": [],
72
+ "eq_spans": [],
73
+ "section": "Arnit Bagga",
74
+ "sec_num": "35"
75
+ }
76
+ ],
77
+ "back_matter": [],
78
+ "bib_entries": {},
79
+ "ref_entries": {
80
+ "FIGREF0": {
81
+ "uris": null,
82
+ "text": "semantic complexity and world knowledge \u2022 role of syntactic and semantic complexity in system design and testing \u2022 syntactic and semantic complexity and its role in the evaluation of NLP systems \u2022 use of syntactic and semantic complexity as a performance predictor \u2022 relationship between syntactic and semantic complexity",
83
+ "type_str": "figure",
84
+ "num": null
85
+ },
86
+ "TABREF0": {
87
+ "text": "Sentences vs. Phrases: Syntactic Complexity in Multimedia Information Retrieval ................................... 1",
88
+ "html": null,
89
+ "num": null,
90
+ "content": "<table><tr><td>Sharon Flank</td></tr><tr><td>Using Long Runs as Predictors of Semantic Coherence in a Partial Document</td></tr><tr><td>Retrieval System .</td></tr></table>",
91
+ "type_str": "table"
92
+ },
93
+ "TABREF1": {
94
+ "text": "Arrangements of Examples Robert John FreemanPartially Saturated Referents as aSource of Complexity in Semantic Interpretation .............................. 51 David D. McDonaldSimilarities and Differences among Semantic Behaviors of Japanese",
95
+ "html": null,
96
+ "num": null,
97
+ "content": "<table/>",
98
+ "type_str": "table"
99
+ }
100
+ }
101
+ }
102
+ }
Full_text_JSON/prefixW/json/W00/W00-0101.json ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0101",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:32.781891Z"
6
+ },
7
+ "title": "Sentences vs. Phrases: Syntactic Complexity in Multimedia Information Retrieval",
8
+ "authors": [
9
+ {
10
+ "first": "Sharon",
11
+ "middle": [],
12
+ "last": "Flank",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "emotion, Inc",
17
+ "location": {
18
+ "addrLine": "2600 Park Tower Dr., Ste. 600",
19
+ "postCode": "22180",
20
+ "settlement": "Vienna",
21
+ "region": "VA"
22
+ }
23
+ },
24
+ "email": "sharon.flank@emotion.corn"
25
+ }
26
+ ],
27
+ "year": "",
28
+ "venue": null,
29
+ "identifiers": {},
30
+ "abstract": "In experiments on a natural language information retrieval system that retrieves images based on textual captions, we show that syntactic complexity actually aids retrieval. We compare two types of captioned images, those characterized with full sentences in English, and those characterized by lists of words and phrases. The full-sentence captions show a 15% increase in retrieval accuracy over the wordlist captions. We conclude that the syntactic complexity may be of use in fact because it decreases semantic ambiguity: the word-list captions may be syntactically simple, but they are semantically confusingly complex.",
31
+ "pdf_parse": {
32
+ "paper_id": "W00-0101",
33
+ "_pdf_hash": "",
34
+ "abstract": [
35
+ {
36
+ "text": "In experiments on a natural language information retrieval system that retrieves images based on textual captions, we show that syntactic complexity actually aids retrieval. We compare two types of captioned images, those characterized with full sentences in English, and those characterized by lists of words and phrases. The full-sentence captions show a 15% increase in retrieval accuracy over the wordlist captions. We conclude that the syntactic complexity may be of use in fact because it decreases semantic ambiguity: the word-list captions may be syntactically simple, but they are semantically confusingly complex.",
37
+ "cite_spans": [],
38
+ "ref_spans": [],
39
+ "eq_spans": [],
40
+ "section": "Abstract",
41
+ "sec_num": null
42
+ }
43
+ ],
44
+ "body_text": [
45
+ {
46
+ "text": "In this paper, we describe experiments conducted on an image retrieval system, PictureQuest, which uses text captions to characterize images. The text captions are of two types. Optimally, they consist of a prose description of the image, generally two to three sentences, with perhaps three or four additional words or phrases that describe emotional or non-literal image content, e.g.",
47
+ "cite_spans": [],
48
+ "ref_spans": [],
49
+ "eq_spans": [],
50
+ "section": "Introduction",
51
+ "sec_num": "1"
52
+ },
53
+ {
54
+ "text": "The younger girl, wearing a blue shirt, laughs and prepares to knock over the tower that the older girl has constructed The older girl, dressed in a red shirt, winces in anticipation.",
55
+ "cite_spans": [],
56
+ "ref_spans": [],
57
+ "eq_spans": [],
58
+ "section": "Two little girls play with blocks.",
59
+ "sec_num": null
60
+ },
61
+ {
62
+ "text": "Some of the captions in PictureQuest are not as well-behaved. They may contain legacy data or data shared with a keyword-retrieval system. They are optimized for exact-match retrieval, and, as such, consist of lists of words or, at best, a few short phrases mixed in with long lists of words. The same image might appear with the following caption: girl, girls, little girl little girls, block, blocks, play, playing, plays, blue, red, shirt, tower, knock; over, construct, construction, siblings, cooperation, rivalry PictureQuest relies on several natural language processing techniques to enhance retrieval accuracy. It contains a part-ofspeech tagger, morphological analyzer, noun phrase pattern matcher, semantic expansion based on WordNet, and special processing for names and locations. These have been tuned to perform most effectively on caption text of the first type, i.e. sentences. The following chart illustrates how these linguistic processes operate -or fail to operate -on syntactic units.",
63
+ "cite_spans": [
64
+ {
65
+ "start": 381,
66
+ "end": 518,
67
+ "text": "girls, block, blocks, play, playing, plays, blue, red, shirt, tower, knock; over, construct, construction, siblings, cooperation, rivalry",
68
+ "ref_id": null
69
+ }
70
+ ],
71
+ "ref_spans": [],
72
+ "eq_spans": [],
73
+ "section": "Siblings, cooperation, rivalry",
74
+ "sec_num": null
75
+ },
76
+ {
77
+ "text": "Tagger MacDonald 1997; Rosen 1974; Bar-Hillel et al. 1967) . Fortunately, for the purposes of our investigation, we are dealing with broad distinctions that would foster agreement even among those with different definitions of complexity. For the captioned data, in one case, the data are in full sentences. The average sentence length is approximately ten words, and the average number of sentences is between two and three. In the other case, the data are either in lists of single words, or in lists of single words with a few two-word or three-word phrases included, but with no sentences whatsoever. Regardless of the exact measure of syntactic complexity used, it is clear that sentences are syntactically more complex than word lists or even phrase lists.",
78
+ "cite_spans": [
79
+ {
80
+ "start": 7,
81
+ "end": 22,
82
+ "text": "MacDonald 1997;",
83
+ "ref_id": null
84
+ },
85
+ {
86
+ "start": 23,
87
+ "end": 34,
88
+ "text": "Rosen 1974;",
89
+ "ref_id": "BIBREF3"
90
+ },
91
+ {
92
+ "start": 35,
93
+ "end": 58,
94
+ "text": "Bar-Hillel et al. 1967)",
95
+ "ref_id": "BIBREF0"
96
+ }
97
+ ],
98
+ "ref_spans": [],
99
+ "eq_spans": [],
100
+ "section": "Siblings, cooperation, rivalry",
101
+ "sec_num": null
102
+ },
103
+ {
104
+ "text": "dog-N herding-V sheep- N dog-N,V; herding-N,V",
105
+ "cite_spans": [],
106
+ "ref_spans": [],
107
+ "eq_spans": [],
108
+ "section": "Siblings, cooperation, rivalry",
109
+ "sec_num": null
110
+ },
111
+ {
112
+ "text": "The standard query length for Web applications is between two and three words, and our experience with PictureQuest confirms that observation. In comparisons with other text-based image retrieval applications, including keyword systems, query complexity is important: one-word queries work equally well on keyword systems and on linguistically-enhanced natural language processing systems. The difference comes with longer queries, and in particular with syntactic phrases. Nonetheless, we would argue that they are syntactically more complex when intact. The PictureQuest system uses a WordNetbased semantic net to expand the caption data.",
113
+ "cite_spans": [],
114
+ "ref_spans": [],
115
+ "eq_spans": [],
116
+ "section": "Query Complexity",
117
+ "sec_num": "2.2"
118
+ },
119
+ {
120
+ "text": "To some extent, the syntactic measures (part-of-speech tagging, noun phrase pattern matching, name and location identification) serve to constrain the semantic expansion, since they eliminate some possible semantic expansions based on syntactic factors. One could interpret the word-list captions, then, not as syntactically less complex, but rather as semantically less constrained, therefore more ambiguous and thus more complex.",
121
+ "cite_spans": [],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "Query Complexity",
125
+ "sec_num": "2.2"
126
+ },
127
+ {
128
+ "text": "This view would, perhaps, restore the more intuitive notion that complexity should lead to worse rather than better results.",
129
+ "cite_spans": [],
130
+ "ref_spans": [],
131
+ "eq_spans": [],
132
+ "section": "Query Complexity",
133
+ "sec_num": "2.2"
134
+ },
135
+ {
136
+ "text": "While the sentence captions are syntactically more complex, by almost any measure, they contain more information than the legacy word list captions. Specifically, the part-ofspeech tagger and the noun phrase pattern matcher are essentially useless with the word lists, since they rely on syntactic patterns that are not present. We therefore hypothesized that our retrieval accuracy would be lower with the legacy word list captions than with the sentence captions.",
137
+ "cite_spans": [],
138
+ "ref_spans": [],
139
+ "eq_spans": [],
140
+ "section": "Experiments",
141
+ "sec_num": "3"
142
+ },
143
+ {
144
+ "text": "We performed two sets of experiments, one with legacy word list captions and the other with sentence captions. Fortunately, the corpus can be easily divided, since it is possible to select image providers with either full sentence or word list captions, and limit the search to those providers. In order to ensure that we did not introduce a bias because of the quality of captioning for a particular provider, we aggregated scores from at least three providers in each test.",
145
+ "cite_spans": [],
146
+ "ref_spans": [],
147
+ "eq_spans": [],
148
+ "section": "Experiments",
149
+ "sec_num": "3"
150
+ },
151
+ {
152
+ "text": "Because the collection is large and live, and includes ranked results, we selected a modified version of precision at 20 rather than a manual gold standard precision/recall test. We chose this evaluation path for the following reasons:",
153
+ "cite_spans": [],
154
+ "ref_spans": [],
155
+ "eq_spans": [],
156
+ "section": "Experiments",
157
+ "sec_num": "3"
158
+ },
159
+ {
160
+ "text": "\u2022 Ranking image relevance was difficult for humans \u2022 The collection was large and live, i.e. changing daily \u2022 The modified measure more accurately reflected user evaluations",
161
+ "cite_spans": [],
162
+ "ref_spans": [],
163
+ "eq_spans": [],
164
+ "section": "Experiments",
165
+ "sec_num": "3"
166
+ },
167
+ {
168
+ "text": "We performed experiments initially with manual ranking, and found that it was impossible to get reliable cross-coder judgements for ranked results. That is, we could get humans to assess whether an image should or should not have been included, but the rankings did not yield agreement. Complicating the problem was the fact that we had a large collection (400,000+ images), and creating a test subset meant that most queries would generate almost no relevant results. Finally, we wanted to focus more on precision than on recall, because our work with users had made it clear that precision was far more important in this application.",
169
+ "cite_spans": [],
170
+ "ref_spans": [],
171
+ "eq_spans": [],
172
+ "section": "Experiments",
173
+ "sec_num": "3"
174
+ },
175
+ {
176
+ "text": "To evaluate precision at 20 for this collection, we used the crossing measure introduced in Flank 1998. The crossing measure (in which any image ranked above another, better-matching image counts as an error) is both finer-grained and better suited to a ranking application in which user evaluations are not binary. We calibrated the crossing measure (on a subset of the queries) as follows: That is, we calculated the precision \"for all terms\" as a binary measure with respect to a query, and scored an error if any terms in the query were not matched. For the \"any term\" precision measure, we scored an error only if the image failed to match any term in the query in such a way that a user would consider it a partial match.",
177
+ "cite_spans": [],
178
+ "ref_spans": [],
179
+ "eq_spans": [],
180
+ "section": "Experiments",
181
+ "sec_num": "3"
182
+ },
183
+ {
184
+ "text": "Thus, for example, for an \"all terms\" match, tall glass of beer succeeded only when the images showed (and captions mentioned) all three terms tall, glass, and beer, or their synonyms. For an \"any-term\" match, tall or glass or beer or a direct synonym would need to be present (but not, say, glasses).",
185
+ "cite_spans": [],
186
+ "ref_spans": [],
187
+ "eq_spans": [],
188
+ "section": "Experiments",
189
+ "sec_num": "3"
190
+ },
191
+ {
192
+ "text": "(For two of the test queries, fewer than 20 images were retrieved, so the measure is, more precisely, R-precision: precision at the number of documents retrieved or at 20 or 5, whichever is less.",
193
+ "cite_spans": [],
194
+ "ref_spans": [],
195
+ "eq_spans": [],
196
+ "section": "Experiments",
197
+ "sec_num": "3"
198
+ },
199
+ {
200
+ "text": "We found a statistically significant difference in retrieval quality between the syntactically simple word list captions and the syntactically complex sentence captions. The word list captions scored 74.6% on our crossing measure, while the sentence captions scored 89.5%.",
201
+ "cite_spans": [],
202
+ "ref_spans": [],
203
+ "eq_spans": [],
204
+ "section": "Results",
205
+ "sec_num": "4"
206
+ },
207
+ {
208
+ "text": "We performed one test comparing one-word and two-word queries on sentence versus word list captions. The sentence captions showed little difference: 82.7% on the oneword queries, and 80% on the two-word queries. The word-list captions, however, were dramatically worse on two-word queries (70.5%) than on one-word queries (89.7%).",
209
+ "cite_spans": [],
210
+ "ref_spans": [],
211
+ "eq_spans": [],
212
+ "section": "Results",
213
+ "sec_num": "4"
214
+ },
215
+ {
216
+ "text": "Overall 74.6%",
217
+ "cite_spans": [],
218
+ "ref_spans": [],
219
+ "eq_spans": [],
220
+ "section": "Results",
221
+ "sec_num": "4"
222
+ },
223
+ {
224
+ "text": "89.5% 1-word 89.7% 82.7% 2-word 7015% 80%",
225
+ "cite_spans": [],
226
+ "ref_spans": [],
227
+ "eq_spans": [],
228
+ "section": "Results",
229
+ "sec_num": "4"
230
+ },
231
+ {
232
+ "text": "Our experiments indicate that, in an information retrieval system tuned to recognize and reward matches using syntactic information, syntactic complexity yields better results than syntactically mixed-up \"word salad.\" One can interpret these results from a semantic complexity standpoint, since the syntactically simple captions all include considerably more semantic ambiguity, unconstrained as they are from a syntactic standpoint. This observation leads us to an additional conclusion about the relationship between syntactic and semantic complexity: in this instance, at least, the relationship is inverse rather than direct. The word-list captions are syntactically simple but, as a result, since syntactic factors are not available to limit ambiguity, semantically more complex than the same information presented in a more syntactically complex fashion, i.e. in sentences.",
233
+ "cite_spans": [],
234
+ "ref_spans": [],
235
+ "eq_spans": [],
236
+ "section": "Conclusion",
237
+ "sec_num": "5"
238
+ }
239
+ ],
240
+ "back_matter": [],
241
+ "bib_entries": {
242
+ "BIBREF0": {
243
+ "ref_id": "b0",
244
+ "title": "Measures of Syntactic Complexity",
245
+ "authors": [
246
+ {
247
+ "first": "Y",
248
+ "middle": [],
249
+ "last": "Bar-Hillel",
250
+ "suffix": ""
251
+ },
252
+ {
253
+ "first": "A",
254
+ "middle": [],
255
+ "last": "Kasher",
256
+ "suffix": ""
257
+ },
258
+ {
259
+ "first": "E",
260
+ "middle": [],
261
+ "last": "Shamir",
262
+ "suffix": ""
263
+ }
264
+ ],
265
+ "year": 1967,
266
+ "venue": "",
267
+ "volume": "",
268
+ "issue": "",
269
+ "pages": "29--50",
270
+ "other_ids": {},
271
+ "num": null,
272
+ "urls": [],
273
+ "raw_text": "Bar-Hillel, Y., A. Kasher and E. Shamir 1967. \"Measures of Syntactic Complexity,\" in Machine Translation, A.D. Booth, ed. Amsterdam: North- Holland, pp. 29-50.",
274
+ "links": null
275
+ },
276
+ "BIBREF1": {
277
+ "ref_id": "b1",
278
+ "title": "A Layered Approach to NLP-Based Information Retrieval",
279
+ "authors": [
280
+ {
281
+ "first": "Sharon",
282
+ "middle": [],
283
+ "last": "Flank",
284
+ "suffix": ""
285
+ }
286
+ ],
287
+ "year": 1998,
288
+ "venue": "Proceedings of COLING-ACL, 36th Annual Meeting of the Association for Computational Linguistics",
289
+ "volume": "",
290
+ "issue": "",
291
+ "pages": "10--14",
292
+ "other_ids": {},
293
+ "num": null,
294
+ "urls": [],
295
+ "raw_text": "Flank, Sharon, 1998. \"A Layered Approach to NLP- Based Information Retrieval,\" in Proceedings of COLING-ACL, 36th Annual Meeting of the Association for Computational Linguistics, Montreal, Canada, 10-14 August 1998.",
296
+ "links": null
297
+ },
298
+ "BIBREF3": {
299
+ "ref_id": "b3",
300
+ "title": "Syntactic Complexity",
301
+ "authors": [
302
+ {
303
+ "first": "B",
304
+ "middle": [
305
+ "K"
306
+ ],
307
+ "last": "Rosen",
308
+ "suffix": ""
309
+ }
310
+ ],
311
+ "year": 1974,
312
+ "venue": "",
313
+ "volume": "",
314
+ "issue": "",
315
+ "pages": "305--335",
316
+ "other_ids": {},
317
+ "num": null,
318
+ "urls": [],
319
+ "raw_text": "Rosen, B.K. 1974. \"Syntactic Complexity,\" in Information and Control 24, pp. 305-335.",
320
+ "links": null
321
+ }
322
+ },
323
+ "ref_entries": {
324
+ "TABREF1": {
325
+ "type_str": "table",
326
+ "text": "word, e.g.rubber baby buggy bumpers is more semantically complex than a simple list of nouns and attributes, since there are various modification ambiguities in the longer version that are not present once it has been reduced to rubber, baby buggy, bumpers (or rubber, baby, buggy, bumpers, for that matter).",
327
+ "num": null,
328
+ "html": null,
329
+ "content": "<table><tr><td/><td>Both are equally good</td><td/></tr><tr><td>zlephant</td><td/><td/></tr><tr><td>Boolean, e.g. rhino</td><td>Both are equally good,</td><td/></tr><tr><td>9r rhinoceros</td><td>assuming they both</td><td/></tr><tr><td/><td>recognize the meaning of</td><td/></tr><tr><td/><td>the Boolean operator</td><td/></tr><tr><td>NP V-ing NP, e.g.</td><td>NLIR shows some</td><td/></tr><tr><td>girl leading a horse</td><td>improvement</td><td/></tr><tr><td>noun phrase, e.g.</td><td>NLIR shows major</td><td/></tr><tr><td>black woman in a</td><td>improvement; keyword</td><td/></tr><tr><td>white hat</td><td>retrieval scrambles</td><td/></tr><tr><td/><td>modifiers randomly</td><td/></tr><tr><td colspan=\"2\">2.3 Semantic Complexity</td><td/></tr><tr><td colspan=\"2\">Semantic complexity is more difficult to</td><td/></tr><tr><td colspan=\"2\">evaluate, but we can make certain</td><td/></tr><tr><td colspan=\"2\">observations. Leaving noun phrases intact</td><td/></tr><tr><td colspan=\"2\">makes a text more semantically complex</td><td/></tr><tr><td colspan=\"2\">than deconstructing those noun phrases:</td><td/></tr><tr><td colspan=\"2\">As for the names of people and locations,</td><td/></tr><tr><td colspan=\"2\">one could argue that the intact syntactic</td><td/></tr><tr><td colspan=\"2\">units (AI Gore; George Bush; Arlington,</td><td/></tr><tr><td colspan=\"2\">Virginia; New England) are semantically</td><td/><td>(Boolean</td></tr><tr><td colspan=\"2\">simpler, since they resolve ambiguity and</td><td colspan=\"2\">three-word queries, e.g. A and B; A or B, do</td></tr><tr><td colspan=\"2\">eliminate the spurious readings gore, bush,</td><td>not show much difference.)</td><td>The more</td></tr><tr><td colspan=\"2\">Arlington [Massachusetts], new England.</td><td colspan=\"2\">complex queries (and, in fact, the queries</td></tr><tr><td/><td/><td colspan=\"2\">that show PictureQuest off to best</td></tr><tr><td/><td/><td colspan=\"2\">advantage) consist either of a noun phrase or</td></tr><tr><td/><td/><td colspan=\"2\">are of the form NP V-ing NP. The table</td></tr><tr><td/><td/><td colspan=\"2\">below summarizes the differences in query</td></tr><tr><td/><td/><td colspan=\"2\">complexity for natural language information</td></tr><tr><td/><td/><td colspan=\"2\">retrieval as compared to keyword-only</td></tr><tr><td/><td/><td>information retrieval.</td></tr></table>"
330
+ }
331
+ }
332
+ }
333
+ }
Full_text_JSON/prefixW/json/W00/W00-0102.json ADDED
@@ -0,0 +1,649 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0102",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:31.898778Z"
6
+ },
7
+ "title": "Using Long Runs as Predictors of Semantic Coherence in a Partial Document Retrieval System",
8
+ "authors": [
9
+ {
10
+ "first": "Hyopil",
11
+ "middle": [],
12
+ "last": "Shin",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "Computing Research Laboratory",
16
+ "institution": "NMSU",
17
+ "location": {
18
+ "postBox": "PO Box 30001",
19
+ "postCode": "88003",
20
+ "settlement": "Las Cruces",
21
+ "region": "NM"
22
+ }
23
+ },
24
+ "email": "hshin@crl.nmsu.edu"
25
+ },
26
+ {
27
+ "first": "Jerrold",
28
+ "middle": [
29
+ "F"
30
+ ],
31
+ "last": "Stach",
32
+ "suffix": "",
33
+ "affiliation": {
34
+ "laboratory": "",
35
+ "institution": "UMKC",
36
+ "location": {
37
+ "addrLine": "5100 Rockhill Road Kansas City",
38
+ "postCode": "64110",
39
+ "region": "MO"
40
+ }
41
+ },
42
+ "email": "stach@cstp.umkc.edu"
43
+ }
44
+ ],
45
+ "year": "",
46
+ "venue": null,
47
+ "identifiers": {},
48
+ "abstract": "We propose a method for dealing with semantic complexities occurring in information retrieval systems on the basis of linguistic observations. Our method follows from an analysis indicating that long runs of content words appear in a stopped document cluster, and our observation that these long runs predominately originate from the prepositional phrase and subject complement positions and as such, may be useful predictors of semantic coherence. From this linguistic basis, we test three statistical hypotheses over a small collection of documents from different genre. By coordinating thesaurus semantic categories (SEMCATs) of the long run words to the semantic categories of paragraphs, we conclude that for paragraphs containing both long runs and short runs, the SEMCAT weight of long runs of content words is a strong predictor of the semantic coherence of the paragraph.",
49
+ "pdf_parse": {
50
+ "paper_id": "W00-0102",
51
+ "_pdf_hash": "",
52
+ "abstract": [
53
+ {
54
+ "text": "We propose a method for dealing with semantic complexities occurring in information retrieval systems on the basis of linguistic observations. Our method follows from an analysis indicating that long runs of content words appear in a stopped document cluster, and our observation that these long runs predominately originate from the prepositional phrase and subject complement positions and as such, may be useful predictors of semantic coherence. From this linguistic basis, we test three statistical hypotheses over a small collection of documents from different genre. By coordinating thesaurus semantic categories (SEMCATs) of the long run words to the semantic categories of paragraphs, we conclude that for paragraphs containing both long runs and short runs, the SEMCAT weight of long runs of content words is a strong predictor of the semantic coherence of the paragraph.",
55
+ "cite_spans": [],
56
+ "ref_spans": [],
57
+ "eq_spans": [],
58
+ "section": "Abstract",
59
+ "sec_num": null
60
+ }
61
+ ],
62
+ "body_text": [
63
+ {
64
+ "text": "One of the fundamental deficiencies of current information retrieval methods is that the words searchers use to construct terms often are not the same as those by which the searched information has been indexed. There are two components to this problem, synonymy and polysemy (Deerwester et. al., 1990) . By definition of polysemy, a document containing the search terms or indexed with the search terms is not necessarily relevant. Polysemy contributes heavily to poor precision. Attempts to deal with the synonymy problem have relied on intellectual or automatic term expansion, or the construction of a thesaurus.",
65
+ "cite_spans": [
66
+ {
67
+ "start": 276,
68
+ "end": 302,
69
+ "text": "(Deerwester et. al., 1990)",
70
+ "ref_id": "BIBREF1"
71
+ }
72
+ ],
73
+ "ref_spans": [],
74
+ "eq_spans": [],
75
+ "section": "Introduction",
76
+ "sec_num": null
77
+ },
78
+ {
79
+ "text": "Also the ambiguity of natural language causes semantic complexities that result in poor precision. Since queries are mostly formulated as words or phrases in a language, and the expressions of a language are ambiguous in many cases, the system must have ways to disambiguate the query.",
80
+ "cite_spans": [],
81
+ "ref_spans": [],
82
+ "eq_spans": [],
83
+ "section": "Introduction",
84
+ "sec_num": null
85
+ },
86
+ {
87
+ "text": "In order to resolve semantic complexities in information retrieval systems, we designed a method to incorporate semantic information into current IR systems. Our method (1) adopts widely used Semantic Information or Categories, (2) calculates Semantic Weight based on probability, and (3) (for the purpose of verifying the method) performs partial text retrieval based upon Semantic Weight or Coherence to overcome cognitive overload of the human agent. We make two basic assumptions: 1. Matching search terms to semantic categories should improve retrieval precision. 2. Long runs of content words have a linguistic basis for Semantic Weight and can also be verified statistically.",
88
+ "cite_spans": [],
89
+ "ref_spans": [],
90
+ "eq_spans": [],
91
+ "section": "Introduction",
92
+ "sec_num": null
93
+ },
94
+ {
95
+ "text": "There have been several attempts to deal with complexity using semantic information. These methods are hampered by the lack of dictionaries containing proper semantic categories for classifying text. Semantic methods designed by Boyd et. al. (1994) and Wendlandt et. al. (1991) demonstrate only simple examples and are restricted to small numbers of words. In order to overcome this deficiency, we propose to incorporate the structural information of the thesaurus, semantic categories (SEMCATs). However, we must also incorporate semantic categories into current IR systems in a compatible manner. The problem we deal with is partial text retrieval when all the terms of the traditional vector equations are not known. This is the case when retrieval is associated with a near real time filter, or when the size or number of documents in a corpus is unknown. In such cases we can retrieve only partial text, a paragraph or page. But since there is no document wide or corpus wide statistics, it is difficult to judge whether or not the text fragment is relevant. The method we employ in this paper identifies semantic \"hot spots\" in partial text. These \"hot spots\" are loci of semantic coherence in a paragraph of text. Such paragraphs are likely to convey the central ideas of the document.",
96
+ "cite_spans": [
97
+ {
98
+ "start": 229,
99
+ "end": 248,
100
+ "text": "Boyd et. al. (1994)",
101
+ "ref_id": "BIBREF0"
102
+ },
103
+ {
104
+ "start": 253,
105
+ "end": 277,
106
+ "text": "Wendlandt et. al. (1991)",
107
+ "ref_id": null
108
+ }
109
+ ],
110
+ "ref_spans": [],
111
+ "eq_spans": [],
112
+ "section": "A Brief Overview of Previous Approaches",
113
+ "sec_num": "1"
114
+ },
115
+ {
116
+ "text": "We also deal with the computational aspects of partial text retrieval. We use a simple stop/stem method to expose long runs of context words that are evaluated relative to the search terms. Our goal is not to retrieve a highly relevant sentence, but rather to retrieve a portion of text that is semantically coherent with respect to the search terms. This locale can be returned to the searcher for evaluation and if it is relevant, the search terms can be refined. This approach is compatible with Latent Semantic Indexing (LSI) for partial text retrieval when the terms of the vector space are not known. LSI is based on a vector space information retrieval method that has demonstrated improved performance over the traditional vector space techniques. So when incorporating semantic information, it is necessary to adopt existing mathematical methods including probabilistic methods and statistical methods.",
117
+ "cite_spans": [],
118
+ "ref_spans": [],
119
+ "eq_spans": [],
120
+ "section": "A Brief Overview of Previous Approaches",
121
+ "sec_num": "1"
122
+ },
123
+ {
124
+ "text": "Theoretical Background",
125
+ "cite_spans": [],
126
+ "ref_spans": [],
127
+ "eq_spans": [],
128
+ "section": "2",
129
+ "sec_num": null
130
+ },
131
+ {
132
+ "text": "Partial Information Retrieval has to with detection of main ideas. Main ideas are topic sentences that have central meaning to the text. Our method of detecting main idea paragraphs extends from Jang (1997) who observed that after stemming and stopping a document, long runs of cor/tent words cluster. Content word runs are a sequence of content words with a function word(s) prefix and suffix. These runs can be weighted for density in a stopped document and vector processed. We observed that these long content word runs generally originate from the prepositional phrase and subject complement positions, providing a linguistic basis for a dense neighbourhood of long runs of content words signalling a semantic locus of the writing. We suppose that these neighbourhoods may contain main ideas of the text. In order to verify this, we designed a methodology to incorporate semantic features into information retrieval and examined long runs of content words as a semantic predictor.",
133
+ "cite_spans": [],
134
+ "ref_spans": [],
135
+ "eq_spans": [],
136
+ "section": "Long Runs",
137
+ "sec_num": "2.1"
138
+ },
139
+ {
140
+ "text": "We examined all the long runs of the Jang (1997) collection and discovered most of them originate from the prepositional phrase and subject complement positions. According to Halliday (1985) , a preposition is explained as a minor verb. It functions as a minor Predicator having a nominal group as its complement. Thus the internal structure of 'across the lake' is like that of 'crossing the lake', with a non-finite verb as Predicator (thus our choice of _> 3 words as a long run).",
141
+ "cite_spans": [
142
+ {
143
+ "start": 175,
144
+ "end": 190,
145
+ "text": "Halliday (1985)",
146
+ "ref_id": null
147
+ }
148
+ ],
149
+ "ref_spans": [],
150
+ "eq_spans": [],
151
+ "section": "Long Runs",
152
+ "sec_num": "2.1"
153
+ },
154
+ {
155
+ "text": "When we interpret the preposition as a \"minor Predicator\" and \"minor Process\", we are interpreting the prepositional phrase as a kind of minor clause. That is, prepositional phrases function as a clause and their role is predication.",
156
+ "cite_spans": [],
157
+ "ref_spans": [],
158
+ "eq_spans": [],
159
+ "section": "Long Runs",
160
+ "sec_num": "2.1"
161
+ },
162
+ {
163
+ "text": "Traditionally, predication is what a statement says about its subject. A named predication corresponds to an externally defined function, namely what the speaker intends to say his or her subject, i.e. their referent. If long runs largely appear in predication positions, it would suggest that the speaker is saying something important and the longer runs of content words would signal a locus of the speaker's intention.",
164
+ "cite_spans": [],
165
+ "ref_spans": [],
166
+ "eq_spans": [],
167
+ "section": "Long Runs",
168
+ "sec_num": "2.1"
169
+ },
170
+ {
171
+ "text": "Extending from the statistical analysis of Jang (1997) and our observations of those long runs in the collection, we give a basic assumption of our study:",
172
+ "cite_spans": [],
173
+ "ref_spans": [],
174
+ "eq_spans": [],
175
+ "section": "Long Runs",
176
+ "sec_num": "2.1"
177
+ },
178
+ {
179
+ "text": "Long runs of content words contain significant semantic information that a speaker wants to express and focus, and thus are semantic indicators or loci or main ideas.",
180
+ "cite_spans": [],
181
+ "ref_spans": [],
182
+ "eq_spans": [],
183
+ "section": "Long Runs",
184
+ "sec_num": "2.1"
185
+ },
186
+ {
187
+ "text": "In this paper, we examine the SEMCAT values of long and short runs, extracted from a random document of the collection in Jang (1997), to determine if the SEMCAT weights of long runs of content words are semantic predictors.",
188
+ "cite_spans": [],
189
+ "ref_spans": [],
190
+ "eq_spans": [],
191
+ "section": "Long Runs",
192
+ "sec_num": "2.1"
193
+ },
194
+ {
195
+ "text": "We adopted Roget's Thesaurus for our basic semantic categories (SEMCATs). We extracted the semantic categories from the online Thesaurus for convenience. We employ the 39 intermediate categories as basic semantic information, since the 6 main categories are too general, and the many sub-categories are too narrow to be taken into account. We refer to these 39 categories as SEMCATs. ",
196
+ "cite_spans": [],
197
+ "ref_spans": [],
198
+ "eq_spans": [],
199
+ "section": "SEMCATs",
200
+ "sec_num": "2.2"
201
+ },
202
+ {
203
+ "text": "Many of the most frequently occurring words in English, such as \"the,\" \"of, .... and,\" \"to,\" etc. are non-discriminators with respect to information filtering. Since many of these function words make up a large fraction of the text of most documents, their early elimination in the indexing process speeds processing, saves significant amounts of index space and does not compromise the filtering process. In the Brown Corpus, the frequency of stop words is 551,057 out of 1,013,644 total words. Function words therefore account for about 54.5% of the tokens in a document.",
204
+ "cite_spans": [],
205
+ "ref_spans": [],
206
+ "eq_spans": [],
207
+ "section": "Indexing Space and Stop Lists",
208
+ "sec_num": "2.3"
209
+ },
210
+ {
211
+ "text": "The Brown Corpus is useful in text retrieval because it is small and efficiently exposes content word runs. Furthermore, minimizing the document token size is very important in NLPbased methods, because NLP-based methods usually need much larger indexing spaces than statistical-based methods due to processes for tagging and parsing.",
212
+ "cite_spans": [],
213
+ "ref_spans": [],
214
+ "eq_spans": [],
215
+ "section": "Indexing Space and Stop Lists",
216
+ "sec_num": "2.3"
217
+ },
218
+ {
219
+ "text": "In order to verify that long runs contribute to resolve semantic complexities and can be used as predictors of semantic intent, we employed a probabilistic, vector processing methodology.",
220
+ "cite_spans": [],
221
+ "ref_spans": [],
222
+ "eq_spans": [],
223
+ "section": "Experimental Basis",
224
+ "sec_num": "3"
225
+ },
226
+ {
227
+ "text": "In order to understand the calculation of SEMCATs, it is helpful to look at the structure of a preprocessed document. One document \"Barbie\" in the Jang (1997) collection has a total of 1,468 words comprised of 755 content words and 713 function words. The document has 17 paragraphs. Filtering out function words using the Brown Corpus exposed the runs of content words as shown in Figure 1 . The traditional vector processing model requires the following set of terms:",
228
+ "cite_spans": [],
229
+ "ref_spans": [
230
+ {
231
+ "start": 382,
232
+ "end": 390,
233
+ "text": "Figure 1",
234
+ "ref_id": null
235
+ }
236
+ ],
237
+ "eq_spans": [],
238
+ "section": "Revised Probability and Vector Processing",
239
+ "sec_num": "3.1"
240
+ },
241
+ {
242
+ "text": "\u2022 (dO the number of documents in the collection that each word occurs in \u2022 (idf) the inverse document frequency of each word determined by logl0(N/df) where N is the total number of documents. If a word appears in a query but not in a document, its idf is undefined. \u2022 The category probability of each query word. Wendlandt (1991) points out that it is useful to retrieve a set of documents based upon key words only, and then considers only those documents for semantic category and attribute analysis. Wendlandt (1991) appends the s category weights to the t term weights of each document vector Di and the Query vector Q.",
243
+ "cite_spans": [],
244
+ "ref_spans": [],
245
+ "eq_spans": [],
246
+ "section": "Revised Probability and Vector Processing",
247
+ "sec_num": "3.1"
248
+ },
249
+ {
250
+ "text": "Since our basic query unit is a paragraph, document frequencY (df) and inverse document frequency (idf) have to be redefined. As we pointed out in Section 1, all terms are not known in partial text retrieval. Further, our approach is based on semantic weight rather than word frequency. Therefore any frequency based measures defined by Boyd et al. (1994) and Wendlandt (1991) need to be built from the probabilities of individual semantic categories. Those modifications are described below. As a simplifying assumption, we assume SEMCATs have a uniform probability distribution with regard to a word.",
251
+ "cite_spans": [
252
+ {
253
+ "start": 337,
254
+ "end": 355,
255
+ "text": "Boyd et al. (1994)",
256
+ "ref_id": "BIBREF0"
257
+ }
258
+ ],
259
+ "ref_spans": [],
260
+ "eq_spans": [],
261
+ "section": "Revised Probability and Vector Processing",
262
+ "sec_num": "3.1"
263
+ },
264
+ {
265
+ "text": "Our first task in computing SEMCAT values was to create a SEMCAT dictionary for our method. We extracted SEMCATs for every word from the World Wide Web version of Roget's thesaurus. SEMCATs give probabilities of a word corresponding to a semantic category. The content word run 'favorite companion detractors love' is of length 4. Each word of the run maps to at least one SEMCAT. The word 'favorite' maps to categories 'PEAF and SYAF'. 'companion' maps to categories 'ANT, MECO, NUM, ORD, ORGM, PEAF, PRVO, QUAN, and SYAF'. 'detractor' maps to 'MOAF'. 'love' maps to 'AFIG, ANT, MECO, MOAF, MOCO, ORGM, PEAF, PORE, PRVO, SYAF, and VOIG'. We treat the long runs as a semantic core from which to calculate SEMCAT values. SEMCAT weights are calculated based on the following equations.",
266
+ "cite_spans": [],
267
+ "ref_spans": [],
268
+ "eq_spans": [],
269
+ "section": "Calculating SEMCATs",
270
+ "sec_num": "3.2"
271
+ },
272
+ {
273
+ "text": "Eq.1 Pjk(Probability) -The likelihood of SEMCAT Sj occurring due to the K th trigger. For example, assuming a uniform probability distribution, the category PEAF triggered by the word favorite above, has the following probability: PPEAF, favorite ----0.5(1/2) Eq.2 Swj (SEMCAT Weights in Long runs)",
274
+ "cite_spans": [],
275
+ "ref_spans": [],
276
+ "eq_spans": [],
277
+ "section": "Calculating SEMCATs",
278
+ "sec_num": "3.2"
279
+ },
280
+ {
281
+ "text": "is the sum of each SEMCAT(j) weight of long runs based on their probabilities. In the above example, the long run 'favorite companion detractors love,' ihe SEMCAT 'MOAF' has SWMoAv : (detractor(l) + love(.09)) = 1.09. We can write;",
282
+ "cite_spans": [],
283
+ "ref_spans": [],
284
+ "eq_spans": [],
285
+ "section": "Calculating SEMCATs",
286
+ "sec_num": "3.2"
287
+ },
288
+ {
289
+ "text": "Swj= \u00a3 PO i=1",
290
+ "cite_spans": [],
291
+ "ref_spans": [],
292
+ "eq_spans": [],
293
+ "section": "Calculating SEMCATs",
294
+ "sec_num": "3.2"
295
+ },
296
+ {
297
+ "text": "Eq.3 edwj (Expected data weights in a paragraph) -Given a set of N content words (data) in a paragraph, the expected weight of the SEMCATs of long runs in a paragraph is: Our method performs the following steps: 1. calculate the SEMCAT weight of each long content word run in every paragraph (Sw) 2. calculate the expected data weight of each paragraph (edw) 3. calculate the inverse expected data weight of each paragraph (idw) 4. calculate the actual weight of each paragraph (Swxidw) 5. calculate coherence weights (total relevance) by summing the weights of (Swxidw).",
298
+ "cite_spans": [],
299
+ "ref_spans": [],
300
+ "eq_spans": [],
301
+ "section": "Calculating SEMCATs",
302
+ "sec_num": "3.2"
303
+ },
304
+ {
305
+ "text": "N edwj = E Po i=1",
306
+ "cite_spans": [],
307
+ "ref_spans": [],
308
+ "eq_spans": [],
309
+ "section": "Calculating SEMCATs",
310
+ "sec_num": "3.2"
311
+ },
312
+ {
313
+ "text": "In every paragraph, extraction of SEMCATs from long runs is done first. The next step is finding the same SEMCATs of long runs through every word in a paragraph (expected data weight), then calculate idw, and finally Sw\u00d7idw. The final, total relevance weights are an accumulation of all weights of SEMCATs of content words in a paragraph. Total relevance tells how many SEMCATs of the Query's long runs appear in a paragraph. Higher values imply that the paragraph is relevant to the long runs of the Query.",
314
+ "cite_spans": [],
315
+ "ref_spans": [],
316
+ "eq_spans": [],
317
+ "section": "Calculating SEMCATs",
318
+ "sec_num": "3.2"
319
+ },
320
+ {
321
+ "text": "The following is a program output for calculating SEMCAT weights for an arbitrary long run: \"SEVEN INTERACTIVE PRODUCTS LED\" SEMCAT: EXOT Sw : 1.00 edw : 1.99 idw : 1.44 Swxidw : 1.44 SEMCAT: GINV Sw : 0.33 edw : 1.62 idw : 1.53 Swxidw : 0.51 SEMCAT: MOT Sw : 0.20 edw : 0.71 idw : 1.89 Swxidw : 0.38 SEMCAT: NUM Sw : 0.20 edw : 1.76 idw : 1.49 Swxidw : 0.30 SEMCAT: ORGM Sw : 0.20 edw : 1.67 idw : 1.52 Sw\u00d7idw : 0.30 SEMCAT: PEAF Sw : 0.53 edw : 1.50 idw : 1.56 Swxidw : 0.83 SEMCAT: REAF Sw : 0.20 edw : 0.20 idw : 2.44 Swxidw : 0.49 SEMCAT: SYAF Sw : 0.33 edw : 1.19 idw : 1.66 Swxidw : 0.55 Total (Swxidw) : 4.79",
322
+ "cite_spans": [],
323
+ "ref_spans": [],
324
+ "eq_spans": [],
325
+ "section": "Calculating SEMCATs",
326
+ "sec_num": "3.2"
327
+ },
328
+ {
329
+ "text": "The goal of employing probability and vector processing is to prove the linguistic basis that long runs of content words can be used as predictors of semantic intent But we also want to exploit the computational advantage of removing the function words from the document, which reduces the number of tokens processed by about 50% and thus reduces vector space and probability computations. If it is true that long runs of content words are predictors of semantic coherence, we can further reduce the complexity of vector computations: (1) by eliminating those paragraphs without long runs from consideration, (2) within remaining paragraphs with long runs, computing and summing the semantic coherence of the longest runs only, (3) ranking the eligible paragraphs for retrieval based upon their semantic weights relative to the query.",
330
+ "cite_spans": [],
331
+ "ref_spans": [],
332
+ "eq_spans": [],
333
+ "section": "Experimental Results",
334
+ "sec_num": "4"
335
+ },
336
+ {
337
+ "text": "Jang (1997) established that the distribution of long runs of content words and short runs of content words in a collection of paragraphs are drawn from different populations. This implies that either long runs or short runs are predictors, but since all paragraphs contain short runs, i.e. a single content word separated by function words, only long runs can be useful predictors. Furthermore, only long runs as we define them can be used as predictors because short runs are insufficient to construct the language constructs for prepositional phrase and subject complement positions. If short runs were discriminators, the linguistic assumption of this research would be violated. The statistical analysis of Jang (1997) does not indicate this to be the case.",
338
+ "cite_spans": [
339
+ {
340
+ "start": 5,
341
+ "end": 11,
342
+ "text": "(1997)",
343
+ "ref_id": null
344
+ }
345
+ ],
346
+ "ref_spans": [],
347
+ "eq_spans": [],
348
+ "section": "Experimental Results",
349
+ "sec_num": "4"
350
+ },
351
+ {
352
+ "text": "To proceed in establishing the viability of our approach, we proposed the following experimental hypotheses:",
353
+ "cite_spans": [],
354
+ "ref_spans": [],
355
+ "eq_spans": [],
356
+ "section": "Experimental Results",
357
+ "sec_num": "4"
358
+ },
359
+ {
360
+ "text": "(HI) The SEMCAT weights for long runs of content words are statistically greater than weights for short runs of content words. Since each content word can map to multiple SEMCATs, we cannot assume that the semantic weight of a long run is a function of its length. The semantic coherence of long runs should be a more granular discriminator. H2For paragraphs containing long runs and short runs, the distribution of long run SEMCAT weights is statistically different from the distribution of short run SEMCAT weights. (H3)",
361
+ "cite_spans": [],
362
+ "ref_spans": [],
363
+ "eq_spans": [],
364
+ "section": "Experimental Results",
365
+ "sec_num": "4"
366
+ },
367
+ {
368
+ "text": "There is a positive correlation between the sum of long run SEMCAT weights and the semantic coherence of a paragraph, the total paragraph SEMCAT weight.",
369
+ "cite_spans": [],
370
+ "ref_spans": [],
371
+ "eq_spans": [],
372
+ "section": "Experimental Results",
373
+ "sec_num": "4"
374
+ },
375
+ {
376
+ "text": "A detailed description of these experiments and their outcome are described in Shin (1997 Shin ( , 1999 . The results of the experiments and the implications of those results relative to the method we propose are discussed below. Table 3 gives the SEMCAT weights for seventeen paragraphs randomly chosen from one document in the collection of Jang (1997). Table 3 is shown in Table 4 . Table 2 . Long run and short run weights come from different \u2022 populations. We accept HI.",
377
+ "cite_spans": [
378
+ {
379
+ "start": 79,
380
+ "end": 89,
381
+ "text": "Shin (1997",
382
+ "ref_id": null
383
+ },
384
+ {
385
+ "start": 90,
386
+ "end": 103,
387
+ "text": "Shin ( , 1999",
388
+ "ref_id": null
389
+ }
390
+ ],
391
+ "ref_spans": [
392
+ {
393
+ "start": 230,
394
+ "end": 237,
395
+ "text": "Table 3",
396
+ "ref_id": "TABREF3"
397
+ },
398
+ {
399
+ "start": 356,
400
+ "end": 363,
401
+ "text": "Table 3",
402
+ "ref_id": "TABREF3"
403
+ },
404
+ {
405
+ "start": 376,
406
+ "end": 383,
407
+ "text": "Table 4",
408
+ "ref_id": "TABREF4"
409
+ },
410
+ {
411
+ "start": 386,
412
+ "end": 393,
413
+ "text": "Table 2",
414
+ "ref_id": "TABREF1"
415
+ }
416
+ ],
417
+ "eq_spans": [],
418
+ "section": "Experimental Results",
419
+ "sec_num": "4"
420
+ },
421
+ {
422
+ "text": "For the between paragraph treatment, the row means (paragraph weights) have an F value of 2.21. At the .05 significance level, F,~ = 05 = 2.28 for 16,16 degrees of freedom. Since 2.21 < 2.28 we cannot reject the assertion that there is no significant difference in SEMCAT weights between paragraphs. That is, paragraph weights do not appear to be taken from different populations, as do the long run and short run weight distributions. Thus, the semantic weight of the content words in a paragraph cannot be used to predict the semantic weight of the paragraph. We therefore proceed to examine H2.",
423
+ "cite_spans": [],
424
+ "ref_spans": [],
425
+ "eq_spans": [],
426
+ "section": "Experimental Results",
427
+ "sec_num": "4"
428
+ },
429
+ {
430
+ "text": "Notice that two paragraphs in Table 2 are without long runs. We need to repeat the analysis of variance for only those paragraphs with long runs to see if long runs are discriminators. Table 5 summarizes those paragraphs. This data was evaluated using a standard two way F test and analysis of variance with cx = .05. The analysis of variance table for the paragraphs in Table 5 follows. For paragraphs in a collection containing both long and short runs, the SEMCAT weights of the long runs and short runs are drawn from different distributions. We accept H2. For paragraphs containing long runs and short runs, the distributions of long run SEMCAT weights is different from the distribution of short run SEMCAT weights. We know from the linguistic basis for long runs that short runs cannot be used as predictors. We therefore proceed to examine the Pearson correlation between the long run SEMCAT weights and paragraph SEMCAT weights for those paragraphs with both long and short content word runs. The weights in Table have a positive Pearson Product Correlation coefficient of .952. We therefore accept H3. There is a positive correlation between the sum of long run SEMCAT weights and the semantic coherence of a paragraph, the total paragraph SEMCAT weight.",
431
+ "cite_spans": [],
432
+ "ref_spans": [
433
+ {
434
+ "start": 30,
435
+ "end": 37,
436
+ "text": "Table 2",
437
+ "ref_id": "TABREF1"
438
+ },
439
+ {
440
+ "start": 185,
441
+ "end": 192,
442
+ "text": "Table 5",
443
+ "ref_id": "TABREF5"
444
+ },
445
+ {
446
+ "start": 371,
447
+ "end": 378,
448
+ "text": "Table 5",
449
+ "ref_id": "TABREF5"
450
+ },
451
+ {
452
+ "start": 1017,
453
+ "end": 1027,
454
+ "text": "Table have",
455
+ "ref_id": null
456
+ }
457
+ ],
458
+ "eq_spans": [],
459
+ "section": "Experimental Results",
460
+ "sec_num": "4"
461
+ },
462
+ {
463
+ "text": "This research tested three statistical hypotheses extending from two observations: (1) Jang (1997) observed the clustering of long runs of content words and established the distribution of long run lengths and short run lengths are drawn from different populations, (2) our observation that these long runs of content words originate from the prepositional phrase and subject complement positions. According to Halliday (1985) those grammar structures function as",
464
+ "cite_spans": [
465
+ {
466
+ "start": 411,
467
+ "end": 426,
468
+ "text": "Halliday (1985)",
469
+ "ref_id": null
470
+ }
471
+ ],
472
+ "ref_spans": [],
473
+ "eq_spans": [],
474
+ "section": "Conclusion",
475
+ "sec_num": "5."
476
+ }
477
+ ],
478
+ "back_matter": [
479
+ {
480
+ "text": "minor predication and as such are loci of semantic intent or coherence. In order to facilitate the use of long runs as predictors, we modified the traditional measures of Boyd et al. (1994) , Wendlandt (1991) to accommodate semantic categories and partial text retrieval. The revised metrics and the computational method we propose were used in the statistical experiments presented above. The main findings of this work are 1.the distribution semantic coherence (SEMCAT weights) of long runs is not statistically greater than that of short runs, 2.for paragraphs containing both long runs and short runs, the SEMCAT weight distributions are drawn from different populations 3.there is a positive correlation between the sum of long run SEMCAT weights and the total SEMCAT weight of the paragraph (its semantic coherence).Significant additional work is required to validate these preliminary results. The collection employed in Jang (1997) is not a standard Corpus so we have no way to test precision and relevance of the proposed method. The results of the proposed method are subject to the accuracy of the stop lists and filtering function.Nonetheless, we feel the approach proposed has potential to improve performance through reduced token processing and increased relevance through consideration of semantic coherence of long runs. Significantly, our approach does not require knowledge of the collection.Halliday M. A.K. (1985) Management, vol. 2, No. 4, December 1999, to appear.Wendlandt E. and Driscoll R. (1991) Incorporating a semantic analysis into a document retrieval strategy. CACM 31, pp. 54-48.",
481
+ "cite_spans": [
482
+ {
483
+ "start": 171,
484
+ "end": 189,
485
+ "text": "Boyd et al. (1994)",
486
+ "ref_id": "BIBREF0"
487
+ },
488
+ {
489
+ "start": 192,
490
+ "end": 208,
491
+ "text": "Wendlandt (1991)",
492
+ "ref_id": null
493
+ },
494
+ {
495
+ "start": 1423,
496
+ "end": 1434,
497
+ "text": "A.K. (1985)",
498
+ "ref_id": null
499
+ },
500
+ {
501
+ "start": 1435,
502
+ "end": 1479,
503
+ "text": "Management, vol. 2, No. 4, December 1999, to",
504
+ "ref_id": null
505
+ }
506
+ ],
507
+ "ref_spans": [],
508
+ "eq_spans": [],
509
+ "section": "annex",
510
+ "sec_num": null
511
+ }
512
+ ],
513
+ "bib_entries": {
514
+ "BIBREF0": {
515
+ "ref_id": "b0",
516
+ "title": "Incorporating Semantics Within a Connectionist Model and a Vector Processing Model",
517
+ "authors": [
518
+ {
519
+ "first": "R",
520
+ "middle": [],
521
+ "last": "Boyd",
522
+ "suffix": ""
523
+ },
524
+ {
525
+ "first": "J",
526
+ "middle": [],
527
+ "last": "Driscoll",
528
+ "suffix": ""
529
+ },
530
+ {
531
+ "first": "Syu",
532
+ "middle": [
533
+ "I"
534
+ ],
535
+ "last": "",
536
+ "suffix": ""
537
+ }
538
+ ],
539
+ "year": 1994,
540
+ "venue": "Proceedings of the TREC-2",
541
+ "volume": "",
542
+ "issue": "",
543
+ "pages": "",
544
+ "other_ids": {},
545
+ "num": null,
546
+ "urls": [],
547
+ "raw_text": "Boyd R., Driscoll J, and Syu I. (1994) Incorporating Semantics Within a Connectionist Model and a Vector Processing Model. In Proceedings of the TREC-2, NIST.",
548
+ "links": null
549
+ },
550
+ "BIBREF1": {
551
+ "ref_id": "b1",
552
+ "title": "Indexing by Latent Semantic Analysis",
553
+ "authors": [
554
+ {
555
+ "first": "S",
556
+ "middle": [],
557
+ "last": "Deerwester",
558
+ "suffix": ""
559
+ },
560
+ {
561
+ "first": "G",
562
+ "middle": [],
563
+ "last": "Furnas",
564
+ "suffix": ""
565
+ },
566
+ {
567
+ "first": "T",
568
+ "middle": [],
569
+ "last": "Landauer",
570
+ "suffix": ""
571
+ },
572
+ {
573
+ "first": "R",
574
+ "middle": [],
575
+ "last": "Harshman",
576
+ "suffix": ""
577
+ }
578
+ ],
579
+ "year": 1990,
580
+ "venue": "Journal of the American Society of Information Science",
581
+ "volume": "",
582
+ "issue": "",
583
+ "pages": "41--47",
584
+ "other_ids": {},
585
+ "num": null,
586
+ "urls": [],
587
+ "raw_text": "Deerwester S., Furnas G., Landauer T., and Harshman R. (1990) Indexing by Latent Semantic Anaysis. Journal of the American Society of Information Science 41-6.",
588
+ "links": null
589
+ }
590
+ },
591
+ "ref_entries": {
592
+ "FIGREF0": {
593
+ "text": "FASHION DOLL * * IMPOSSIBLE FIGURE * LONG * * * POPULAR GIRL * MATTEL * WORLD * TOYMAKER * PRODUCTS RANGE * FISHER PRICE INFANT * SALES * * * TALL MANNEQUIN * BARBIE * * AGE * * * BEST SELLING GIRLS BRAND * * POISED * STRUT * * * CHANGE * * MALE DOMINATED WORLD * MULTIMEDIA SOFTWARE * VIDEO GAMES In Figure 1, asterisks occupy positions where function words were filtered out. The bold type indicates the location of the longest runs of content words. The run length distribution of Figure 1 is shown below:",
594
+ "num": null,
595
+ "uris": null,
596
+ "type_str": "figure"
597
+ },
598
+ "TABREF0": {
599
+ "html": null,
600
+ "content": "<table><tr><td/><td>Abbreviation</td><td/></tr><tr><td/><td/><td>Full Description</td></tr><tr><td colspan=\"2\">1 AFIG</td><td>Affection in General</td></tr><tr><td>2</td><td>ANT</td><td>Antagonism</td></tr><tr><td>3</td><td>CAU</td><td>Causation</td></tr><tr><td>4</td><td>CHN</td><td>Change</td></tr><tr><td colspan=\"2\">5 COIV</td><td>Conditional Intersocial Volition</td></tr><tr><td>6</td><td>'CRTH</td><td>Creative Thought</td></tr><tr><td/><td>DIM</td><td>Dimensions</td></tr><tr><td/><td>EXIS</td><td>Existence</td></tr><tr><td/><td>EXOT</td><td>Extension of Thought</td></tr><tr><td colspan=\"2\">Io FORM</td><td>Form</td></tr><tr><td colspan=\"2\">11 GINV</td><td>General Inter social Volition</td></tr><tr><td colspan=\"2\">12 INOM</td><td>Inorganic Matter</td></tr><tr><td colspan=\"2\">13 MECO</td><td>Means of Communication</td></tr><tr><td colspan=\"2\">14 MFRE</td><td>Materials for Reasoning</td></tr><tr><td colspan=\"2\">15 MIG</td><td>Matter in general</td></tr><tr><td colspan=\"2\">16 MOAF</td><td>Moral Affections</td></tr><tr><td colspan=\"2\">17 MOCO</td><td>Modes of Communication</td></tr><tr><td colspan=\"2\">18 MOT</td><td>Motion</td></tr><tr><td colspan=\"2\">19 NOIC</td><td>Nature of Ideas Communicated</td></tr><tr><td colspan=\"2\">2o NUM</td><td>Number</td></tr><tr><td/><td>OPIG</td><td>Operations of Intelligence</td></tr><tr><td/><td/><td>In General</td></tr><tr><td/><td>ORD</td><td>Order</td></tr><tr><td/><td>ORGM</td><td>Organic Matter</td></tr><tr><td/><td>PEAF</td><td>Personal Affections</td></tr></table>",
601
+ "text": "Semantic Categories (SEMCATs)",
602
+ "type_str": "table",
603
+ "num": null
604
+ },
605
+ "TABREF1": {
606
+ "html": null,
607
+ "content": "<table><tr><td>Run Length</td><td>Frequency</td></tr><tr><td>1</td><td>11</td></tr><tr><td>2</td><td>8</td></tr><tr><td>3</td><td>2</td></tr><tr><td>4</td><td>2</td></tr></table>",
608
+ "text": "Distribution of Content Run Lengths in a sample Document",
609
+ "type_str": "table",
610
+ "num": null
611
+ },
612
+ "TABREF3": {
613
+ "html": null,
614
+ "content": "<table><tr><td colspan=\"3\">: SEMCAT Weights of 17 Paragraphs Chosen</td></tr><tr><td>Randomly From</td><td>a Collection</td><td/></tr><tr><td>Paragraph</td><td colspan=\"2\">Short Runs I Long Runs</td></tr><tr><td/><td>Weight</td><td>Weight</td></tr><tr><td/><td>29.84</td><td>18.60</td></tr><tr><td>2</td><td>31.29</td><td>12.81</td></tr><tr><td>3</td><td>23.29</td><td>14.25</td></tr></table>",
615
+ "text": "",
616
+ "type_str": "table",
617
+ "num": null
618
+ },
619
+ "TABREF4": {
620
+ "html": null,
621
+ "content": "<table><tr><td colspan=\"4\">: Analysis of Vari~ of Variance for Table 2 Data</td></tr><tr><td>Variation</td><td>Degrees</td><td>Mean</td><td/></tr><tr><td/><td>of</td><td>Square</td><td>F</td></tr><tr><td/><td>Freedom</td><td/><td/></tr><tr><td>Between</td><td/><td/><td/></tr><tr><td>Treatments</td><td>1</td><td>2904.51</td><td>68.56</td></tr><tr><td>V R = 2904.51</td><td/><td/><td/></tr><tr><td>Between</td><td/><td/><td/></tr><tr><td>Blocks</td><td>16</td><td>93.92</td><td>2.21</td></tr><tr><td>Vc = 1502.83</td><td/><td/><td/></tr><tr><td>Residual or</td><td/><td/><td/></tr><tr><td>Random</td><td>16</td><td>42.36</td><td/></tr><tr><td>VE= 677.77</td><td/><td/><td/></tr><tr><td>Total</td><td>33</td><td/><td/></tr><tr><td>V = 5085.11</td><td/><td/><td/></tr><tr><td colspan=\"4\">At the .05 significance level, Fa _ .o5 = 4.49 for</td></tr><tr><td colspan=\"4\">1,16 degrees of freedom. Since 68.56 &gt; 4.49 we</td></tr><tr><td colspan=\"4\">reject the assertion that column means (run</td></tr><tr><td colspan=\"2\">weights) are equal in</td><td/><td/></tr></table>",
622
+ "text": "",
623
+ "type_str": "table",
624
+ "num": null
625
+ },
626
+ "TABREF5": {
627
+ "html": null,
628
+ "content": "<table><tr><td colspan=\"3\">: SEMCAT weights of 11 paragraphs</td></tr><tr><td colspan=\"3\">containing long runs and short runs</td></tr><tr><td>Paragraph</td><td>Short Runs</td><td>Long Runs</td></tr><tr><td/><td>Weight</td><td>Weight</td></tr><tr><td>1</td><td>29.84</td><td>18.60</td></tr><tr><td>2</td><td>31.29</td><td>12.81</td></tr><tr><td>3</td><td>23.29</td><td>4.25</td></tr><tr><td>4</td><td>23.94</td><td>11.63</td></tr><tr><td>5</td><td>34.63</td><td>35.00</td></tr><tr><td>6</td><td>22.85</td><td>03.32</td></tr><tr><td>8</td><td>35.84</td><td>15.94</td></tr><tr><td>11</td><td>23.01</td><td>07.82</td></tr><tr><td>12</td><td>31.69</td><td>04.79</td></tr><tr><td>14</td><td>17.91</td><td>10.55</td></tr><tr><td>15</td><td>19.70</td><td>05.83</td></tr></table>",
629
+ "text": "",
630
+ "type_str": "table",
631
+ "num": null
632
+ },
633
+ "TABREF6": {
634
+ "html": null,
635
+ "content": "<table><tr><td>: Anal</td><td colspan=\"3\">sis of Variance for Table 5 Data</td></tr><tr><td>Variation</td><td>Degrees</td><td>Mean</td><td/></tr><tr><td/><td>of</td><td>Square</td><td>F</td></tr><tr><td/><td>Freedom</td><td/><td/></tr><tr><td>Between</td><td/><td/><td/></tr><tr><td>Treatments</td><td>1</td><td>1430.98</td><td>291.44</td></tr><tr><td>V R = 1430.98</td><td/><td/><td/></tr><tr><td>Between</td><td/><td/><td/></tr><tr><td>Blocks</td><td>10</td><td>94.40</td><td>19.22</td></tr><tr><td>V c = 944.08</td><td/><td/><td/></tr><tr><td>Residual or</td><td/><td/><td/></tr><tr><td>Random</td><td>10</td><td>4.91</td><td/></tr><tr><td>VF= 49.19</td><td/><td/><td/></tr><tr><td>Total</td><td/><td/><td/></tr><tr><td>V = 2424.26</td><td>21</td><td/><td/></tr></table>",
636
+ "text": "At the .05 significance level, F(α=.05) = 4.10 for 2,10 degrees of freedom. 4.10 < 291.44. At the .05 significance level, F(α=.05) = 2.98 for 10,10 degrees of freedom. 2.98 < 19.22.",
637
+ "type_str": "table",
638
+ "num": null
639
+ },
640
+ "TABREF7": {
641
+ "html": null,
642
+ "content": "<table><tr><td/><td colspan=\"2\">Correlation of Long Run SEMCAT</td></tr><tr><td colspan=\"3\">Weights to Paragraph SEMCAT Weight</td></tr><tr><td colspan=\"2\">Paragraph Long Runs</td><td>Paragraph</td></tr><tr><td/><td>Semantic</td><td>Semantic</td></tr><tr><td/><td>Weight</td><td>Weight</td></tr><tr><td/><td>18.60</td><td>48.44</td></tr><tr><td/><td>12.81</td><td>44.10</td></tr><tr><td>3</td><td>4.25</td><td>27.54</td></tr><tr><td>4</td><td>11.63</td><td>35.57</td></tr><tr><td>5</td><td>35.00</td><td>69.63</td></tr><tr><td>6</td><td>03.32</td><td>26.17</td></tr><tr><td>8</td><td>15.94</td><td>51.78</td></tr><tr><td>11</td><td>07.82</td><td>30.83</td></tr><tr><td>12</td><td>04.79</td><td>31.69</td></tr><tr><td>14</td><td>10.55</td><td>28.46</td></tr><tr><td>15</td><td>05.83</td><td>25.53</td></tr></table>",
643
+ "text": "",
644
+ "type_str": "table",
645
+ "num": null
646
+ }
647
+ }
648
+ }
649
+ }
Full_text_JSON/prefixW/json/W00/W00-0103.json ADDED
@@ -0,0 +1,824 @@
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0103",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:22.951046Z"
6
+ },
7
+ "title": "Reducing Lexical Semantic Complexity with Systematic Polysemous Classes and Underspecification",
8
+ "authors": [
9
+ {
10
+ "first": "Paul",
11
+ "middle": [],
12
+ "last": "Buitelaar",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "DFKI Language Technology Lab Stuhlsatzenhausweg 3",
16
+ "institution": "",
17
+ "location": {
18
+ "postCode": "66123",
19
+ "settlement": "Saarbrticken",
20
+ "country": "Germany"
21
+ }
22
+ },
23
+ "email": "paulb@dfki.de"
24
+ }
25
+ ],
26
+ "year": "",
27
+ "venue": null,
28
+ "identifiers": {},
29
+ "abstract": "This paper presents an algorithm for finding systematic polysemous classes in WordNet and similar semantic databases, based on a definition in (Apresjan 1973). The introduction of systematic polysemous classes can reduce the amount of lexical semantic processing, because the number of disambiguation decisions can be restricted more clearly to those cases that involve real ambiguity (homonymy). In many applications, for instance in document categorization, information retrieval, and information extraction, it may be sufficient to know if a given word belongs to a certain class (underspecified sense) rather than to know which of its (related) senses exactly to pick. The approach for finding systematic polysemous classes is based on that of (Buitelaar 1998a, Buitelaar 1998b), while addressing some previous shortcomings.",
30
+ "pdf_parse": {
31
+ "paper_id": "W00-0103",
32
+ "_pdf_hash": "",
33
+ "abstract": [
34
+ {
35
+ "text": "This paper presents an algorithm for finding systematic polysemous classes in WordNet and similar semantic databases, based on a definition in (Apresjan 1973). The introduction of systematic polysemous classes can reduce the amount of lexical semantic processing, because the number of disambiguation decisions can be restricted more clearly to those cases that involve real ambiguity (homonymy). In many applications, for instance in document categorization, information retrieval, and information extraction, it may be sufficient to know if a given word belongs to a certain class (underspecified sense) rather than to know which of its (related) senses exactly to pick. The approach for finding systematic polysemous classes is based on that of (Buitelaar 1998a, Buitelaar 1998b), while addressing some previous shortcomings.",
36
+ "cite_spans": [],
37
+ "ref_spans": [],
38
+ "eq_spans": [],
39
+ "section": "Abstract",
40
+ "sec_num": null
41
+ }
42
+ ],
43
+ "body_text": [
44
+ {
45
+ "text": "This paper presents an algorithm for finding systematic polysemous classes in WordNet (Miller et al 1990) and GermaNet (Hamp and Feldweg 1997) --a semantic database for German similar to WordNet. The introduction of such classes can reduce the amount of lexical semantic processing, because the number of disambiguation decisions can be restricted more clearly to those cases that involve real ambiguity (homonymy).",
46
+ "cite_spans": [
47
+ {
48
+ "start": 86,
49
+ "end": 105,
50
+ "text": "(Miller et al 1990)",
51
+ "ref_id": "BIBREF9"
52
+ },
53
+ {
54
+ "start": 119,
55
+ "end": 142,
56
+ "text": "(Hamp and Feldweg 1997)",
57
+ "ref_id": "BIBREF6"
58
+ }
59
+ ],
60
+ "ref_spans": [],
61
+ "eq_spans": [],
62
+ "section": "Introduction",
63
+ "sec_num": null
64
+ },
65
+ {
66
+ "text": "Different than with homonyms, systematically polysemous words need not always be disambiguated, because such words have several related senses that are shared in a systematic way by a group of similar words. In many applications then, for instance in document categorization and other areas of information retrieval, it may be sufficient to know if a given word belongs to this grou p rather than to know which of its (related) senses exactly to pick. In other words, it will suffice to assign a more coarse grained sense that leaves several related senses underspecified, but which can be further specified on demand 1.",
67
+ "cite_spans": [],
68
+ "ref_spans": [],
69
+ "eq_spans": [],
70
+ "section": "Introduction",
71
+ "sec_num": null
72
+ },
73
+ {
74
+ "text": "The approach for finding systematic polysemous classes is based on that of (Buitelaar 1998a , Buitelaar 1998b ), but takes into account some shortcomings as pointed out in (Krymolowski and Roth 1998) (Peters, Peters and Vossen 1998) (Tomuro 1998) . Whereas the original approach identified a small set of top-level synsets for grouping together lexical items, i As pointed out in (Wilks 99) , earlier work in AI on 'Polaroid Words' (Hirst 87) and 'Word Experts' (Small 81) advocated a similar, incremental approach to sense representation and interpretation. In line with this, the CoreLex approach discussed here provides a large scale inventory of systematically polysemous lexical items with underspecified representations that can be incrementally refined.",
75
+ "cite_spans": [
76
+ {
77
+ "start": 75,
78
+ "end": 91,
79
+ "text": "(Buitelaar 1998a",
80
+ "ref_id": "BIBREF1"
81
+ },
82
+ {
83
+ "start": 92,
84
+ "end": 109,
85
+ "text": ", Buitelaar 1998b",
86
+ "ref_id": "BIBREF2"
87
+ },
88
+ {
89
+ "start": 172,
90
+ "end": 199,
91
+ "text": "(Krymolowski and Roth 1998)",
92
+ "ref_id": "BIBREF8"
93
+ },
94
+ {
95
+ "start": 200,
96
+ "end": 232,
97
+ "text": "(Peters, Peters and Vossen 1998)",
98
+ "ref_id": "BIBREF10"
99
+ },
100
+ {
101
+ "start": 233,
102
+ "end": 246,
103
+ "text": "(Tomuro 1998)",
104
+ "ref_id": "BIBREF14"
105
+ },
106
+ {
107
+ "start": 380,
108
+ "end": 390,
109
+ "text": "(Wilks 99)",
110
+ "ref_id": null
111
+ },
112
+ {
113
+ "start": 432,
114
+ "end": 442,
115
+ "text": "(Hirst 87)",
116
+ "ref_id": null
117
+ }
118
+ ],
119
+ "ref_spans": [],
120
+ "eq_spans": [],
121
+ "section": "Introduction",
122
+ "sec_num": null
123
+ },
124
+ {
125
+ "text": "the new approach compares lexical items according to all of their synsets on all hierarchy levels. In addition, the new approach is both more flexible and precise by using a clustering algorithm for comparing meaning distributions between lexical items. Whereas the original approach took into account only identical distributions (with additional human intervention to further group together sufficiently similar classes), the clustering approach allows for completely automatic comparisons, relative to certain thresholds, that identify partial overlaps in meaning distributions.",
126
+ "cite_spans": [],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "Introduction",
130
+ "sec_num": null
131
+ },
132
+ {
133
+ "text": "In lexical semantics, a distinction can be made between senses that are of a contrastive and those that are of a complementary nature (Weinreich 1964) . Contrastive senses are unrelated to each other as with the two meanings of \"bank\". However, such clear-cut (contrastive) meaning distinctions are rather the exception than the rule. Often, words have a number of (complementary) senses that are somehow related to each other in systematic ways (Pustejovsky 1995) . For instance, a word like \"mouth\" has several senses that are all somehow related (after Cruse 1986):",
134
+ "cite_spans": [
135
+ {
136
+ "start": 134,
137
+ "end": 150,
138
+ "text": "(Weinreich 1964)",
139
+ "ref_id": "BIBREF15"
140
+ },
141
+ {
142
+ "start": 446,
143
+ "end": 464,
144
+ "text": "(Pustejovsky 1995)",
145
+ "ref_id": "BIBREF11"
146
+ }
147
+ ],
148
+ "ref_spans": [],
149
+ "eq_spans": [],
150
+ "section": "Acquisition and Application of Systematic Polysemous Classes",
151
+ "sec_num": "1"
152
+ },
153
+ {
154
+ "text": "John opened his mouth. This parasite attaches itself to their mouths. The mouth of the cave resembles a bottle. The mouth of the river starts here.",
155
+ "cite_spans": [],
156
+ "ref_spans": [],
157
+ "eq_spans": [],
158
+ "section": "Acquisition and Application of Systematic Polysemous Classes",
159
+ "sec_num": "1"
160
+ },
161
+ {
162
+ "text": "Related senses are, however, only systematic (or regular) if more than one example in a language can be found as formulated in (Apresjan 1973 ",
163
+ "cite_spans": [
164
+ {
165
+ "start": 127,
166
+ "end": 141,
167
+ "text": "(Apresjan 1973",
168
+ "ref_id": "BIBREF0"
169
+ }
170
+ ],
171
+ "ref_spans": [],
172
+ "eq_spans": [],
173
+ "section": "CoreLex",
174
+ "sec_num": "2"
175
+ },
176
+ {
177
+ "text": "Polysemy of the word A with the meanings ai and aj is called regular if in the given language, there exists at least one other word B with the meanings bi bj, which are semantically distinguished from each other in exactly the same way as ai and aj and if ai and bi, aj and bj are nonsynonymous.",
178
+ "cite_spans": [],
179
+ "ref_spans": [],
180
+ "eq_spans": [],
181
+ "section": "):",
182
+ "sec_num": null
183
+ },
184
+ {
185
+ "text": "With this definition, we can construct classes of systematically polysemous words as shown in the CoreLex approach (Buitelaar 1998a) (Buitelaar 1998b ). This method takes WordNet sense assignments and compares their distribution by reducing them to a set of basic types. For instance, WordNet assigns to the noun \"book\" the following senses:",
186
+ "cite_spans": [
187
+ {
188
+ "start": 115,
189
+ "end": 132,
190
+ "text": "(Buitelaar 1998a)",
191
+ "ref_id": "BIBREF1"
192
+ },
193
+ {
194
+ "start": 133,
195
+ "end": 149,
196
+ "text": "(Buitelaar 1998b",
197
+ "ref_id": "BIBREF2"
198
+ }
199
+ ],
200
+ "ref_spans": [],
201
+ "eq_spans": [],
202
+ "section": "):",
203
+ "sec_num": null
204
+ },
205
+ {
206
+ "text": "1. publication 2. product, production 3. fact 4. dramatic_composition, dramatic_work 5. record 6. section, subdivision 7. journal At the top of the WordNet hierarchy these seven senses can be reduced to two basic types: the content that is being communicated and the medium of communication.",
207
+ "cite_spans": [],
208
+ "ref_spans": [],
209
+ "eq_spans": [],
210
+ "section": "):",
211
+ "sec_num": null
212
+ },
213
+ {
214
+ "text": "We can arrive at systematically polysemous classes by investigating which other words share these same senses and are thus polysemous in the same way. For instance, the seven different senses that WordNet assigns to \"book\" can be reduced to two basic types: artifact and communication. We do this for each noun and then group them into classes according to their combination of basic types. Finally, by human introspection several classes were grouped together, because their members seemed sufficiently similar.",
215
+ "cite_spans": [],
216
+ "ref_spans": [],
217
+ "eq_spans": [],
218
+ "section": "):",
219
+ "sec_num": null
220
+ },
221
+ {
222
+ "text": "Among the resulting classes are a number that are to be expected given the literature on systematic polysemy. For instance, the classes animal / food and plant / natural, product have been discussed widely. Other classes are less expected, but seem quite intuitive. The class artifact / attribute / substance for instance includes a number of nouns (\"chalk, charcoal, daub, fiber, fibre, tincture\") that refer to an object that is at the same time an artifact made of some substance and that is also an attribute.",
223
+ "cite_spans": [],
224
+ "ref_spans": [],
225
+ "eq_spans": [],
226
+ "section": "):",
227
+ "sec_num": null
228
+ },
229
+ {
230
+ "text": "3 CoreLex-II Thereby following Apresjan's definition of systematic polysemy discussed above.",
231
+ "cite_spans": [],
232
+ "ref_spans": [],
233
+ "eq_spans": [],
234
+ "section": "):",
235
+ "sec_num": null
236
+ },
237
+ {
238
+ "text": "The algorithm works example for nouns):",
239
+ "cite_spans": [],
240
+ "ref_spans": [],
241
+ "eq_spans": [],
242
+ "section": "The Algorithm",
243
+ "sec_num": "3.2"
244
+ },
245
+ {
246
+ "text": "as follows (for",
247
+ "cite_spans": [],
248
+ "ref_spans": [],
249
+ "eq_spans": [],
250
+ "section": "The Algorithm",
251
+ "sec_num": "3.2"
252
+ },
253
+ {
254
+ "text": "The CoreLex database has been used and/or evaluated in a number of projects, leading to some criticisms of the approach in (Krymolowski and Roth 1998) (Peters, Peters and Vossen 1998) (Tomuro 1998 ) and in personal communication. Primarily it was argued that the choice of basic types is arbitrary and on too high a level. Systematic class discovery in the original approach is dependent on this set of basic types, which means that classes on lower levels are not captured at all. Further criticism arose on the arbitrariness (and inefficiency) of human intervention in grouping together resulting classes into more comprehensive ones based on the similarity of their members.",
255
+ "cite_spans": [
256
+ {
257
+ "start": 123,
258
+ "end": 150,
259
+ "text": "(Krymolowski and Roth 1998)",
260
+ "ref_id": "BIBREF8"
261
+ },
262
+ {
263
+ "start": 151,
264
+ "end": 183,
265
+ "text": "(Peters, Peters and Vossen 1998)",
266
+ "ref_id": "BIBREF10"
267
+ },
268
+ {
269
+ "start": 184,
270
+ "end": 196,
271
+ "text": "(Tomuro 1998",
272
+ "ref_id": "BIBREF14"
273
+ }
274
+ ],
275
+ "ref_spans": [],
276
+ "eq_spans": [],
277
+ "section": "A More Flexible Approach",
278
+ "sec_num": "3.1"
279
+ },
280
+ {
281
+ "text": "In response to this, a new approach was formulated and implemented that addresses both these points. Comparison of sense distributions is now performed over synsets on all levels, not just over a small set on the top levels. In addition, similarity on sense distribution between words need no longer be complete (100%), as with the former approach. Instead, a threshold on similarity can be set that constraints a clustering algorithm for automatically grouping together words into systematic polysemous classes. (No human intervention to further group together resulting classes is required.) This approach took inspiration from the pioneering work by (Dolan 1994) , but it is also fundamentally different, because instead of grouping similar senses together, the CoreLex approach groups together words according to all of their senses. For every noun in the WordNet or GermaNet index, get all of its senses (which are in fact level1 synsets). If a noun has more than one sense put it in a separate list that will be used for further processing. Nouns with only one sense are not used in further processing because we are only interested in systematic distributions of more than one sense over several nouns. In order to compare nouns not only on the sense level but rather over the whole of the WordNet hierarchy, also all higher level synsets (hypernyms) for each sense are stored.",
282
+ "cite_spans": [
283
+ {
284
+ "start": 653,
285
+ "end": 665,
286
+ "text": "(Dolan 1994)",
287
+ "ref_id": "BIBREF5"
288
+ }
289
+ ],
290
+ "ref_spans": [],
291
+ "eq_spans": [],
292
+ "section": "A More Flexible Approach",
293
+ "sec_num": "3.1"
294
+ },
295
+ {
296
+ "text": "Then, for each noun we compare its \"sense distribution\" (the complete set of synsets derived in the previous steps) with each other noun. Similarity is computed using the Jaccard score, which compares objects according to the attributes they share and their unique attributes. If the similarity is over a certain threshold, the noun pair is stored in a matrix which is consequently used in a final clustering step.",
297
+ "cite_spans": [],
298
+ "ref_spans": [],
299
+ "eq_spans": [],
300
+ "section": "A More Flexible Approach",
301
+ "sec_num": "3.1"
302
+ },
303
+ {
304
+ "text": "Finally, the clustering itself is a simple, single link algorithm that groups together objects uniquely in discrete clusters.",
305
+ "cite_spans": [],
306
+ "ref_spans": [],
307
+ "eq_spans": [],
308
+ "section": "A More Flexible Approach",
309
+ "sec_num": "3.1"
310
+ },
311
+ {
312
+ "text": "Depending on the threshold on similarity, the algorithm generates a number of clusters of ambiguous words that share similar sense distributions, and which can be seen as systematic polysemous classes. In the following table an overview is given of results with different thresholds. The number of nouns in WordNet that were processed is 46.995, of which 10.772 have more than one sense. Systematic polysemous classes that are obtained in this way can be used as filters on sense disambiguation in a variety of applications in which a coarse grained sense assignment will suffice in many cases, but where an option of further specification exists. For instance, in information retrieval it will not always be necessary to distinguish between the two interpretations of \"baseball, ,,2 basketball, football ..... Users looking for information on a baseball-game may be interested also in baseball-balls. On the other hand, a user may be interested specifically in buying a new baseball-ball and does not wish to be flooded with irrelevant information on baseball-games. In this case, the underspecified ball / game sense needs to be further specified in the ball sense only. Similarly, it will not always be necessary to distinguish exactly between the vessel interpretation of \"bottle, bucket, cask, ...\" and the measure interpretation, or between the communication interpretation of a \"clamor, hiss, roar, ...\" and the noise interpretation.",
313
+ "cite_spans": [],
314
+ "ref_spans": [],
315
+ "eq_spans": [],
316
+ "section": "Quantitative and Qualitative Analysis",
317
+ "sec_num": "3.3"
318
+ },
319
+ {
320
+ "text": "Currently, a query expansion module based on the approach described here is under development as part of the prototype systems of two EU funded projects:",
321
+ "cite_spans": [],
322
+ "ref_spans": [],
323
+ "eq_spans": [],
324
+ "section": "Quantitative and Qualitative Analysis",
325
+ "sec_num": "3.3"
326
+ },
327
+ {
328
+ "text": "MIETTA 3 (a cross-lingual search engine in the tourism domain - Buitelaar et al 1998) and OLIVE 4 (a cross-lingual video retrieval system).",
329
+ "cite_spans": [
330
+ {
331
+ "start": 64,
332
+ "end": 85,
333
+ "text": "Buitelaar et al 1998)",
334
+ "ref_id": "BIBREF3"
335
+ }
336
+ ],
337
+ "ref_spans": [],
338
+ "eq_spans": [],
339
+ "section": "Quantitative and Qualitative Analysis",
340
+ "sec_num": "3.3"
341
+ },
342
+ {
343
+ "text": "Also in shallow processing applications like semantic pre-processing for document categorization it will be sufficient to use an underspecified sense instead of needless disambiguation between senses that are roughly equal in their relevance to a certain document category. Similarly, in shallow syntactic processing tasks, like statistical disambiguation of PP-attachment, the use of underspecified senses may be preferable as shown in experiments by (Krymolowski and Roth 1998) .",
344
+ "cite_spans": [
345
+ {
346
+ "start": 452,
347
+ "end": 479,
348
+ "text": "(Krymolowski and Roth 1998)",
349
+ "ref_id": "BIBREF8"
350
+ }
351
+ ],
352
+ "ref_spans": [],
353
+ "eq_spans": [],
354
+ "section": "Quantitative and Qualitative Analysis",
355
+ "sec_num": "3.3"
356
+ },
357
+ {
358
+ "text": "2 Compare (SchUtze 1997) for a similar, but purely statistical approach to underspecification in lexical semantic processing and its use in machine learning and information retrieval. 3 http://www.mietta.net/mietta 4 http: lltwentyone.tpd.tno.nllolivel In order to train systems to accurately perform syntactic analysis on the basis of semantic classes, semantically annotated corpora are needed. This is another area of application of the research described here. CoreLex clusters can be considered by annotators as alternatives to WordNet or GermaNet synsets if they are not able to choose between the senses given and instead prefer an underspecified sense. This approach is currently tested, in cooperation with the GermaNet group of the University of Ttibingen, in a preliminary project on semantic annotation of German newspaper text.",
359
+ "cite_spans": [
360
+ {
361
+ "start": 223,
362
+ "end": 252,
363
+ "text": "lltwentyone.tpd.tno.nllolivel",
364
+ "ref_id": null
365
+ }
366
+ ],
367
+ "ref_spans": [],
368
+ "eq_spans": [],
369
+ "section": "Quantitative and Qualitative Analysis",
370
+ "sec_num": "3.3"
371
+ },
372
+ {
373
+ "text": "We presented a new algorithm for generating systematic polysemous classes from existing resources like WordNet and similar semantic databases. Results were discussed for classes of English nouns as generated from WordNet. With a threshold of 75% similarity between nouns, 1341 classes could be found covering 3336 nouns. Not discussed were similar experiments for verbs and adjectives, both in English and German. The resulting classes can be used as filters on incremental sense disambiguation in various applications in which coarse grained (underspecified) senses are preferred, but from which more fine grained senses can be derived on demand.",
374
+ "cite_spans": [],
375
+ "ref_spans": [],
376
+ "eq_spans": [],
377
+ "section": "Conclusion",
378
+ "sec_num": null
379
+ }
380
+ ],
381
+ "back_matter": [],
382
+ "bib_entries": {
383
+ "BIBREF0": {
384
+ "ref_id": "b0",
385
+ "title": "Regular Polysemy. Linguistics",
386
+ "authors": [
387
+ {
388
+ "first": "J",
389
+ "middle": [],
390
+ "last": "Apresjan",
391
+ "suffix": ""
392
+ }
393
+ ],
394
+ "year": 1973,
395
+ "venue": "",
396
+ "volume": "",
397
+ "issue": "",
398
+ "pages": "",
399
+ "other_ids": {},
400
+ "num": null,
401
+ "urls": [],
402
+ "raw_text": "J. Apresjan (1973) Regular Polysemy. Linguistics, 142.",
403
+ "links": null
404
+ },
405
+ "BIBREF1": {
406
+ "ref_id": "b1",
407
+ "title": "CoreLex: Systematic Polysemy and Underspecification",
408
+ "authors": [
409
+ {
410
+ "first": "Paul",
411
+ "middle": [],
412
+ "last": "Buitelaar",
413
+ "suffix": ""
414
+ }
415
+ ],
416
+ "year": 1998,
417
+ "venue": "",
418
+ "volume": "",
419
+ "issue": "",
420
+ "pages": "",
421
+ "other_ids": {},
422
+ "num": null,
423
+ "urls": [],
424
+ "raw_text": "Paul Buitelaar (1998a) CoreLex: Systematic Polysemy and Underspecification. PhD Thesis, Brandeis University.",
425
+ "links": null
426
+ },
427
+ "BIBREF2": {
428
+ "ref_id": "b2",
429
+ "title": "CoreLex: An Ontology of Systematic Polysemous Classes",
430
+ "authors": [
431
+ {
432
+ "first": "Paul",
433
+ "middle": [],
434
+ "last": "Buitelaar",
435
+ "suffix": ""
436
+ }
437
+ ],
438
+ "year": 1998,
439
+ "venue": "Formal Ontology in Information Systems",
440
+ "volume": "",
441
+ "issue": "",
442
+ "pages": "",
443
+ "other_ids": {},
444
+ "num": null,
445
+ "urls": [],
446
+ "raw_text": "Paul Buitelaar (1998b) CoreLex: An Ontology of Systematic Polysemous Classes. In: Formal Ontology in Information Systems. IOS Press, Amsterdam.",
447
+ "links": null
448
+ },
449
+ "BIBREF3": {
450
+ "ref_id": "b3",
451
+ "title": "Integrating Different Strategies In Cross-Language Information Retrieval in the MIETTA Project",
452
+ "authors": [
453
+ {
454
+ "first": "Paul",
455
+ "middle": [],
456
+ "last": "Buitelaar",
457
+ "suffix": ""
458
+ },
459
+ {
460
+ "first": "Klaus",
461
+ "middle": [],
462
+ "last": "Netter",
463
+ "suffix": ""
464
+ },
465
+ {
466
+ "first": "Feiyu",
467
+ "middle": [],
468
+ "last": "Xu",
469
+ "suffix": ""
470
+ }
471
+ ],
472
+ "year": 1998,
473
+ "venue": "Proceedings of TWLT14",
474
+ "volume": "",
475
+ "issue": "",
476
+ "pages": "",
477
+ "other_ids": {},
478
+ "num": null,
479
+ "urls": [],
480
+ "raw_text": "Paul Buitelaar, Klaus Netter and Feiyu Xu (1998) Integrating Different Strategies In Cross-Language Information Retrieval in the MIETTA Project. In: Proceedings of TWLT14, Enschede, the Netherlands, December.",
481
+ "links": null
482
+ },
483
+ "BIBREF4": {
484
+ "ref_id": "b4",
485
+ "title": "Lexical Semantics",
486
+ "authors": [
487
+ {
488
+ "first": "D",
489
+ "middle": [
490
+ "A"
491
+ ],
492
+ "last": "Cruse",
493
+ "suffix": ""
494
+ }
495
+ ],
496
+ "year": 1986,
497
+ "venue": "",
498
+ "volume": "",
499
+ "issue": "",
500
+ "pages": "",
501
+ "other_ids": {},
502
+ "num": null,
503
+ "urls": [],
504
+ "raw_text": "D. A. Cruse (1986) Lexical Semantics. Cambridge University Press.",
505
+ "links": null
506
+ },
507
+ "BIBREF5": {
508
+ "ref_id": "b5",
509
+ "title": "Word Sense Ambiguation: Clustering Related Senses",
510
+ "authors": [
511
+ {
512
+ "first": "Bill",
513
+ "middle": [],
514
+ "last": "Dolan",
515
+ "suffix": ""
516
+ }
517
+ ],
518
+ "year": 1994,
519
+ "venue": "Proceedings of COLING-94",
520
+ "volume": "",
521
+ "issue": "",
522
+ "pages": "",
523
+ "other_ids": {},
524
+ "num": null,
525
+ "urls": [],
526
+ "raw_text": "Bill Dolan (1994) Word Sense Ambiguation: Clustering Related Senses. In: Proceedings of COLING-94. Kyoto, Japan.",
527
+ "links": null
528
+ },
529
+ "BIBREF6": {
530
+ "ref_id": "b6",
531
+ "title": "GermaNeta Lexical Semantic Net for German",
532
+ "authors": [
533
+ {
534
+ "first": "Birgit",
535
+ "middle": [],
536
+ "last": "Hamp",
537
+ "suffix": ""
538
+ },
539
+ {
540
+ "first": "Helmut",
541
+ "middle": [],
542
+ "last": "Feldweg",
543
+ "suffix": ""
544
+ }
545
+ ],
546
+ "year": 1997,
547
+ "venue": "Proceedings of the ACL Workshop on Automatic Information Extraction and Building of Lexieal Semantic Resources for NLP Applications. Madrid",
548
+ "volume": "",
549
+ "issue": "",
550
+ "pages": "",
551
+ "other_ids": {},
552
+ "num": null,
553
+ "urls": [],
554
+ "raw_text": "Birgit Hamp and Helmut Feldweg (1997) GermaNet- a Lexical Semantic Net for German. In: Proceedings of the ACL Workshop on Automatic Information Extraction and Building of Lexieal Semantic Resources for NLP Applications. Madrid,.",
555
+ "links": null
556
+ },
557
+ "BIBREF7": {
558
+ "ref_id": "b7",
559
+ "title": "Semantic Interpretation and the Resolution of Ambiguity",
560
+ "authors": [
561
+ {
562
+ "first": "G",
563
+ "middle": [],
564
+ "last": "Hirst",
565
+ "suffix": ""
566
+ }
567
+ ],
568
+ "year": 1987,
569
+ "venue": "",
570
+ "volume": "",
571
+ "issue": "",
572
+ "pages": "",
573
+ "other_ids": {},
574
+ "num": null,
575
+ "urls": [],
576
+ "raw_text": "G. Hirst (1987) Semantic Interpretation and the Resolution of Ambiguity. Cambridge University Press.",
577
+ "links": null
578
+ },
579
+ "BIBREF8": {
580
+ "ref_id": "b8",
581
+ "title": "Incorporating Knowledge in Natural Language Learning: A Case Study",
582
+ "authors": [
583
+ {
584
+ "first": "Yuval",
585
+ "middle": [],
586
+ "last": "Krymolowski",
587
+ "suffix": ""
588
+ },
589
+ {
590
+ "first": "Dan",
591
+ "middle": [],
592
+ "last": "Roth",
593
+ "suffix": ""
594
+ }
595
+ ],
596
+ "year": 1998,
597
+ "venue": "Proceedings ACL-98 Workshop on the Use of WordNet in NLP",
598
+ "volume": "",
599
+ "issue": "",
600
+ "pages": "",
601
+ "other_ids": {},
602
+ "num": null,
603
+ "urls": [],
604
+ "raw_text": "Yuval Krymolowski and Dan Roth (1998) Incorporating Knowledge in Natural Language Learning: A Case Study. In: Proceedings ACL-98 Workshop on the Use of WordNet in NLP.",
605
+ "links": null
606
+ },
607
+ "BIBREF9": {
608
+ "ref_id": "b9",
609
+ "title": "Introduction to WordNet: An On-line Lexical Database",
610
+ "authors": [
611
+ {
612
+ "first": "G",
613
+ "middle": [
614
+ "A"
615
+ ],
616
+ "last": "Miller",
617
+ "suffix": ""
618
+ },
619
+ {
620
+ "first": "R",
621
+ "middle": [],
622
+ "last": "Beckwith",
623
+ "suffix": ""
624
+ },
625
+ {
626
+ "first": "Ch",
627
+ "middle": [],
628
+ "last": "Fellbaum",
629
+ "suffix": ""
630
+ },
631
+ {
632
+ "first": "D",
633
+ "middle": [],
634
+ "last": "Gross",
635
+ "suffix": ""
636
+ },
637
+ {
638
+ "first": "K",
639
+ "middle": [],
640
+ "last": "Miller",
641
+ "suffix": ""
642
+ }
643
+ ],
644
+ "year": 1990,
645
+ "venue": "International Journal of Lexicography",
646
+ "volume": "3",
647
+ "issue": "",
648
+ "pages": "",
649
+ "other_ids": {},
650
+ "num": null,
651
+ "urls": [],
652
+ "raw_text": "G. A. Miller and R. Beckwith and Ch. Fellbaum and D. Gross and K. Miller (1990) Introduction to WordNet: An On-line Lexical Database. International Journal of Lexicography, 3,4.",
653
+ "links": null
654
+ },
655
+ "BIBREF10": {
656
+ "ref_id": "b10",
657
+ "title": "Automatic Sense Clustering in EuroWordNet",
658
+ "authors": [
659
+ {
660
+ "first": "Wim",
661
+ "middle": [],
662
+ "last": "Peters",
663
+ "suffix": ""
664
+ },
665
+ {
666
+ "first": "Ivonne",
667
+ "middle": [],
668
+ "last": "Peters",
669
+ "suffix": ""
670
+ },
671
+ {
672
+ "first": "Piek",
673
+ "middle": [],
674
+ "last": "Vossen",
675
+ "suffix": ""
676
+ }
677
+ ],
678
+ "year": 1998,
679
+ "venue": "Proceedings of LREC. Granada",
680
+ "volume": "",
681
+ "issue": "",
682
+ "pages": "",
683
+ "other_ids": {},
684
+ "num": null,
685
+ "urls": [],
686
+ "raw_text": "Wim Peters, Ivonne Peters and Piek Vossen (1998) Automatic Sense Clustering in EuroWordNet. In: Proceedings of LREC. Granada.",
687
+ "links": null
688
+ },
689
+ "BIBREF11": {
690
+ "ref_id": "b11",
691
+ "title": "The Generative Lexicon",
692
+ "authors": [
693
+ {
694
+ "first": "James",
695
+ "middle": [],
696
+ "last": "Pustejovsky",
697
+ "suffix": ""
698
+ }
699
+ ],
700
+ "year": 1995,
701
+ "venue": "",
702
+ "volume": "",
703
+ "issue": "",
704
+ "pages": "",
705
+ "other_ids": {},
706
+ "num": null,
707
+ "urls": [],
708
+ "raw_text": "James Pustejovsky (1995) The Generative Lexicon. MIT Press.",
709
+ "links": null
710
+ },
711
+ "BIBREF12": {
712
+ "ref_id": "b12",
713
+ "title": "Ambiguity Resolution in Language Learning",
714
+ "authors": [
715
+ {
716
+ "first": "Hinrich",
717
+ "middle": [],
718
+ "last": "Schirze",
719
+ "suffix": ""
720
+ }
721
+ ],
722
+ "year": 1997,
723
+ "venue": "",
724
+ "volume": "71",
725
+ "issue": "",
726
+ "pages": "",
727
+ "other_ids": {},
728
+ "num": null,
729
+ "urls": [],
730
+ "raw_text": "Hinrich SchiRze (1997) Ambiguity Resolution in Language Learning. Volume 71 of CSLI Publications. Chicago University Press.",
731
+ "links": null
732
+ },
733
+ "BIBREF13": {
734
+ "ref_id": "b13",
735
+ "title": "Viewing Word Expert Parsing as Linguistic Theory",
736
+ "authors": [
737
+ {
738
+ "first": "S",
739
+ "middle": [],
740
+ "last": "Small",
741
+ "suffix": ""
742
+ }
743
+ ],
744
+ "year": 1981,
745
+ "venue": "Proceedings of IJCAI",
746
+ "volume": "",
747
+ "issue": "",
748
+ "pages": "",
749
+ "other_ids": {},
750
+ "num": null,
751
+ "urls": [],
752
+ "raw_text": "S. Small (1981) Viewing Word Expert Parsing as Linguistic Theory. In: Proceedings of IJCAI.",
753
+ "links": null
754
+ },
755
+ "BIBREF14": {
756
+ "ref_id": "b14",
757
+ "title": "Semi-Automatic Induction of Systematic Polysemy from WordNet",
758
+ "authors": [
759
+ {
760
+ "first": "Noriko",
761
+ "middle": [],
762
+ "last": "Tomuro",
763
+ "suffix": ""
764
+ }
765
+ ],
766
+ "year": 1998,
767
+ "venue": "Proceedings ACL-98 Workshop on the Use of WordNet in NLP",
768
+ "volume": "",
769
+ "issue": "",
770
+ "pages": "",
771
+ "other_ids": {},
772
+ "num": null,
773
+ "urls": [],
774
+ "raw_text": "Noriko Tomuro (1998) Semi-Automatic Induction of Systematic Polysemy from WordNet. In: Proceedings ACL-98 Workshop on the Use of WordNet in NLP.",
775
+ "links": null
776
+ },
777
+ "BIBREF15": {
778
+ "ref_id": "b15",
779
+ "title": "Webster's Third: A Critique of its Semantics",
780
+ "authors": [
781
+ {
782
+ "first": "Uriel",
783
+ "middle": [],
784
+ "last": "Weinreich",
785
+ "suffix": ""
786
+ }
787
+ ],
788
+ "year": 1964,
789
+ "venue": "International Journal of American Linguistics",
790
+ "volume": "30",
791
+ "issue": "",
792
+ "pages": "405--409",
793
+ "other_ids": {},
794
+ "num": null,
795
+ "urls": [],
796
+ "raw_text": "Uriel Weinreich (1964) Webster's Third: A Critique of its Semantics. International Journal of American Linguistics, 405-409, 30.",
797
+ "links": null
798
+ },
799
+ "BIBREF16": {
800
+ "ref_id": "b16",
801
+ "title": "Is Word Sense Disambiguation just one more NLP task?",
802
+ "authors": [
803
+ {
804
+ "first": "Yorick",
805
+ "middle": [],
806
+ "last": "Wilks",
807
+ "suffix": ""
808
+ }
809
+ ],
810
+ "year": 1999,
811
+ "venue": "",
812
+ "volume": "",
813
+ "issue": "",
814
+ "pages": "",
815
+ "other_ids": {},
816
+ "num": null,
817
+ "urls": [],
818
+ "raw_text": "Yorick Wilks (1999) Is Word Sense Disambiguation just one more NLP task? Cs.CL/9902030.",
819
+ "links": null
820
+ }
821
+ },
822
+ "ref_entries": {}
823
+ }
824
+ }
Full_text_JSON/prefixW/json/W00/W00-0104.json ADDED
@@ -0,0 +1,1196 @@
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0104",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:35:02.293116Z"
6
+ },
7
+ "title": "Automatic Extraction of Systematic Polysemy Using Tree-cut",
8
+ "authors": [
9
+ {
10
+ "first": "Noriko",
11
+ "middle": [],
12
+ "last": "Tomuro",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "DePaul University",
17
+ "location": {
18
+ "addrLine": "243 S. Wabash Ave. Chicago",
19
+ "postCode": "60604",
20
+ "region": "IL"
21
+ }
22
+ },
23
+ "email": "tomuro@cs.depaul.edu"
24
+ }
25
+ ],
26
+ "year": "",
27
+ "venue": null,
28
+ "identifiers": {},
29
+ "abstract": "This paper describes an automatic method for extracting systematic polysemy from a hierarchically organized semantic lexicon (WordNet). Systematic polysemy is a set of word senses that are related in systematic and predictable ways. Our method uses a modification of a tree generalization technique used in (Li and Abe, 1998), and generates a tree-cut, which is a list of clusters that partition a tree. We compare the systematic relations extracted by our automatic method to manually extracted WordNet cousins.",
30
+ "pdf_parse": {
31
+ "paper_id": "W00-0104",
32
+ "_pdf_hash": "",
33
+ "abstract": [
34
+ {
35
+ "text": "This paper describes an automatic method for extracting systematic polysemy from a hierarchically organized semantic lexicon (WordNet). Systematic polysemy is a set of word senses that are related in systematic and predictable ways. Our method uses a modification of a tree generalization technique used in (Li and Abe, 1998), and generates a tree-cut, which is a list of clusters that partition a tree. We compare the systematic relations extracted by our automatic method to manually extracted WordNet cousins.",
36
+ "cite_spans": [],
37
+ "ref_spans": [],
38
+ "eq_spans": [],
39
+ "section": "Abstract",
40
+ "sec_num": null
41
+ }
42
+ ],
43
+ "body_text": [
44
+ {
45
+ "text": "In recent years, several on-line broad-coverage semantic lexicons became available, including LDOCE (Procter, 1978) , WordNet (Miller, 1990) and HECTOR . (Kilgarriff, 1998a) . These lexicons have been used as a domainindependent semantic resource as well as an evaluation criteria in various Natural Language Processing (NLP) tasks, such as Information Retrieval (IR), Information Extraction (IE) and Word Sense Disambiguation (WSD).",
46
+ "cite_spans": [
47
+ {
48
+ "start": 100,
49
+ "end": 115,
50
+ "text": "(Procter, 1978)",
51
+ "ref_id": "BIBREF12"
52
+ },
53
+ {
54
+ "start": 126,
55
+ "end": 140,
56
+ "text": "(Miller, 1990)",
57
+ "ref_id": null
58
+ },
59
+ {
60
+ "start": 154,
61
+ "end": 173,
62
+ "text": "(Kilgarriff, 1998a)",
63
+ "ref_id": "BIBREF6"
64
+ }
65
+ ],
66
+ "ref_spans": [],
67
+ "eq_spans": [],
68
+ "section": "Introduction",
69
+ "sec_num": "1"
70
+ },
71
+ {
72
+ "text": "However, those lexicons are rather complex. For instance, WordNet (version 1.6) contains a total of over 120,000 words and 170,000 word senses, which are grouped into around 100,000 synsets (synonym sets). In addition to the size, word entries in those lexicon are often polysemous. For instance, 20% of the words in Wordnet have more than one sense, and the average number of senses of those polysemous words is around 3. Also, the distinction between word senses tends to be ambiguous and arbitrary. For example, the following 6 senses are listed in WordNet for the noun \"door\":",
73
+ "cite_spans": [],
74
+ "ref_spans": [],
75
+ "eq_spans": [],
76
+ "section": "Introduction",
77
+ "sec_num": "1"
78
+ },
79
+ {
80
+ "text": "1. door -a swinging or sliding barrier 2. door -the space in a wall 3. door -anything providing a means of access (or escape) 4. door -a swinging or sliding barrier that will close off access into a car 5. door -a house that is entered via a door 6. door -a room that is entered via a door Because of the high degree of ambiguity, using such complex semantic lexicons brings some serious problems to the performance of NLP systems. The first, obvious problem is the computational intractability: increased processing time needed to disambiguate multiple possibilities will necessarily slow down the system. Another problem, which has been receiving attention in the past few years, is the inaccuracy: when there is more than one sense applicable in a given context, different systems (or human individuals) may select different senses as the correct sense. Indeed, recent studies in WSD show that, when sense definitions are fine-grained, similar senses become indistinguishable to human annotators and often cause disagreement on the correct tag (Ng et al., 1999; Veronis, 1998; Kilgarriff, 1998b) . Also in IR and IE tasks, difference in the correct sense assignment will surely degrade recall and precision of the systems. Thus, it is apparent that, in order for a lexicon to be useful as an evaluation criteria for NLP systems, it must represent word senses at the level of granularity that captures human intuition.",
81
+ "cite_spans": [
82
+ {
83
+ "start": 1047,
84
+ "end": 1064,
85
+ "text": "(Ng et al., 1999;",
86
+ "ref_id": "BIBREF10"
87
+ },
88
+ {
89
+ "start": 1065,
90
+ "end": 1079,
91
+ "text": "Veronis, 1998;",
92
+ "ref_id": "BIBREF15"
93
+ },
94
+ {
95
+ "start": 1080,
96
+ "end": 1098,
97
+ "text": "Kilgarriff, 1998b)",
98
+ "ref_id": "BIBREF6"
99
+ }
100
+ ],
101
+ "ref_spans": [],
102
+ "eq_spans": [],
103
+ "section": "Introduction",
104
+ "sec_num": "1"
105
+ },
106
+ {
107
+ "text": "In Lexical Semantics, several approaches have been proposed which organize a lexicon based on systematic polysemy: 1 a set of word senses that are related in systematic and predictable ISystematic polysemy (in the sense we use in this paper) is also referred to as regular polysemy (Apresjan, 1973) or logical polyseray (Pustejovsky, 1995) .",
108
+ "cite_spans": [
109
+ {
110
+ "start": 282,
111
+ "end": 298,
112
+ "text": "(Apresjan, 1973)",
113
+ "ref_id": "BIBREF0"
114
+ },
115
+ {
116
+ "start": 320,
117
+ "end": 339,
118
+ "text": "(Pustejovsky, 1995)",
119
+ "ref_id": "BIBREF13"
120
+ }
121
+ ],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "Introduction",
125
+ "sec_num": "1"
126
+ },
127
+ {
128
+ "text": "ways (e.g. ANIMAL and MEAT meanings of the word \"chicken\"). 2 In particular, (Buitelaar, 1997 (Buitelaar, , 1998 identified systematic relations that exist between abstract semantic concepts in the WordNet noun hierarchy, and defined a set of underspecified semantic classes that represent the relations. Then he extracted all polysemous nouns in WordNet according to those underspecified classes and built a lexicon called CORELEX. For example, a CORELEX class AQU (which represents a relation between ARTIFACT and QUANTITY) contains words such as \"bottle\", \"bucket\" and \"spoon\".",
129
+ "cite_spans": [
130
+ {
131
+ "start": 77,
132
+ "end": 93,
133
+ "text": "(Buitelaar, 1997",
134
+ "ref_id": "BIBREF1"
135
+ },
136
+ {
137
+ "start": 94,
138
+ "end": 112,
139
+ "text": "(Buitelaar, , 1998",
140
+ "ref_id": "BIBREF2"
141
+ }
142
+ ],
143
+ "ref_spans": [],
144
+ "eq_spans": [],
145
+ "section": "Introduction",
146
+ "sec_num": "1"
147
+ },
148
+ {
149
+ "text": "Using the abstract semantic classes and organizing a lexicon based on systematic polysemy addresses the two problems mentioned above in the following ways. For the first problem, using the abstract classes can reduce the size of the lexicon by combining several related senses into one sense; thus computation becomes more efficient. For the second problem, systematic polysemy does reflect our general intuitions on word meanings. Although the distinction between systematic vs. non-systematic relations (or related vs. unrelated meanings) is sometimes unclear, systematicity of the related senses among words is quite intuitive and has been well studied in Lexical Semantics (for example, (Apresjan, 1973; Cruse, 1986; Nunberg, 1995; Copestake and Briscoe, 1995) ).",
150
+ "cite_spans": [
151
+ {
152
+ "start": 691,
153
+ "end": 707,
154
+ "text": "(Apresjan, 1973;",
155
+ "ref_id": "BIBREF0"
156
+ },
157
+ {
158
+ "start": 708,
159
+ "end": 720,
160
+ "text": "Cruse, 1986;",
161
+ "ref_id": "BIBREF4"
162
+ },
163
+ {
164
+ "start": 721,
165
+ "end": 735,
166
+ "text": "Nunberg, 1995;",
167
+ "ref_id": "BIBREF11"
168
+ },
169
+ {
170
+ "start": 736,
171
+ "end": 764,
172
+ "text": "Copestake and Briscoe, 1995)",
173
+ "ref_id": "BIBREF3"
174
+ }
175
+ ],
176
+ "ref_spans": [],
177
+ "eq_spans": [],
178
+ "section": "Introduction",
179
+ "sec_num": "1"
180
+ },
181
+ {
182
+ "text": "However, there is one critical issue still to be addressed: the level of granularity at which the abstract classes are defined. The problem is that, when the granularity of the abstract classes is too coarse, systematic relations defined at that level may not hold uniformly at more fine-grained levels (Vossen et al., 1999) . For instance, the CORELEX class AQU mentioned above also contains a word \"dose\" .3 Here, the relation between the senses of \"dose\" is different from that of \"bottle\", \"bucket\" and \"spoon\", which can be labeled as CONTAINER-CONTAINERFUL relation. We argue that human intuitions can distinguish meanings 2Note that systematic polysemy should be contrasted with homonymy which refers to words which have more than one unrelated sense (e.g. FINANCIAL_INSTITUTION and SLOPING_LAND meanings of the word \"bank\").",
183
+ "cite_spans": [
184
+ {
185
+ "start": 303,
186
+ "end": 324,
187
+ "text": "(Vossen et al., 1999)",
188
+ "ref_id": "BIBREF16"
189
+ }
190
+ ],
191
+ "ref_spans": [],
192
+ "eq_spans": [],
193
+ "section": "Introduction",
194
+ "sec_num": "1"
195
+ },
196
+ {
197
+ "text": "3Senses of \"dose\" in WordNet are: (1) a measured portion of medicine taken at any one time, and (2) the quantity of an active agent (substance or radiation) taken in or absorbed at any one time. at this level, where differences between the systematic relations are rather clear, and therefore lexicons that encode word senses at this level of granularity have advantages over fine-grained as well as coarse-grained lexicons in various NLP tasks. Another issue we like to address is the ways for extracting systematic polysemy. Most often, this procedure is done manually. For example, the current version of WordNet (1.6) encodes the similarity between word senses (or synsets) by a relation called cousin. But those cousin relations were identified manually by the WordNet lexicographers. A similar effort was also made in the EuroWordnet project (Vossen et al., 1999) . However, manually inspecting a large, complex lexicon is very time-consuming and often prone to inconsistencies.",
198
+ "cite_spans": [
199
+ {
200
+ "start": 848,
201
+ "end": 869,
202
+ "text": "(Vossen et al., 1999)",
203
+ "ref_id": "BIBREF16"
204
+ }
205
+ ],
206
+ "ref_spans": [],
207
+ "eq_spans": [],
208
+ "section": "Introduction",
209
+ "sec_num": "1"
210
+ },
211
+ {
212
+ "text": "In this paper, we propose a method which automatically extracts systematic polysemy from a hierarchically organized semantic lexicon (WordNet). Our method uses a modification of a tree generalization technique used in (Li and Abe, 1998) , and generates a tree-cut, which is a list of clusters that partition a tree. Then, we compare the systematic relations extracted by our automatic method to the WordNet cousins. Preliminary results show that our method discovered most of the WordNet cousins as well as some more interesting relations.",
213
+ "cite_spans": [
214
+ {
215
+ "start": 218,
216
+ "end": 236,
217
+ "text": "(Li and Abe, 1998)",
218
+ "ref_id": "BIBREF8"
219
+ }
220
+ ],
221
+ "ref_spans": [],
222
+ "eq_spans": [],
223
+ "section": "Introduction",
224
+ "sec_num": "1"
225
+ },
226
+ {
227
+ "text": "Before we present our method, we first give a brief summary of the tree-cut technique which we adopted from (Li and Abe, 1998) . This technique is used to acquire generalized case frame patterns from a corpus using a thesaurus tree.",
228
+ "cite_spans": [
229
+ {
230
+ "start": 108,
231
+ "end": 126,
232
+ "text": "(Li and Abe, 1998)",
233
+ "ref_id": "BIBREF8"
234
+ }
235
+ ],
236
+ "ref_spans": [],
237
+ "eq_spans": [],
238
+ "section": "Tree Generalization using Tree-cut and MDL",
239
+ "sec_num": "2"
240
+ },
241
+ {
242
+ "text": "A thesaurus tree is a hierarchically organized lexicon where leaf nodes encode lexical data (i.e., words) and internal nodes represent abstract semantic classes. A tree-cut is a partition of a thesaurus tree. It is a list of internal/leaf nodes in the tree, and each node represents a set of all leaf nodes in a subtree rooted by the node. Such set is also considered as a cluster. 4 Clusters in a tree-cut exhaustively cover all leaf nodes of the tree, and they are mutually disjoint. For example, for a thesaurus tree in Figure 1, . Thus, a treecut corresponds to one of the levels of abstraction in the tree. Using a thesaurus tree and the idea of treecut, the problem of acquiring generalized case frame patters (for a fixed verb) from a corpus is to select the best tree-cut that accounts for both observed and unobserved case frame instances. In (Li and Abe, 1998) , this generalization problem is viewed as a problem of selecting the best model for a tree-cut that estimates the true probability distribution, given a sample corpus data.",
243
+ "cite_spans": [
244
+ {
245
+ "start": 852,
246
+ "end": 870,
247
+ "text": "(Li and Abe, 1998)",
248
+ "ref_id": "BIBREF8"
249
+ }
250
+ ],
251
+ "ref_spans": [
252
+ {
253
+ "start": 523,
254
+ "end": 532,
255
+ "text": "Figure 1,",
256
+ "ref_id": "FIGREF0"
257
+ }
258
+ ],
259
+ "eq_spans": [],
260
+ "section": "Tree-cut Models",
261
+ "sec_num": "2.1"
262
+ },
263
+ {
264
+ "text": "Formally, a tree-cut model M is a pair consisting of a tree-cut F and a probability parameter vector O of the same length,",
265
+ "cite_spans": [],
266
+ "ref_spans": [],
267
+ "eq_spans": [],
268
+ "section": "Tree-cut Models",
269
+ "sec_num": "2.1"
270
+ },
271
+ {
272
+ "text": "M = (F, O)",
273
+ "cite_spans": [],
274
+ "ref_spans": [],
275
+ "eq_spans": [],
276
+ "section": "Tree-cut Models",
277
+ "sec_num": "2.1"
278
+ },
279
+ {
280
+ "text": "where F and \u00ae are:",
281
+ "cite_spans": [],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "Tree-cut Models",
285
+ "sec_num": "2.1"
286
+ },
287
+ {
288
+ "text": "(1) F=[Cx,..,Ck],O=[P(C,),..,P(Ck)] (2)",
289
+ "cite_spans": [],
290
+ "ref_spans": [],
291
+ "eq_spans": [],
292
+ "section": "Tree-cut Models",
293
+ "sec_num": "2.1"
294
+ },
295
+ {
296
+ "text": "words, that is, P(C) = ~=1 P(nj). Here, compared to knowing all P(nj) (where 1 < j < m) individually, knowing one P(C) can only facilitate an estimate of uniform probability distribution among members as the best guess, that is, P(nj) = P(C) for all j. Therefore, in general, m when clusters C1..Cm are merged and generalized to C according to the thesaurus tree, the estimation of a probability model becomes less accurate.",
297
+ "cite_spans": [],
298
+ "ref_spans": [],
299
+ "eq_spans": [],
300
+ "section": "Tree-cut Models",
301
+ "sec_num": "2.1"
302
+ },
303
+ {
304
+ "text": "To select the best tree-cut model, (Li and Abe, 1998 ) uses the Minimal Description Length (MDL) principle (Rissanen, 1978) . The MDL is a principle of data compression in Information Theory which states that, for a given dataset, the best model is the one which requires the minimum length (often measured in bits) to encode the model (the model description length) and the data (the data description length). For the problem of case frame generalization, the MDL principle fits very well in that it captures the trade-off between the simplicity of a model, which is measured by the number of clusters in a tree-cut, and the goodness of fit to the data, which is measured by the estimation accuracy of the probability distribution.",
305
+ "cite_spans": [
306
+ {
307
+ "start": 35,
308
+ "end": 52,
309
+ "text": "(Li and Abe, 1998",
310
+ "ref_id": "BIBREF8"
311
+ },
312
+ {
313
+ "start": 107,
314
+ "end": 123,
315
+ "text": "(Rissanen, 1978)",
316
+ "ref_id": "BIBREF14"
317
+ }
318
+ ],
319
+ "ref_spans": [],
320
+ "eq_spans": [],
321
+ "section": "The MDL Principle",
322
+ "sec_num": "2.2"
323
+ },
324
+ {
325
+ "text": "The calculation of the description length for a tree-cut model is as follows. Given a thesaurus tree T and a sample S consisting of the case frame instances, the total description length L(M, S) for a tree-cut model M = (F, 0) is where Ci (1 < i < k) is a cluster in the treecut, P(Ci) is the probability of a cluster Ci, and ~/k=l P(Ci) = 1. For example, suppose a corpus contained 10 instances of verb-object relation for the verb \"fly\", and the frequency of object noun n, denoted f(n), are as follows: f ( airpl ane ) --5, f ( helicopter ) = 3, f ( bal l ) = O, f(kite) --2, f(puzzle) = 0. Then, the set of tree-cut models for the thesaurus tree shown in ",
326
+ "cite_spans": [],
327
+ "ref_spans": [],
328
+ "eq_spans": [],
329
+ "section": "The MDL Principle",
330
+ "sec_num": "2.2"
331
+ },
332
+ {
333
+ "text": "Note that P(C) is the probability of cluster C = {nl, .., nm) as a whole. It is essentially the sum of all (true) probabilities of the member 4A leaf node is also a cluster whose cardinality is 1.",
334
+ "cite_spans": [],
335
+ "ref_spans": [],
336
+ "eq_spans": [],
337
+ "section": "0.2]).",
338
+ "sec_num": null
339
+ },
340
+ {
341
+ "text": "where L(F) is the model description length, L(OIF) is the parameter description length (explained shortly), and L(SIF , O) is the data description length. Note that L(F) + L(OIF ) essentially corresponds to the usual notion of the model description length.",
342
+ "cite_spans": [],
343
+ "ref_spans": [],
344
+ "eq_spans": [],
345
+ "section": "L(M,S)=L(F)+L(eT)+L(SJF, e) (3)",
346
+ "sec_num": null
347
+ },
348
+ {
349
+ "text": "L(M, S) is calculated as fol- lows. 5 The model description length L(F) is L(r) = log21GI (4)",
350
+ "cite_spans": [],
351
+ "ref_spans": [],
352
+ "eq_spans": [],
353
+ "section": "Each length in",
354
+ "sec_num": null
355
+ },
356
+ {
357
+ "text": "where G is the set of all cuts in T, and IG I denotes the size of G. This value is a constant for",
358
+ "cite_spans": [],
359
+ "ref_spans": [],
360
+ "eq_spans": [],
361
+ "section": "Each length in",
362
+ "sec_num": null
363
+ },
364
+ {
365
+ "text": "\u2022 SFor justification and detailed explanation of these formulas, see (Li and Abe, 1998) . all models, thus it is omitted in the calculation of the total length.",
366
+ "cite_spans": [
367
+ {
368
+ "start": 69,
369
+ "end": 87,
370
+ "text": "(Li and Abe, 1998)",
371
+ "ref_id": "BIBREF8"
372
+ }
373
+ ],
374
+ "ref_spans": [],
375
+ "eq_spans": [],
376
+ "section": "Each length in",
377
+ "sec_num": null
378
+ },
379
+ {
380
+ "text": "The parameter description length L(OIF ) indicates the complexity of the model. It is the length required to encode the probability distribution of the clusters in the tree-cut F. It is calculated as k L(Olr) =",
381
+ "cite_spans": [],
382
+ "ref_spans": [],
383
+ "eq_spans": [],
384
+ "section": "Each length in",
385
+ "sec_num": null
386
+ },
387
+ {
388
+ "text": "x Zog21Sl 5where k is the length of \u00ae, and IS[ is the size of S.",
389
+ "cite_spans": [],
390
+ "ref_spans": [],
391
+ "eq_spans": [],
392
+ "section": "Each length in",
393
+ "sec_num": null
394
+ },
395
+ {
396
+ "text": "Finally, the data description length L(SIF, O) is the length required to encode the whole sample data. It is calculated as L(SIF, e) = -log2P(n)",
397
+ "cite_spans": [],
398
+ "ref_spans": [],
399
+ "eq_spans": [],
400
+ "section": "Each length in",
401
+ "sec_num": null
402
+ },
403
+ {
404
+ "text": "nES where, for each n E C and each C E F, and",
405
+ "cite_spans": [],
406
+ "ref_spans": [],
407
+ "eq_spans": [],
408
+ "section": "Each length in",
409
+ "sec_num": null
410
+ },
411
+ {
412
+ "text": "P(n)-P(C) ICl (7) P(C)-f(c) (8) ISl",
413
+ "cite_spans": [],
414
+ "ref_spans": [],
415
+ "eq_spans": [],
416
+ "section": "Each length in",
417
+ "sec_num": null
418
+ },
419
+ {
420
+ "text": "Note here that, in (7), the probability of C is divided evenly among all n in C. This way, words that are not observed in the sample receive a non-zero probability, and the data sparseness problem is avoided.",
421
+ "cite_spans": [],
422
+ "ref_spans": [],
423
+ "eq_spans": [],
424
+ "section": "Each length in",
425
+ "sec_num": null
426
+ },
427
+ {
428
+ "text": "Then, the best model is the one which requires the minimum total description length. Figure 2 shows the MDL lengths for all five tree-cut models that can be produced for the thesaurus tree in Figure 1 . The best model is the one with the tree-cut [AIRCRAFT, ball, kite, puzzle] indicated by a thick curve in the figure.",
429
+ "cite_spans": [],
430
+ "ref_spans": [
431
+ {
432
+ "start": 85,
433
+ "end": 93,
434
+ "text": "Figure 2",
435
+ "ref_id": null
436
+ },
437
+ {
438
+ "start": 192,
439
+ "end": 200,
440
+ "text": "Figure 1",
441
+ "ref_id": "FIGREF0"
442
+ }
443
+ ],
444
+ "eq_spans": [],
445
+ "section": "Each length in",
446
+ "sec_num": null
447
+ },
448
+ {
449
+ "text": "Using the generalization technique in (Li and Abe, 1998) described in the previous section, we wish to extract systematic polysemy automatically from WordNet. Our assumption is that, if a semantic concept is systematically related to another concept, words that have one sense under one concept (sub)tree are likely to have another sense under the other concept (sub)tree. To give an example, Figure 3 shows parts of WordNet noun trees for ARTIFACT and MEASURE, where subtrees under CONTAINER and C0NTAINERFUL respectively contain \"bottle\", \"bucket\" and \"spoon\". Note a dashed line in the figure indicates an indirect link for more than one level.",
450
+ "cite_spans": [
451
+ {
452
+ "start": 38,
453
+ "end": 56,
454
+ "text": "(Li and Abe, 1998)",
455
+ "ref_id": "BIBREF8"
456
+ }
457
+ ],
458
+ "ref_spans": [
459
+ {
460
+ "start": 393,
461
+ "end": 401,
462
+ "text": "Figure 3",
463
+ "ref_id": null
464
+ }
465
+ ],
466
+ "eq_spans": [],
467
+ "section": "Generalization Technique",
468
+ "sec_num": "3.1"
469
+ },
470
+ {
471
+ "text": "Based on this assumption, it seems systematic polysemy in the two trees can be extracted straight-forwardly by clustering each tree according to polysemy as a feature, and by matching of clusters taken from each tree. 6 To this end, the notion of tree-cut and the MDL principle seem to comprise an excellent tool.",
472
+ "cite_spans": [],
473
+ "ref_spans": [],
474
+ "eq_spans": [],
475
+ "section": "Generalization Technique",
476
+ "sec_num": "3.1"
477
+ },
478
+ {
479
+ "text": "However, we must be careful in adopting Li and Abe's technique directly: since the problem which their technique was applied to is fundamentally different from ours, some procedures used in their problem may not have any interpretation in our problem. Although both problems are essentially a tree generalization problem, their problem estimates the true probability distribution from a random sample of examples (a corpus), whereas our problem does not have any additional data to estimate, since all data (a lexicon) is already known. This difference raises the following issue. In the calculation of the data description length in equation (6), each word in a cluster, observed or unobserved, is assigned an estimated probability, which is a uniform fraction of the probability of the cluster. This procedure does not have interpretation if it is applied to our problem. Instead, we use the distribution of feature frequency proportion of the clusters, and calculate the data description length by the following formula:",
480
+ "cite_spans": [],
481
+ "ref_spans": [],
482
+ "eq_spans": [],
483
+ "section": "Generalization Technique",
484
+ "sec_num": "3.1"
485
+ },
486
+ {
487
+ "text": "k L(SIF, e) = - f(Ci) \u00d7 log2P(Ci) (9) i=l",
488
+ "cite_spans": [],
489
+ "ref_spans": [],
490
+ "eq_spans": [],
491
+ "section": "Generalization Technique",
492
+ "sec_num": "3.1"
493
+ },
494
+ {
495
+ "text": "where F = [C1, .., Ck] , 0 = [P(C,),.., P(Ck) ]. This corresponds to the length required to encode all words in a cluster, for all clusters in a tree-cut, assuming Huffman's algorithm (Huffman, 1952) assigned a codeword of length -log2P(Ci) to each cluster C/ (whose propor-6We could also combine two (or possibly more) trees into one tree and apply clustering over that tree once. In this paper, we describe clustering of two trees for example purpose. ",
496
+ "cite_spans": [
497
+ {
498
+ "start": 10,
499
+ "end": 14,
500
+ "text": "[C1,",
501
+ "ref_id": null
502
+ },
503
+ {
504
+ "start": 15,
505
+ "end": 18,
506
+ "text": "..,",
507
+ "ref_id": null
508
+ },
509
+ {
510
+ "start": 19,
511
+ "end": 22,
512
+ "text": "Ck]",
513
+ "ref_id": null
514
+ },
515
+ {
516
+ "start": 40,
517
+ "end": 45,
518
+ "text": "P(Ck)",
519
+ "ref_id": null
520
+ },
521
+ {
522
+ "start": 184,
523
+ "end": 199,
524
+ "text": "(Huffman, 1952)",
525
+ "ref_id": "BIBREF5"
526
+ }
527
+ ],
528
+ "ref_spans": [],
529
+ "eq_spans": [],
530
+ "section": "Generalization Technique",
531
+ "sec_num": "3.1"
532
+ },
533
+ {
534
+ "text": "Our clustering method uses the the modified generalization technique described in the last section to generate tree-cuts. But before we apply the method, we must transform the data in Wordnet. This is because WordNet differs from a theaurus tree in two ways: it is a graph rather than a tree, and internal nodes as well as leaf nodes carry data, First, we eliminate multiple inheritance by separating shared subtrees. Second, we bring down every internal node to a leaf level by creating a new duplicate node and adding it as a child of the old node (thus making the old node an internal node).",
535
+ "cite_spans": [],
536
+ "ref_spans": [],
537
+ "eq_spans": [],
538
+ "section": "Clustering Method",
539
+ "sec_num": "3.2"
540
+ },
541
+ {
542
+ "text": "After trees are transformed, our method extracts systematic polysemy by the following three steps. In the first step, all leaf nodes of the two trees are marked with either 1 or 0 (1 if a node/word appears in both trees, or 0 otherwise),",
543
+ "cite_spans": [],
544
+ "ref_spans": [],
545
+ "eq_spans": [],
546
+ "section": "Clustering Method",
547
+ "sec_num": "3.2"
548
+ },
549
+ {
550
+ "text": "In the second step, the generalization technique is applied to each tree, and two tree-cuts are obtained. To search for the best tree-cut, instead of computing the description length for M1 possible tree-cuts in a tree, a greedy dynamic programming algorithm is used. This algorithm , called Find-MDL in (Li and Abe, 1998) , finds the best tree-cut for a tree by recursively finding the best tree-cuts for all of its subtrees and merging them from bottom up. This algorithm is quite efficient, since it is basically a depth-first search with minor overhead for computing the description length.",
551
+ "cite_spans": [
552
+ {
553
+ "start": 304,
554
+ "end": 322,
555
+ "text": "(Li and Abe, 1998)",
556
+ "ref_id": "BIBREF8"
557
+ }
558
+ ],
559
+ "ref_spans": [],
560
+ "eq_spans": [],
561
+ "section": "Clustering Method",
562
+ "sec_num": "3.2"
563
+ },
564
+ {
565
+ "text": "Finally in the third step, clusters from the two tree-cuts are matched up, and the pairs which have substantial overlap are selected as systematic polysemy. Figure 4 shows parts of the final tree-cuts for ARTIFACT and MEASURE obtained by our method. ~ In both trees, most of the clusters in the tree-cuts are from nodes at depth 1 (counting the root as depth 0). That is because the tree-cut technique used in our method is sensitive to the structure of the tree. More specifically, the MDL principle inherently penalizes a complex tree-cut by assigning a long parameter length. Therefore, unless the entropy of the feature distribution is large enough to make the data length overshadow the parameter length, simpler tree-cuts partitioned at abstract levels are preferred. This situation tends to happen often when the tree is bushy and the total feature frequency is low. This was precisely the case with ARTIFACT and MEASURE, where both Tin the figure, bold letters indicate words which are polysemous in the two tree. Figure 4 : Parts of the final tree-cuts for ARTIFACT and MEASURE trees were quite bushy, and only 4% and 14% of the words were polysemous in the two categories respectively.",
566
+ "cite_spans": [],
567
+ "ref_spans": [
568
+ {
569
+ "start": 157,
570
+ "end": 165,
571
+ "text": "Figure 4",
572
+ "ref_id": null
573
+ },
574
+ {
575
+ "start": 1022,
576
+ "end": 1030,
577
+ "text": "Figure 4",
578
+ "ref_id": null
579
+ }
580
+ ],
581
+ "eq_spans": [],
582
+ "section": "Clustering Method",
583
+ "sec_num": "3.2"
584
+ },
585
+ {
586
+ "text": "To test our method, we chose 5 combinations from WordNet noun Top categories (which we call top relation classes), and extracted cluster pairs which have more than 3 overlapping words. Then we evaluated those pairs in two aspects: related vs. unrelated relations, and automatic vs. manual clusters.",
587
+ "cite_spans": [],
588
+ "ref_spans": [],
589
+ "eq_spans": [],
590
+ "section": "Evaluation",
591
+ "sec_num": "4"
592
+ },
593
+ {
594
+ "text": "Of the cluster pairs we extracted automatically, not all are systematically related; some are unrelated, homonymous relations. They are essentially false positives for our purposes. Table 1 shows the number of related and unrelated relations in the extracted cluster pairs. Although the results vary among category combinations, the ratio of the related pairs is rather low: less than 60% on average. There are several reasons for this. First, there are some pairs whose relations are spurious. For example, in ARTIFACT-GROUP class, a pair [LUMBER, SOCIAL_GROUP] was extracted. Words which are common in the two clusters are \"picket\", \"board\" and \"stock\". This relation is obviously homonymous.",
595
+ "cite_spans": [
596
+ {
597
+ "start": 540,
598
+ "end": 562,
599
+ "text": "[LUMBER, SOCIAL_GROUP]",
600
+ "ref_id": null
601
+ }
602
+ ],
603
+ "ref_spans": [
604
+ {
605
+ "start": 182,
606
+ "end": 189,
607
+ "text": "Table 1",
608
+ "ref_id": "TABREF3"
609
+ }
610
+ ],
611
+ "eq_spans": [],
612
+ "section": "Related vs. Unrelated Clusters",
613
+ "sec_num": "4.1"
614
+ },
615
+ {
616
+ "text": "Second, some clusters obtained by tree-cut are rather abstract, so that pairing two abstract clusters results in an unrelated pair. For example, in ARTIFACT-MEASURE class, a pair [INSTRUMENTALITY, LINEAR_UNIT] was selected. Words which are common in the two clusters include \"yard\", \"foot\" and \"knot\" (see the previous Figure 4) .",
617
+ "cite_spans": [],
618
+ "ref_spans": [
619
+ {
620
+ "start": 319,
621
+ "end": 328,
622
+ "text": "Figure 4)",
623
+ "ref_id": null
624
+ }
625
+ ],
626
+ "eq_spans": [],
627
+ "section": "Related vs. Unrelated Clusters",
628
+ "sec_num": "4.1"
629
+ },
630
+ {
631
+ "text": "Here, the concept INSTRUMENTALITY is very general (at depth 1), and it also contains many (polysemous) words.",
632
+ "cite_spans": [],
633
+ "ref_spans": [],
634
+ "eq_spans": [],
635
+ "section": "Related vs. Unrelated Clusters",
636
+ "sec_num": "4.1"
637
+ },
638
+ {
639
+ "text": "So, matching this cluster with another abstract cluster is likely to yield a pair which has just enough overlapping words but whose relation is not systematic. In the case of [INSTRUMENTALITY, LINEAR_UNIT] , the situation is even worse, because the concept of LINEAR_UNIT in MEASURE represents a collection of terms that were chosen arbitrarily in the his- ",
640
+ "cite_spans": [
641
+ {
642
+ "start": 175,
643
+ "end": 205,
644
+ "text": "[INSTRUMENTALITY, LINEAR_UNIT]",
645
+ "ref_id": null
646
+ }
647
+ ],
648
+ "ref_spans": [],
649
+ "eq_spans": [],
650
+ "section": "Related vs. Unrelated Clusters",
651
+ "sec_num": "4.1"
652
+ },
653
+ {
654
+ "text": "To compare the cluster pairs our method extracted automatically to manually extracted clusters, we use WordNet cousins. A cousin relation is relatively new in WordNet, and the coverage is still incomplete. However, it gives us a good measure to see whether our automatic method discovered systematic relations that correspond to human intuitions. A cousin relation in WordNet is defined between two synsets (currently in the noun trees only), and it indicates that senses of a word that appear in both of the (sub)trees rooted by those synsets are related, s The cousins were manuMly extracted by the WordNet lexicographers. Table 2 shows the number of cousins listed for each top relation class and the number of cousins our automatic method recovered (in the 'Auto' column). As you see, the total recall ratio is over 80% (27/33~ .82).",
655
+ "cite_spans": [],
656
+ "ref_spans": [],
657
+ "eq_spans": [],
658
+ "section": "Automatic vs. Manual Clusters",
659
+ "sec_num": "4.2"
660
+ },
661
+ {
662
+ "text": "In the right three columns of Table 2 , we also show the breakdown of the recovered cousins, whether each recovered one was an exact match, or it was more general or specific than the corresponding WordNet cousin. From this, we can see that more than half of the recovered cousins were more general than the WordNet cousins. That is partly because some WordNet cousins have only one or two common words.",
663
+ "cite_spans": [],
664
+ "ref_spans": [
665
+ {
666
+ "start": 30,
667
+ "end": 37,
668
+ "text": "Table 2",
669
+ "ref_id": "TABREF4"
670
+ }
671
+ ],
672
+ "eq_spans": [],
673
+ "section": "Automatic vs. Manual Clusters",
674
+ "sec_num": "4.2"
675
+ },
676
+ {
677
+ "text": "has only one common word \"watercolor\". Such SActually, cousin is one of the three relations which indicate the grouping of related senses of a word. Others are sister and twin. In this paper, we use cousin to refer to all relations listed in \"cousin.tps\" file (available in a WordNet distribution).",
678
+ "cite_spans": [],
679
+ "ref_spans": [],
680
+ "eq_spans": [],
681
+ "section": "For example, a WordNet cousin [PAINTING, COLORING_MATERIAL] in ARTIFACT-SUBSTANCE",
682
+ "sec_num": null
683
+ },
684
+ {
685
+ "text": "a minor relation tends to be lost in our tree generalization procedure. However, the main reason is the difficulty mentioned earlier in the paper: the problem of applying the tree-cut technique to a bushy tree when the data is sparse.",
686
+ "cite_spans": [],
687
+ "ref_spans": [],
688
+ "eq_spans": [],
689
+ "section": "26",
690
+ "sec_num": null
691
+ },
692
+ {
693
+ "text": "In addition to the WordNet cousins, our automatic extraction method discovered several interesting relations. Table 3 shows some examples,",
694
+ "cite_spans": [],
695
+ "ref_spans": [
696
+ {
697
+ "start": 110,
698
+ "end": 117,
699
+ "text": "Table 3",
700
+ "ref_id": "TABREF5"
701
+ }
702
+ ],
703
+ "eq_spans": [],
704
+ "section": "26",
705
+ "sec_num": null
706
+ },
707
+ {
708
+ "text": "Conclusions and Future Work",
709
+ "cite_spans": [],
710
+ "ref_spans": [],
711
+ "eq_spans": [],
712
+ "section": "5",
713
+ "sec_num": null
714
+ },
715
+ {
716
+ "text": "In this paper, we proposed an automatic method for extracting systematic polysemy from WordNet. As we reported, preliminary results show that our method identified almost all WordNet cousins as well as some new ones. One difficulty is that applying the generalization technique using the MDL principle to the bushy WordNet trees seems to yield a tree-cut at rather abstract level. For future work, we plan to compare the systematic relations extracted by our automatic method to corpus data. In particular, we like to test whether our method extracts the same groups of senses which human annotators disagreed (Ng et al., 1999) . We also like to test whether our method agrees with the finding that multiple senses which occur in a discourse are often systematically polysemous (Krovetz, 1998) . Gen Spec 2 1 0 1 0 6 6 1 5 0 1 1 0 1 0 15 13 3 9 1 9 6 5 1 0 27 9 17 1 \"drop\", \"circle\", \"intersection\", \"dig\", \"crossing\", \"bull's eye\" ARTIFACT-GROUP [STRUCTURE, PEOPLE] \"house\", \"convent\", \"market\", \"center\" ARTIFACT-SUBSTANCE [FABRIC, CHEMICAL_COMPOUND] \"acetate\", \"nylon\", \"acrylic\", \"polyester\"",
717
+ "cite_spans": [
718
+ {
719
+ "start": 610,
720
+ "end": 627,
721
+ "text": "(Ng et al., 1999)",
722
+ "ref_id": "BIBREF10"
723
+ },
724
+ {
725
+ "start": 778,
726
+ "end": 793,
727
+ "text": "(Krovetz, 1998)",
728
+ "ref_id": "BIBREF7"
729
+ }
730
+ ],
731
+ "ref_spans": [
732
+ {
733
+ "start": 796,
734
+ "end": 885,
735
+ "text": "Gen Spec 2 1 0 1 0 6 6 1 5 0 1 1 0 1 0 15 13 3 9 1 9 6 5 1 0 27",
736
+ "ref_id": "TABREF3"
737
+ }
738
+ ],
739
+ "eq_spans": [],
740
+ "section": "5",
741
+ "sec_num": null
742
+ }
743
+ ],
744
+ "back_matter": [],
745
+ "bib_entries": {
746
+ "BIBREF0": {
747
+ "ref_id": "b0",
748
+ "title": "Regular Polysemy. Linguistics",
749
+ "authors": [
750
+ {
751
+ "first": "J",
752
+ "middle": [],
753
+ "last": "Apresjan",
754
+ "suffix": ""
755
+ }
756
+ ],
757
+ "year": 1973,
758
+ "venue": "",
759
+ "volume": "",
760
+ "issue": "",
761
+ "pages": "",
762
+ "other_ids": {},
763
+ "num": null,
764
+ "urls": [],
765
+ "raw_text": "Apresjan, J. (1973). Regular Polysemy. Lin- guistics, (142).",
766
+ "links": null
767
+ },
768
+ "BIBREF1": {
769
+ "ref_id": "b1",
770
+ "title": "A Lexicon for Underspecified Semantic Tagging",
771
+ "authors": [
772
+ {
773
+ "first": "P",
774
+ "middle": [],
775
+ "last": "Buitelaar",
776
+ "suffix": ""
777
+ }
778
+ ],
779
+ "year": 1997,
780
+ "venue": "Proceedings off the A CL SIGLEX Workshop on Tagging Text",
781
+ "volume": "",
782
+ "issue": "",
783
+ "pages": "25--33",
784
+ "other_ids": {},
785
+ "num": null,
786
+ "urls": [],
787
+ "raw_text": "Buitelaar, P. (1997). A Lexicon for Underspec- ified Semantic Tagging. In Proceedings off the A CL SIGLEX Workshop on Tagging Text \"soprano\", \"alto\", \"tenor\", \"baritone\" \"John\", \"Matthew\", \"Jonah\", \"Joshua\", \"Jeremiah\" with Lexical Semantics, Washington, D.C., pp. 25-33.",
788
+ "links": null
789
+ },
790
+ "BIBREF2": {
791
+ "ref_id": "b2",
792
+ "title": "CORELEX: Systematic Polysemy and Underspecification",
793
+ "authors": [
794
+ {
795
+ "first": "P",
796
+ "middle": [],
797
+ "last": "Buitelaar",
798
+ "suffix": ""
799
+ }
800
+ ],
801
+ "year": 1998,
802
+ "venue": "",
803
+ "volume": "",
804
+ "issue": "",
805
+ "pages": "",
806
+ "other_ids": {},
807
+ "num": null,
808
+ "urls": [],
809
+ "raw_text": "Buitelaar, P. (1998). CORELEX: Systematic Polysemy and Underspecification. Ph.D. dis- sertation, Department of Computer Science, Brandeis University.",
810
+ "links": null
811
+ },
812
+ "BIBREF3": {
813
+ "ref_id": "b3",
814
+ "title": "Semiproductive Polysemy and Sense Extension",
815
+ "authors": [
816
+ {
817
+ "first": "A",
818
+ "middle": [],
819
+ "last": "Copestake",
820
+ "suffix": ""
821
+ },
822
+ {
823
+ "first": "T",
824
+ "middle": [],
825
+ "last": "Briscoe",
826
+ "suffix": ""
827
+ }
828
+ ],
829
+ "year": 1995,
830
+ "venue": "Journal of Semantics",
831
+ "volume": "",
832
+ "issue": "",
833
+ "pages": "",
834
+ "other_ids": {},
835
+ "num": null,
836
+ "urls": [],
837
+ "raw_text": "Copestake, A. and Briscoe, T. (1995). Semi- productive Polysemy and Sense Extension. Journal of Semantics, 12.",
838
+ "links": null
839
+ },
840
+ "BIBREF4": {
841
+ "ref_id": "b4",
842
+ "title": "Lexical Semantics",
843
+ "authors": [
844
+ {
845
+ "first": "D",
846
+ "middle": [],
847
+ "last": "Cruse",
848
+ "suffix": ""
849
+ }
850
+ ],
851
+ "year": 1986,
852
+ "venue": "",
853
+ "volume": "",
854
+ "issue": "",
855
+ "pages": "",
856
+ "other_ids": {},
857
+ "num": null,
858
+ "urls": [],
859
+ "raw_text": "Cruse, D. (1986). Lexical Semantics, Cam- bridge University Press.",
860
+ "links": null
861
+ },
862
+ "BIBREF5": {
863
+ "ref_id": "b5",
864
+ "title": "A Model for the Construction of Minimum Redundancy Codes",
865
+ "authors": [
866
+ {
867
+ "first": "D",
868
+ "middle": [
869
+ "A"
870
+ ],
871
+ "last": "Huffman",
872
+ "suffix": ""
873
+ }
874
+ ],
875
+ "year": 1952,
876
+ "venue": "Proceedings of the IRE",
877
+ "volume": "",
878
+ "issue": "",
879
+ "pages": "",
880
+ "other_ids": {},
881
+ "num": null,
882
+ "urls": [],
883
+ "raw_text": "Huffman, D. A. (1952). A Model for the Con- struction of Minimum Redundancy Codes. In Proceedings of the IRE, 40.",
884
+ "links": null
885
+ },
886
+ "BIBREF6": {
887
+ "ref_id": "b6",
888
+ "title": "SENSEVAL: An Exercise in Evaluating Word Sense Disambiguation Programs",
889
+ "authors": [
890
+ {
891
+ "first": "A",
892
+ "middle": [],
893
+ "last": "Kilgarriff",
894
+ "suffix": ""
895
+ }
896
+ ],
897
+ "year": 1998,
898
+ "venue": "Advanced Papers of the SENSE-VAL Workshop",
899
+ "volume": "",
900
+ "issue": "",
901
+ "pages": "",
902
+ "other_ids": {},
903
+ "num": null,
904
+ "urls": [],
905
+ "raw_text": "Kilgarriff, A. (1998a)~ SENSEVAL: An Exer- cise in Evaluating Word Sense Disambigua- tion Programs. In Proceedings of the LREC Kilgarriff, A. (1998b). Inter-tagger Agree- ment. In Advanced Papers of the SENSE- VAL Workshop, Sussex, UK.",
906
+ "links": null
907
+ },
908
+ "BIBREF7": {
909
+ "ref_id": "b7",
910
+ "title": "More than One Sense Per Discourse",
911
+ "authors": [
912
+ {
913
+ "first": "R",
914
+ "middle": [],
915
+ "last": "Krovetz",
916
+ "suffix": ""
917
+ }
918
+ ],
919
+ "year": 1998,
920
+ "venue": "Advanced Papers of the SEN-SEVAL Workshop",
921
+ "volume": "",
922
+ "issue": "",
923
+ "pages": "",
924
+ "other_ids": {},
925
+ "num": null,
926
+ "urls": [],
927
+ "raw_text": "Krovetz, R. (1998). More than One Sense Per Discourse. In Advanced Papers of the SEN- SEVAL Workshop, Sussex, UK.",
928
+ "links": null
929
+ },
930
+ "BIBREF8": {
931
+ "ref_id": "b8",
932
+ "title": "Generalizing Case Frames Using a Thesaurus and the MDL Principle",
933
+ "authors": [
934
+ {
935
+ "first": "H",
936
+ "middle": [],
937
+ "last": "Li",
938
+ "suffix": ""
939
+ },
940
+ {
941
+ "first": "N",
942
+ "middle": [],
943
+ "last": "Abe",
944
+ "suffix": ""
945
+ }
946
+ ],
947
+ "year": 1998,
948
+ "venue": "Computational Linguistics",
949
+ "volume": "24",
950
+ "issue": "2",
951
+ "pages": "217--244",
952
+ "other_ids": {},
953
+ "num": null,
954
+ "urls": [],
955
+ "raw_text": "Li, H. and Abe, N. (1998). Generalizing Case Frames Using a Thesaurus and the MDL Principle, Computational Linguistics, 24(2), pp. 217-244",
956
+ "links": null
957
+ },
958
+ "BIBREF9": {
959
+ "ref_id": "b9",
960
+ "title": "WORDNET: An Online Lexical Database",
961
+ "authors": [],
962
+ "year": 1990,
963
+ "venue": "International Journal of Lexicography",
964
+ "volume": "3",
965
+ "issue": "4",
966
+ "pages": "",
967
+ "other_ids": {},
968
+ "num": null,
969
+ "urls": [],
970
+ "raw_text": "Miller, G. (eds.) (1990). WORDNET: An On- line Lexical Database. International Journal of Lexicography, 3 (4).",
971
+ "links": null
972
+ },
973
+ "BIBREF10": {
974
+ "ref_id": "b10",
975
+ "title": "A Case Study on Inter-Annotator Agreement for Word Sense Disambiguationl",
976
+ "authors": [
977
+ {
978
+ "first": "H",
979
+ "middle": [
980
+ "T"
981
+ ],
982
+ "last": "Ng",
983
+ "suffix": ""
984
+ },
985
+ {
986
+ "first": "C",
987
+ "middle": [],
988
+ "last": "Lim",
989
+ "suffix": ""
990
+ },
991
+ {
992
+ "first": "S",
993
+ "middle": [],
994
+ "last": "Foo",
995
+ "suffix": ""
996
+ }
997
+ ],
998
+ "year": 1999,
999
+ "venue": "Proceedings of the A CL SIGLEX Workshop on Standardizing Lexical Resources",
1000
+ "volume": "",
1001
+ "issue": "",
1002
+ "pages": "",
1003
+ "other_ids": {},
1004
+ "num": null,
1005
+ "urls": [],
1006
+ "raw_text": "Ng, H.T., Lim, C. and Foo, S. (1999). A Case Study on Inter-Annotator Agreement for Word Sense Disambiguationl In Proceed- ings of the A CL SIGLEX Workshop on Stan- dardizing Lexical Resources, College Park, MD.",
1007
+ "links": null
1008
+ },
1009
+ "BIBREF11": {
1010
+ "ref_id": "b11",
1011
+ "title": "Transfers of Meaning",
1012
+ "authors": [
1013
+ {
1014
+ "first": "G",
1015
+ "middle": [],
1016
+ "last": "Nunberg",
1017
+ "suffix": ""
1018
+ }
1019
+ ],
1020
+ "year": 1995,
1021
+ "venue": "Journal of Semantics",
1022
+ "volume": "",
1023
+ "issue": "",
1024
+ "pages": "",
1025
+ "other_ids": {},
1026
+ "num": null,
1027
+ "urls": [],
1028
+ "raw_text": "Nunberg, G. (1995). Transfers of Meaning. Journal of Semantics, 12.",
1029
+ "links": null
1030
+ },
1031
+ "BIBREF12": {
1032
+ "ref_id": "b12",
1033
+ "title": "Longman dictionary of Contemporary English",
1034
+ "authors": [
1035
+ {
1036
+ "first": "P",
1037
+ "middle": [],
1038
+ "last": "Procter",
1039
+ "suffix": ""
1040
+ }
1041
+ ],
1042
+ "year": 1978,
1043
+ "venue": "",
1044
+ "volume": "",
1045
+ "issue": "",
1046
+ "pages": "",
1047
+ "other_ids": {},
1048
+ "num": null,
1049
+ "urls": [],
1050
+ "raw_text": "Procter, P. (1978). Longman dictionary of Contemporary English, Longman Group.",
1051
+ "links": null
1052
+ },
1053
+ "BIBREF13": {
1054
+ "ref_id": "b13",
1055
+ "title": "The Generative Lexicon",
1056
+ "authors": [
1057
+ {
1058
+ "first": "J",
1059
+ "middle": [],
1060
+ "last": "Pustejovsky",
1061
+ "suffix": ""
1062
+ }
1063
+ ],
1064
+ "year": 1995,
1065
+ "venue": "",
1066
+ "volume": "",
1067
+ "issue": "",
1068
+ "pages": "",
1069
+ "other_ids": {},
1070
+ "num": null,
1071
+ "urls": [],
1072
+ "raw_text": "Pustejovsky, J. (1995). The Generative Lexi- con, The MIT Press.",
1073
+ "links": null
1074
+ },
1075
+ "BIBREF14": {
1076
+ "ref_id": "b14",
1077
+ "title": "Modeling by Shortest Data Description. Automatic",
1078
+ "authors": [
1079
+ {
1080
+ "first": "J",
1081
+ "middle": [],
1082
+ "last": "Rissanen",
1083
+ "suffix": ""
1084
+ }
1085
+ ],
1086
+ "year": 1978,
1087
+ "venue": "",
1088
+ "volume": "",
1089
+ "issue": "",
1090
+ "pages": "",
1091
+ "other_ids": {},
1092
+ "num": null,
1093
+ "urls": [],
1094
+ "raw_text": "Rissanen, J. (1978). Modeling by Shortest Data Description. Automatic, 14.",
1095
+ "links": null
1096
+ },
1097
+ "BIBREF15": {
1098
+ "ref_id": "b15",
1099
+ "title": "A Study of Polysemy Judgements and Inter-annotator Agreement",
1100
+ "authors": [
1101
+ {
1102
+ "first": "J",
1103
+ "middle": [],
1104
+ "last": "Veronis",
1105
+ "suffix": ""
1106
+ }
1107
+ ],
1108
+ "year": 1998,
1109
+ "venue": "Advanced Papers of the SENSEVAL Workshop",
1110
+ "volume": "",
1111
+ "issue": "",
1112
+ "pages": "",
1113
+ "other_ids": {},
1114
+ "num": null,
1115
+ "urls": [],
1116
+ "raw_text": "Veronis, J. (1998). A Study of Polysemy Judge- ments and Inter-annotator Agreement. In Advanced Papers of the SENSEVAL Work- shop, Sussex, UK.",
1117
+ "links": null
1118
+ },
1119
+ "BIBREF16": {
1120
+ "ref_id": "b16",
1121
+ "title": "Towards a Universal Index of Meaning",
1122
+ "authors": [
1123
+ {
1124
+ "first": "P",
1125
+ "middle": [],
1126
+ "last": "Vossen",
1127
+ "suffix": ""
1128
+ },
1129
+ {
1130
+ "first": "W",
1131
+ "middle": [],
1132
+ "last": "Peters",
1133
+ "suffix": ""
1134
+ },
1135
+ {
1136
+ "first": "J",
1137
+ "middle": [],
1138
+ "last": "Gonzalo",
1139
+ "suffix": ""
1140
+ }
1141
+ ],
1142
+ "year": 1999,
1143
+ "venue": "Proceedings of the A CL SIGLEX Workshop on Standardizing Lexical Resources",
1144
+ "volume": "",
1145
+ "issue": "",
1146
+ "pages": "",
1147
+ "other_ids": {},
1148
+ "num": null,
1149
+ "urls": [],
1150
+ "raw_text": "Vossen, P., Peters, W. and Gonzalo, J. (1999). Towards a Universal Index of Meaning. In Proceedings of the A CL SIGLEX Workshop on Standardizing Lexical Resources, College Park, MD.",
1151
+ "links": null
1152
+ }
1153
+ },
1154
+ "ref_entries": {
1155
+ "FIGREF0": {
1156
+ "num": null,
1157
+ "text": "An example thesaurus tree",
1158
+ "type_str": "figure",
1159
+ "uris": null
1160
+ },
1161
+ "FIGREF1": {
1162
+ "num": null,
1163
+ "text": "Figure 1includes ([airplane, helicopter, TOY], [0.5, 0.3, 0.2]) and ([AIRCRAFT, TOY], [0.8,",
1164
+ "type_str": "figure",
1165
+ "uris": null
1166
+ },
1167
+ "FIGREF2": {
1168
+ "num": null,
1169
+ "text": "The MDL lengths and the final tree-Parts of WordNet trees ARTIFACT and MEASURE tion is P(Ci) = .~_~_d~ Isl J\"All other notions and formulas are applicable to our problem without modification.",
1170
+ "type_str": "figure",
1171
+ "uris": null
1172
+ },
1173
+ "TABREF3": {
1174
+ "num": null,
1175
+ "content": "<table><tr><td>Top relation class</td><td colspan=\"2\">Related Unrelated</td><td>Total</td><td>% of</td></tr><tr><td/><td/><td/><td/><td>related</td></tr><tr><td>ACTION-L0CATION</td><td>10</td><td>1</td><td>11</td><td>90.9</td></tr><tr><td>ARTIFACT-GROUP</td><td>18</td><td>9</td><td>27</td><td>66.7</td></tr><tr><td>ARTIFACT-MEASURE</td><td>7</td><td>19</td><td>26</td><td>26.9</td></tr><tr><td>ARTIFACT-SUBSTANCE</td><td>19</td><td>12</td><td>31</td><td>61.3</td></tr><tr><td>COMMUNICATION-PERSON</td><td>12</td><td>11</td><td>23</td><td>52.2</td></tr><tr><td>Total</td><td>66</td><td>52</td><td>118</td><td>55.9</td></tr><tr><td>tory of the English language.</td><td/><td/><td/><td/></tr></table>",
1176
+ "text": "Related vs. Unrelated Relations",
1177
+ "html": null,
1178
+ "type_str": "table"
1179
+ },
1180
+ "TABREF4": {
1181
+ "num": null,
1182
+ "content": "<table><tr><td>Top relation class</td><td>WN cousin</td><td>Auto</td><td>Exact</td></tr><tr><td>ACTION-LOCATION</td><td/><td/><td/></tr><tr><td>ARTIFACT-GROUP</td><td/><td/><td/></tr><tr><td>ARTIFACT-MEASURE</td><td/><td/><td/></tr><tr><td>ARTIFACT-SUBSTANCE</td><td/><td/><td/></tr><tr><td>COMMUNICATION-PERSON</td><td/><td/><td/></tr><tr><td>Total</td><td>33</td><td/><td/></tr></table>",
1183
+ "text": "Automatic Clusters vs. WordNet Cousins",
1184
+ "html": null,
1185
+ "type_str": "table"
1186
+ },
1187
+ "TABREF5": {
1188
+ "num": null,
1189
+ "content": "<table><tr><td>Top relation class</td><td>Relation</td><td>Common Words</td></tr><tr><td>ACTION-LOCATION</td><td>[ACTION, POINT]</td><td/></tr><tr><td/><td>[VOICE, SINGER]</td><td/></tr></table>",
1190
+ "text": "Examples of Automatically Extracted Systematic Polysemy",
1191
+ "html": null,
1192
+ "type_str": "table"
1193
+ }
1194
+ }
1195
+ }
1196
+ }
Full_text_JSON/prefixW/json/W00/W00-0105.json ADDED
@@ -0,0 +1,684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0105",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:14.437084Z"
6
+ },
7
+ "title": "Dependency of context-based Word Sense Disambiguation from representation and domain complexity",
8
+ "authors": [
9
+ {
10
+ "first": "Paola",
11
+ "middle": [],
12
+ "last": "Velardi",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "Informazione University \"La Sapienza\" Roma",
17
+ "location": {}
18
+ },
19
+ "email": "velardi@dsi.uniromal.it"
20
+ },
21
+ {
22
+ "first": "Alessandro",
23
+ "middle": [],
24
+ "last": "Cucchiarelli",
25
+ "suffix": "",
26
+ "affiliation": {
27
+ "laboratory": "",
28
+ "institution": "University of Ancona",
29
+ "location": {}
30
+ },
31
+ "email": ""
32
+ }
33
+ ],
34
+ "year": "",
35
+ "venue": null,
36
+ "identifiers": {},
37
+ "abstract": "Word Sense Disambiguation (WSD) is a central task in the area of Natural Language Processing. In the past few years several context-based probabilistic and machine learning methods for WSD have been presented in literature. However, an important area of research that has not been given the attention it deserves is a formal analysis of the parameters affecting the performance of the learning task faced by these systems. Usually performance is estimated by measuring precision and recall of a specific algorithm for specific test sets and environmental conditions. Therefore, a comparison among different learning systems and an objective estimation of the difficulty of the learning task is extremely difficult. In this paper we propose, in the framework of Computational Learning theory, a formal analysis of the relations between accuracy of a context-based WSD system, the complexity of the context representation scheme, and the environmental conditions (e.g. the complexity of language domain and concept inventory) .",
38
+ "pdf_parse": {
39
+ "paper_id": "W00-0105",
40
+ "_pdf_hash": "",
41
+ "abstract": [
42
+ {
43
+ "text": "Word Sense Disambiguation (WSD) is a central task in the area of Natural Language Processing. In the past few years several context-based probabilistic and machine learning methods for WSD have been presented in literature. However, an important area of research that has not been given the attention it deserves is a formal analysis of the parameters affecting the performance of the learning task faced by these systems. Usually performance is estimated by measuring precision and recall of a specific algorithm for specific test sets and environmental conditions. Therefore, a comparison among different learning systems and an objective estimation of the difficulty of the learning task is extremely difficult. In this paper we propose, in the framework of Computational Learning theory, a formal analysis of the relations between accuracy of a context-based WSD system, the complexity of the context representation scheme, and the environmental conditions (e.g. the complexity of language domain and concept inventory) .",
44
+ "cite_spans": [],
45
+ "ref_spans": [],
46
+ "eq_spans": [],
47
+ "section": "Abstract",
48
+ "sec_num": null
49
+ }
50
+ ],
51
+ "body_text": [
52
+ {
53
+ "text": "In the literature (see Computational Linguistics (1998) for some recent results), there is a rather vast repertoire of supervised and unsupervised learning algorithms for WSD, most of which are based on a formal characterization of the surrounding context of a word or linguistic concept 1, and a function f to compute the membership of a word to a category, given its context in running texts. Despite the rich literature, none of these algorithms exhibit an \"acceptable\" performance with reference to the needs of real-world computational task (e.g. Information Retrieval, Information Extraction, Machine Translation etc.), except for particularly straightforward cases. A very interesting WSD experiment is Senseval (1998) , a large:-scale exercise in evaluating WSD programs. One of the objectives of this experiment was to identify correlations between performance of the various systems and the parameters of the WSD task. Though the scoring of systems appears sensitive to certain factors, such as the degree of polysemy and the entropy of sense distributions, these correlations could not be consistently observed. There are words with fewer senses (e.g. bet, consume, generous) causing troubles to most systems, while there are words with a very high polysemy and entropy (e.g. shake) on which all systems obtain good performance. The justification that the Senseval coordinator Adam Kilgariff provides for shake is very interesting in the light of what we will discuss later in this paper: \"The items (means contexts) for shake involve multi-word expressions, such as shake one's head. (...) Over 50% of the items for shake involve some multi-word expression or other.\" In other words, the contexts for shake are very 1 The inventory of linguistic concepts is usually extracted from on-line resources like WordNet, the Longman dictionary (LDOCE), or HECTOR. repetitive in the training set, therefore all systems could easily learn a sense discrimination model. Furthermore, in Senseval (but also in other reported evaluations experiments) it appears that performances for individual words/concepts are extremely uneven within the same system. This scarce homogeneity of results suggests that performance is not solely related with the \"cleverness\" of a given learning algorithm. Clearly, the performances of WSD systems are related to a variety of parameters, but the formal nature of these dependencies is not fully understood.",
54
+ "cite_spans": [
55
+ {
56
+ "start": 37,
57
+ "end": 55,
58
+ "text": "Linguistics (1998)",
59
+ "ref_id": null
60
+ },
61
+ {
62
+ "start": 710,
63
+ "end": 725,
64
+ "text": "Senseval (1998)",
65
+ "ref_id": null
66
+ }
67
+ ],
68
+ "ref_spans": [],
69
+ "eq_spans": [],
70
+ "section": "Introduction",
71
+ "sec_num": "1"
72
+ },
73
+ {
74
+ "text": "The Senseval experiment highlighted the necessity of a more accurate analysis of the correlations between performance of WSD systems and the parameters that may affect this task. In absence, a comparison of the various WSD algorithms and an estimation of their performance under different environmental conditions is extremely difficult. In the next sections we briefly present a computational model of learning, called PAC theory (Anthony and Biggs (1997) , Kearns and Vazirani (1994) , Valiant (1984) ), and we then show that this theory may be used to determine the formal relations between performance of context-based WSD models and environmental conditions, such as the complexity of the context representation scheme, and the the complexity of language domain and concept inventory.",
75
+ "cite_spans": [
76
+ {
77
+ "start": 431,
78
+ "end": 456,
79
+ "text": "(Anthony and Biggs (1997)",
80
+ "ref_id": "BIBREF0"
81
+ },
82
+ {
83
+ "start": 459,
84
+ "end": 485,
85
+ "text": "Kearns and Vazirani (1994)",
86
+ "ref_id": "BIBREF5"
87
+ },
88
+ {
89
+ "start": 488,
90
+ "end": 502,
91
+ "text": "Valiant (1984)",
92
+ "ref_id": "BIBREF8"
93
+ }
94
+ ],
95
+ "ref_spans": [],
96
+ "eq_spans": [],
97
+ "section": "Introduction",
98
+ "sec_num": "1"
99
+ },
100
+ {
101
+ "text": "Formally, the problem of example-based learning of WSD models can be stated as follows: 1",
102
+ "cite_spans": [],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "A relation between sample size and complexity of learning task",
106
+ "sec_num": "2"
107
+ },
108
+ {
109
+ "text": "Given a class C of concepts Cl (where C is either a hierarchy or a \"flat\" concept inventory), 2 Given a context-based representadon class H for a concept class C, where H: ~*--~C and ~ is a finite alphabet of symbols (e.g. words or word tags), 3",
110
+ "cite_spans": [],
111
+ "ref_spans": [],
112
+ "eq_spans": [],
113
+ "section": "A relation between sample size and complexity of learning task",
114
+ "sec_num": "2"
115
+ },
116
+ {
117
+ "text": "Given an input space X~* of encodings of instances in the learner's world, e.g. feature vectors representing contexts around words wj, where wj is a member of Ct, Given a training sample S of length m: S= ((xl,bl) ...(xm,bm)) xi eX, ~ e{O,l} where bl=l if xi is a positive example of Cl, characterize formally a function h (C~)e H that assigns a word w to a concept Cl, given the sentence context x of w. The hypothesis may have the form of a Hidden Markov Model with estimated transition probabilities, a decision list, a cluster of points in a representation space, a logic formula, etc. The complexity of this learning task is related to several aspects, such as selecting an appropriate representation space H, an appropriate grain for the concept inventory C, and finally, a sufficiently representative training sample S. As first, H must be a \"reasonable\" representation space for C. Quite intuitively, if we represent a linguistic concept as the set of possible morphologic tags pairs in a \u00b11 window, we will not be able to predict much, simply because surrounding morphologic tags are not sufficient to determine the semantic category of a word. On the opposite, if we select an overly complex representation model, including irrelevant features, we run through the so called overfitdng problem.",
118
+ "cite_spans": [
119
+ {
120
+ "start": 205,
121
+ "end": 213,
122
+ "text": "((xl,bl)",
123
+ "ref_id": null
124
+ }
125
+ ],
126
+ "ref_spans": [],
127
+ "eq_spans": [],
128
+ "section": "A relation between sample size and complexity of learning task",
129
+ "sec_num": "2"
130
+ },
131
+ {
132
+ "text": "Thirdly, some of the features used in a representation may be dependent from other features, and again the model would result unnecessarily complex. The problem of noise and overfitting are well known in the area of Machine Learning (Russell and Norvig (1999)), therefore we will not discuss the matter in detail here. An analysis of this issue as applied to probabilistic WSD learners may be found in Bruce and Wiebe (1999) . For the purpose of this paper, we assume that the representation space H is optimized with respect to the choice of the relevant model parameters. Our objective will be to determine the size of S, given H and C, and given certain performance objectives. As we said, the aim of a WSD learning process, when instructed with a sequence S of examples in X, is to produce an hypothesis h which, in some sense, \"corresponds\" to the concept under consideration. Because S is a finite sequence, only concepts with a finite number of positive examples can be learned with total success, i.e. the learner can output an hypothesis h= C~ . In general, and this is the case for linguistic concepts, we can only hope that h is a good approximation of Ci.. In our problem at hand, it is worth noticing that even humans may provide only approximate definitions of linguistic concepts ! The theory of Probably Approximately Correct (PAC) learning, a relatively recent field at the borderline between Artificial Intelligence and Information Theory, states the conditions under which h reaches this objective, i.e. the conditions under which a computer derived hypothesis h 'probably' represents Ct 'approximately'.",
133
+ "cite_spans": [
134
+ {
135
+ "start": 402,
136
+ "end": 424,
137
+ "text": "Bruce and Wiebe (1999)",
138
+ "ref_id": "BIBREF1"
139
+ }
140
+ ],
141
+ "ref_spans": [],
142
+ "eq_spans": [],
143
+ "section": "A relation between sample size and complexity of learning task",
144
+ "sec_num": "2"
145
+ },
146
+ {
147
+ "text": "Definition 1 (PAC learning). Let C be a concept class over X. Let D be a fixed probability distribution over the instance space X, and EX(Ci,D) be a procedure reflecting the probability distribution of the population we whish to learn about. We say that C is PAC learnable if there exists an algorithm L with the following property: For every Ci~C, for every distribution D on X, and for all 0<e<l/2 and 0<8<1/2, if L is given access to EX(C~,D) and inputs e and 8, then with probability at least (1-8), L outputs a hypothesis h for concept Cl, satisfying error(h)<e. The parameters e and 5 have the following meaning: e is the probability that the learner produces a generalization of the sample that does not coincide with the target concept, while 5 is the probability, given D, that a particularly unrepresentative (or noisy) training sample is drawn. The objective of PAC theory is to predict the performance of learning systems by deriving a lower bound for m, as a function of the performance parameters e and 6. And we want this to be:",
148
+ "cite_spans": [],
149
+ "ref_spans": [],
150
+ "eq_spans": [],
151
+ "section": "A relation between sample size and complexity of learning task",
152
+ "sec_num": "2"
153
+ },
154
+ {
155
+ "text": "IHl(1-e)m_<6 we hence obtain a lower bound for the number of examples we need to submit to the learner in order to obtain the required accuracy:",
156
+ "cite_spans": [],
157
+ "ref_spans": [],
158
+ "eq_spans": [],
159
+ "section": "A relation between sample size and complexity of learning task",
160
+ "sec_num": "2"
161
+ },
162
+ {
163
+ "text": "(1) m_>~(ln~ +1 ~ I~ --10 s) is the size of the vocabulary. We then have:",
164
+ "cite_spans": [
165
+ {
166
+ "start": 21,
167
+ "end": 28,
168
+ "text": "--10 s)",
169
+ "ref_id": null
170
+ }
171
+ ],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "A relation between sample size and complexity of learning task",
175
+ "sec_num": "2"
176
+ },
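The algebraic step from the inequality |H|(1-ε)^m ≤ δ to bound (1) is compressed in the source; the following is a sketch of the standard derivation, not a verbatim reconstruction, using only the fact that -ln(1-ε) ≥ ε.

\[
|H|(1-\varepsilon)^m \le \delta
\iff m \ge \frac{\ln|H| + \ln(1/\delta)}{-\ln(1-\varepsilon)} .
\]
Since $-\ln(1-\varepsilon) \ge \varepsilon$ for $0 < \varepsilon < 1$, the condition is satisfied whenever
\[
m \ge \frac{1}{\varepsilon}\Bigl(\ln|H| + \ln\frac{1}{\delta}\Bigr),
\]
which is inequality (1).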
177
+ {
178
+ "text": "the above expression, used in inequality (1), produces an overly high bound for m, that can be hardly pursued especially in case the learning algorithm is supervised! In PAC literature, the bound for m is often derived \"ad hoc\" for specific algorithms, in order to exploit knowledge on the precise learning conditions. It is also worth noticing that PAC literature has mostly a theoretical emphasis, and most applications concentrated on the field of neural networks and natural learning systems (Hanson, Petsche, Kearns, Rivest (1994) ). To the knowledge of the authors, the utility of this theory in the area of computer learning of natural language has not been explored.",
179
+ "cite_spans": [
180
+ {
181
+ "start": 496,
182
+ "end": 535,
183
+ "text": "(Hanson, Petsche, Kearns, Rivest (1994)",
184
+ "ref_id": "BIBREF4"
185
+ }
186
+ ],
187
+ "ref_spans": [],
188
+ "eq_spans": [],
189
+ "section": "A relation between sample size and complexity of learning task",
190
+ "sec_num": "2"
191
+ },
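A minimal Python sketch of how inequality (1) scales. The hypothesis space assumed here (all subsets of single-word contexts over a 10^5-word vocabulary) is a deliberately naive illustration of why the bound is hard to meet, not the paper's exact model.

from math import log

def pac_sample_bound(epsilon, delta, ln_hypothesis_space_size):
    # Inequality (1): m >= (1/epsilon) * (ln|H| + ln(1/delta))
    return (ln_hypothesis_space_size + log(1.0 / delta)) / epsilon

# Assumed hypothesis space: any subset of single-word contexts over a
# vocabulary of |V| = 10**5 words, so ln|H| = |V| * ln 2.
V = 10 ** 5
m = pac_sample_bound(epsilon=0.1, delta=0.05, ln_hypothesis_space_size=V * log(2))
print(round(m))  # about 693,000 labelled contexts, far beyond realistic supervision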
192
+ {
193
+ "text": "In the following, we will derive a probabilistic expression for m in the track of (1), for the case of a context-based WSD probabflistic learner, a learning method that includes a rather wide class of algorithms in the area of WSD. We believe that adapting our analysis to other example-based WSD systems will not require a significant effort. This relation allows it to establish, upon an a-priori analysis of the chosen conceptual model and of the language domain, a more precise relation between performance, complexity of the learning algorithm, and environmental conditions (e.g. complexity of the language domain).",
194
+ "cite_spans": [],
195
+ "ref_spans": [],
196
+ "eq_spans": [],
197
+ "section": "A relation between sample size and complexity of learning task",
198
+ "sec_num": "2"
199
+ },
200
+ {
201
+ "text": "Our objective is to show that an a-priori analysis of the learning model and language domain may help to tune precisely a WSD experiment and allows a more uniform comparison between different WSD systems.",
202
+ "cite_spans": [],
203
+ "ref_spans": [],
204
+ "eq_spans": [],
205
+ "section": "A relation between sample size and complexity of learning task",
206
+ "sec_num": "2"
207
+ },
208
+ {
209
+ "text": "A probabilistic context-based WSD learner may be described as follows: Let X be a space of feature vectors: fk=( f(all=vl,a21=v2 .... ani=Vn)e ~n, bik)), b[ =1 if fk is a positive example of Ct under H. Each vector describes the context in which a word we Cl is found, with variable degree of complexity. For examples, arguments may be any combination of plain words and their morphologic, syntactic and semantic tags. We assume that arguments are not statistically independent (in case they are, the representation of a concept is more simple, see Bruce and Wiebe, (1999) ). An example (Cucchiarelli, Luzi and Velardi (1998) ) is the case in which fk represents a syntactic relation between we C~ and another word in its context. For example, given the compound district banks the following feature is generated as an example of the category organization:",
210
+ "cite_spans": [
211
+ {
212
+ "start": 549,
213
+ "end": 572,
214
+ "text": "Bruce and Wiebe, (1999)",
215
+ "ref_id": "BIBREF1"
216
+ },
217
+ {
218
+ "start": 587,
219
+ "end": 625,
220
+ "text": "(Cucchiarelli, Luzi and Velardi (1998)",
221
+ "ref_id": "BIBREF3"
222
+ }
223
+ ],
224
+ "ref_spans": [],
225
+ "eq_spans": [],
226
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
227
+ "sec_num": "3."
228
+ },
229
+ {
230
+ "text": "((N_N district bank), organization(bank))",
231
+ "cite_spans": [],
232
+ "ref_spans": [],
233
+ "eq_spans": [],
234
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
235
+ "sec_num": "3."
236
+ },
237
+ {
238
+ "text": "We further assume that observations of contexts are noisy, and the noise may be originated by several factors, such as tags ambiguity, and semantic ambiguity of the word whose context is observed. In the above feature vector, the syntactic tag (first argument) could be wrong because of syntactic ambiguity and limited coverage of available parsers, and the ambiguous word bank could not be, in a specific context, an instance of the category organization, though it is in the example above. Probabilistic learners usually associate to uncertain information a measure of the confidence the system has in that information. Therefore, we assume that each feature fk is associated to a concept Cl with a confidence qb(i,k). The confidence may be calculated in several ways, depending upon the type of selected features for fk. For example, the Mutual Information measures the strength of a correlation between co-occurring arguments, and the Plausibility (Cucchiarelli, Luzi and Velardi (1998) ) assigns a weight to a feature vector, depending upon the degree of ambiguity of its arguments and the frequency of its observations in a corpus. We assume here that ~ is adjusted to be a probability, i.e. ~l~(i,k)=l. The factor ~(i,k) represents hence an estimate of the probability that fk. is indeed a context of Ci. Under these hypotheses, a representation he H for a concept Ct is the following: h(Cl):{fll..flm,} (2) fk-~h(Cl ) iff qb(i,k) > y A concept is hence represented by a set of features with associated probabilities 2. Policy (2) establishes that only features with a probability higher than a threshold y are assigned to a category model. Given an unknown word w' occurring in a context represented by f'k, the WSD algorithm assigns w' to the category in C that maximizes the similarity between f'k and one of its members. Again, see Cucchiarelli, Luzi and Velardi (1998) and Bruce and Wiebe, (1999) for examples of similarity functions.",
239
+ "cite_spans": [
240
+ {
241
+ "start": 952,
242
+ "end": 990,
243
+ "text": "(Cucchiarelli, Luzi and Velardi (1998)",
244
+ "ref_id": "BIBREF3"
245
+ },
246
+ {
247
+ "start": 1843,
248
+ "end": 1880,
249
+ "text": "Cucchiarelli, Luzi and Velardi (1998)",
250
+ "ref_id": "BIBREF3"
251
+ },
252
+ {
253
+ "start": 1885,
254
+ "end": 1908,
255
+ "text": "Bruce and Wiebe, (1999)",
256
+ "ref_id": "BIBREF1"
257
+ }
258
+ ],
259
+ "ref_spans": [],
260
+ "eq_spans": [],
261
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
262
+ "sec_num": "3."
263
+ },
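A minimal Python sketch of policy (2) and of the resulting classification step. The feature tuples, confidence values and exact-match similarity below are invented for illustration only; real similarity functions are those cited in the text.

def build_concept_model(weighted_features, gamma):
    # Policy (2): keep only features whose confidence phi(i,k) exceeds gamma.
    return {f: phi for f, phi in weighted_features.items() if phi > gamma}

def classify(context_feature, concept_models, similarity):
    # Assign the unknown word's context to the concept whose model contains
    # the most similar retained feature; None means "unseen" for every concept.
    best, best_score = None, 0.0
    for concept, model in concept_models.items():
        for feature, phi in model.items():
            score = similarity(context_feature, feature) * phi
            if score > best_score:
                best, best_score = concept, score
    return best

# Invented toy data with an exact-match similarity.
models = {
    "organization": build_concept_model({("N_N", "district", "bank"): 0.8}, gamma=0.5),
    "artifact": build_concept_model({("N_N", "steel", "bridge"): 0.7}, gamma=0.5),
}
exact_match = lambda a, b: 1.0 if a == b else 0.0
print(classify(("N_N", "district", "bank"), models, exact_match))  # organization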
264
+ {
265
+ "text": "Given the above, the probabilistic WSD model for a category Ct may fail because:",
266
+ "cite_spans": [],
267
+ "ref_spans": [],
268
+ "eq_spans": [],
269
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
270
+ "sec_num": "3."
271
+ },
272
+ {
273
+ "text": "1 Cl includes falsepos#Jves (fp), e.g. feature vectors erroneously assigned to Cl 2 There are false negatives (fn), i.e. feature vectors erroneously discarded because of a low value qb(i,k) 3 The context f'k of the word w' has never been observed around members of Ct, nor it is similar (in the precise sense of similarity established by a given algorithm) to any of the vectors in the contextual models.",
274
+ "cite_spans": [],
275
+ "ref_spans": [],
276
+ "eq_spans": [],
277
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
278
+ "sec_num": "3."
279
+ },
280
+ {
281
+ "text": "We then have3:",
282
+ "cite_spans": [],
283
+ "ref_spans": [],
284
+ "eq_spans": [],
285
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
286
+ "sec_num": "3."
287
+ },
288
+ {
289
+ "text": "(3) P(w' is misclassified on the basis of f'k)= P(f'kE fp in C0+P(f'kE fn outside C0+P(f'kis unseen in C O Let: m be the total number of feature vectors extracted from a corpus m k the total number of occurrences of a feature fk k the number of times the context fk occurred m i with a word w' member of Cl Notice that ~irnik~m k, since, because of i ambiguity, a context may be assigned to more than one concept (or to none). We can then estimate the three probabilities in expression (3) as follows:",
290
+ "cite_spans": [],
291
+ "ref_spans": [],
292
+ "eq_spans": [],
293
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
294
+ "sec_num": "3."
295
+ },
296
+ {
297
+ "text": "L (3.1) ~ (fp in Ct)= E -~-(l-dp(i, k) ~( i, k~? m. k (3.2) ~ (fn outside C~)= X 1 0(i,k) \u00a2(i, k~y m (3.3) ~ (unseen in CO= (1 ~mk).(~ Emik ).(~(i)) =~m Emik,(i, k m Vm =l k k",
298
+ "cite_spans": [],
299
+ "ref_spans": [],
300
+ "eq_spans": [],
301
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
302
+ "sec_num": "3."
303
+ },
304
+ {
305
+ "text": "The third probability is computed as the product of three estimated factors: the probability ~ of unseen contexts 4 in the 3 In the expression 3) the three events are clearly mutually exclusive. 4 We here assume for simplicity that the similarity function is an identity. A multtnomial or a more corpus, the probability of extracting contexts around members of Cl, and the average confidence of a feature vector in Cl. Classic methods such as Chernoff bounds may be applied to obtain good approximations for the three probabilities above. Notice however that in order to obtain a given accuracy of estimate, Chernoff bounds (and other methods) again impose a bound on the number of observed examples (Kearns and Vazirani (1994) ",
306
+ "cite_spans": [
307
+ {
308
+ "start": 700,
309
+ "end": 727,
310
+ "text": "(Kearns and Vazirani (1994)",
311
+ "ref_id": "BIBREF5"
312
+ }
313
+ ],
314
+ "ref_spans": [],
315
+ "eq_spans": [],
316
+ "section": "A formal estimate of accuracy for context-based probability WSD models",
317
+ "sec_num": "3."
318
+ },
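The estimators (3.1)-(3.3) are only partly recoverable from the source, so the following Python sketch should be read as one plausible implementation of the verbal description above (false positives weighted by 1-φ over retained features, false negatives by φ over discarded ones, and the unseen mass as a product of three factors), not as the authors' exact formulas.

def error_estimates(m, m_k, m_ik, phi, gamma):
    # m: total feature-vector occurrences in the corpus.
    # m_k[k], m_ik[k], phi[k]: counts and confidence phi(i,k) for the features
    # retained in the contextual model of concept Ci (an assumption of this sketch).
    p_fp = sum(m_ik[k] / m * (1.0 - phi[k]) for k in phi if phi[k] > gamma)
    p_fn = sum(m_ik[k] / m * phi[k] for k in phi if phi[k] <= gamma)
    unseen_mass = 1.0 - sum(m_k.values()) / m   # contexts never observed in the model
    ci_mass = sum(m_ik.values()) / m            # contexts observed around members of Ci
    avg_phi = sum(phi.values()) / len(phi) if phi else 0.0
    return p_fp, p_fn, unseen_mass * ci_mass * avg_phi

# Invented toy counts for one concept.
counts = {"f1": 40, "f2": 10}
with_ci = {"f1": 30, "f2": 5}
conf = {"f1": 0.8, "f2": 0.3}
print(error_estimates(m=1000, m_k=counts, m_ik=with_ci, phi=conf, gamma=0.5))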
319
+ {
320
+ "text": "In a complex language domain (e.g. newspaper articles) linguistic phenomena are far less repetitive than in a restricted language (e.g. airline reservations). However, even in a relatively unrestricted domain certain categories are used in a more narrow sense. Let us consider the probabilistic context-based algorithm in Cucchiarelli, Luzi and Velardi (1998) , where a feature is defined by: fk: (syntactic_relation, wl, wi) (e.g. (N_N district bank)) fk ~C~ if w i reaches the hyperonym C~ in the WordNet on-line taxonomy, and ~(i,k) > y Using the 1 million word Wall Street Journal corpus, we estimated the following probabilities (3.3) of unseen feature vectors (m in this experiment is O(105)): P(unseen in artifact)=0,7692 P(unseen in person)= 0,7161 P(unseen in psychological feature)=0.8598 complex function must be used in case contexts are considered similar if, for example, co-occurring words have some common hyperonym. See Cucchiarelli, Luzi and Velardi (1998) for examples.",
321
+ "cite_spans": [
322
+ {
323
+ "start": 322,
324
+ "end": 359,
325
+ "text": "Cucchiarelli, Luzi and Velardi (1998)",
326
+ "ref_id": "BIBREF3"
327
+ },
328
+ {
329
+ "start": 937,
330
+ "end": 974,
331
+ "text": "Cucchiarelli, Luzi and Velardi (1998)",
332
+ "ref_id": "BIBREF3"
333
+ }
334
+ ],
335
+ "ref_spans": [],
336
+ "eq_spans": [],
337
+ "section": "Dependency upon the corpus and linguistic concepts",
338
+ "sec_num": "3.1"
339
+ },
340
+ {
341
+ "text": "The linguistic concepts artifact, person and psychological feature are three hyperonyms of the on-line WordNet taxonomy. The above figures show that the more \"vague\" concept psychological feature occurs in more variable contexts, though the distribution of words in the three categories is approximately even.",
342
+ "cite_spans": [],
343
+ "ref_spans": [],
344
+ "eq_spans": [],
345
+ "section": "Dependency upon the corpus and linguistic concepts",
346
+ "sec_num": "3.1"
347
+ },
348
+ {
349
+ "text": "The representation model H also affects the estimates of erroneous classifications. For example, if we modify the contextual model by removing the information on wi (that is to say, the feature vectors in the contextual model now only includes the syntactic relation type and the co-occurring word wl), we obtain the following values for the probabilies (3.3): P (unseen in artifact) =0,1778 P (unseen in person) = 0,1714 P(unseen in psychological feature)=O,2139",
350
+ "cite_spans": [],
351
+ "ref_spans": [],
352
+ "eq_spans": [],
353
+ "section": "Dependency on the representation model",
354
+ "sec_num": "3.2"
355
+ },
356
+ {
357
+ "text": "The probability of \"unseens\" in this simpler model is considerably lower (we removed an attribute, wi, that assumes values over V), but clearly, the probability of false positives and false negatives increases. The motivation is that we now assume that a context for a word belonging (also to) Ct is a valid context for any word in that category.",
358
+ "cite_spans": [],
359
+ "ref_spans": [],
360
+ "eq_spans": [],
361
+ "section": "Dependency on the representation model",
362
+ "sec_num": "3.2"
363
+ },
364
+ {
365
+ "text": "Regardless of the specific adopted formula for O(i,k), the confidence ~b(i,k) in such a generalization depends on the number of different words w~ in occurring in a given context fk. If this number is low, or is just 1, then the value of dp(i,k) must be low, accordingly. The selected threshold y then determines the different contribution of false positives and false negatives to the total model accuracy.",
366
+ "cite_spans": [],
367
+ "ref_spans": [],
368
+ "eq_spans": [],
369
+ "section": "Dependency on the representation model",
370
+ "sec_num": "3.2"
371
+ },
372
+ {
373
+ "text": "A preliminary experiment is illustrated in Figure 2 On the other side, P(fn outside Ci) (not shown here for sake of space) has a specular behavour. For 7=0,9, the probability of false negative is as low as 0,6.",
374
+ "cite_spans": [],
375
+ "ref_spans": [
376
+ {
377
+ "start": 43,
378
+ "end": 51,
379
+ "text": "Figure 2",
380
+ "ref_id": null
381
+ }
382
+ ],
383
+ "eq_spans": [],
384
+ "section": "Dependency on the representation model",
385
+ "sec_num": "3.2"
386
+ },
387
+ {
388
+ "text": "By no means the work presented in this paper needs more investigation, especially on the experimental side. However, we believe that learnability analysis of WSD models has strong practical implications. The quantitative and (preliminary) experimental results of Section 2 put in evidence that :",
389
+ "cite_spans": [],
390
+ "ref_spans": [],
391
+ "eq_spans": [],
392
+ "section": "Conclusion",
393
+ "sec_num": "4."
394
+ },
395
+ {
396
+ "text": "\u2022 In order to acquire statistically stable contextual models of linguistic concepts, the dimension of the analyzed corpora must be considerably high. Paradoxically, untrained probabilistic systems are in better shape in this regard. Very large repositories of language samples can be now obtained from the WWW. \u2022 The experimental setting (i.e. size of the training set) must be tuned for each category and language domain, because the variability of contextual behavior may be significantly different, depending on domain complexity, e.g. the type and grain of the selected category, and the more or less restricted language domain \u2022 it is possible and indeed advisable, for a given WSD algorithm, to determine in a formal way the relation between expected accuracy of the WSD model and the domain and representation complexity. This would allow a better comparison among systems, and an a-priori tuning of the parameters of the disambiguation model.",
397
+ "cite_spans": [],
398
+ "ref_spans": [],
399
+ "eq_spans": [],
400
+ "section": "Conclusion",
401
+ "sec_num": "4."
402
+ },
403
+ {
404
+ "text": "Note that in case of statistical independence among the features in a vector, a model for a concept would be a set of features, rather than feature vectors, but most of what we discuss in this section would still apply with simple changes.",
405
+ "cite_spans": [],
406
+ "ref_spans": [],
407
+ "eq_spans": [],
408
+ "section": "",
409
+ "sec_num": null
410
+ }
411
+ ],
412
+ "back_matter": [],
413
+ "bib_entries": {
414
+ "BIBREF0": {
415
+ "ref_id": "b0",
416
+ "title": "Computational Learning Theory",
417
+ "authors": [
418
+ {
419
+ "first": "M",
420
+ "middle": [],
421
+ "last": "Anthony",
422
+ "suffix": ""
423
+ },
424
+ {
425
+ "first": "N",
426
+ "middle": [],
427
+ "last": "Biggs",
428
+ "suffix": ""
429
+ }
430
+ ],
431
+ "year": 1997,
432
+ "venue": "",
433
+ "volume": "",
434
+ "issue": "",
435
+ "pages": "",
436
+ "other_ids": {},
437
+ "num": null,
438
+ "urls": [],
439
+ "raw_text": "Anthony M. and Biggs, N. (1997) Computational Learning Theory Cambridge University Press, 1997",
440
+ "links": null
441
+ },
442
+ "BIBREF1": {
443
+ "ref_id": "b1",
444
+ "title": "Decomposable Modeling in Natural Language Processing",
445
+ "authors": [
446
+ {
447
+ "first": "R",
448
+ "middle": [],
449
+ "last": "Bruce",
450
+ "suffix": ""
451
+ },
452
+ {
453
+ "first": "J",
454
+ "middle": [],
455
+ "last": "Wiebe",
456
+ "suffix": ""
457
+ }
458
+ ],
459
+ "year": 1999,
460
+ "venue": "Computational Linguistics",
461
+ "volume": "25",
462
+ "issue": "2",
463
+ "pages": "",
464
+ "other_ids": {},
465
+ "num": null,
466
+ "urls": [],
467
+ "raw_text": "Bruce R. and Wiebe J., (1999) Decomposable Modeling in Natural Language Processing, Computational Linguistics vol. 25, N. 2. 199",
468
+ "links": null
469
+ },
470
+ "BIBREF2": {
471
+ "ref_id": "b2",
472
+ "title": "Computational Linguistics",
473
+ "authors": [],
474
+ "year": 1988,
475
+ "venue": "",
476
+ "volume": "24",
477
+ "issue": "",
478
+ "pages": "",
479
+ "other_ids": {},
480
+ "num": null,
481
+ "urls": [],
482
+ "raw_text": "Computational Linguistics (1998) Special Issue on Word Sense Disamblguatlon, Vol. 24 (1) March 1988",
483
+ "links": null
484
+ },
485
+ "BIBREF3": {
486
+ "ref_id": "b3",
487
+ "title": "Automatic Semantic Tagging of Unknown Proper Names Proc. of joint 36 \u00b0 ACL-17 \u00b0 COLING",
488
+ "authors": [
489
+ {
490
+ "first": "A",
491
+ "middle": [],
492
+ "last": "Cucchiarelli",
493
+ "suffix": ""
494
+ },
495
+ {
496
+ "first": "D",
497
+ "middle": [],
498
+ "last": "Luzi",
499
+ "suffix": ""
500
+ },
501
+ {
502
+ "first": "P",
503
+ "middle": [],
504
+ "last": "Velardi",
505
+ "suffix": ""
506
+ }
507
+ ],
508
+ "year": 1998,
509
+ "venue": "",
510
+ "volume": "",
511
+ "issue": "",
512
+ "pages": "",
513
+ "other_ids": {},
514
+ "num": null,
515
+ "urls": [],
516
+ "raw_text": "Cucchiarelli A. Luzi D. and Velardi P. (1998) Automatic Semantic Tagging of Unknown Proper Names Proc. of joint 36 \u00b0 ACL-17 \u00b0 COLING, Montreal, August 1998",
517
+ "links": null
518
+ },
519
+ "BIBREF4": {
520
+ "ref_id": "b4",
521
+ "title": "Computational Learning Theory and Natural Learning Systems",
522
+ "authors": [
523
+ {
524
+ "first": "S",
525
+ "middle": [
526
+ "J"
527
+ ],
528
+ "last": "Hanson",
529
+ "suffix": ""
530
+ },
531
+ {
532
+ "first": "T",
533
+ "middle": [],
534
+ "last": "Petsche",
535
+ "suffix": ""
536
+ },
537
+ {
538
+ "first": "M",
539
+ "middle": [],
540
+ "last": "Kearns",
541
+ "suffix": ""
542
+ },
543
+ {
544
+ "first": "R",
545
+ "middle": [
546
+ "L"
547
+ ],
548
+ "last": "Rivest",
549
+ "suffix": ""
550
+ }
551
+ ],
552
+ "year": 1994,
553
+ "venue": "",
554
+ "volume": "II",
555
+ "issue": "",
556
+ "pages": "",
557
+ "other_ids": {},
558
+ "num": null,
559
+ "urls": [],
560
+ "raw_text": "Hanson S.J., Petsche T., Kearns M., Rivest R.L. (1994) Computational Learning Theory and Natural Learning Systems, Vol. II, MIT Press, 1994",
561
+ "links": null
562
+ },
563
+ "BIBREF5": {
564
+ "ref_id": "b5",
565
+ "title": "An Introduction to Computational Learning Theory",
566
+ "authors": [
567
+ {
568
+ "first": "M",
569
+ "middle": [
570
+ "J"
571
+ ],
572
+ "last": "Kearns",
573
+ "suffix": ""
574
+ },
575
+ {
576
+ "first": "U",
577
+ "middle": [
578
+ "V"
579
+ ],
580
+ "last": "Vazirani",
581
+ "suffix": ""
582
+ }
583
+ ],
584
+ "year": 1994,
585
+ "venue": "",
586
+ "volume": "",
587
+ "issue": "",
588
+ "pages": "",
589
+ "other_ids": {},
590
+ "num": null,
591
+ "urls": [],
592
+ "raw_text": "Kearns M.J. and Vazirani U.V. (1994) An Introduction to Computational Learning Theory MIT Press, 1994",
593
+ "links": null
594
+ },
595
+ "BIBREF6": {
596
+ "ref_id": "b6",
597
+ "title": "Chapter 18: Learning from Observations in: Artificial Intelligence: a modern approach",
598
+ "authors": [
599
+ {
600
+ "first": "Russell",
601
+ "middle": [
602
+ "S J"
603
+ ],
604
+ "last": "Norvig",
605
+ "suffix": ""
606
+ },
607
+ {
608
+ "first": "P",
609
+ "middle": [],
610
+ "last": "",
611
+ "suffix": ""
612
+ }
613
+ ],
614
+ "year": 1999,
615
+ "venue": "",
616
+ "volume": "",
617
+ "issue": "",
618
+ "pages": "",
619
+ "other_ids": {},
620
+ "num": null,
621
+ "urls": [],
622
+ "raw_text": "Russell S.J and Norvig P (1999). Chapter 18: Learning from Observations in: Artificial Intelligence: a modern approach Prentice-hall 1999",
623
+ "links": null
624
+ },
625
+ "BIBREF8": {
626
+ "ref_id": "b8",
627
+ "title": "Theory of Learnable Communications of the ACM",
628
+ "authors": [
629
+ {
630
+ "first": "L",
631
+ "middle": [],
632
+ "last": "Valiant",
633
+ "suffix": ""
634
+ }
635
+ ],
636
+ "year": 1984,
637
+ "venue": "",
638
+ "volume": "27",
639
+ "issue": "",
640
+ "pages": "",
641
+ "other_ids": {},
642
+ "num": null,
643
+ "urls": [],
644
+ "raw_text": "Valiant L. (1984) A Theory of Learnable Communications of the ACM, 27(11), 1984",
645
+ "links": null
646
+ },
647
+ "BIBREF9": {
648
+ "ref_id": "b9",
649
+ "title": "Figure 2: (1-P(fp)) VS. Corpus Dim. For the category Artifact",
650
+ "authors": [],
651
+ "year": null,
652
+ "venue": "",
653
+ "volume": "",
654
+ "issue": "",
655
+ "pages": "",
656
+ "other_ids": {},
657
+ "num": null,
658
+ "urls": [],
659
+ "raw_text": "Figure 2: (1-P(fp)) VS. Corpus Dim. For the category Artifact",
660
+ "links": null
661
+ }
662
+ },
663
+ "ref_entries": {
664
+ "FIGREF0": {
665
+ "uris": null,
666
+ "text": "from Russell and Norvig (1999)) illustrates the \"intuitive\" meaning of PAC definition. After seeing m examples, the probability that Hbad includes consistent hypotheses is: P(Hbad~Hco.s)-<[ Hbad [(l-l~)m-<lH[(l-~) m H Hbad @ Figure I : e-sphere around the \"true\" function Ci",
667
+ "num": null,
668
+ "type_str": "figure"
669
+ },
670
+ "FIGREF1": {
671
+ "uris": null,
672
+ "text": ") Since in (3.1) (1-~(i,k))<y, in (3.2) ~(i,k))>y, and in (3.3) ~(i,k))_<l, we obtain the bound: P(w' is misclassified on the basis of f'k)= <_ Mi m -Ni (l-y)+_~ty +l~m ~m The expression (3) establishes interesting dependencies between the accuracy of a context-based probabilistic WSD model and certain environmental conditions.",
673
+ "num": null,
674
+ "type_str": "figure"
675
+ },
676
+ "FIGREF2": {
677
+ "uris": null,
678
+ "text": ". The figure computes (1-P(fp in C1) for the category artifact, as a function of m and ~(i,k), evaluated on a test set of 78 words. The figure shows that when y is >_0,5 the number of false positives is rather low, after observing sufficient examples.",
679
+ "num": null,
680
+ "type_str": "figure"
681
+ }
682
+ }
683
+ }
684
+ }
Full_text_JSON/prefixW/json/W00/W00-0106.json ADDED
@@ -0,0 +1,1007 @@
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0106",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:20.796345Z"
6
+ },
7
+ "title": "Analyzing the Reading Comprehension Task",
8
+ "authors": [
9
+ {
10
+ "first": "Amit",
11
+ "middle": [],
12
+ "last": "Bagga",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": "bagga@crd.ge"
16
+ }
17
+ ],
18
+ "year": "",
19
+ "venue": null,
20
+ "identifiers": {},
21
+ "abstract": "In this paper we describe a method for analyzing the reading comprehension task. First, we describe a method of classifying facts (information) into categories or levels; where each level signifies a different degree of difficulty of extracting a fact from a piece of text containing it. We then proceed to show how one can use this model the analyze the complexity of the reading comprehension task. Finally, we analyze five different reading comprehension tasks and present results from this analysis.",
22
+ "pdf_parse": {
23
+ "paper_id": "W00-0106",
24
+ "_pdf_hash": "",
25
+ "abstract": [
26
+ {
27
+ "text": "In this paper we describe a method for analyzing the reading comprehension task. First, we describe a method of classifying facts (information) into categories or levels; where each level signifies a different degree of difficulty of extracting a fact from a piece of text containing it. We then proceed to show how one can use this model the analyze the complexity of the reading comprehension task. Finally, we analyze five different reading comprehension tasks and present results from this analysis.",
28
+ "cite_spans": [],
29
+ "ref_spans": [],
30
+ "eq_spans": [],
31
+ "section": "Abstract",
32
+ "sec_num": null
33
+ }
34
+ ],
35
+ "body_text": [
36
+ {
37
+ "text": "Recently there has been a spate of activity for building question-answering systems (QA systems) driven largely by the recently organized QA track at the Eighth Text Retrieval Conference (TREC-8) (Harman, 1999) . This increase in research activity has also fueled research in a related area: building Reading Comprehension systems (Hirschman and others, 1999) . But while a number of successful systems have been developed for each of these tasks, little, if any, work has been done on analyzing the complexities of the tasks themselves. In this paper we describe a method of classifying facts (information) into categories or levels; where each level signifies a different degree of difficulty of extracting a fact from a piece of text containing it. We then proceed to show how one can use this model the analyze the complexity of the reading comprehension task. Finally, we analyze five different reading comprehension tasks and present results from this analysis.",
38
+ "cite_spans": [
39
+ {
40
+ "start": 196,
41
+ "end": 210,
42
+ "text": "(Harman, 1999)",
43
+ "ref_id": null
44
+ },
45
+ {
46
+ "start": 331,
47
+ "end": 359,
48
+ "text": "(Hirschman and others, 1999)",
49
+ "ref_id": null
50
+ }
51
+ ],
52
+ "ref_spans": [],
53
+ "eq_spans": [],
54
+ "section": "Introduction",
55
+ "sec_num": "1"
56
+ },
57
+ {
58
+ "text": "The Complexity of Extracting a Fact From Text Any text document is a collection of facts (information). These facts may be explicitly or implicitly stated in the text. In addition, there are \"easy\" facts which may be found in a single sentence (example: the name of a city) as well as \"difficult\" facts which are spread across several sentences (example: the reason for a particular event). For a computer system to be able to process text documents in applications like information extrac-tion (IE), question answering, and reading comprehension, it has to have the ability to extract facts from text. Obviously, the performance of the system will depend upon the type of fact it has to extract: explicit or implicit, easy or difficult, etc. (by no means is this list complete). In addition, the performance of such systems varies greatly depending on various additional factors including known vocabulary, sentence length, the amount of training, quality of parsing, etc. Despite the great variations in the performances of such systems, it has been hypothesized that there are facts that are simply harder to extract than others (Hirschman, 1992) .",
59
+ "cite_spans": [
60
+ {
61
+ "start": 1132,
62
+ "end": 1149,
63
+ "text": "(Hirschman, 1992)",
64
+ "ref_id": "BIBREF11"
65
+ }
66
+ ],
67
+ "ref_spans": [],
68
+ "eq_spans": [],
69
+ "section": "2",
70
+ "sec_num": null
71
+ },
72
+ {
73
+ "text": "In this section we describe a method for estimating the complexity of extracting a fact from text. The proposed model was initially used to analyze the information extraction task (Bagga and Biermann, 1997) . In addition to verifying Hirschman's hypothesis, the model also provided us with a framework for analyzing and understanding the performance of several IE systems (Bagga and Biermann, 1998) . We have also proposed using this model to analyze the complexity of the QA task Which is related to both the IE, and the reading comprehension tasks (Bagga et al., 1999) . The remainder of this section describes the model in detail, and provides a sample application of the model to an IE task. In the following section, we discuss how this model can be used to analyze the reading comprehension task.",
74
+ "cite_spans": [
75
+ {
76
+ "start": 180,
77
+ "end": 206,
78
+ "text": "(Bagga and Biermann, 1997)",
79
+ "ref_id": "BIBREF2"
80
+ },
81
+ {
82
+ "start": 372,
83
+ "end": 398,
84
+ "text": "(Bagga and Biermann, 1998)",
85
+ "ref_id": "BIBREF3"
86
+ },
87
+ {
88
+ "start": 550,
89
+ "end": 570,
90
+ "text": "(Bagga et al., 1999)",
91
+ "ref_id": "BIBREF4"
92
+ }
93
+ ],
94
+ "ref_spans": [],
95
+ "eq_spans": [],
96
+ "section": "2",
97
+ "sec_num": null
98
+ },
99
+ {
100
+ "text": "A network consists of a collection of nodes interconnected by an accompanying set of arcs. Each node denotes an object and each arc represents a binary relation between the objects. (Hendrix, 1979)",
101
+ "cite_spans": [],
102
+ "ref_spans": [],
103
+ "eq_spans": [],
104
+ "section": "Network:",
105
+ "sec_num": null
106
+ },
107
+ {
108
+ "text": "A partial network is a collection of nodes interconnected by an accompanying set of arcs where the collection of nodes is a subset of a collection of nodes forming a network, and the accompanying set of arcs is a subset of the se.t of arcs accompanying the set of nodes which form the network. Figure 1 shows a sample network for the following piece of text: \"The Extraditables,\" or the Armed Branch of the Medellin Cartel have claimed responsibility for the murder of two employees of Bogota's daily E1 Espectador on Nov 15. The murders took place in Medellin.",
109
+ "cite_spans": [],
110
+ "ref_spans": [
111
+ {
112
+ "start": 294,
113
+ "end": 302,
114
+ "text": "Figure 1",
115
+ "ref_id": null
116
+ }
117
+ ],
118
+ "eq_spans": [],
119
+ "section": "A Partial Network:",
120
+ "sec_num": null
121
+ },
122
+ {
123
+ "text": "The level of a fact, F, in a piece of text is defined by the following algorithm:",
124
+ "cite_spans": [],
125
+ "ref_spans": [],
126
+ "eq_spans": [],
127
+ "section": "The Level of A Fact",
128
+ "sec_num": "2.2"
129
+ },
130
+ {
131
+ "text": "1. Build a network, S, for the piece of text. We define the level of the fact, F, with respect to the network, S to be equal to k, the number of arcs linking the nodes which comprise the fact Fins.",
132
+ "cite_spans": [],
133
+ "ref_spans": [],
134
+ "eq_spans": [],
135
+ "section": "The Level of A Fact",
136
+ "sec_num": "2.2"
137
+ },
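A small Python sketch of this level computation, assuming a phrase-level network represented as a networkx graph. Approximating the minimal connecting subnetwork by a union of shortest paths is an assumption of this sketch; the paper identifies the linking arcs by hand.

import networkx as nx

def fact_level(network, fact_nodes):
    # Level of a fact = number of arcs linking the nodes that make up the fact
    # (0 for a single-node, level-0 fact).
    fact_nodes = list(fact_nodes)
    arcs = set()
    for node in fact_nodes[1:]:
        path = nx.shortest_path(network, fact_nodes[0], node)
        arcs.update(frozenset(pair) for pair in zip(path, path[1:]))
    return len(arcs)

# Toy phrase-level network for the example text (undirected for simplicity).
g = nx.Graph()
g.add_edge("the murder", "two employees")
g.add_edge("the murder", "Nov 15")
g.add_edge("two employees", "Bogota's daily El Espectador")
print(fact_level(g, ["the murder", "two employees"]))             # 1: type of attack
print(fact_level(g, ["the murder", "two employees", "Nov 15"]))   # 2: date of the attack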
138
+ {
139
+ "text": "2.2.1 Observations Given the definition of the level of a fact, the following observations can be made:",
140
+ "cite_spans": [],
141
+ "ref_spans": [],
142
+ "eq_spans": [],
143
+ "section": "The Level of A Fact",
144
+ "sec_num": "2.2"
145
+ },
146
+ {
147
+ "text": "\u2022 The level of a fact is related to the concept of \"semantic vicinity\" defined by Schubert et. al. (Schubert and others, 1979) . The semantic vicinity of a node in a network consists of the nodes and the arcs reachable from that node by traversing a small number of arcs. The fundamental assumption used here is that \"the knowledge required to perform an intellectual task generally lies in the semantic vicinity of the concepts involved in the task\" (Schubert and others, 1979).",
148
+ "cite_spans": [
149
+ {
150
+ "start": 82,
151
+ "end": 126,
152
+ "text": "Schubert et. al. (Schubert and others, 1979)",
153
+ "ref_id": null
154
+ }
155
+ ],
156
+ "ref_spans": [],
157
+ "eq_spans": [],
158
+ "section": "The Level of A Fact",
159
+ "sec_num": "2.2"
160
+ },
161
+ {
162
+ "text": "The level of a fact is equal to the number of arcs that one needs to traverse to reach all the concepts (nodes) which comprise the fact of interest.",
163
+ "cite_spans": [],
164
+ "ref_spans": [],
165
+ "eq_spans": [],
166
+ "section": "The Level of A Fact",
167
+ "sec_num": "2.2"
168
+ },
169
+ {
170
+ "text": "\u2022 A level-0 fact consists of a single node (i.e. no transitions) in a network.",
171
+ "cite_spans": [],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "The Level of A Fact",
175
+ "sec_num": "2.2"
176
+ },
177
+ {
178
+ "text": "\u2022 A level-k fact is a union of k level-1 facts:",
179
+ "cite_spans": [],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "The Level of A Fact",
183
+ "sec_num": "2.2"
184
+ },
185
+ {
186
+ "text": "\u2022 Conjunctions/disjunctions increase the level of a fact.",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "The Level of A Fact",
191
+ "sec_num": "2.2"
192
+ },
193
+ {
194
+ "text": "\u2022 A higher level fact is likely to be harder to extract than a lower level fact.",
195
+ "cite_spans": [],
196
+ "ref_spans": [],
197
+ "eq_spans": [],
198
+ "section": "The Level of A Fact",
199
+ "sec_num": "2.2"
200
+ },
201
+ {
202
+ "text": "\u2022 A fact appearing at one level in a piece of text may appear at some other level in the same piece of text.",
203
+ "cite_spans": [],
204
+ "ref_spans": [],
205
+ "eq_spans": [],
206
+ "section": "The Level of A Fact",
207
+ "sec_num": "2.2"
208
+ },
209
+ {
210
+ "text": "\u2022 The level of a fact in a piece of text depends on the granularity of the network constructed for that piece of text. Therefore, the level of a fact with respect to a network built at the word level (i.e. words represent objects and the relationships between the objects) will be greater than the level of a fact with respect to a network built at the phrase level (i.e. noun groups represent objects while verb groups and preposition groups represent the relationships between the objects).",
211
+ "cite_spans": [],
212
+ "ref_spans": [],
213
+ "eq_spans": [],
214
+ "section": "The Level of A Fact",
215
+ "sec_num": "2.2"
216
+ },
217
+ {
218
+ "text": "Let S be the network shown in Figure 1 . S has been built at the phrase level.",
219
+ "cite_spans": [],
220
+ "ref_spans": [
221
+ {
222
+ "start": 30,
223
+ "end": 38,
224
+ "text": "Figure 1",
225
+ "ref_id": null
226
+ }
227
+ ],
228
+ "eq_spans": [],
229
+ "section": "Examples",
230
+ "sec_num": "2.2.2"
231
+ },
232
+ {
233
+ "text": "\u2022 The city mentioned, in S, is an example of a level-0 fact because the \"city\" fact consists only of one node \"Medellin.\"",
234
+ "cite_spans": [],
235
+ "ref_spans": [],
236
+ "eq_spans": [],
237
+ "section": "Examples",
238
+ "sec_num": "2.2.2"
239
+ },
240
+ {
241
+ "text": "\u2022 The type of attack, in S, is an example of a level-1 fact.",
242
+ "cite_spans": [],
243
+ "ref_spans": [],
244
+ "eq_spans": [],
245
+ "section": "Examples",
246
+ "sec_num": "2.2.2"
247
+ },
248
+ {
249
+ "text": "We define the type o/attack in the network to be an attack designator such as \"murder, .... bombing,\" or \"assassination\" with one modifier giving the victim, perpetrator, date, location, or other information. In this case the type of attack fact is composed of the \"the murder\" and the \"two employees\" nodes and their connector. This makes the type of attack a level-1 fact. The type of attack could appear as a level-0 fact as in \"the Medellin bombing\" (assuming that the network is built at the phrase level) because in this case both the attack designator (bombing) and the modifier (Medellin) occur in the same node. The type of attack fact occurs as a level-2 fact in the following sentence (once again assuming that the network is built at the phrase level): \"10 people were killed in the offensive which included several bombings.\" In this case there is no direct connector between the attack designator (several bombings) and its modifier (10 people). They are connected by the intermediatory \"the offensive\" node; thereby making the type of attack a level-2 fact. The type of attack can also appear at higher levels.",
250
+ "cite_spans": [],
251
+ "ref_spans": [],
252
+ "eq_spans": [],
253
+ "section": "Examples",
254
+ "sec_num": "2.2.2"
255
+ },
256
+ {
257
+ "text": "\u2022 In S, the date of the murder of the two employees is an example of a level-2 fact. This is because the attack designator (the tourder) along with its modifier (two employees) account for one level and the arc to \"Nov 15\" accounts for the second level. The date of the attack, in this case, is not a level-1 fact (because of the two nodes \"the tourder\" and \"Nov 15\") because the phrase \"the murder on Nov 15\" does not tell one that an attack actually took place. The article could have been talking about a seminar on murders that took place on Nov 15 and not about the murder of two employees which took place then.",
258
+ "cite_spans": [],
259
+ "ref_spans": [],
260
+ "eq_spans": [],
261
+ "section": "Examples",
262
+ "sec_num": "2.2.2"
263
+ },
264
+ {
265
+ "text": "\u2022 In S, the location of the murder of the two employees is an example of a level-2 fact. The exact same argument as the date of the murder of' the two employees applies here.",
266
+ "cite_spans": [],
267
+ "ref_spans": [],
268
+ "eq_spans": [],
269
+ "section": "Examples",
270
+ "sec_num": "2.2.2"
271
+ },
272
+ {
273
+ "text": "\u2022 The complete information, in S, about the victiros is an example of a level-2 fact because to know that two employees of Bogota's Daily E1 Espectador were victims, one has to know that they were murdered. The attack designator (the murder) with its modifier (two employees) accounts for one level, while the connector between \"two employees\" and \"Bogota's Daily E1 Espectador\" accounts for the other.",
274
+ "cite_spans": [],
275
+ "ref_spans": [],
276
+ "eq_spans": [],
277
+ "section": "Examples",
278
+ "sec_num": "2.2.2"
279
+ },
280
+ {
281
+ "text": "As mentioned earlier, the level of a fact for a piece of text depends on the network constructed for the text. Since there is no unique network corresponding to a piece of text, care has to be taken so that the networks are built consistently. We used the following algorithm to build the networks:",
282
+ "cite_spans": [],
283
+ "ref_spans": [],
284
+ "eq_spans": [],
285
+ "section": "Building the Networks",
286
+ "sec_num": "2.3"
287
+ },
288
+ {
289
+ "text": "1. Every article was broken up into a nonoverlapping sequence of noun groups (NGs), verb groups (VGs), and preposition groups (PGs). The rules employed to identify the NGs, VGs, and PGs were almost the same as the ones employed by SRI's FASTUS system 1.",
290
+ "cite_spans": [],
291
+ "ref_spans": [],
292
+ "eq_spans": [],
293
+ "section": "Building the Networks",
294
+ "sec_num": "2.3"
295
+ },
296
+ {
297
+ "text": "2. The nodes of the network consisted of the NGs while the transitions between the nodes consisted of the VGs and the PGs.",
298
+ "cite_spans": [],
299
+ "ref_spans": [],
300
+ "eq_spans": [],
301
+ "section": "Building the Networks",
302
+ "sec_num": "2.3"
303
+ },
304
+ {
305
+ "text": "3. Identification of coreferent nodes and prepositional phrase attachments were done manually.",
306
+ "cite_spans": [],
307
+ "ref_spans": [],
308
+ "eq_spans": [],
309
+ "section": "Building the Networks",
310
+ "sec_num": "2.3"
311
+ },
312
+ {
313
+ "text": "The networks are built based largely upon the syntactic structure of the text contained in the articles. However, there is some semantics encoded into the networks because identification of coreferent nodes and preposition phrase attachments are done manually.",
314
+ "cite_spans": [],
315
+ "ref_spans": [],
316
+ "eq_spans": [],
317
+ "section": "Building the Networks",
318
+ "sec_num": "2.3"
319
+ },
320
+ {
321
+ "text": "Obviously, if one were to employ a different algorithm for building the networks, one would get different numbers for the level of a fact. But, if the algorithm were employed consistently across all the facts of interest and across all articles in a domain, the numbers on the level of a fact would be consistently different and one would still be able to analyze the relative complexity of extracting that fact from a piece of text in the domain.",
322
+ "cite_spans": [],
323
+ "ref_spans": [],
324
+ "eq_spans": [],
325
+ "section": "Building the Networks",
326
+ "sec_num": "2.3"
327
+ },
328
+ {
329
+ "text": "3 Example: Analyzing the",
330
+ "cite_spans": [],
331
+ "ref_spans": [],
332
+ "eq_spans": [],
333
+ "section": "Building the Networks",
334
+ "sec_num": "2.3"
335
+ },
336
+ {
337
+ "text": "In order to validate our model of complexity we applied it to the Information Extraction (IE) task, or the Message Understanding task (DAR, 1991) , (DAR, 1992) , (ARP, 1993) , (DAR, 1995) , (DAR, 1998) . The goal of an IE task is to extract prespecified facts from text and fill in predefined templates containing labeled slots.",
338
+ "cite_spans": [
339
+ {
340
+ "start": 134,
341
+ "end": 145,
342
+ "text": "(DAR, 1991)",
343
+ "ref_id": null
344
+ },
345
+ {
346
+ "start": 148,
347
+ "end": 159,
348
+ "text": "(DAR, 1992)",
349
+ "ref_id": null
350
+ },
351
+ {
352
+ "start": 162,
353
+ "end": 173,
354
+ "text": "(ARP, 1993)",
355
+ "ref_id": null
356
+ },
357
+ {
358
+ "start": 176,
359
+ "end": 187,
360
+ "text": "(DAR, 1995)",
361
+ "ref_id": null
362
+ },
363
+ {
364
+ "start": 190,
365
+ "end": 201,
366
+ "text": "(DAR, 1998)",
367
+ "ref_id": null
368
+ }
369
+ ],
370
+ "ref_spans": [],
371
+ "eq_spans": [],
372
+ "section": "Complexity of an Information Extraction Task",
373
+ "sec_num": null
374
+ },
375
+ {
376
+ "text": "We analyzed the complexity of the task used for the Fourth Message Understanding Conference (MUC-4) (DAR, 1992) . In this task, the participangs were asked to extract the following facts from articles describing terrorist activities in Latin America:",
377
+ "cite_spans": [
378
+ {
379
+ "start": 100,
380
+ "end": 111,
381
+ "text": "(DAR, 1992)",
382
+ "ref_id": null
383
+ }
384
+ ],
385
+ "ref_spans": [],
386
+ "eq_spans": [],
387
+ "section": "Complexity of an Information Extraction Task",
388
+ "sec_num": null
389
+ },
390
+ {
391
+ "text": "\u2022 The type of attack.",
392
+ "cite_spans": [],
393
+ "ref_spans": [],
394
+ "eq_spans": [],
395
+ "section": "Complexity of an Information Extraction Task",
396
+ "sec_num": null
397
+ },
398
+ {
399
+ "text": "\u2022 The date of the attack.",
400
+ "cite_spans": [],
401
+ "ref_spans": [],
402
+ "eq_spans": [],
403
+ "section": "Complexity of an Information Extraction Task",
404
+ "sec_num": null
405
+ },
406
+ {
407
+ "text": "\u2022 The location of the attack.",
408
+ "cite_spans": [],
409
+ "ref_spans": [],
410
+ "eq_spans": [],
411
+ "section": "Complexity of an Information Extraction Task",
412
+ "sec_num": null
413
+ },
414
+ {
415
+ "text": "1We wish to thank Jerry Hobbs of SRI for providing us with the rules of their partial parser. \u2022 The perpetrator(s) (including suspects).",
416
+ "cite_spans": [],
417
+ "ref_spans": [],
418
+ "eq_spans": [],
419
+ "section": "Complexity of an Information Extraction Task",
420
+ "sec_num": null
421
+ },
422
+ {
423
+ "text": "We analyzed a set of 100 articles from the MUC-4 domain each of which reported one or more terrorist attacks. Figure 2 shows the level distribution for each of the five facts. A closer analysis of the figure shows that the \"type of attack\" fact is the easiest to extract while the \"perpetrator\" fact is the hardest (the curve peaks at level-2 for this fact). In addition, Figure 3 shows the level distribution of the five facts combined. This figure gives some indication of the complexity of the MUC-4 task because it shows that almost 50% of the MUC-4 facts occur at level-1. The expected level of the five facts in the MUC-4 domain was 1.74 (this is simply the weighted average of the level distributions of the facts). We define this number to be the Task Complexity for the MUC-4 task. Therefore, the MUC-4 task can now be compared to, say, the MUC-5 task by comparing their Task Complexities. In fact, we computed the Task Complexity of the MUC-5 task and discovered that it was equal to 2.5. In comparison, an analysis, using more \"superficial\" features, done by Beth Sundheim, shows that the nature of the MUC-5 EJV task is approximately twice as hard as the nature of the MUC-4 task (Sundheim, 1993) . The features used in the study included vocabulary size, the average number of words per sentence, and the average number of sentences per article. More details about this analysis can be found in (Bagga and Biermann, 1998) .",
424
+ "cite_spans": [
425
+ {
426
+ "start": 1192,
427
+ "end": 1208,
428
+ "text": "(Sundheim, 1993)",
429
+ "ref_id": "BIBREF14"
430
+ },
431
+ {
432
+ "start": 1408,
433
+ "end": 1434,
434
+ "text": "(Bagga and Biermann, 1998)",
435
+ "ref_id": "BIBREF3"
436
+ }
437
+ ],
438
+ "ref_spans": [
439
+ {
440
+ "start": 110,
441
+ "end": 118,
442
+ "text": "Figure 2",
443
+ "ref_id": null
444
+ },
445
+ {
446
+ "start": 372,
447
+ "end": 380,
448
+ "text": "Figure 3",
449
+ "ref_id": "FIGREF3"
450
+ }
451
+ ],
452
+ "eq_spans": [],
453
+ "section": "Complexity of an Information Extraction Task",
454
+ "sec_num": null
455
+ },
456
+ {
457
+ "text": "The reading comprehension task differs from the QA task in the following way: while the goal of the QA task is to find answers for a set of questions from a collection of documents, the goal of the reading comprehension task is to find answers to a set of questions from a single related document. Since the QA task involves extracting answers from a collection of documents, the complexity of this task depends on the expected level of occurrence of the answers of the questions. While it is theoretically possible to compute the average level of any fact in the entire to computing the level of the occurrence of each answer. Therefore, the results presented in this paper will provide both these values.",
458
+ "cite_spans": [],
459
+ "ref_spans": [],
460
+ "eq_spans": [],
461
+ "section": "Analyzing the Reading Comprehension Task",
462
+ "sec_num": "4"
463
+ },
464
+ {
465
+ "text": "We analyzed a set of five reading comprehension tests offered by the English Language Center at the University of Victoria in Canada 2. These five tests are listed in increasing order of difficulty and are classified by the Center as: Basic, Basic-Intermediate, Intermediate, Intermediate-Advanced, and Advanced. For each of these tests, we calculated the level number of each sentence in the text, and the level number of the sentences containing the answers to each question for every test. In addition, we also calculated the number of coreferences present in each sentence in the texts, and the corresponding number in the sentences containing each answer. It should be noted that we were forced to calculate the level number of the sentences containing the answer as opposed to calculating the level number of the answer itself because several questions had only true/false answers. Since there was no way to compute the level numbers of true/false answers, we decided to calculate the level numbers of the sentences containing the answers in order to be consistent. For true/false answers this implied analyzing all the sentences which help determine the truth value of the question. Figure 4 shows for each text, the number of sentences in the text, the average level number of a sentence, the average number of coreferences per sentence, the number of questions corresponding to the test, the average level number of each answer, and the average number of coreferences per answer.",
466
+ "cite_spans": [],
467
+ "ref_spans": [
468
+ {
469
+ "start": 1190,
470
+ "end": 1198,
471
+ "text": "Figure 4",
472
+ "ref_id": null
473
+ }
474
+ ],
475
+ "eq_spans": [],
476
+ "section": "Analysis and Results",
477
+ "sec_num": "4.1"
478
+ },
479
+ {
480
+ "text": "The results shown in Figure 4 are consistent with the model. The figure shows that as the difficulty level of the tests increase, so do the corresponding level numbers per sentence, and the answers. One 2 http://web2.uvcs.uvic.ca/elc/studyzone/index.htm conclusion that we can draw from the numbers is that the Basic-Intermediate test, based upon the analysis, is slightly more easy than the Basic test. We will address this issue in the next section.",
481
+ "cite_spans": [],
482
+ "ref_spans": [
483
+ {
484
+ "start": 21,
485
+ "end": 29,
486
+ "text": "Figure 4",
487
+ "ref_id": null
488
+ }
489
+ ],
490
+ "eq_spans": [],
491
+ "section": "Analysis and Results",
492
+ "sec_num": "4.1"
493
+ },
494
+ {
495
+ "text": "The numbers of coreferences, surprisingly, do no increase with the difficulty of the tests. However, a closer look at the types of coreference shows that while most of the coreferences in the first two tests (Basic, and Basic-Intermediate) are simple pronominal coreferences (he, she, it, etc.), the coreferences used in the last two tests (Intermediate-Advanced, and Advanced) require more knowledge to process.",
496
+ "cite_spans": [],
497
+ "ref_spans": [],
498
+ "eq_spans": [],
499
+ "section": "Analysis and Results",
500
+ "sec_num": "4.1"
501
+ },
502
+ {
503
+ "text": "Some examples include marijuana coreferent with the drug, hemp with the pant, etc. Not being able to capture the complexity of the coreferences is one, among several, shortcomings of this model.",
504
+ "cite_spans": [],
505
+ "ref_spans": [],
506
+ "eq_spans": [],
507
+ "section": "Analysis and Results",
508
+ "sec_num": "4.1"
509
+ },
510
+ {
511
+ "text": "MITRE 3 ran its Qanda reading comprehension system on the five tests analyzed in the previous section. However, instead of producing a single answer for each question, Qanda produces a list of answers listed in decreasing order of confidence. The rest of this section describes an evaluation of Qanda's performance on the five tests and a comparison with the analysis done in the previous section.",
512
+ "cite_spans": [],
513
+ "ref_spans": [],
514
+ "eq_spans": [],
515
+ "section": "A Comparison with Qanda",
516
+ "sec_num": "4.2"
517
+ },
518
+ {
519
+ "text": "In order to evaluate Qanda's performance on the five tests we decided to use the Mean Reciprocal Answer Rank (MRAR) technique which was used for evaluating question-answering systems at TREC-8 (Singhal, 1999) . For each answer, this techniques assigns a score between 0 and 1 depending on its rank in the list of answers output. The score for answer, i, is computed as:",
520
+ "cite_spans": [
521
+ {
522
+ "start": 193,
523
+ "end": 208,
524
+ "text": "(Singhal, 1999)",
525
+ "ref_id": "BIBREF13"
526
+ }
527
+ ],
528
+ "ref_spans": [],
529
+ "eq_spans": [],
530
+ "section": "A Comparison with Qanda",
531
+ "sec_num": "4.2"
532
+ },
533
+ {
534
+ "text": "1 Scorel = rank of answeri",
535
+ "cite_spans": [],
536
+ "ref_spans": [],
537
+ "eq_spans": [],
538
+ "section": "A Comparison with Qanda",
539
+ "sec_num": "4.2"
540
+ },
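A short Python sketch of the scoring just described. The per-question gold answers in the usage example are invented, and MRAR is computed as the sum of per-question reciprocal-rank scores, as the following paragraph defines it (divide by the number of questions for a TREC-style mean).

def mrar(ranked_answers_per_question, gold_per_question):
    # Each question contributes 1/rank of the first correct answer in the
    # system's ranked list, or 0 if no correct answer appears in the list.
    total = 0.0
    for qid, answers in ranked_answers_per_question.items():
        for rank, answer in enumerate(answers, start=1):
            if answer == gold_per_question[qid]:
                total += 1.0 / rank
                break
    return total

# Invented toy data for two questions.
gold = {"q1": "Medellin", "q2": "Nov 15"}
system_output = {"q1": ["Bogota", "Medellin"], "q2": ["Nov 15", "Nov 16"]}
print(mrar(system_output, gold))  # 0.5 + 1.0 = 1.5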
541
+ {
542
+ "text": "If no correct answer is found in the list, a score of 0 is assigned. Therefore, MRAR for a reading comprehension test is the sum of the scores for answers corresponding to each question for that test. Figure 4) . It should be pointed out that the system performed significantly better on the Basic-Intermediate Test compared to the Basic test consistent with the numbers in Figure 4 . However, contrary to expectation, Qanda performed exceedingly well on the Advanced test answering 7 out of the 10 questions with answers whose rank is 1 (i.e. the first answer among the list of possible answers for each question is the correct one). We are currently consulting the developers of the system for conducting an analysis of the performance on this test in more detail.",
543
+ "cite_spans": [],
544
+ "ref_spans": [
545
+ {
546
+ "start": 201,
547
+ "end": 210,
548
+ "text": "Figure 4)",
549
+ "ref_id": null
550
+ },
551
+ {
552
+ "start": 374,
553
+ "end": 382,
554
+ "text": "Figure 4",
555
+ "ref_id": null
556
+ }
557
+ ],
558
+ "eq_spans": [],
559
+ "section": "A Comparison with Qanda",
560
+ "sec_num": "4.2"
561
+ },
562
+ {
563
+ "text": "This measure is just the beginning of a search for useful complexity measures. Although the measure is a big step up from the measures used earlier, it has a number of shortcomings. The main shortcoming is the ambiguity regarding the selection of nodes from the network regarding the fact of interest. Consider the following sentence: \"This is a report from the Straits of Taiwan ......... Yesterday, China test fired a missile.\" Suppose we are interested in the location of the launch of the missile. The ambiguity here arises from the fact that the article does not explicitly mention that the missile was launched in the Straits of Taiwan. The decision to infer that fact from the information present depends upon the person building the network.",
564
+ "cite_spans": [],
565
+ "ref_spans": [],
566
+ "eq_spans": [],
567
+ "section": "Shortcomings",
568
+ "sec_num": "5"
569
+ },
570
+ {
571
+ "text": "In addition, the measure does not account for the following factors (the list is not complete):",
572
+ "cite_spans": [],
573
+ "ref_spans": [],
574
+ "eq_spans": [],
575
+ "section": "Shortcomings",
576
+ "sec_num": "5"
577
+ },
578
+ {
579
+ "text": "coreference: If the extraction of a fact requires the resolution of several coreferences, it is clearly more difficult than an extraction which does not. In addition, the degree of difficulty of resolving coreferences itself varies from simple exact matches~ and pronominal coreferences, to ones that require external world knowledge.",
580
+ "cite_spans": [],
581
+ "ref_spans": [],
582
+ "eq_spans": [],
583
+ "section": "Shortcomings",
584
+ "sec_num": "5"
585
+ },
586
+ {
587
+ "text": "frequency of answers: The frequency of occurrence of facts in a collection of documents has an impact on the performance of systems.",
588
+ "cite_spans": [],
589
+ "ref_spans": [],
590
+ "eq_spans": [],
591
+ "section": "Shortcomings",
592
+ "sec_num": "5"
593
+ },
594
+ {
595
+ "text": "occurrence of multiple (similar) facts: Clearly, if several similar facts are present in the same article, the systems will find it harder to extract the correct fact.",
596
+ "cite_spans": [],
597
+ "ref_spans": [],
598
+ "eq_spans": [],
599
+ "section": "Shortcomings",
600
+ "sec_num": "5"
601
+ },
602
+ {
603
+ "text": "vocabulary size: Unknown words present some problems to systems making it harder for them to perform well.",
604
+ "cite_spans": [],
605
+ "ref_spans": [],
606
+ "eq_spans": [],
607
+ "section": "Shortcomings",
608
+ "sec_num": "5"
609
+ },
610
+ {
611
+ "text": "On the other hand, no measure can take into account all possible features in natural language. Consider the following example. In an article, suppose one initially encounters a series of statements that obliquely imply that the following statement is false. Then the statement is given: \"Bill Clinton visited Taiwan last week.\" Processing such discourse requires an ability to perfectly understand the initial series of statements before the truth value of tlie last statement can be properly evaluated. Such complete understanding is beyond the state of the art and is likely to remain so for many years.",
612
+ "cite_spans": [],
613
+ "ref_spans": [],
614
+ "eq_spans": [],
615
+ "section": "Shortcomings",
616
+ "sec_num": "5"
617
+ },
618
+ {
619
+ "text": "Despite these shortcomings, the current measure does quantify complexity on one very important dimension, namely the number of clauses (or phrases) required to specify a fact. For the short term it appears to be the best available vehicle for understanding the complexity of extracting a fact.",
620
+ "cite_spans": [],
621
+ "ref_spans": [],
622
+ "eq_spans": [],
623
+ "section": "Shortcomings",
624
+ "sec_num": "5"
625
+ },
626
+ {
627
+ "text": "In this paper we have described a model that can be used to analyze the complexity of a reading comprehension task. The model has been used to analyze five different reading comprehension tests, and the paper presents the results from the analysis.",
628
+ "cite_spans": [],
629
+ "ref_spans": [],
630
+ "eq_spans": [],
631
+ "section": "Conclusions",
632
+ "sec_num": "6"
633
+ }
634
+ ],
635
+ "back_matter": [],
636
+ "bib_entries": {
637
+ "BIBREF0": {
638
+ "ref_id": "b0",
639
+ "title": "Fifth Message Understanding Conference (MUC-5)",
640
+ "authors": [
641
+ {
642
+ "first": "Arpa",
643
+ "middle": [],
644
+ "last": "",
645
+ "suffix": ""
646
+ }
647
+ ],
648
+ "year": 1993,
649
+ "venue": "",
650
+ "volume": "",
651
+ "issue": "",
652
+ "pages": "",
653
+ "other_ids": {},
654
+ "num": null,
655
+ "urls": [],
656
+ "raw_text": "ARPA. 1993. Fifth Message Understanding Confer- ence (MUC-5);",
657
+ "links": null
658
+ },
659
+ "BIBREF2": {
660
+ "ref_id": "b2",
661
+ "title": "Analyzing the Complexity of a Domain With Respect To An Information Extraction Task",
662
+ "authors": [
663
+ {
664
+ "first": "Amit",
665
+ "middle": [],
666
+ "last": "Bagga",
667
+ "suffix": ""
668
+ },
669
+ {
670
+ "first": "Alan",
671
+ "middle": [
672
+ "W"
673
+ ],
674
+ "last": "Biermann",
675
+ "suffix": ""
676
+ }
677
+ ],
678
+ "year": 1997,
679
+ "venue": "Tenth International Conference on Research on Computational Linguistics (ROCLING X)",
680
+ "volume": "",
681
+ "issue": "",
682
+ "pages": "175--194",
683
+ "other_ids": {},
684
+ "num": null,
685
+ "urls": [],
686
+ "raw_text": "Amit Bagga and Alan W. Biermann. 1997. Ana- lyzing the Complexity of a Domain With Respect To An Information Extraction Task. In Tenth In- ternational Conference on Research on Computa- tional Linguistics (ROCLING X), pages 175-194, August.",
687
+ "links": null
688
+ },
689
+ "BIBREF3": {
690
+ "ref_id": "b3",
691
+ "title": "Analyzing the Performance of Message Understanding Systems",
692
+ "authors": [
693
+ {
694
+ "first": "Amit",
695
+ "middle": [],
696
+ "last": "Bagga",
697
+ "suffix": ""
698
+ },
699
+ {
700
+ "first": "Alan",
701
+ "middle": [
702
+ "W"
703
+ ],
704
+ "last": "Biermann",
705
+ "suffix": ""
706
+ }
707
+ ],
708
+ "year": 1998,
709
+ "venue": "Journal of Computational Linguistics and Chinese Language Processing",
710
+ "volume": "3",
711
+ "issue": "",
712
+ "pages": "1--26",
713
+ "other_ids": {},
714
+ "num": null,
715
+ "urls": [],
716
+ "raw_text": "Amit Bagga and Alan W. Biermann. 1998. Ana- lyzing the Performance of Message Understand- ing Systems. Journal of Computational Linguis- tics and Chinese Language Processing, 3(1):1-26, February.",
717
+ "links": null
718
+ },
719
+ "BIBREF4": {
720
+ "ref_id": "b4",
721
+ "title": "Semantics and Complexity of Question Answering Systems: Towards a Moore's Law for Natural Language Engineering",
722
+ "authors": [
723
+ {
724
+ "first": "Amit",
725
+ "middle": [],
726
+ "last": "Bagga",
727
+ "suffix": ""
728
+ },
729
+ {
730
+ "first": "Wlodek",
731
+ "middle": [],
732
+ "last": "Zadrozny",
733
+ "suffix": ""
734
+ },
735
+ {
736
+ "first": "James",
737
+ "middle": [],
738
+ "last": "Pustejovsky",
739
+ "suffix": ""
740
+ }
741
+ ],
742
+ "year": 1999,
743
+ "venue": "1999 AAAI Fall Symposium Series on Question Answering Systems",
744
+ "volume": "",
745
+ "issue": "",
746
+ "pages": "1--10",
747
+ "other_ids": {},
748
+ "num": null,
749
+ "urls": [],
750
+ "raw_text": "Amit Bagga, Wlodek Zadrozny, and James Puste- jovsky. 1999. Semantics and Complexity of Ques- tion Answering Systems: Towards a Moore's Law for Natural Language Engineering. In 1999 AAAI Fall Symposium Series on Question Answering Systems, pages 1-10, November.",
751
+ "links": null
752
+ },
753
+ "BIBREF5": {
754
+ "ref_id": "b5",
755
+ "title": "Third Message Understanding Conference (MUC-3)",
756
+ "authors": [
757
+ {
758
+ "first": "",
759
+ "middle": [],
760
+ "last": "Darpa",
761
+ "suffix": ""
762
+ }
763
+ ],
764
+ "year": 1991,
765
+ "venue": "",
766
+ "volume": "",
767
+ "issue": "",
768
+ "pages": "",
769
+ "other_ids": {},
770
+ "num": null,
771
+ "urls": [],
772
+ "raw_text": "DARPA. 1991. Third Message Understanding Con- ference (MUC-3), San Mateo, May. Morgan Kauf- mann Publishers, Inc.",
773
+ "links": null
774
+ },
775
+ "BIBREF6": {
776
+ "ref_id": "b6",
777
+ "title": "Fourth Message Understanding Conference (MUC-4)",
778
+ "authors": [
779
+ {
780
+ "first": "",
781
+ "middle": [],
782
+ "last": "Darpa",
783
+ "suffix": ""
784
+ }
785
+ ],
786
+ "year": 1992,
787
+ "venue": "",
788
+ "volume": "",
789
+ "issue": "",
790
+ "pages": "",
791
+ "other_ids": {},
792
+ "num": null,
793
+ "urls": [],
794
+ "raw_text": "DARPA. 1992. Fourth Message Understanding Conference (MUC-4), San Mateo, June. Morgan Kaufmann Publishers, Inc. DARPA: TIPSTER Text Program. 1995. Sixth Message Understanding Conference (MUC-6), San Mateo, November. Morgan Kaufmann Pub- lishers, Inc.",
795
+ "links": null
796
+ },
797
+ "BIBREF7": {
798
+ "ref_id": "b7",
799
+ "title": "DARPA: TIPSTER Text Program",
800
+ "authors": [],
801
+ "year": 1998,
802
+ "venue": "Seventh Message Understanding Conference (MUC-7)",
803
+ "volume": "",
804
+ "issue": "",
805
+ "pages": "",
806
+ "other_ids": {},
807
+ "num": null,
808
+ "urls": [],
809
+ "raw_text": "DARPA: TIPSTER Text Program. 1998. Seventh Message Understanding Conference (MUC- 7).",
810
+ "links": null
811
+ },
812
+ "BIBREF8": {
813
+ "ref_id": "b8",
814
+ "title": "Eighth Text REtrieval Conference (TREC-8). National Institute of Standards and Technology (NIST)",
815
+ "authors": [],
816
+ "year": 1999,
817
+ "venue": "",
818
+ "volume": "",
819
+ "issue": "",
820
+ "pages": "",
821
+ "other_ids": {},
822
+ "num": null,
823
+ "urls": [],
824
+ "raw_text": "D. K. Harman, editor. 1999. Eighth Text RE- trieval Conference (TREC-8). National Institute of Standards and Technology (NIST), U.S. De- partment of Commerce, National Technical Infor- mation Service, November.",
825
+ "links": null
826
+ },
827
+ "BIBREF9": {
828
+ "ref_id": "b9",
829
+ "title": "Encoding Knowledge in Partitioned Networks",
830
+ "authors": [
831
+ {
832
+ "first": "G",
833
+ "middle": [],
834
+ "last": "Gary",
835
+ "suffix": ""
836
+ },
837
+ {
838
+ "first": "",
839
+ "middle": [],
840
+ "last": "Hendrix",
841
+ "suffix": ""
842
+ }
843
+ ],
844
+ "year": 1979,
845
+ "venue": "",
846
+ "volume": "",
847
+ "issue": "",
848
+ "pages": "51--92",
849
+ "other_ids": {},
850
+ "num": null,
851
+ "urls": [],
852
+ "raw_text": "Gary G. Hendrix. 1979. Encoding Knowledge in Partitioned Networks. In Nicholas V. Findler, edi- tor, Associative Networks, pages 51-92. Academic Press, New York.",
853
+ "links": null
854
+ },
855
+ "BIBREF10": {
856
+ "ref_id": "b10",
857
+ "title": "Deep Read: A Reading Comprehension System",
858
+ "authors": [
859
+ {
860
+ "first": "Lynette",
861
+ "middle": [],
862
+ "last": "Hirschman",
863
+ "suffix": ""
864
+ }
865
+ ],
866
+ "year": 1999,
867
+ "venue": "37th Annual Meeting of the Association of Computational Linguistics",
868
+ "volume": "",
869
+ "issue": "",
870
+ "pages": "325--332",
871
+ "other_ids": {},
872
+ "num": null,
873
+ "urls": [],
874
+ "raw_text": "Lynette Hirschman et al. 1999. Deep Read: A Read- ing Comprehension System. In 37th Annual Meet- ing of the Association of Computational Linguis- tics, pages 325-332, June.",
875
+ "links": null
876
+ },
877
+ "BIBREF11": {
878
+ "ref_id": "b11",
879
+ "title": "An Adjunct Test for Discourse Processing in MUC-4",
880
+ "authors": [
881
+ {
882
+ "first": "Lynette",
883
+ "middle": [],
884
+ "last": "Hirschman",
885
+ "suffix": ""
886
+ }
887
+ ],
888
+ "year": 1992,
889
+ "venue": "Fourth Message Understanding Conference (MUC-4) (DAR, 1992)",
890
+ "volume": "",
891
+ "issue": "",
892
+ "pages": "67--77",
893
+ "other_ids": {},
894
+ "num": null,
895
+ "urls": [],
896
+ "raw_text": "Lynette Hirschman. 1992. An Adjunct Test for Discourse Processing in MUC-4. In Fourth Mes- sage Understanding Conference (MUC-4) (DAR, 1992), pages 67-77.",
897
+ "links": null
898
+ },
899
+ "BIBREF12": {
900
+ "ref_id": "b12",
901
+ "title": "The Structure and Organization of a Semantic Net for Comprehension and Inference",
902
+ "authors": [
903
+ {
904
+ "first": "K",
905
+ "middle": [],
906
+ "last": "Lenhart",
907
+ "suffix": ""
908
+ },
909
+ {
910
+ "first": "",
911
+ "middle": [],
912
+ "last": "Schubert",
913
+ "suffix": ""
914
+ }
915
+ ],
916
+ "year": 1979,
917
+ "venue": "",
918
+ "volume": "",
919
+ "issue": "",
920
+ "pages": "121--175",
921
+ "other_ids": {},
922
+ "num": null,
923
+ "urls": [],
924
+ "raw_text": "Lenhart K. Schubert et al. 1979. The Structure and Organization of a Semantic Net for Comprehen- sion and Inference. In Nicholas V. Findler, editor, Associative Networks, pages 121-175. Academic Press, New York.",
925
+ "links": null
926
+ },
927
+ "BIBREF13": {
928
+ "ref_id": "b13",
929
+ "title": "Question Answering Track at TREC-8",
930
+ "authors": [
931
+ {
932
+ "first": "Amit",
933
+ "middle": [],
934
+ "last": "Singhal",
935
+ "suffix": ""
936
+ }
937
+ ],
938
+ "year": 1999,
939
+ "venue": "",
940
+ "volume": "",
941
+ "issue": "",
942
+ "pages": "",
943
+ "other_ids": {},
944
+ "num": null,
945
+ "urls": [],
946
+ "raw_text": "Amit Singhal. 1999. Question Answering Track at TREC-8. http://www, research, art. com/~ singhal/ qa-track-spec.txt, November.",
947
+ "links": null
948
+ },
949
+ "BIBREF14": {
950
+ "ref_id": "b14",
951
+ "title": "Tipster/MUC-5 Information Extraction System Evaluation",
952
+ "authors": [
953
+ {
954
+ "first": "Beth",
955
+ "middle": [
956
+ "M"
957
+ ],
958
+ "last": "Sundheim",
959
+ "suffix": ""
960
+ }
961
+ ],
962
+ "year": 1993,
963
+ "venue": "Fifth Message Understanding Conference (MUC-5) (ARP, 1993)",
964
+ "volume": "",
965
+ "issue": "",
966
+ "pages": "27--44",
967
+ "other_ids": {},
968
+ "num": null,
969
+ "urls": [],
970
+ "raw_text": "Beth M. Sundheim. 1993. Tipster/MUC-5 Informa- tion Extraction System Evaluation. In Fifth Mes- sage Understanding Conference (MUC-5) (ARP, 1993), pages 27-44.",
971
+ "links": null
972
+ }
973
+ },
974
+ "ref_entries": {
975
+ "FIGREF0": {
976
+ "text": "Figure 1: A Sample Network",
977
+ "uris": null,
978
+ "num": null,
979
+ "type_str": "figure"
980
+ },
981
+ "FIGREF1": {
982
+ "text": "Identify the nodes that are relevant to the fact, F. Suppose {xl,x~,...,Xn} are the nodes relevant to F. Let s be the partial network consisting of the set of nodes {xl, x~,..., x~} interconnected by the set of arcs {tl, t2,..., tk}.",
983
+ "uris": null,
984
+ "num": null,
985
+ "type_str": "figure"
986
+ },
987
+ "FIGREF2": {
988
+ "text": "Figure 2: MUC-4: Level Distribution of Each of the Five Facts",
989
+ "uris": null,
990
+ "num": null,
991
+ "type_str": "figure"
992
+ },
993
+ "FIGREF3": {
994
+ "text": "MUC-4: Level Distribution of the Five Facts Combined\u2022 The victim (including damage to property).",
995
+ "uris": null,
996
+ "num": null,
997
+ "type_str": "figure"
998
+ },
999
+ "FIGREF4": {
1000
+ "text": "summarizes Qanda's results for the five tests. The figure shows, for each test, the number of questions, the cumulative MRAR for all answers for the test, and the average MRAR per answer. 3We would like to thank Marc Light and Eric Breck for their help with running Qanda on our data.",
1001
+ "uris": null,
1002
+ "num": null,
1003
+ "type_str": "figure"
1004
+ }
1005
+ }
1006
+ }
1007
+ }
Full_text_JSON/prefixW/json/W00/W00-0107.json ADDED
@@ -0,0 +1,668 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0107",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:48.303224Z"
6
+ },
7
+ "title": "A Measure of Semantic Complexity for Natural Language Systems",
8
+ "authors": [
9
+ {
10
+ "first": "Shannon",
11
+ "middle": [],
12
+ "last": "Pollard",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "LSRC",
17
+ "location": {
18
+ "postBox": "Box 90129",
19
+ "postCode": "D224, 27708-0129",
20
+ "settlement": "Durham",
21
+ "region": "NC"
22
+ }
23
+ },
24
+ "email": "shannon@cs.duke.edu"
25
+ },
26
+ {
27
+ "first": "Alan",
28
+ "middle": [
29
+ "W"
30
+ ],
31
+ "last": "Biermann",
32
+ "suffix": "",
33
+ "affiliation": {
34
+ "laboratory": "",
35
+ "institution": "LSRC",
36
+ "location": {
37
+ "postBox": "Box 90129",
38
+ "postCode": "D224, 27708-0129",
39
+ "settlement": "Durham",
40
+ "region": "NC"
41
+ }
42
+ },
43
+ "email": ""
44
+ }
45
+ ],
46
+ "year": "",
47
+ "venue": null,
48
+ "identifiers": {},
49
+ "abstract": "This paper will describe a way to organize the salient objects, their attributes, and relationships between the objects in a given domain. This organization allows us to assign an information value to each collection, and to the domain as a whole, which corresponds to the number of things to \"talk about\" in the domain. This number gives a measure of semantic complexity; that is, it will correspond to the number of objects, attributes, and relationships in the domain, but not to the level of syntactic diversity allowed when conveying these meanings. Defining a measure of semantic complexity for a dialog system domain will give an insight towards making a complexity measurement standard. With such a standard, natural language programmers can measure the feasibility of making a natural language interface, compare different language processors' ability to handle more and more complex domains, and quantify the abilities of the current state of the art in natural language processors.",
50
+ "pdf_parse": {
51
+ "paper_id": "W00-0107",
52
+ "_pdf_hash": "",
53
+ "abstract": [
54
+ {
55
+ "text": "This paper will describe a way to organize the salient objects, their attributes, and relationships between the objects in a given domain. This organization allows us to assign an information value to each collection, and to the domain as a whole, which corresponds to the number of things to \"talk about\" in the domain. This number gives a measure of semantic complexity; that is, it will correspond to the number of objects, attributes, and relationships in the domain, but not to the level of syntactic diversity allowed when conveying these meanings. Defining a measure of semantic complexity for a dialog system domain will give an insight towards making a complexity measurement standard. With such a standard, natural language programmers can measure the feasibility of making a natural language interface, compare different language processors' ability to handle more and more complex domains, and quantify the abilities of the current state of the art in natural language processors.",
56
+ "cite_spans": [],
57
+ "ref_spans": [],
58
+ "eq_spans": [],
59
+ "section": "Abstract",
60
+ "sec_num": null
61
+ }
62
+ ],
63
+ "body_text": [
64
+ {
65
+ "text": "Quantification of task difficulty has been applied to many areas in artificial intelligence, including information retrieval (Bagga, 1997) (Bagga and Biermann, 1997) , machine learning (Niyogi, 1996) , parsing and grammatical formalisms(G. Edward Barton et al., 1987) , and language learning in general (Ristad, 1993) . In addition to providing a way of comparing systems, these measures quantify task complexity before a system is built. The goal of this paper is to measure the complexity of domains for dialog processing. With a standard measure of complexity, domains can be compared and analyzed without having to build the dialog system first. This measure would be an indication of the cost, amount of code, accuracy, reliability, and execution time of the finished dialog system specified by the domain. The hope is to have a single number or pair of numbers that correlates strongly with these standard measures.",
66
+ "cite_spans": [
67
+ {
68
+ "start": 125,
69
+ "end": 138,
70
+ "text": "(Bagga, 1997)",
71
+ "ref_id": "BIBREF2"
72
+ },
73
+ {
74
+ "start": 139,
75
+ "end": 165,
76
+ "text": "(Bagga and Biermann, 1997)",
77
+ "ref_id": "BIBREF1"
78
+ },
79
+ {
80
+ "start": 185,
81
+ "end": 199,
82
+ "text": "(Niyogi, 1996)",
83
+ "ref_id": "BIBREF5"
84
+ },
85
+ {
86
+ "start": 247,
87
+ "end": 267,
88
+ "text": "Barton et al., 1987)",
89
+ "ref_id": "BIBREF4"
90
+ },
91
+ {
92
+ "start": 303,
93
+ "end": 317,
94
+ "text": "(Ristad, 1993)",
95
+ "ref_id": "BIBREF6"
96
+ }
97
+ ],
98
+ "ref_spans": [],
99
+ "eq_spans": [],
100
+ "section": "Introduction",
101
+ "sec_num": "1"
102
+ },
103
+ {
104
+ "text": "Specifically, if domain D1 has complexity C1 and domain D2 has complexity C2 where C2 > C1, then we would expect D2 to have a greater cost of software, more lines of code, less accuracy, less reliability, and longer execution time.",
105
+ "cite_spans": [],
106
+ "ref_spans": [],
107
+ "eq_spans": [],
108
+ "section": "Introduction",
109
+ "sec_num": "1"
110
+ },
111
+ {
112
+ "text": "Section 2 will describe the difference in semantic and syntactic complexity and explain why we consider each separately. In section 3 we define the terms in the complexity analysis, which is explained in section 4. Sections 5 and 6 discuss how to compute information measures that are needed in the complexity analysis, and in sections 7 and 8 we present future work and conclude.",
113
+ "cite_spans": [],
114
+ "ref_spans": [],
115
+ "eq_spans": [],
116
+ "section": "Introduction",
117
+ "sec_num": "1"
118
+ },
119
+ {
120
+ "text": "Semantic vs. Syntactic complexity",
121
+ "cite_spans": [],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "2",
125
+ "sec_num": null
126
+ },
127
+ {
128
+ "text": "The complexity measurement described above must be one that takes into account both the semantic and syntactic complexity of the domain. Semantic complexity is the number of \"things\" that we can talk about in the domain. This will include all the objects in the domain, the attributes of those objects to which one might refer, and the relationships between the objects that the user can express. Syntactic complexity refers to the variety of ways that the user will be allowed to refer to an object, attribute, or relationship. For example, a domain could include only two boys but if the user is allowed to refer to them in many ways (e.g., \"Bob\", \"Jim\", \"he\", \"they\", \"the two boys next to the water cooler at the back of the room\"), then the domain is semantically simple but syntactically complex. Likewise a domain with 100 objects that are each referred to only as Object1, Object2, etc.., is semantically complex but syntactically simple. Semantic and syntactic complexities form a trade-Off when it comes to building a language processor for a domain. To build a reliable and accurate processor, the domain must be sufficiently restrained. The more syntactic variety allowed the user, the fewer objects allowed in the domain. So, the more objects in the world, the more restricted the user's grammar and vocabulary. This leads to a tendency to consider the two fronts separately, and then consider a complete complexity measure as a combina-tion of both. Having measures of syntactic and semantic complexity separately will help to find where the best compromise lies. This paper addresses semantic complexity only. It therefore does not completely define the complexity measure described in the introduction, but hopefully takes a step toward defining such a measure. Syntactic complexity measures such as grammar perplexity (Cole and Zue, 1995) should augment this semantic measure to give a full complexity measure.",
129
+ "cite_spans": [
130
+ {
131
+ "start": 1835,
132
+ "end": 1855,
133
+ "text": "(Cole and Zue, 1995)",
134
+ "ref_id": "BIBREF3"
135
+ }
136
+ ],
137
+ "ref_spans": [],
138
+ "eq_spans": [],
139
+ "section": "2",
140
+ "sec_num": null
141
+ },
142
+ {
143
+ "text": "To analyze a domain's complexity, the domain expert must first specify the domain in which the system will work by determining the objects in the domain, each object's attributes, and the relationships between objects.",
144
+ "cite_spans": [],
145
+ "ref_spans": [],
146
+ "eq_spans": [],
147
+ "section": "Domain Terms",
148
+ "sec_num": "3"
149
+ },
150
+ {
151
+ "text": "Consider as an example the small domain of a simple army map, where there are a few objects on the map and the user can display, move, and show or set attributes of them. This example will be used to show how to define a domain using the following terms:",
152
+ "cite_spans": [],
153
+ "ref_spans": [],
154
+ "eq_spans": [],
155
+ "section": "Domain Terms",
156
+ "sec_num": "3"
157
+ },
158
+ {
159
+ "text": "Objects are the types of salient things in the domain. They correspond roughly to the subjects and objects of sentences used in the dialog. In the army display domain, the objects will be tanks, troops, bridges, forests, and hills. Notice that a type of object only needs to be specified once at this high level. Bridge is one object in our world, even though the actual program is able to distinguish many different bridges.",
160
+ "cite_spans": [],
161
+ "ref_spans": [],
162
+ "eq_spans": [],
163
+ "section": "Domain Terms",
164
+ "sec_num": "3"
165
+ },
166
+ {
167
+ "text": "Attributes of an object are the things that the program needs to know about the object in order to use it in the domain. They correspond roughly to adjectives that describe the object, or things that distinguish one of the objects from the others of that type. In our example, the domain requires the name and position of the bridge and the material of which the bridge is made. These three pieces of information include everything the system needs to know about any bridge. In the following figure, the attributes of an object are listed underneath each object type.",
168
+ "cite_spans": [],
169
+ "ref_spans": [],
170
+ "eq_spans": [],
171
+ "section": "Domain Terms",
172
+ "sec_num": "3"
173
+ },
174
+ {
175
+ "text": "Classes are objects, attributes, predicates, or other classes that are grouped together. A class can act as an object in the sense that it can have a name and have relationships with other objects. In our example domain, we will want to distinguish objects that can move from those that cannot, i.e., a MobileObject class as a grouping of Tanks and Troops. There are always three trivial classes: the class of all objects, all attributes (of all objects), and all predicates. Predicates are the relationships between the objects in the world. Any meaning that the user can convey using one or more of the objects should be represented by a predicate. They correspond to the relationship words, like the verbs and prepositions in a sentence, and one can usually find the predicates needed from looking at the allowed operations. For the example domain, the following is the list of allowable predicates, in a typical programming language format to distinguis h predicates from arguments. Notice that classes can be written as predicate arguments to mean that any object in the class can be an argument. Specifically, the Object type refers to all objects, MobileObject refers to either Tank or Troop, and Attribute refers to any object's attribute.",
176
+ "cite_spans": [],
177
+ "ref_spans": [],
178
+ "eq_spans": [],
179
+ "section": "Domain Terms",
180
+ "sec_num": "3"
181
+ },
182
+ {
183
+ "text": "Complexity Formulas Now that the domain is specified, we can anMyze its semantics by estimating the number of bits of information conveyed by referring to each different aspect of the domain. This is common in information theory (Ash, 1965) ; that is, when the user makes a statement, it must be encoded, and the number of bits needed to encode the statement is a measure of its information content. Since the number of bits required to encode a statement in a given domain corresponds directly to the number of salient objects, this information measurement is useful in assigning a semantic complexity measurement.",
184
+ "cite_spans": [
185
+ {
186
+ "start": 229,
187
+ "end": 240,
188
+ "text": "(Ash, 1965)",
189
+ "ref_id": "BIBREF0"
190
+ }
191
+ ],
192
+ "ref_spans": [],
193
+ "eq_spans": [],
194
+ "section": "4",
195
+ "sec_num": null
196
+ },
197
+ {
198
+ "text": "To get a. complexity measure for an entire do-main, we begin at the lowest level and make counts corresponding to the information content described above. The counts from lower levels are combined to give a higher level count. Specifically, first each attribute value for a specific object is computed, then attribute values are combined to give object values, which are combined to give class values, and so forth until a value for the entire domain is computed.",
199
+ "cite_spans": [],
200
+ "ref_spans": [],
201
+ "eq_spans": [],
202
+ "section": "4",
203
+ "sec_num": null
204
+ },
205
+ {
206
+ "text": "Define B(X) to be the number of bits conveyed by an instance of random variable X, and IX] to be the number of possible values of X. (Possible ways of computing B(X) will be given in the next sections.)",
207
+ "cite_spans": [],
208
+ "ref_spans": [],
209
+ "eq_spans": [],
210
+ "section": "4",
211
+ "sec_num": null
212
+ },
213
+ {
214
+ "text": "The random variable will represent different events, depending on where we are in the complexity analysis, but in general, the variable will represent the specification of possible attributes, objects, classes, or predicates. We start by defining the complexity of a single attribute for a single object. We give the formulas for computing the different levels of complexity (attribute level, object level, etc) and then work through the example domain.",
215
+ "cite_spans": [],
216
+ "ref_spans": [],
217
+ "eq_spans": [],
218
+ "section": "4",
219
+ "sec_num": null
220
+ },
221
+ {
222
+ "text": "The complexity of attribute i for object j, denoted",
223
+ "cite_spans": [],
224
+ "ref_spans": [],
225
+ "eq_spans": [],
226
+ "section": "4",
227
+ "sec_num": null
228
+ },
229
+ {
230
+ "text": "ACatt~,obji is",
231
+ "cite_spans": [],
232
+ "ref_spans": [],
233
+ "eq_spans": [],
234
+ "section": "4",
235
+ "sec_num": null
236
+ },
237
+ {
238
+ "text": "where A is the specification of an attribute value. The object complexity of object j is the sum of all its attributes' complexities:",
239
+ "cite_spans": [],
240
+ "ref_spans": [],
241
+ "eq_spans": [],
242
+ "section": "AGtt,,obji = B(A)",
243
+ "sec_num": null
244
+ },
245
+ {
246
+ "text": "A simple sum is used because identifying one object uniquely corresponds to knowing each of its attributes. Therefore, the sum of the attribute information is the same as the complete object information.",
247
+ "cite_spans": [],
248
+ "ref_spans": [],
249
+ "eq_spans": [],
250
+ "section": "OC\u00b0bj$ \"~-E ACatt~,obji i",
251
+ "sec_num": null
252
+ },
253
+ {
254
+ "text": "Since objects can be grouped together into classes, a class complexity is the number of bits conveyed by distinguishing one type of object from that class, plus the maximum object complexity that occurs in that class:",
255
+ "cite_spans": [],
256
+ "ref_spans": [],
257
+ "eq_spans": [],
258
+ "section": "OC\u00b0bj$ \"~-E ACatt~,obji i",
259
+ "sec_num": null
260
+ },
261
+ {
262
+ "text": "where O is the specification of an object in class.",
263
+ "cite_spans": [],
264
+ "ref_spans": [],
265
+ "eq_spans": [],
266
+ "section": "CC.,... = B(O) + max (OCob#) obj~class",
267
+ "sec_num": null
268
+ },
269
+ {
270
+ "text": "When a member of a class is specified, the amount of information conveyed is equal to the information in the object type specification (B(O) ), plus the information conveyed by the actual object itself. The most that can be is the maximum object complexity in the class. Classes of predicates and attributes are defined in the same way.",
271
+ "cite_spans": [],
272
+ "ref_spans": [
273
+ {
274
+ "start": 135,
275
+ "end": 140,
276
+ "text": "(B(O)",
277
+ "ref_id": null
278
+ }
279
+ ],
280
+ "eq_spans": [],
281
+ "section": "CC.,... = B(O) + max (OCob#) obj~class",
282
+ "sec_num": null
283
+ },
284
+ {
285
+ "text": "For each predicate, the complexity is the sum of the complexities of its arguments:",
286
+ "cite_spans": [],
287
+ "ref_spans": [],
288
+ "eq_spans": [],
289
+ "section": "CC.,... = B(O) + max (OCob#) obj~class",
290
+ "sec_num": null
291
+ },
292
+ {
293
+ "text": "This is the same as the object complexity as a sum of the complexities of its attributes.",
294
+ "cite_spans": [],
295
+ "ref_spans": [],
296
+ "eq_spans": [],
297
+ "section": "PCpred= E CC\u00a2,ass classearg",
298
+ "sec_num": null
299
+ },
300
+ {
301
+ "text": "In general, predicate arguments will be classes. If a single object is the only possibility for an argument rather than a class of objects, then the object complexity can be used. This would be the same as making a class of one object: the class complexity of one object is equal to the complexity of the one member of the class.",
302
+ "cite_spans": [],
303
+ "ref_spans": [],
304
+ "eq_spans": [],
305
+ "section": "PCpred= E CC\u00a2,ass classearg",
306
+ "sec_num": null
307
+ },
308
+ {
309
+ "text": "The entire domain's semantic complexity is then the same as the complexity of the class of all predicates defined for the domain. Specifically, for a domain with a set of predicates P, the semantic complexity SEMC is SEMC = B(P) + max PCpred pred~P where P is the specification of a predicate in the domain.",
310
+ "cite_spans": [],
311
+ "ref_spans": [],
312
+ "eq_spans": [],
313
+ "section": "PCpred= E CC\u00a2,ass classearg",
314
+ "sec_num": null
315
+ },
316
+ {
317
+ "text": "Any statement that the user can make should correspond to some predicate in the domain model. The information given in the sentence is the information given by the predicate specification (B(P)) plus the information given in the arguments to the predicate, which is as much as the greatest predicate complexity.",
318
+ "cite_spans": [],
319
+ "ref_spans": [],
320
+ "eq_spans": [],
321
+ "section": "PCpred= E CC\u00a2,ass classearg",
322
+ "sec_num": null
323
+ },
324
+ {
325
+ "text": "Using Equal Probability Assumptions Now we find a formula for B(X), the bits of information conveyed when referring to certain parts of the domain. For the army map example, we assume that all objects are equally likely to be referred to, and all attributes, classes, and relationships are also equally likely. So a troop is as likely to be referred to as a tank, or as a forest, etc. Also, a tank on the map is equally likely to be friend, foe, or unknown. Every value for the attributes will be equally likely. Under this assumption, the number of bits of information conveyed by referring to one entity out of v possible entities is log2v. That is, for the equally probable case, B(X) = log2[X[. Figure 1 , beginning with attribute values. A domain expert would decide how many different values are allowed for each attribute. In this example, we will specify that Tank's Priend/Foe value is either friend, foe, or unknownthree possibilities.",
326
+ "cite_spans": [],
327
+ "ref_spans": [
328
+ {
329
+ "start": 699,
330
+ "end": 707,
331
+ "text": "Figure 1",
332
+ "ref_id": "FIGREF0"
333
+ }
334
+ ],
335
+ "eq_spans": [],
336
+ "section": "5",
337
+ "sec_num": null
338
+ },
339
+ {
340
+ "text": "Assuming that there are 128 ID number possibilities, 65,000 positions, and 1,000 possible ranges, and assuming equal probability, we take the log of each number and fill in the complexity beside each attribute for that object. Following the hierarchy, we now add the attribute complexities to get the complexity of the tank object. ",
341
+ "cite_spans": [],
342
+ "ref_spans": [],
343
+ "eq_spans": [],
344
+ "section": "ACFriend/Foe,Tank -~-Iog23 ~ 2",
345
+ "sec_num": null
346
+ },
347
+ {
348
+ "text": "More General Information Measurement",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "6",
353
+ "sec_num": null
354
+ },
355
+ {
356
+ "text": "In most cases, the equal probability assumption will not hold. For example, the bridges in the domain can be made of any of eight materials, but if all of the visible bridges are made of wood, then the Material attribute for Bridge will probably be wood most of the time. In this case, referring to the \"wooden bridge\" on the map doesn't give much more information than just \"bridge.\" For this more general case, define B(X) to be B (X1, X2, ...X,) where each Xi is a possible value of X. Also define pl,p2, ...Pn to be their associated probabilities. Then",
357
+ "cite_spans": [
358
+ {
359
+ "start": 433,
360
+ "end": 448,
361
+ "text": "(X1, X2, ...X,)",
362
+ "ref_id": null
363
+ }
364
+ ],
365
+ "ref_spans": [],
366
+ "eq_spans": [],
367
+ "section": "6",
368
+ "sec_num": null
369
+ },
370
+ {
371
+ "text": "B(Xt, X2, ...Xn) = -Epl logpi i=1",
372
+ "cite_spans": [],
373
+ "ref_spans": [],
374
+ "eq_spans": [],
375
+ "section": "6",
376
+ "sec_num": null
377
+ },
378
+ {
379
+ "text": "These probabilities can be determined using frequency counts from sample dialogs, or estimated based on domain knowledge.",
380
+ "cite_spans": [],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "6",
384
+ "sec_num": null
385
+ },
386
+ {
387
+ "text": "The next step in this research is to obtain several domains that have been built into a dialog system and analyze them. The Circuit Fix-It Shoppe (Smith and D.R.Hipp, 1994) has been analyzed, but the results will only be interesting in comparison to other real domains. This comparison will not only help us verify the correctness of the analyses, but also bring up possible situations that the analysis may not cover.",
388
+ "cite_spans": [
389
+ {
390
+ "start": 146,
391
+ "end": 172,
392
+ "text": "(Smith and D.R.Hipp, 1994)",
393
+ "ref_id": "BIBREF7"
394
+ }
395
+ ],
396
+ "ref_spans": [],
397
+ "eq_spans": [],
398
+ "section": "Future Work",
399
+ "sec_num": "7"
400
+ },
401
+ {
402
+ "text": "Next, we will want to identify a measure of syntactic complexity. This could be related to grammar perplexity. It should take into account vocabulary size, grammar constraints, and the amount of ambiguity in the grammar. We would like to be able to analyze the domains with both the semantic complexity and the syntactic complexity, and see that the results match our intuitions of complexity and the standards of lines of code, reliability, cost of software, and execution time. We would also be interested in observing the correlation between the syntactic and semantic complexities.",
403
+ "cite_spans": [],
404
+ "ref_spans": [],
405
+ "eq_spans": [],
406
+ "section": "Future Work",
407
+ "sec_num": "7"
408
+ },
409
+ {
410
+ "text": "This paper describes a way to organize the objects, attributes, classes, and relationships in a domain and to use these classifications to define a semantic domain complexity. This measurement, along with a syntactic complexity measurement, will give natural language programmers a way to quantify the complexity of a given domain in terms of real-world costs: cost of software, reliability, accuracy, and execution time. After defining a syntactic complexity measure, domains can be analyzed against these real costs to be sure that the measure is accurate. Such a measure will allow natural language systems programmers a way to analyze domains and estimate the costs of building a natural language system beforehand, based on the domain's semantic and syntactic constraints. A standard complexity measure will also allow a comparison of different language processors' ability to handle more and more complex domains and quantify the abilities of the current state of the art in natural language processors.",
411
+ "cite_spans": [],
412
+ "ref_spans": [],
413
+ "eq_spans": [],
414
+ "section": "Conclusion",
415
+ "sec_num": "8"
416
+ }
417
+ ],
418
+ "back_matter": [],
419
+ "bib_entries": {
420
+ "BIBREF0": {
421
+ "ref_id": "b0",
422
+ "title": "Information Theory",
423
+ "authors": [
424
+ {
425
+ "first": "Robert",
426
+ "middle": [
427
+ "B"
428
+ ],
429
+ "last": "Ash",
430
+ "suffix": ""
431
+ }
432
+ ],
433
+ "year": 1965,
434
+ "venue": "",
435
+ "volume": "",
436
+ "issue": "",
437
+ "pages": "",
438
+ "other_ids": {},
439
+ "num": null,
440
+ "urls": [],
441
+ "raw_text": "Robert B. Ash. 1965. Information Theory. Inter- science Publishers.",
442
+ "links": null
443
+ },
444
+ "BIBREF1": {
445
+ "ref_id": "b1",
446
+ "title": "Analyzing the complexity of a domain with respect to an information extraction task",
447
+ "authors": [
448
+ {
449
+ "first": "Amit",
450
+ "middle": [],
451
+ "last": "Bagga",
452
+ "suffix": ""
453
+ },
454
+ {
455
+ "first": "Alan",
456
+ "middle": [
457
+ "W"
458
+ ],
459
+ "last": "Biermann",
460
+ "suffix": ""
461
+ }
462
+ ],
463
+ "year": 1997,
464
+ "venue": "Proceedings of the tenth International Conference on Research on Computational Linguistics (ROCLING X)",
465
+ "volume": "",
466
+ "issue": "",
467
+ "pages": "175--94",
468
+ "other_ids": {},
469
+ "num": null,
470
+ "urls": [],
471
+ "raw_text": "Amit Bagga and Alan W. Biermann. 1997. Ana- lyzing the complexity of a domain with respect to an information extraction task. Proceedings of the tenth International Conference on Research on Computational Linguistics (ROCLING X), pages 175--94, August.",
472
+ "links": null
473
+ },
474
+ "BIBREF2": {
475
+ "ref_id": "b2",
476
+ "title": "Analyzing the performance of message understanding systems",
477
+ "authors": [
478
+ {
479
+ "first": "Amit",
480
+ "middle": [],
481
+ "last": "Bagga",
482
+ "suffix": ""
483
+ }
484
+ ],
485
+ "year": 1997,
486
+ "venue": "Proceedings of the Natural Language Processing Pacific Rim Symposium (NLPRS '97)",
487
+ "volume": "",
488
+ "issue": "",
489
+ "pages": "637--677",
490
+ "other_ids": {},
491
+ "num": null,
492
+ "urls": [],
493
+ "raw_text": "Amit Bagga. 1997. Analyzing the performance of message understanding systems. In Proceedings of the Natural Language Processing Pacific Rim Symposium (NLPRS '97), pages 637---40, Decem- ber.",
494
+ "links": null
495
+ },
496
+ "BIBREF3": {
497
+ "ref_id": "b3",
498
+ "title": "Survey of the state of the art in human language technology, November",
499
+ "authors": [
500
+ {
501
+ "first": "Ron",
502
+ "middle": [],
503
+ "last": "Cole",
504
+ "suffix": ""
505
+ },
506
+ {
507
+ "first": "Victor",
508
+ "middle": [],
509
+ "last": "Zue",
510
+ "suffix": ""
511
+ }
512
+ ],
513
+ "year": 1995,
514
+ "venue": "",
515
+ "volume": "",
516
+ "issue": "",
517
+ "pages": "",
518
+ "other_ids": {},
519
+ "num": null,
520
+ "urls": [],
521
+ "raw_text": "Ron Cole and Victor Zue. 1995. Survey of the state of the art in human language technology, Novem- ber.",
522
+ "links": null
523
+ },
524
+ "BIBREF4": {
525
+ "ref_id": "b4",
526
+ "title": "Computational Complexity and Natural Language",
527
+ "authors": [
528
+ {
529
+ "first": "G",
530
+ "middle": [
531
+ "Edward"
532
+ ],
533
+ "last": "Jr",
534
+ "suffix": ""
535
+ },
536
+ {
537
+ "first": "Robert",
538
+ "middle": [
539
+ "C"
540
+ ],
541
+ "last": "Barton",
542
+ "suffix": ""
543
+ },
544
+ {
545
+ "first": "Eric",
546
+ "middle": [
547
+ "Sven"
548
+ ],
549
+ "last": "Berwick",
550
+ "suffix": ""
551
+ },
552
+ {
553
+ "first": "",
554
+ "middle": [],
555
+ "last": "Ristad",
556
+ "suffix": ""
557
+ }
558
+ ],
559
+ "year": 1987,
560
+ "venue": "",
561
+ "volume": "",
562
+ "issue": "",
563
+ "pages": "",
564
+ "other_ids": {},
565
+ "num": null,
566
+ "urls": [],
567
+ "raw_text": "Jr G. Edward Barton, Robert C. Berwick, and Eric Sven Ristad. 1987. Computational Complex- ity and Natural Language. The MIT Press, Cam- bridge, Massachusetts.",
568
+ "links": null
569
+ },
570
+ "BIBREF5": {
571
+ "ref_id": "b5",
572
+ "title": "The Informational Complexity of Learning from Examples",
573
+ "authors": [
574
+ {
575
+ "first": "Partha",
576
+ "middle": [],
577
+ "last": "Niyogi",
578
+ "suffix": ""
579
+ }
580
+ ],
581
+ "year": 1996,
582
+ "venue": "",
583
+ "volume": "",
584
+ "issue": "",
585
+ "pages": "",
586
+ "other_ids": {},
587
+ "num": null,
588
+ "urls": [],
589
+ "raw_text": "Partha Niyogi. 1996. The Informational Complexity of Learning from Examples. Ph.D. thesis, MIT.",
590
+ "links": null
591
+ },
592
+ "BIBREF6": {
593
+ "ref_id": "b6",
594
+ "title": "The Language Complexity Game",
595
+ "authors": [
596
+ {
597
+ "first": "Eric",
598
+ "middle": [],
599
+ "last": "Sven Ristad",
600
+ "suffix": ""
601
+ }
602
+ ],
603
+ "year": 1993,
604
+ "venue": "",
605
+ "volume": "",
606
+ "issue": "",
607
+ "pages": "",
608
+ "other_ids": {},
609
+ "num": null,
610
+ "urls": [],
611
+ "raw_text": "Eric Sven Ristad. 1993. The Language Complexity Game. MIT Press.",
612
+ "links": null
613
+ },
614
+ "BIBREF7": {
615
+ "ref_id": "b7",
616
+ "title": "Spoken Natural Language Dialog Systems: A Practical Approach",
617
+ "authors": [
618
+ {
619
+ "first": "R",
620
+ "middle": [
621
+ "W"
622
+ ],
623
+ "last": "Smith",
624
+ "suffix": ""
625
+ },
626
+ {
627
+ "first": "D",
628
+ "middle": [
629
+ "R"
630
+ ],
631
+ "last": "Hipp",
632
+ "suffix": ""
633
+ }
634
+ ],
635
+ "year": 1994,
636
+ "venue": "",
637
+ "volume": "",
638
+ "issue": "",
639
+ "pages": "",
640
+ "other_ids": {},
641
+ "num": null,
642
+ "urls": [],
643
+ "raw_text": "R.W. Smith and D.R.Hipp. 1994. Spoken Natural Language Dialog Systems: A Practical Approach.",
644
+ "links": null
645
+ }
646
+ },
647
+ "ref_entries": {
648
+ "FIGREF0": {
649
+ "type_str": "figure",
650
+ "num": null,
651
+ "text": "Example Domain Objects and Attributes",
652
+ "uris": null
653
+ },
654
+ "FIGREF1": {
655
+ "type_str": "figure",
656
+ "num": null,
657
+ "text": "Display(Object) [\"Display the tanks\"] Move(MobileObject,Object) [\"Move Troop at position 100, 400 to the hill\"] Show(Attribute,Object) [\"Show the range of sight of Tank 434\"] Set(Object,Attribute,Attribute) [\"The forest has an area of 100 square yards.\"]",
658
+ "uris": null
659
+ },
660
+ "FIGREF2": {
661
+ "type_str": "figure",
662
+ "num": null,
663
+ "text": "Map Domain Complexity Analysis under the Equal Probability AssumptionNow we have OCtank = 45 and let's say in like manner we get OCtroop = 43. These two types of objects comprise the MobileObject class, so now we can compute this complexity: CCMobileObject = log2 2 + maxobj~MobileObject (OCobj) used for predicate and complete domain complexity measurements, and the rest of the example should be obvious fromFigure 2.",
664
+ "uris": null
665
+ }
666
+ }
667
+ }
668
+ }
Full_text_JSON/prefixW/json/W00/W00-0108.json ADDED
@@ -0,0 +1,458 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0108",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:42.255305Z"
6
+ },
7
+ "title": "Example-based Complexity--Syntax and Semantics as the Production of Ad-hoc Arrangements of Examples",
8
+ "authors": [
9
+ {
10
+ "first": "Robert",
11
+ "middle": [
12
+ "John"
13
+ ],
14
+ "last": "Freeman",
15
+ "suffix": "",
16
+ "affiliation": {},
17
+ "email": "freeman@email.com"
18
+ }
19
+ ],
20
+ "year": "",
21
+ "venue": null,
22
+ "identifiers": {},
23
+ "abstract": "Computational linguists have traditionally sought to model language by finding underlying parameters which govern numerous examples. I describe a different approach which argues that numerous examples themselves, by virtue of their many possible arrangements, provide the only way to specify a sufficiently rich set of \"parameters\". Essentially I argue for a different relationship between example and parameter. With examples primary, and parameterizafions of them secondary, the real \"productions\". Rather than representing a redundant complexity, examples should actually be seen as a simplification, a basis for the numerous arrangements of their \"parameterizations\". Another way of looking at it is to say I argue arrangements of examples, rather than simply revealing underlying parameters, represent in themselves an ignored resource for the modelling of syntactic, and semantic, complexity. I have implemented a small, working, \"shallow parser\" based on these ideas.",
24
+ "pdf_parse": {
25
+ "paper_id": "W00-0108",
26
+ "_pdf_hash": "",
27
+ "abstract": [
28
+ {
29
+ "text": "Computational linguists have traditionally sought to model language by finding underlying parameters which govern numerous examples. I describe a different approach which argues that numerous examples themselves, by virtue of their many possible arrangements, provide the only way to specify a sufficiently rich set of \"parameters\". Essentially I argue for a different relationship between example and parameter. With examples primary, and parameterizafions of them secondary, the real \"productions\". Rather than representing a redundant complexity, examples should actually be seen as a simplification, a basis for the numerous arrangements of their \"parameterizations\". Another way of looking at it is to say I argue arrangements of examples, rather than simply revealing underlying parameters, represent in themselves an ignored resource for the modelling of syntactic, and semantic, complexity. I have implemented a small, working, \"shallow parser\" based on these ideas.",
30
+ "cite_spans": [],
31
+ "ref_spans": [],
32
+ "eq_spans": [],
33
+ "section": "Abstract",
34
+ "sec_num": null
35
+ }
36
+ ],
37
+ "body_text": [
38
+ {
39
+ "text": "! contrast my work with Machine Learning. There are similarities in the emphasis on the analysis of relationships among data, but there are also differences in the assumptions about the nature of the system. I think there has been a tacit assumption in Machine Learning that language system consists of underlying parameters which generate a variety of examples. My argument is that you can turn that relationship around and get a great deal more descriptive power in the form of varying parameterizations of the order in a set of examples.",
40
+ "cite_spans": [],
41
+ "ref_spans": [],
42
+ "eq_spans": [],
43
+ "section": "Introduction--Machine Learning, Data, and Parameterizations",
44
+ "sec_num": null
45
+ },
46
+ {
47
+ "text": "Under the umbrella of Machine Learning I include a wide variety of data based analyses of language which have become popular in recent years. Both distributed and statistical data based models fit in that category: back-propagation networks, Hidden Markov Models, maximum entropy parametefizafions. Apart from their emphasis on data, however, they have one thing in common, and in common with earlier symbolic attempts to codify language system. They all hypothesize parameters for distributions of data. I say it is worth considering that the essence of language is not in such underlying parameters but the collections of examples we seek them through. That there are no underlying parameters, only the chaos of example, much as is the case in a population of people (see also Kenneth Pike \"analogies between linguistic structure and the structure of society\", in de Beaugrande ( 1991) ).",
48
+ "cite_spans": [
49
+ {
50
+ "start": 869,
51
+ "end": 887,
52
+ "text": "Beaugrande ( 1991)",
53
+ "ref_id": "BIBREF0"
54
+ }
55
+ ],
56
+ "ref_spans": [],
57
+ "eq_spans": [],
58
+ "section": "Introduction--Machine Learning, Data, and Parameterizations",
59
+ "sec_num": null
60
+ },
61
+ {
62
+ "text": "One way to describe this is to say that language might be \"irreducibly distributed\". A system where a collection of examples is the smallest set which describes all its structure. Although there might be different levels of this independence (along with differing abilities to parameterize: viz. phonology, morphology, syntax). We might contrast irreducibly distributed systems with those which are parametrically distributed, like a letter recognition system. Certainly, however, we could contrast them with statistical, systems, where only the likelihood of the outcomes is variable.",
63
+ "cite_spans": [],
64
+ "ref_spans": [],
65
+ "eq_spans": [],
66
+ "section": "Introduction--Machine Learning, Data, and Parameterizations",
67
+ "sec_num": null
68
+ },
69
+ {
70
+ "text": "The best thing about such \"irreducibly distributed\" systems is their power.",
71
+ "cite_spans": [],
72
+ "ref_spans": [],
73
+ "eq_spans": [],
74
+ "section": "R from N and the Descriptive Power of Sets",
75
+ "sec_num": null
76
+ },
77
+ {
78
+ "text": "The number of combinations of R objects taken from N is C(N,R) = N!/(N-R)!R!. This is the number of \"word association classes\" N word associations can model, for instance.",
79
+ "cite_spans": [],
80
+ "ref_spans": [],
81
+ "eq_spans": [],
82
+ "section": "R from N and the Descriptive Power of Sets",
83
+ "sec_num": null
84
+ },
85
+ {
86
+ "text": "The idea that we can model syntactic classes as \"word association classes\" is not new. There are numerous studies dating from the early 1990's and before which take this approach e.g. Schuetze (1993) , Finch (1993) ; and Powers (1996) lists references back to Pike's Tagmemics. What is different in my approach is the assumed relationship between these classes and the data which reveal them. If the variety of example can be generated by a small number of abstract parameters then we expect one set of relationships among that data to be more important than the others. If on the other hand we consider the full range of relationships possible among all the examples then we have an enormous range of structure at our disposal. Given the problems we have had describing language according to parameters, it is surprising that we have not more widely considered the attraction of this power.",
87
+ "cite_spans": [
88
+ {
89
+ "start": 184,
90
+ "end": 199,
91
+ "text": "Schuetze (1993)",
92
+ "ref_id": "BIBREF5"
93
+ },
94
+ {
95
+ "start": 202,
96
+ "end": 214,
97
+ "text": "Finch (1993)",
98
+ "ref_id": "BIBREF1"
99
+ },
100
+ {
101
+ "start": 221,
102
+ "end": 234,
103
+ "text": "Powers (1996)",
104
+ "ref_id": "BIBREF4"
105
+ }
106
+ ],
107
+ "ref_spans": [],
108
+ "eq_spans": [],
109
+ "section": "R from N and the Descriptive Power of Sets",
110
+ "sec_num": null
111
+ },
112
+ {
113
+ "text": "Consider the evidence that we need this power:",
114
+ "cite_spans": [],
115
+ "ref_spans": [],
116
+ "eq_spans": [],
117
+ "section": "R from N and the Descriptive Power of Sets",
118
+ "sec_num": null
119
+ },
120
+ {
121
+ "text": "a) Structure",
122
+ "cite_spans": [],
123
+ "ref_spans": [],
124
+ "eq_spans": [],
125
+ "section": "R from N and the Descriptive Power of Sets",
126
+ "sec_num": null
127
+ },
128
+ {
129
+ "text": "Collocation, phraseology. The data based analysis of language has bought home more and more strongly that some structure is beyond any logic we can enumerate. Face to face with the reality of use this realization has been most widely accepted in areas of linguistics which deal with language acquisition and teaching. Examples of relevant discussions are Pawley and Syder (1983) , Nattinger (1980) , Weinert (1995) . We are talking about explaining why you might say \"strong tea\" but not \"powerful tea\".",
130
+ "cite_spans": [
131
+ {
132
+ "start": 355,
133
+ "end": 378,
134
+ "text": "Pawley and Syder (1983)",
135
+ "ref_id": "BIBREF3"
136
+ },
137
+ {
138
+ "start": 381,
139
+ "end": 397,
140
+ "text": "Nattinger (1980)",
141
+ "ref_id": "BIBREF2"
142
+ },
143
+ {
144
+ "start": 400,
145
+ "end": 414,
146
+ "text": "Weinert (1995)",
147
+ "ref_id": "BIBREF6"
148
+ }
149
+ ],
150
+ "ref_spans": [],
151
+ "eq_spans": [],
152
+ "section": "R from N and the Descriptive Power of Sets",
153
+ "sec_num": null
154
+ },
155
+ {
156
+ "text": "In practical terms a processor based fundamentally on distributions should be able to tell that \"strong tea\" is idiomatic and \"powerful tea\" less so because the \"word association distributions\", say, of \"strong\" and \"powerful\" are different in detail, though not in generalities.",
157
+ "cite_spans": [],
158
+ "ref_spans": [],
159
+ "eq_spans": [],
160
+ "section": "R from N and the Descriptive Power of Sets",
161
+ "sec_num": null
162
+ },
163
+ {
164
+ "text": "A system based on labels, an assumption of underlying parameters, will not be able to do that (for a set of labels smaller than the set of all such distinct utterances).",
165
+ "cite_spans": [],
166
+ "ref_spans": [],
167
+ "eq_spans": [],
168
+ "section": "R from N and the Descriptive Power of Sets",
169
+ "sec_num": null
170
+ },
171
+ {
172
+ "text": "An irreducibly distributed representation gives us the power to model collocation. We would need a different syntactic class for every collocational restriction otherwise.",
173
+ "cite_spans": [],
174
+ "ref_spans": [],
175
+ "eq_spans": [],
176
+ "section": "R from N and the Descriptive Power of Sets",
177
+ "sec_num": null
178
+ },
179
+ {
180
+ "text": "N!/(N-R)!R! groupings give you an essentially infinite set of configurations. We have the power to associate a different configuration with everything we might ever want to say, if we like. In fact, by default we will do so. This means we have the power to represent not only syntactic idiosyncrasy, but the complexity of meaning, directly.",
181
+ "cite_spans": [],
182
+ "ref_spans": [],
183
+ "eq_spans": [],
184
+ "section": "b) Meaning",
185
+ "sec_num": null
186
+ },
187
+ {
188
+ "text": "The idea of meaning implied by the association is interesting in itself, h is an organization of data. But this is reasonable. And if we accept it then we have a fundamental definition of meaning in terms we can quantify. Meaning is synonymous with an organization of data: events, observations. New organization equals new meaning.",
189
+ "cite_spans": [],
190
+ "ref_spans": [],
191
+ "eq_spans": [],
192
+ "section": "b) Meaning",
193
+ "sec_num": null
194
+ },
195
+ {
196
+ "text": "There is an interesting topical analogy to be made here: a Web search engine. In a sense any collection of documents found \"represent\" the meaning of a set of search keys. There are many more subtleties of collection possible than can ever be labeled in an index.",
197
+ "cite_spans": [],
198
+ "ref_spans": [],
199
+ "eq_spans": [],
200
+ "section": "b) Meaning",
201
+ "sec_num": null
202
+ },
203
+ {
204
+ "text": "In a way my argument is just.that if we want to model the full complexity of syntactic restriction, or semantic subjectivity, we have no choice but to demote categories from being central, make them a product, and base them on the reorganization of content much the way they are treated in most Web search engines.",
205
+ "cite_spans": [],
206
+ "ref_spans": [],
207
+ "eq_spans": [],
208
+ "section": "b) Meaning",
209
+ "sec_num": null
210
+ },
211
+ {
212
+ "text": "Such an irreducibly distributed definition explains many puzzling properties of thought. It provides a natural mechanism for how:",
213
+ "cite_spans": [],
214
+ "ref_spans": [],
215
+ "eq_spans": [],
216
+ "section": "b) Meaning",
217
+ "sec_num": null
218
+ },
219
+ {
220
+ "text": "\u2022 new concepts can be created (novel reorganization of old examples--\"Aha[\") \u2022 new meaning can be communicated (I force you to reorganize your examples in the way I've just reorganized mine) \u2022 language (and conceptual) drift can occur (slow shift in balance of examples).",
221
+ "cite_spans": [],
222
+ "ref_spans": [],
223
+ "eq_spans": [],
224
+ "section": "b) Meaning",
225
+ "sec_num": null
226
+ },
227
+ {
228
+ "text": "As well as the usual useful properties of distributed representations:",
229
+ "cite_spans": [],
230
+ "ref_spans": [],
231
+ "eq_spans": [],
232
+ "section": "b) Meaning",
233
+ "sec_num": null
234
+ },
235
+ {
236
+ "text": "\u2022 flexibility (the group can vary) \u2022 robustness (it does not matter of a few elements are missing) \u2022 ambiguity (intersection sets) \u2022 subjectivity (sub-sets etc.)",
237
+ "cite_spans": [],
238
+ "ref_spans": [],
239
+ "eq_spans": [],
240
+ "section": "b) Meaning",
241
+ "sec_num": null
242
+ },
243
+ {
244
+ "text": "There is also an interesting tie in between this (meaning, and the primacy of data over parameter) and the vigorous \"rebel\" linguistic school of Systemic Functional Grammar. Most importantly in SFG the only irreducible definition of meaning, or structure, is a set of contrasts between events, or observations. Unfortunately in SFG an overemphasis on abstract parameters (function/meaning) means that in practice the flail power of contrasts among sets to model complexity is not applied. Nevertheless, there are strong parallels between my model and the core tenets of Systemic Functional Grammar. I find that a natural analysis according to the principles I have outlined above results in structure along lines of functional category. In fact the association groupings on which I base my analysis lead me to propose an \"inverse\" relationship (in a sense that can be precisely defined) between functional category, about which SFG is described, and categories based on syntactic regularities of the type which have traditionally been seen as important.",
245
+ "cite_spans": [],
246
+ "ref_spans": [],
247
+ "eq_spans": [],
248
+ "section": "b) Meaning",
249
+ "sec_num": null
250
+ },
251
+ {
252
+ "text": "I have implemented a small \"association parser\" based on these principles and the initial results have been interesting. I provide a list of typical \"parses\" in the appendix. Essentially it scores the grammaticality and provides a structural breakdown of each string of words it is presented with. Among more interesting observations, as I mentioned above, is the fact that my parser seems to naturally identify structure along lines of functional equivalence. Rather like the kind of analysis a Systemic Functional Grammarian might favor.",
253
+ "cite_spans": [],
254
+ "ref_spans": [],
255
+ "eq_spans": [],
256
+ "section": "A Simple \"Association Parser\"",
257
+ "sec_num": null
258
+ },
259
+ {
260
+ "text": "Since processing is essentially a search over a database for similar examples the main bottleneck is the inefficiency of a serial processor for nearest neighbor search. There are two key complexities. The search over one I have managed to reduce to linear time. The other remains to be resolved. make a car Parsed: (make (a car)), score: 5.689303 Parsed: ((make a) car), score: 2.120204 make another car Parsed: (make (another car)), score: 1.642482 Parsed: ((make another) car), score: O. 189554 make another try Parsed: ((make another) try), score: 0.051537 Parsed: (make (another try)), score: 0.039471 go with the president Parsed: ((go with) (the president)), score: 7.983729 Parsed: (go (with (the president))), score: 4.620297 Parsed: (go ((with the) president)), score: 0.771305 Parsed: (((go with) the) president), score: 0.318181 Parsed: ((go (with the)) president), score: 0.065606 I try to go Parsed: (i ((try to) go)), score: 4.343059 Parsed: (((i try) to) go), score: 1.297454 Parsed: ((i (try to)) go), score: 1.174891 Parsed: (i (try (to go))), score: 0.553270 Parsed: ((i try) (to go)), score: 0.474397 the election results Parsed: (the (election results)), score: 89.247596 Parsed: ((the election) results), score: 15.212562 they held an election Parsed: (they (held (an election))), score: 0.000238 Parsed: ((they held) (an election)), score: 0.000007 Parsed: (((they held) an) election), score: 0.000000 go with her Parsed: ((go with) her), score: 9.073902 Parsed: (go (with her)), score: O. 107435",
261
+ "cite_spans": [],
262
+ "ref_spans": [],
263
+ "eq_spans": [],
264
+ "section": "A Simple \"Association Parser\"",
265
+ "sec_num": null
266
+ }
267
+ ],
268
+ "back_matter": [],
269
+ "bib_entries": {
270
+ "BIBREF0": {
271
+ "ref_id": "b0",
272
+ "title": "Linguistic Theory: The Discourse of Fundamental Works, section 5",
273
+ "authors": [
274
+ {
275
+ "first": "Robert",
276
+ "middle": [],
277
+ "last": "Beaugrande",
278
+ "suffix": ""
279
+ },
280
+ {
281
+ "first": "",
282
+ "middle": [],
283
+ "last": "De",
284
+ "suffix": ""
285
+ }
286
+ ],
287
+ "year": 1991,
288
+ "venue": "",
289
+ "volume": "84",
290
+ "issue": "",
291
+ "pages": "",
292
+ "other_ids": {},
293
+ "num": null,
294
+ "urls": [],
295
+ "raw_text": "Beaugrande, Robert de (1991) Linguistic Theory: The Discourse of Fundamental Works, section 5.84, Harlow: Longman.",
296
+ "links": null
297
+ },
298
+ "BIBREF1": {
299
+ "ref_id": "b1",
300
+ "title": "Finding Structure in Language",
301
+ "authors": [
302
+ {
303
+ "first": "Steven",
304
+ "middle": [],
305
+ "last": "Finch",
306
+ "suffix": ""
307
+ }
308
+ ],
309
+ "year": 1993,
310
+ "venue": "",
311
+ "volume": "",
312
+ "issue": "",
313
+ "pages": "",
314
+ "other_ids": {},
315
+ "num": null,
316
+ "urls": [],
317
+ "raw_text": "Finch, Steven (1993) Finding Structure in Language. Ph.D. Thesis, University of Edinburgh.",
318
+ "links": null
319
+ },
320
+ "BIBREF2": {
321
+ "ref_id": "b2",
322
+ "title": "A lexical Phrase Grammar for ESL",
323
+ "authors": [
324
+ {
325
+ "first": "James",
326
+ "middle": [
327
+ "R"
328
+ ],
329
+ "last": "Nattinger",
330
+ "suffix": ""
331
+ }
332
+ ],
333
+ "year": 1980,
334
+ "venue": "",
335
+ "volume": "",
336
+ "issue": "",
337
+ "pages": "33--334",
338
+ "other_ids": {},
339
+ "num": null,
340
+ "urls": [],
341
+ "raw_text": "Nattinger, James R: (1980) A lexical Phrase Grammar for ESL, TESOL Quarterly Vol. XIV., No. 3, pp. 33%334.",
342
+ "links": null
343
+ },
344
+ "BIBREF3": {
345
+ "ref_id": "b3",
346
+ "title": "Two puzzles for linguistic theory: nativelike selection and nativelike fluency",
347
+ "authors": [
348
+ {
349
+ "first": "A",
350
+ "middle": [],
351
+ "last": "Pawley",
352
+ "suffix": ""
353
+ },
354
+ {
355
+ "first": "F",
356
+ "middle": [],
357
+ "last": "Syder",
358
+ "suffix": ""
359
+ }
360
+ ],
361
+ "year": 1983,
362
+ "venue": "1983: Language and Communication",
363
+ "volume": "",
364
+ "issue": "",
365
+ "pages": "191--226",
366
+ "other_ids": {},
367
+ "num": null,
368
+ "urls": [],
369
+ "raw_text": "Pawley, A. & Syder F. (1983) Two puzzles for linguistic theory: nativelike selection and nativelike fluency, in L Richards and IL Schmidt (eds.) 1983: Language and Communication, pp. 191-226. London: Longman.",
370
+ "links": null
371
+ },
372
+ "BIBREF4": {
373
+ "ref_id": "b4",
374
+ "title": "Unsupervised learning of linguistic structure: An empirical evaluation",
375
+ "authors": [
376
+ {
377
+ "first": "D",
378
+ "middle": [
379
+ "M W"
380
+ ],
381
+ "last": "Powers",
382
+ "suffix": ""
383
+ }
384
+ ],
385
+ "year": 1996,
386
+ "venue": "International Journal of Corpus Linguistics",
387
+ "volume": "1",
388
+ "issue": "2",
389
+ "pages": "",
390
+ "other_ids": {},
391
+ "num": null,
392
+ "urls": [],
393
+ "raw_text": "Powers, D. M. W. (1996) Unsupervised learning of linguistic structure: An empirical evaluation, International Journal of Corpus Linguistics 1#2.",
394
+ "links": null
395
+ },
396
+ "BIBREF5": {
397
+ "ref_id": "b5",
398
+ "title": "Distributed Syntactic Representations with an Application to Part-of-Speech Tagging",
399
+ "authors": [
400
+ {
401
+ "first": "H",
402
+ "middle": [],
403
+ "last": "Schuetze",
404
+ "suffix": ""
405
+ }
406
+ ],
407
+ "year": 1993,
408
+ "venue": "IEEE International Conference on Neural Networks",
409
+ "volume": "3",
410
+ "issue": "",
411
+ "pages": "1504--1513",
412
+ "other_ids": {},
413
+ "num": null,
414
+ "urls": [],
415
+ "raw_text": "Schuetze, H. (1993) Distributed Syntactic Representations with an Application to Part-of- Speech Tagging, 1993 IEEE International Conference on Neural Networks, p1504-9 vol. 3.",
416
+ "links": null
417
+ },
418
+ "BIBREF6": {
419
+ "ref_id": "b6",
420
+ "title": "The Role of Formulaic Language in Second Language Acquisition: A Review",
421
+ "authors": [
422
+ {
423
+ "first": "Regina",
424
+ "middle": [],
425
+ "last": "Weinert",
426
+ "suffix": ""
427
+ }
428
+ ],
429
+ "year": 1995,
430
+ "venue": "Applied Linguistics",
431
+ "volume": "16",
432
+ "issue": "2",
433
+ "pages": "181--205",
434
+ "other_ids": {},
435
+ "num": null,
436
+ "urls": [],
437
+ "raw_text": "Weinert, Regina. (1995) The Role of Formulaic Language in Second Language Acquisition: A Review, Applied Linguistics, Vol. 16, No. 2, pp. 181-205.",
438
+ "links": null
439
+ },
440
+ "BIBREF7": {
441
+ "ref_id": "b7",
442
+ "title": "Prototype make some products Parsed: (make (some products)), score: 1.329954 Parsed: ((make some) products), score: 0.023665 make some money Parsed: (make (some money)), score: 1.555408 Parsed: ((make some) money)",
443
+ "authors": [],
444
+ "year": null,
445
+ "venue": "Examples of Parses Produced by my",
446
+ "volume": "",
447
+ "issue": "",
448
+ "pages": "",
449
+ "other_ids": {},
450
+ "num": null,
451
+ "urls": [],
452
+ "raw_text": "Appendix--Examples of Parses Produced by my \"Association Parser\" Prototype make some products Parsed: (make (some products)), score: 1.329954 Parsed: ((make some) products), score: 0.023665 make some money Parsed: (make (some money)), score: 1.555408 Parsed: ((make some) money), score: 0.042059",
453
+ "links": null
454
+ }
455
+ },
456
+ "ref_entries": {}
457
+ }
458
+ }
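The W00-0100 file above quotes the binomial coefficient C(N,R) = N!/((N-R)!R!) as the count of distinct "word association classes" that R-element groupings over N word associations can form. A quick numerical check of that formula, as an illustrative Python sketch only (the helper name is ours and is not part of the corpus file):

    from math import comb  # comb(n, r) = n! / ((n - r)! * r!)

    def association_groupings(n: int, r: int) -> int:
        # Number of distinct R-element groupings available among N word associations.
        return comb(n, r)

    # Even modest N yields a combinatorially huge space of groupings,
    # which is the "descriptive power of sets" argument the paper makes.
    print(association_groupings(50, 5))    # 2118760
    print(association_groupings(1000, 3))  # 166167000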
Full_text_JSON/prefixW/json/W00/W00-0109.json ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0109",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:23.555733Z"
6
+ },
7
+ "title": "Partially Saturated Referents as a Source of Complexity in Semantic Interpretation",
8
+ "authors": [
9
+ {
10
+ "first": "David",
11
+ "middle": [
12
+ "D"
13
+ ],
14
+ "last": "Mcdonald",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "Brandeis University",
19
+ "location": {}
20
+ },
21
+ "email": "d@alum.mit.edu"
22
+ }
23
+ ],
24
+ "year": "",
25
+ "venue": null,
26
+ "identifiers": {},
27
+ "abstract": "A significant factor in the complexity of the compressed, complex prose style used by journalists in short, targeted commercial reports (Who's News, joint ventures, earnings reports, etc.) is the fact that many of the phrases are semantically incomplete, i.e. their interpretation is dependent on information in other parts of the sentence or the in discourse context. We propose that the complexity that such partially saturated referents contribute to the overall process of semantic interpretation can be characterized by two factors we will call displacement and unpacking. This complexity source can be quantified by counting the distance, in nodes, between each phrase that has a locally incomplete interpretation and the phrase(s) that supply the terms that complete them. In this paper we will define this phenomenon and illustrate its impact on interpretation by examining short texts excerpted from the Tipster corpus and other online sources.",
28
+ "pdf_parse": {
29
+ "paper_id": "W00-0109",
30
+ "_pdf_hash": "",
31
+ "abstract": [
32
+ {
33
+ "text": "A significant factor in the complexity of the compressed, complex prose style used by journalists in short, targeted commercial reports (Who's News, joint ventures, earnings reports, etc.) is the fact that many of the phrases are semantically incomplete, i.e. their interpretation is dependent on information in other parts of the sentence or the in discourse context. We propose that the complexity that such partially saturated referents contribute to the overall process of semantic interpretation can be characterized by two factors we will call displacement and unpacking. This complexity source can be quantified by counting the distance, in nodes, between each phrase that has a locally incomplete interpretation and the phrase(s) that supply the terms that complete them. In this paper we will define this phenomenon and illustrate its impact on interpretation by examining short texts excerpted from the Tipster corpus and other online sources.",
34
+ "cite_spans": [],
35
+ "ref_spans": [],
36
+ "eq_spans": [],
37
+ "section": "Abstract",
38
+ "sec_num": null
39
+ }
40
+ ],
41
+ "body_text": [
42
+ {
43
+ "text": "The goal of this paper is to precisely characterize the intuitive observation that the A sentences below are more complex than their B counterparts. (Examplela. is from article 231 of the Tipster joint venture corpus; 2a is from article 2279.) The B examples were corn-posed by the author. The task is information extraction, where the goal is to determine the amount that each partner in the joint venture is contributing to the venture's total capital-ization.",
44
+ "cite_spans": [],
45
+ "ref_spans": [],
46
+ "eq_spans": [],
47
+ "section": "The Problem",
48
+ "sec_num": "1."
49
+ },
50
+ {
51
+ "text": "la. It will be capitalized at 130 million ringgit, which the three companies will equally shoulder.",
52
+ "cite_spans": [],
53
+ "ref_spans": [],
54
+ "eq_spans": [],
55
+ "section": "The Problem",
56
+ "sec_num": "1."
57
+ },
58
+ {
59
+ "text": "lb The three companies will shoulder equal amounts of the venture's capitalization of 130 million ringgit.",
60
+ "cite_spans": [],
61
+ "ref_spans": [],
62
+ "eq_spans": [],
63
+ "section": "The Problem",
64
+ "sec_num": "1."
65
+ },
66
+ {
67
+ "text": "2a .... the joint firm, capitalized at one billion yen, will be 60 pct owned by P. T. Astra International, Inc., and 40 pct by Daihatsu. 2b .... P. T. Astra will own 60 pct of the joint firm's capitalization of one billion yen and Daihatsu will own 40 pct.",
68
+ "cite_spans": [
69
+ {
70
+ "start": 83,
71
+ "end": 106,
72
+ "text": "T. Astra International,",
73
+ "ref_id": null
74
+ },
75
+ {
76
+ "start": 107,
77
+ "end": 112,
78
+ "text": "Inc.,",
79
+ "ref_id": null
80
+ },
81
+ {
82
+ "start": 113,
83
+ "end": 147,
84
+ "text": "and 40 pct by Daihatsu. 2b .... P.",
85
+ "ref_id": null
86
+ }
87
+ ],
88
+ "ref_spans": [],
89
+ "eq_spans": [],
90
+ "section": "The Problem",
91
+ "sec_num": "1."
92
+ },
93
+ {
94
+ "text": "We are trying to quantify an aspect of the semantic interpretation process--the process by which the lexical and syntactic elements of a text are mapped to a collection of typed, structured objects with respect to some model (broadly speaking, a collection of individuals and relations over them).",
95
+ "cite_spans": [],
96
+ "ref_spans": [],
97
+ "eq_spans": [],
98
+ "section": "The Problem",
99
+ "sec_num": "1."
100
+ },
101
+ {
102
+ "text": "We presume (a) that interpretations are formed compositionally following the paths provided by the syntax; (b) that they come into existence incrementally phrase by phrase, object by object as the parser moves left to right through the text. This implies that most relations will initially be only partially saturated. And (c) that the mapping from lexicosyntactic objects to semantic objects is a matter of recognizing function-argument patterns that are indicated structurally or morphologically and ultimately driven by information provided by the lexical sources of the predicates.",
103
+ "cite_spans": [],
104
+ "ref_spans": [],
105
+ "eq_spans": [],
106
+ "section": "The Problem",
107
+ "sec_num": "1."
108
+ },
109
+ {
110
+ "text": "Given this background, the question is what makes the A sentences more complex than the B sentences even though both convey essentially the same information, l The answer, Information, albeit of a different kind, is also conveyed by ordenng, choice of cohesive devices, or even just following the stylistic conventions of the genre (which the B sentences do not). Quantifying the impact of as we see it, lies in the nature of the path that that terms must take through the text's phrase structure as they are composed to form relations: the farther the distance the greater the complexity.",
111
+ "cite_spans": [],
112
+ "ref_spans": [],
113
+ "eq_spans": [],
114
+ "section": "The Problem",
115
+ "sec_num": "1."
116
+ },
117
+ {
118
+ "text": "Compositional complexity, as we propose to call this phenomenon, is a problem that arises because speakers establish their relationship with their audience by producing texts (in the formal sense) rather than a jumbled salad of independent phrases. To this end, speakers have at their disposal a large battery of linguistic devices that give texts their cohesion by omitting information that their audience must now infer, thereby inducing the audience's attention (Halliday & Hasan 1976) .",
119
+ "cite_spans": [
120
+ {
121
+ "start": 465,
122
+ "end": 488,
123
+ "text": "(Halliday & Hasan 1976)",
124
+ "ref_id": null
125
+ }
126
+ ],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "The Problem",
130
+ "sec_num": "1."
131
+ },
132
+ {
133
+ "text": "One of these devices is the use of phrases whose interpretations are locally incomplete: partially saturated. To understand such phrases, the audience (natural language understanding system) must search through the context and identify the terms that are needed to fully populate (saturate) the model-level relations these phrases denote.",
134
+ "cite_spans": [],
135
+ "ref_spans": [],
136
+ "eq_spans": [],
137
+ "section": "The Problem",
138
+ "sec_num": "1."
139
+ },
140
+ {
141
+ "text": "We call this aspect of the semantic interpretation process 'compositional' complexity because we assume that the bulk of the organization on the context that is searched is provided by the text's syntactic structure, and that the interpretation process overall is organized compositionally as a walk over the phrase structure the syntax defines (for us a bottom up and left to right traversal in lockstep with the parser as it establishes phrasal boundaries).",
142
+ "cite_spans": [],
143
+ "ref_spans": [],
144
+ "eq_spans": [],
145
+ "section": "The Problem",
146
+ "sec_num": "1."
147
+ },
148
+ {
149
+ "text": "These assumptions suggest that a text will be harder to understand the greater the separation between the partially-saturated relations and their missing terms (i.e. the process of its interpretation will require more effort in terms of larger working state, using a richer type system, deploying a more complex this information structure, however, is beyond our present abilities.",
150
+ "cite_spans": [],
151
+ "ref_spans": [],
152
+ "eq_spans": [],
153
+ "section": "The Problem",
154
+ "sec_num": "1."
155
+ },
156
+ {
157
+ "text": "control structure, inviting a greater chance of error, etc.). As a first approximation we will measure this complexity by counting the number of intervening syntactic nodes.",
158
+ "cite_spans": [],
159
+ "ref_spans": [],
160
+ "eq_spans": [],
161
+ "section": "52",
162
+ "sec_num": null
163
+ },
164
+ {
165
+ "text": "We will explore this notion of compositional complexity by first looking in some detail at the structure and interpretation of example la, \"It [the joint venture] will be capitalized at 130 million ringgit, which the three companies will equally shoulder\", which we take to have the following syntactic structure. 2 The first clause, \"it wilt be capitalized at 130 million ringgif', illustrates the simplest case of compositional complexity, where terms are adjacent to their targets. We assume that",
166
+ "cite_spans": [],
167
+ "ref_spans": [],
168
+ "eq_spans": [],
169
+ "section": "An Example",
170
+ "sec_num": "2."
171
+ },
172
+ {
173
+ "text": "We are agnostic about what the 'true' choice of labelings and other theory-governed particulars should be; what is important is the overall shape of the tree. the word capitalize in the sense used here 3 denotes a function of two arguments, where J is restricted to (can only be bound to) objects of type joint venture and $ to objects of type amount of money.",
174
+ "cite_spans": [],
175
+ "ref_spans": [],
176
+ "eq_spans": [],
177
+ "section": "An Example",
178
+ "sec_num": "2."
179
+ },
180
+ {
181
+ "text": "In this base case the two needed terms are not separated by any intermediary syntactic nodes and we say that the text has a compositional complexity of zero.",
182
+ "cite_spans": [],
183
+ "ref_spans": [],
184
+ "eq_spans": [],
185
+ "section": "J,$ . capitalization(J, $)",
186
+ "sec_num": null
187
+ },
188
+ {
189
+ "text": "The result of binding these two terms is the instantiation of the fully saturated relation (i) below. What is shown is an expression but it intended just as a gloss of a typed structured object. Here and the examples to follow we will abbreviate freely in the interests of space, e.g. jv indicates the object that represents the joint venture, 130-million-ringget the object rep-resenting the instance of that amount of money that is being invested in the venture, and so on. We have given expression (i) a label, Cap-l, to emphasize its status as an object and to provide a simple means of indicating references to it in other relations.",
190
+ "cite_spans": [],
191
+ "ref_spans": [],
192
+ "eq_spans": [],
193
+ "section": "J,$ . capitalization(J, $)",
194
+ "sec_num": null
195
+ },
196
+ {
197
+ "text": "(i) Cap-l: capitalization(J, 130- million-ringgit)",
198
+ "cite_spans": [],
199
+ "ref_spans": [],
200
+ "eq_spans": [],
201
+ "section": "J,$ . capitalization(J, $)",
202
+ "sec_num": null
203
+ },
204
+ {
205
+ "text": "Adopting an operational perspective, we can identify two different aspects of compositional complexity: displacement and unpacking. Displacement is simply the separation between a term and its binding site given their relative depths in the tree.",
206
+ "cite_spans": [],
207
+ "ref_spans": [],
208
+ "eq_spans": [],
209
+ "section": "J,$ . capitalization(J, $)",
210
+ "sec_num": null
211
+ },
212
+ {
213
+ "text": "The need for unpacking follows from our assumption that a text is interpreted incrementally, with relations (or partial relations) forming as soon as possible in the parser's progress through the text. We also assume that the individual elements of the text become unavailable at that moment except with respect to their configuration within the relation they have become part of. This is the sense of capitalize where it does not have an agent; cf. \"Oracle lost $3.9 billion in market capitalization\" [Wired 8.03, pg. 272] .",
214
+ "cite_spans": [
215
+ {
216
+ "start": 502,
217
+ "end": 514,
218
+ "text": "[Wired 8.03,",
219
+ "ref_id": null
220
+ },
221
+ {
222
+ "start": 515,
223
+ "end": 523,
224
+ "text": "pg. 272]",
225
+ "ref_id": null
226
+ }
227
+ ],
228
+ "ref_spans": [],
229
+ "eq_spans": [],
230
+ "section": "J,$ . capitalization(J, $)",
231
+ "sec_num": null
232
+ },
233
+ {
234
+ "text": "In our experience this is a valuable property. Consider the partially saturated relation below that is the denotation of the relative clause of la at the point when the downstairs S has been parsed (\"which the three companies will equally shoulder\"). We assume for present purposes that shoulder denotes a model-level category we can gloss as contributes-to-capitalization. The objects representing the three companies are glossed as just C 1, C2, C3.",
235
+ "cite_spans": [],
236
+ "ref_spans": [],
237
+ "eq_spans": [],
238
+ "section": "J,$ . capitalization(J, $)",
239
+ "sec_num": null
240
+ },
241
+ {
242
+ "text": "( ii ) ~ amount contributes-to- capitalization( collection(Cl, C2, C3 ), amount)",
243
+ "cite_spans": [],
244
+ "ref_spans": [],
245
+ "eq_spans": [],
246
+ "section": "J,$ . capitalization(J, $)",
247
+ "sec_num": null
248
+ },
249
+ {
250
+ "text": "The agent of this relation is plain enough (those three particular companies), but what about the 'amount' that they contribute? Syntactically, the relative clause is of course open in its direct object, which the parser will associate with the np 130 million ringgit. But how is this syntactic open variable mirrored semantically? When thought of as a contri-bution to capitalization, the denotation of 130 million ringgit is not simply an amount of money in Indonesian currency, which would be meaningless. The np's denotation should instead provide a link though which we can determine that the money constitutes the funding of some particular venture. This can be reflected in the restriction we place on the amount variable. This is where unpacking comes in. We have the option to view (i) as a composite object with a first class object representing each of its variable bindings in its own right, as in (iii) which is the unreduced binding of the amount of money to the amount variable of the object we named Cap-1 in (i). Under this view we can unpack Cap-i into its constituent elements and make this binding object accessible to be bound to amount, giving us:",
251
+ "cite_spans": [],
252
+ "ref_spans": [],
253
+ "eq_spans": [],
254
+ "section": "J,$ . capitalization(J, $)",
255
+ "sec_num": null
256
+ },
257
+ {
258
+ "text": "(iv) contributes-to-capitalizatlon ( collection(Cl, C2, C3), Amt~l)",
259
+ "cite_spans": [],
260
+ "ref_spans": [],
261
+ "eq_spans": [],
262
+ "section": "J,$ . capitalization(J, $)",
263
+ "sec_num": null
264
+ },
265
+ {
266
+ "text": "Now that we have illustrated the character of the complexity involved, what kind of numbers should be put to this so that we can compare different text quantitatively? With no literature to guide us here we should start with a simple calculuS. We will add one 'point' for each node that intervenes between the partial relation and each term that it is missing, and one for each variable binding that must be unpacked from an already formed relation.",
267
+ "cite_spans": [],
268
+ "ref_spans": [],
269
+ "eq_spans": [],
270
+ "section": "Measurements",
271
+ "sec_num": "3."
272
+ },
273
+ {
274
+ "text": "Under this analysis, the displacement of the 'amount' term contributes two points for the two nodes that intervene between the location of the verb and the relative pronoun. 4 We add another point for unpacking given that the amount of money per se does not fit the restrictions we imposed on the AMT of a contributes-to-capitalization and we need to unpack the denotation of the upper clause to get at the binding we need. This gives us a total of three points of compositional complexity for saturating the relation created by shoulder.",
275
+ "cite_spans": [],
276
+ "ref_spans": [],
277
+ "eq_spans": [],
278
+ "section": "Measurements",
279
+ "sec_num": "3."
280
+ },
281
+ {
282
+ "text": "What other kinds of costs have we ignored so far? One definite cost is establishing what category (function, predicate) shoulder actually denotes since unless that is known the type constraints on its variable bindings will be untenably vague. (Consider that in this domain it will be quite common to see the phrase to shoulder debt.)",
283
+ "cite_spans": [],
284
+ "ref_spans": [],
285
+ "eq_spans": [],
286
+ "section": "Measurements",
287
+ "sec_num": "3."
288
+ },
289
+ {
290
+ "text": "Another, possibly debatable, cost is whether to distribute the denotation of the \"the three companies\" across the capitalization to create three individual relations. Just like one could elect to ignore the fact that a multi-term relation can be seen as a set of individual variable bindings until one of those bindings is",
291
+ "cite_spans": [],
292
+ "ref_spans": [],
293
+ "eq_spans": [],
294
+ "section": "Measurements",
295
+ "sec_num": "3."
296
+ },
297
+ {
298
+ "text": "We assume the parser carries the denotation of \u2022 the relativized np down to the spec posi-tion; doing that certainly permits an easier analysis of the relative clause since it allows it to take on the surface pattern of, e.g., topicalization.",
299
+ "cite_spans": [],
300
+ "ref_spans": [],
301
+ "eq_spans": [],
302
+ "section": "Measurements",
303
+ "sec_num": "3."
304
+ },
305
+ {
306
+ "text": "needed to do work in another part of the text's interpretation, the distribution of this conjunction could remain a latent option until it was needed to make explicit some other semantic relation.",
307
+ "cite_spans": [],
308
+ "ref_spans": [],
309
+ "eq_spans": [],
310
+ "section": "Measurements",
311
+ "sec_num": "3."
312
+ },
313
+ {
314
+ "text": "We do need to distribute the companies conjunction in example 1 a because of the other relation-generating lexical head that we have yet to consider: equally. (Recall that the text of la is \"It will be capitalized at 130 million ringgit, which the three companies will equally shoulder\".) In isolation (before being specialized to the situation of joint venture capitalization, another cost), equal denotes a completely unsaturated relation:",
315
+ "cite_spans": [],
316
+ "ref_spans": [],
317
+ "eq_spans": [],
318
+ "section": "Measurements",
319
+ "sec_num": "3."
320
+ },
321
+ {
322
+ "text": "k collection ( partition (measurable- stuff) ) . equal ( elements-of ( collection ( partition (measurable-stuff) ) ) )",
323
+ "cite_spans": [],
324
+ "ref_spans": [],
325
+ "eq_spans": [],
326
+ "section": "Measurements",
327
+ "sec_num": "3."
328
+ },
329
+ {
330
+ "text": "Admittedly this choice of semantics may already be biased to the joint ventures problem, but it's thrust is to say that there must be some stuff that has been partitioned into some indeterminate number aggregate these portions form that all of these portions are equal. of portions; in a collection; and in some respect Here equal is predicated of whatever the shoulder clause denotes so the process of forming its interpretation must meet and follow the process of forming that clause's interpretation as it percolates up the headline of the relative clause and into the main clause.",
331
+ "cite_spans": [],
332
+ "ref_spans": [],
333
+ "eq_spans": [],
334
+ "section": "Measurements",
335
+ "sec_num": "3."
336
+ },
337
+ {
338
+ "text": "Equal is open in something of type collection where that collection is a partition of something. The first collection to be seen moving up the headline at a remove of two nodes (the main verb and the vp) is the conjunction of companies. Because (a) equal is predicating the equality of some aspect of each of the elements of the collection and (b) the companies per se do not have textually obvious things that might be partitioned, we can make sense of this only by distributing not just the companies but the companies qua their participation in the contribution-to-capitalization relation. This gives us the three latent contributionto-capitalization relations (at only the cost of the distribution construction, which is probably cheap). As part of that distribution construction we must also partition the amount of the contribution (object (iii)) into three parts. This entails unpacking those relations to expose their amount bindings. The equals relation then boils to down to a predication 5 over those three binding objects, viz. In terms of our computational complexity metric, the interpretation of the equally modifier has contributed two points for the displacement between it and the conjunction of companies and then (modulo the distribution cost) one point for unpacking the relation the companies are participating it to isolate the amount binding(s).",
339
+ "cite_spans": [],
340
+ "ref_spans": [],
341
+ "eq_spans": [],
342
+ "section": "Measurements",
343
+ "sec_num": "3."
344
+ },
345
+ {
346
+ "text": "This gives example 1 a a total compositional complexity of 6: its three relation sources, capitalized, shoulder, and equally contributing zero, three, and three counts respectively; four of the counts reflecting the distance that displaced elements from their binding sites, and two reflecting the effort to dip into, or 'unpack', already created relations in order to select or reify one of the elements within them.",
347
+ "cite_spans": [],
348
+ "ref_spans": [],
349
+ "eq_spans": [],
350
+ "section": "Measurements",
351
+ "sec_num": "3."
352
+ },
353
+ {
354
+ "text": "The amounts of money that the companies are contributing is given abstractly rather than calculated out since that appears to be the preferred level at which it should be represented for reasoning in this domain Contrast la, with its complexity of six, with lb, which has a compositional complexity of zero (though the rather severe departure of this artificially constructed sentence from the normal stylistic patterning must have a cost to human readers). lb garners this minimal cost by placing each contributing term right next the partial relation that provides its binding site, notably pushing the capitalization clause of 1 a down to the rightmost and lowest position in the sentence's phrase structure.",
355
+ "cite_spans": [],
356
+ "ref_spans": [],
357
+ "eq_spans": [],
358
+ "section": "Measurements",
359
+ "sec_num": "3."
360
+ },
361
+ {
362
+ "text": "Example two presents a challenge to a standard compositional model of interpretation that assumes that the denotation of the syntactic head provides the basis for interpreting the head's syntactic arguments. The syntactic head of the conjunct \"40 pct by Daihatsu'\" has to be the percentage, yet there is no way to fashion a plausible rule of interpretation that binds a company to a percentage. Instead, both terms must be passed up through the conjunction node to the ownership clause (1 count) and then unpack the interpretation of that clause to extract the capitalization value and the joint venture (2 counts, one for each term). Given that the capitalization of the joint venture was given in an appositive off the subject, the ownership clause itself required two extra counts for its construction, one to unpack the capitalization and a second for the displacement of the first parent company (P.T. Astra) away from the verb in its agentive by-phrase (though that count is debatable since the grammar might explicitly subcategorize for it).",
363
+ "cite_spans": [],
364
+ "ref_spans": [],
365
+ "eq_spans": [],
366
+ "section": "Measurements",
367
+ "sec_num": "3."
368
+ },
369
+ {
370
+ "text": "Complexity of this kind is ubiquitous in business reporting. Consider this excerpt from the beginning of a quarterly earnings report (PRNewsWire 1/21/00 5:21 p.m.):",
371
+ "cite_spans": [],
372
+ "ref_spans": [],
373
+ "eq_spans": [],
374
+ "section": "Measurements",
375
+ "sec_num": "3."
376
+ },
377
+ {
378
+ "text": "3. Gensym Corp. < descriptive appositives> today reported that revenues for its fourth quarter ended December 31, 1999 were $9.1 million .... The net loss for the fourth quarter of 1999 was... The sentence that reports the loss does not say what company lost the money--to do so would be unnecessarily redundant and reduce the text's cohesion. Yet the increased tightness of the text leaves us with an partially saturated relation as the immediate referent of that sentence, open in its company variable, which must be actively filled in from context. Moreover this example is somewhat unusual in that it provides a syntax-supported explicit indicator of whose fourth quarter reporting period it is in the first of the two sentences; usually it would be stated \".for the fourth quarter...\" and the reporting-period object would also have been left with an unbound variable.",
379
+ "cite_spans": [],
380
+ "ref_spans": [],
381
+ "eq_spans": [],
382
+ "section": "Measurements",
383
+ "sec_num": "3."
384
+ },
385
+ {
386
+ "text": "Up to this point we have deliberately not discussed the question of how one would actually derive these compositional complexity counts automatically. We have instead provided a prose description of the process for a very few examples and many questions of just what constitutes a displacement or how one might know that a relation reached in the traversal Should be unpacked remain unanswered.",
387
+ "cite_spans": [],
388
+ "ref_spans": [],
389
+ "eq_spans": [],
390
+ "section": "Modeling",
391
+ "sec_num": "4."
392
+ },
393
+ {
394
+ "text": "The glib answer is that you fire up your natural language understanding system, add some reporting facilities to it, and apply it to the texts in question. Today at least that procedure is unlikely to work since texts of the sort we have been discussing are largely beyond the state of the art for information extraction engines without some deliberate, do-main-specific engineering.",
395
+ "cite_spans": [],
396
+ "ref_spans": [],
397
+ "eq_spans": [],
398
+ "section": "Modeling",
399
+ "sec_num": "4."
400
+ },
401
+ {
402
+ "text": "A more germane answer would look to. some resource of hand-annotated texts and then provide suitable definitions for displace-ment and unpacking that, given some debugging, could then be applied automatically even if there was not system that could as yet replace the knowledge of the human annotator.",
403
+ "cite_spans": [],
404
+ "ref_spans": [],
405
+ "eq_spans": [],
406
+ "section": "Modeling",
407
+ "sec_num": "4."
408
+ },
409
+ {
410
+ "text": "But this answer too is not available to us simply because such resources do not yet exist. Besides the obvious fact that efforts at providing semantic annotations of corpora are only just now getting underway, an additional problem is that the study of the semantic phenomenon that is the focus of this paper, unsaturated, model-level relations, is uncommon in the field and for good reason.",
411
+ "cite_spans": [],
412
+ "ref_spans": [],
413
+ "eq_spans": [],
414
+ "section": "Modeling",
415
+ "sec_num": "4."
416
+ },
417
+ {
418
+ "text": "An examination of the full text of the articles in, e.g., the Tipster Joint Ventures corpus will show that full phrases (maximal projections) that are unsaturated at the moment they are delimited by the parser and then given a semantic interpretation are unusual. A casual examination of the text in this section did not turn any up. In the full text from which example l a was taken (which appears at the end of this paper) turns up only two more instances (reductions around the word sales). It is also worth noting that the original Tipster effort elected to drop attempts to extract capitalization information, as, indeed, these are among the more linguistically complex constructions in the corpus.",
419
+ "cite_spans": [],
420
+ "ref_spans": [],
421
+ "eq_spans": [],
422
+ "section": "Modeling",
423
+ "sec_num": "4."
424
+ },
425
+ {
426
+ "text": "Partially saturated relations abound in financial texts such as quarterly earnings reports or stock market reports. Our own interest in this phenomena stems from our recent focus on such texts as well as the utility of the perspective shifts this kind of semantic object provides for work in the tactics of natural language generation (i.e. microplanning).",
427
+ "cite_spans": [],
428
+ "ref_spans": [],
429
+ "eq_spans": [],
430
+ "section": "Modeling",
431
+ "sec_num": "4."
432
+ },
433
+ {
434
+ "text": "Without further, collective study of this class of semantic constructions any annotation effort would have a considerable startup cost as it arrived at candidate representations for its annotators to use as well as a subjective cost in convincing the rest of the community that they had made reasonable, practical choices that other project could adapt to their own purposes.",
435
+ "cite_spans": [],
436
+ "ref_spans": [],
437
+ "eq_spans": [],
438
+ "section": "Modeling",
439
+ "sec_num": "4."
440
+ },
441
+ {
442
+ "text": "Barring a well-financed project to supply a suitably annotated corpus, we think that the proper way to proceed towards the goal of a suitable formalization is along the lines of the original, glib answer to this problem, namely to build a parser and interpretation system that operates at a sufficient level of generalization that it would require only a minimal effort to provide the lexicon and conceptual model needed to examine texts in a given domain. We have been personally engaged in such a project over the last few years, albeit at a very slow pace given the constraints we are working under, and have made a fair amount of progress, some of which is described in McDonald (in press).",
443
+ "cite_spans": [],
444
+ "ref_spans": [],
445
+ "eq_spans": [],
446
+ "section": "Modeling",
447
+ "sec_num": "4."
448
+ },
449
+ {
450
+ "text": "That texts with partially saturated relations are more complex to process is, we think, undeniable. It also seems to us a simple matter of examination to conclude that the cost is proportional to the factors we have identified: the distance by which relation elements have been displaced from each other and the cost of unpacking already completed relations to find needed terms that those relations have already in some sense consumed. On the other hand, that this cost is measured in integer values based on simple phrase node counts is entirely debatable. As other aspects of the semantic interpretation process are quantified this component of the total measure will at least need to be combined with some proportionality constant to make all the numbers comparable.",
451
+ "cite_spans": [],
452
+ "ref_spans": [],
453
+ "eq_spans": [],
454
+ "section": "Final Observations",
455
+ "sec_num": "5."
456
+ },
457
+ {
458
+ "text": "More interesting is the fact that some node transitions will certainly be different from others in their practical implementation and this should probably be factored into the cost calculation. Consider this sentence from article 1271 of the Tipster joint venture corpus. The process that completes the 'equal ownership' relation will have to reach up through three nodes to get to the first of the two owner companies. But it will certainly be different (more elaborate) to pass this partial relation through a node that is itself creating a relation (the vp headed by form) as compared with passing it through report verbs like said or raising verbs like expects to that add relatively little information.",
459
+ "cite_spans": [],
460
+ "ref_spans": [],
461
+ "eq_spans": [],
462
+ "section": "Final Observations",
463
+ "sec_num": "5."
464
+ },
465
+ {
466
+ "text": "What the composition cost comes to in practice is, of course, a matter of the architecture of the parser and semantic interpretation engine that is being deployed. For some it may be a matter of adding additional mapping patterns that recognize the specific local configurations that denote partially saturated relations ('the <ordinal> quarter') and having heuristics for searching the discourse context for their missing elements.",
467
+ "cite_spans": [],
468
+ "ref_spans": [],
469
+ "eq_spans": [],
470
+ "section": "Final Observations",
471
+ "sec_num": "5."
472
+ },
473
+ {
474
+ "text": "Systems with rich descriptive resources for lexicalized grammars such as TAGs could define specific auxiliary trees for relational heads that can appear in non-standard locations (e.g. equally) and tie them into map-ping rules that might try to do the work over the derivation trees that these parsers produce. The conjunction problem presented by example two would be amenable to a syntactic treatment in a categorial grammar, though the range of semantic types that can be combined in this arbitrary way might make that quite difficult in general.",
475
+ "cite_spans": [],
476
+ "ref_spans": [],
477
+ "eq_spans": [],
478
+ "section": "Final Observations",
479
+ "sec_num": "5."
480
+ },
481
+ {
482
+ "text": "Finally, we must say that for us this whole idea of viewing the local interpretation of the interior phrases of a sentence as partially saturated relations and viewing their completion as a matter of passing these partial interpretations through the tree is the result of many years of research and development on a system where such relations are first class objects with the same ontological status as conventional individuals. In our system (see McDonald in press) the goal is to keep the syntactic processing simple and to move the onus of the interpretation effort onto to the semantic level by having more than one referent move up the headline as the phrase structure is created. The partially saturated relations are given an active role in seeking the arguments that they need. This introduces a bias into our observations in this paper and could, possibly, be creating a mountain where systems with quite different architectures might only see a molehill.",
483
+ "cite_spans": [],
484
+ "ref_spans": [],
485
+ "eq_spans": [],
486
+ "section": "Final Observations",
487
+ "sec_num": "5."
488
+ }
489
+ ],
490
+ "back_matter": [
491
+ {
492
+ "text": "Halliday, Michael A. K., and Ruqaiya Hasan (1976) 500 will kick off production by June 1992, with sales expected to reach some 10 billion Yen. By the mid-1990s, it will increase the number of employees to 2,000 and sales to 30 billion Yen. Output at the Malaysian company will be supplied to the three companies. Mazda will use the products for its cars to produced in and after the second half of next year, while Ford will mount them on its cars for sales in the Far East. Sanyo plans to sell Malaysianmade products in Japan and other countries. </TXT> </doc> Appendix: The complete text of example la <doc> <docno> 0231 </docno> <DD> August 9, 1990, Thursday </DD> <SO> Copyright \u00a9 1990 Jiji Press Ltd.; </SO> <TXT> Mazda Motor Corp. and Sanyo Electric Co. of Japan and Ford Motor Co. of the United States have agreed to set up a joint venture by the end of this year to produce car audio equipment in Malaysia, they said Thursday. The new company, whose name is not decided yet, will produce radios, stereos, compact disc players and tuners used for cars. It will be capitalized at 130 million ringgit, which the three companies will equally shoulder. The three plan to construct a 21,000-square-meter plant in the Prai Industrial Estate of Penang. The joint venture with a startup workforce of",
493
+ "cite_spans": [
494
+ {
495
+ "start": 21,
496
+ "end": 49,
497
+ "text": "K., and Ruqaiya Hasan (1976)",
498
+ "ref_id": null
499
+ }
500
+ ],
501
+ "ref_spans": [],
502
+ "eq_spans": [],
503
+ "section": "References:",
504
+ "sec_num": null
505
+ }
506
+ ],
507
+ "bib_entries": {},
508
+ "ref_entries": {
509
+ "FIGREF1": {
510
+ "num": null,
511
+ "uris": null,
512
+ "text": "amount . Cap-l) 130 -mil I ion-ringgit )",
513
+ "type_str": "figure"
514
+ },
515
+ "FIGREF3": {
516
+ "num": null,
517
+ "uris": null,
518
+ "text": "The three companies will shoulder equal amounts of the venture's capitalization of 130 million ringgit.",
519
+ "type_str": "figure"
520
+ },
521
+ "FIGREF4": {
522
+ "num": null,
523
+ "uris": null,
524
+ "text": "2a .... the joint firm, capital&ed at one billion yen, will be 60 pet owned by P.T. Astra International, Inc., and 40 pct by Daihatsu.",
525
+ "type_str": "figure"
526
+ },
527
+ "FIGREF5": {
528
+ "num": null,
529
+ "uris": null,
530
+ "text": "57",
531
+ "type_str": "figure"
532
+ },
533
+ "TABREF0": {
534
+ "num": null,
535
+ "html": null,
536
+ "type_str": "table",
537
+ "text": "4. Inoda Cement Co ...... said Tuesday its U.S. subsidiary has formed an equally owned cement joint venture . . . with Lone Star Industries Inc...",
538
+ "content": "<table/>"
539
+ }
540
+ }
541
+ }
542
+ }
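Both files above follow the same S2ORC layout: a top-level "abstract" string plus a "pdf_parse" object whose "body_text" is a list of paragraphs, each carrying "text" together with "cite_spans", "ref_spans" and "eq_spans" holding character offsets into that text. A minimal Python sketch of walking one of these files (the function name and the consistency check are our own illustration, not part of the dataset):

    import json

    def iter_paragraphs(path):
        # Yield (section, text, cite_spans) for each body paragraph of one S2ORC-style file.
        with open(path, encoding="utf-8") as f:
            doc = json.load(f)
        for para in doc.get("pdf_parse", {}).get("body_text", []):
            yield para.get("section"), para["text"], para.get("cite_spans", [])

    # Example: report whether each citation span's offsets select the span's own text.
    path = "Full_text_JSON/prefixW/json/W00/W00-0109.json"
    for section, text, spans in iter_paragraphs(path):
        for span in spans:
            ok = text[span["start"]:span["end"]] == span["text"]
            print(section, repr(span["text"]), "offsets consistent" if ok else "offsets drift")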
Full_text_JSON/prefixW/json/W00/W00-0110.json ADDED
@@ -0,0 +1,854 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0110",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:19.908777Z"
6
+ },
7
+ "title": "Similarities and Differences among Semantic Behaviors of Japanese Adnominal Constituents",
8
+ "authors": [
9
+ {
10
+ "first": "Kyoko",
11
+ "middle": [],
12
+ "last": "Kanzaki",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "Communications Research Laboratory",
16
+ "institution": "",
17
+ "location": {
18
+ "addrLine": "Iwaoka-cho, Nishi-ku",
19
+ "postCode": "588-2, 651-2492",
20
+ "settlement": "Iwaoka, Kobe",
21
+ "country": "Japan"
22
+ }
23
+ },
24
+ "email": ""
25
+ },
26
+ {
27
+ "first": "Qing",
28
+ "middle": [],
29
+ "last": "Ma",
30
+ "suffix": "",
31
+ "affiliation": {
32
+ "laboratory": "Communications Research Laboratory",
33
+ "institution": "",
34
+ "location": {
35
+ "addrLine": "Iwaoka-cho, Nishi-ku",
36
+ "postCode": "588-2, 651-2492",
37
+ "settlement": "Iwaoka, Kobe",
38
+ "country": "Japan"
39
+ }
40
+ },
41
+ "email": ""
42
+ },
43
+ {
44
+ "first": "Hitoshi",
45
+ "middle": [],
46
+ "last": "Isahara",
47
+ "suffix": "",
48
+ "affiliation": {
49
+ "laboratory": "Communications Research Laboratory",
50
+ "institution": "",
51
+ "location": {
52
+ "addrLine": "Iwaoka-cho, Nishi-ku",
53
+ "postCode": "588-2, 651-2492",
54
+ "settlement": "Iwaoka, Kobe",
55
+ "country": "Japan"
56
+ }
57
+ },
58
+ "email": ""
59
+ }
60
+ ],
61
+ "year": "",
62
+ "venue": null,
63
+ "identifiers": {},
64
+ "abstract": "This paper treats the classification of the semantic functions performed by adnominal constituents in Japanese, where many parts of speech act as adnominal constituents. In order to establish a formal treatment of the semantic roles, the similarities and differences among adnominal constituents, i.e. adjectives and \"noun + NO (in English \"of + noun\")\" structures, which have a broad range of semantic functions, are discussed. This paper also proposes an objective method of classifying these constructs using a large amount of linguistic data. The feasibility of this was verified with a selforganizing semantic map based on a neural network model.",
65
+ "pdf_parse": {
66
+ "paper_id": "W00-0110",
67
+ "_pdf_hash": "",
68
+ "abstract": [
69
+ {
70
+ "text": "This paper treats the classification of the semantic functions performed by adnominal constituents in Japanese, where many parts of speech act as adnominal constituents. In order to establish a formal treatment of the semantic roles, the similarities and differences among adnominal constituents, i.e. adjectives and \"noun + NO (in English \"of + noun\")\" structures, which have a broad range of semantic functions, are discussed. This paper also proposes an objective method of classifying these constructs using a large amount of linguistic data. The feasibility of this was verified with a selforganizing semantic map based on a neural network model.",
71
+ "cite_spans": [],
72
+ "ref_spans": [],
73
+ "eq_spans": [],
74
+ "section": "Abstract",
75
+ "sec_num": null
76
+ }
77
+ ],
78
+ "body_text": [
79
+ {
80
+ "text": "Pustejovsky (Pustejovsky, 1995) proposed the theory of a generative lexicon as a framework by which meanings of words are expressed in one unified representation. This kind of generativity would be very useful for NLP, especially if it is applicable to the complex semantic structures represented by various modification relations. In our previous research on adjectives (Isahara and Kanzaki, 1999) we used Pustejovsky's theory to classify adjectives in Japanese. In this paper we take the first steps in a similar classification of the Japanese \"noun + NO\" construction. Bouillon (Bouillon, 1996) applied this theory to the adnominal constituent of mental states. Saint-Dizier (Saint-Dizier, 1998) discussed adjectives in French. Isahara and Kanzaki (Isahara and Kanzaki, 1999) treated a much wider range of phenomena of adnominal constituents. They classified the semantic roles of adnominal constituents in Japanese, where many parts of speech act as adnominal constituents, and discussed a formal treatment of their semantic roles. In their research, adnominal constituents, mainly adjectives which function as adverbials, are discussed. The present paper describes the similarities and differences among adnominal constituents, i.e. adjectives and \"noun + NO (in English \"of + noun\")\" structures which have a broad range of semantic functions. This paper proposes an objective method for classifying these structures using a large amount of linguistic data. The feasibility of this was verified with a self-organizing semantic map based on a neural network model.",
81
+ "cite_spans": [
82
+ {
83
+ "start": 12,
84
+ "end": 31,
85
+ "text": "(Pustejovsky, 1995)",
86
+ "ref_id": "BIBREF4"
87
+ },
88
+ {
89
+ "start": 370,
90
+ "end": 397,
91
+ "text": "(Isahara and Kanzaki, 1999)",
92
+ "ref_id": "BIBREF2"
93
+ },
94
+ {
95
+ "start": 580,
96
+ "end": 596,
97
+ "text": "(Bouillon, 1996)",
98
+ "ref_id": "BIBREF0"
99
+ },
100
+ {
101
+ "start": 664,
102
+ "end": 697,
103
+ "text": "Saint-Dizier (Saint-Dizier, 1998)",
104
+ "ref_id": "BIBREF5"
105
+ },
106
+ {
107
+ "start": 730,
108
+ "end": 776,
109
+ "text": "Isahara and Kanzaki (Isahara and Kanzaki, 1999",
110
+ "ref_id": "BIBREF2"
111
+ }
112
+ ],
113
+ "ref_spans": [],
114
+ "eq_spans": [],
115
+ "section": "Introduction",
116
+ "sec_num": "1"
117
+ },
118
+ {
119
+ "text": "In section 2, we explain the semantic functions performed by \"noun + NO.\" In section 3, we discuss how we can semi-automatically obtain and classify examples of adjectives and \"noun + NO\" structures which have similar semantic functions. In section 4, we introduce a self-organizing semantic map to verify the result of this classification. In section 5, we discuss similarities and differences between adjectives and \"noun + NO\" structures.",
120
+ "cite_spans": [],
121
+ "ref_spans": [],
122
+ "eq_spans": [],
123
+ "section": "Introduction",
124
+ "sec_num": "1"
125
+ },
126
+ {
127
+ "text": "Relations between \"noun -t-NO\" and their Head Nouns Among Japanese adnominal constituents, \" noun + NO\" represents a wider range of semantic relations than other adnominal constituents. Therefore, \"noun + NO\" does not always behave like the other adnominal constituents. In previous work, some researchers have analyzed semantic relations between the noun in the \"noun + NO\" structure and its head noun (Shimazu et al., 1986) . Here, we show several examples that demonstrate the diversity of the sel \"NO\" is a Japanese postpositiona| which can represent a wide range of semantic relations. It is similar to \"of\" in English. mantic relation between \"noun + NO\" structures and their head nouns shown in their research. These semantic relations between \"noun + NO\" structures and their head nouns are different than those between other adnominal constituents, e.g. adjectives and their head nouns. However, some \"noun + NO\" behavior is similar to the behavior of adjectives and nominal adjectivals. In these cases \"noun + NO\" seems not to differ semantically from adjectives and nominal adjectivals. Let us consider the English examples: financial world / world of finance (\"ZAIKAI\") industrial center / center of industry (\"SANGYOU NO CHUUSHIN\") In this case \"noun + NO\" need not be distinguished from an adjective with respect to semantic behavior. However, in the following examples it is necessary to distinguish them from one another. global center / center of tile globe (\"SEKAI NO CHUUSHIN / CHIKYUU NO CHUUSHIN\")",
128
+ "cite_spans": [
129
+ {
130
+ "start": 403,
131
+ "end": 425,
132
+ "text": "(Shimazu et al., 1986)",
133
+ "ref_id": "BIBREF6"
134
+ },
135
+ {
136
+ "start": 1221,
137
+ "end": 1244,
138
+ "text": "(\"SANGYOU NO CHUUSHIN\")",
139
+ "ref_id": null
140
+ }
141
+ ],
142
+ "ref_spans": [],
143
+ "eq_spans": [],
144
+ "section": "The Diversity of Semantic",
145
+ "sec_num": "2"
146
+ },
147
+ {
148
+ "text": "We do not have a discrimination criterion that automatically recognizes whether a \"noun + NO\" structure is similar in its semantic behavior to that of adjectives or not. We have attempted to gather, semi-automatically, nouns in the \"noun + NO\" structure which behave like adjectives.",
149
+ "cite_spans": [],
150
+ "ref_spans": [],
151
+ "eq_spans": [],
152
+ "section": "DENWA NO",
153
+ "sec_num": null
154
+ },
155
+ {
156
+ "text": "3 The Exploration of the Similarities of Semantic Functions of \"noun + NO\" Structures and Adjectives. (The Method for this Research) 3.1 The Basic Concept There is one case in which the meanings of adnominal constituents are semantically similar to the features of the referents of their head nouns, e.g. adnominal constituents represent the concrete contents of their head nouns. Let us consider the Japanese phrase \"KANASHII KIMOCHI (sad feeling)\" and \"YOROKOBI NO KIMOCHI (feeling of delight)\" as examples. One meaning of \"KIMOCHI (feeling)\" represents the semantic element <mental state>. In the above examples, the adjective, \"KANASHII (sad)\", and \"noun + NO\", \"YOROKOBI NO (of delight)\", represent the concrete contents of their head noun \"KIMOCHI (feeling)\", i.e. they also represent the mental state: \"feeling\". Therefore, even though they belong to different parts of speech (adjective/noun), they must be classified in the same semantic category since both carry the same meaning. Neither the adjective, \"KANASHII (sad)\", nor the \"noun + NO\", \"YOROKOBI NO (of delight)\", can appear in predicative position without changing their meaning.",
157
+ "cite_spans": [],
158
+ "ref_spans": [],
159
+ "eq_spans": [],
160
+ "section": "DENWA NO",
161
+ "sec_num": null
162
+ },
163
+ {
164
+ "text": "However, if adnominal constituents do not share the same semantic concept as their head noun, they cannot represent the contents of head nouns. In the above examples, the noun in \"noun + NO\", \"JOHN\", does not include the concept, <mental state>, so it cannot represent the content of \"KIMOCHI (feeling).\" The adjective, \"KANASHII (sad)\", and the noun in the \"noun + NO\", \"JOHN\" do not embody the same concept and have a different semantic relation with their head noun. We cannot find the semantic similarities between \"KANASHII (sad)\" and \"JOHN\" that we could between \"YOROKOBI NO (of delight)\" and \"KANASHII (sad).\" We focus on the phenomena where adnominal constituents represent the concrete contents of their head nouns. This makes it possible to identify adjectives and \"noun + NO\" structures which are similar in semantic behavior to the referents of their head nouns. These expressions are extracted semi-automatically from large corpora.",
165
+ "cite_spans": [],
166
+ "ref_spans": [],
167
+ "eq_spans": [],
168
+ "section": "DENWA NO",
169
+ "sec_num": null
170
+ },
171
+ {
172
+ "text": "When we collect words which have some similarities, it is difficult to select the semantic axis for classification by making use of only the co-occurring words. In collecting similar words, some previous research took not only cooccurring words but also the context of these words into account (Grefenstette, 1994) . One of the important points of our analysis is the introduction of the distinct semantic elements that both \"noun + NO\" structures and adjectivals (adjectives and nominals) have in common with their head nouns. We wanted to ascertain the similarities between \"noun + NO\" and other adnominal constituents based on these common semantic elements. For this reason, we used the semantic relations, in which adnominal constituents represent the concrete content of their head nouns, as a key to classification. We automatically 2 extracted these relations from one year of newspaper articles from Mainichi Shimbun (1994), 100 novels from Shincho publishers and 100 books covering a variety of topics. We used the following procedure to extract the necessary information.",
173
+ "cite_spans": [
174
+ {
175
+ "start": 294,
176
+ "end": 314,
177
+ "text": "(Grefenstette, 1994)",
178
+ "ref_id": "BIBREF1"
179
+ }
180
+ ],
181
+ "ref_spans": [],
182
+ "eq_spans": [],
183
+ "section": "How to Extract the Necessary Information",
184
+ "sec_num": "3.2"
185
+ },
186
+ {
187
+ "text": "Step 1) Extract from the corpora, all nouns which are preceded by the Japanese expression \"TOIU\" which is something like \"that\" or \"of.\" \"TOIU + noun (noun that/of ...)\" is a typical Japanese expression which introduces some in-",
188
+ "cite_spans": [],
189
+ "ref_spans": [],
190
+ "eq_spans": [],
191
+ "section": "How to Extract the Necessary Information",
192
+ "sec_num": "3.2"
193
+ },
194
+ {
195
+ "text": "Only Step 3) is done manually.",
196
+ "cite_spans": [],
197
+ "ref_spans": [],
198
+ "eq_spans": [],
199
+ "section": "How to Extract the Necessary Information",
200
+ "sec_num": "3.2"
201
+ },
202
+ {
203
+ "text": "formation about the referent of the noun, such as apposition. Therefore, nouns found in this pattern may have their content elucidated by means of their modifiers.",
204
+ "cite_spans": [],
205
+ "ref_spans": [],
206
+ "eq_spans": [],
207
+ "section": "How to Extract the Necessary Information",
208
+ "sec_num": "3.2"
209
+ },
210
+ {
211
+ "text": "Step 2) Extract from the corpora, all \"noun + NO\" structures, adjectives and nominal adjectivals which modify the nouns extracted in step 1.",
212
+ "cite_spans": [],
213
+ "ref_spans": [],
214
+ "eq_spans": [],
215
+ "section": "How to Extract the Necessary Information",
216
+ "sec_num": "3.2"
217
+ },
218
+ {
219
+ "text": "NB, the relationships between adnominal constituents and their modified nouns extracted here include not only representations of the contents of the noun, but also other various relations.",
220
+ "cite_spans": [],
221
+ "ref_spans": [],
222
+ "eq_spans": [],
223
+ "section": "How to Extract the Necessary Information",
224
+ "sec_num": "3.2"
225
+ },
226
+ {
227
+ "text": "Step 3) Extract \"noun + NO\" structures, adjectives and nominal adjectivals which represent the contents of the referents of the modified nouns.",
228
+ "cite_spans": [],
229
+ "ref_spans": [],
230
+ "eq_spans": [],
231
+ "section": "How to Extract the Necessary Information",
232
+ "sec_num": "3.2"
233
+ },
234
+ {
235
+ "text": "Step 3 is done manually.",
236
+ "cite_spans": [],
237
+ "ref_spans": [],
238
+ "eq_spans": [],
239
+ "section": "How to Extract the Necessary Information",
240
+ "sec_num": "3.2"
241
+ },
242
+ {
243
+ "text": "Step 4) In order to find the distribution of their semantic categories and analyze the semantic similarities between \"noun + NO\" and other adnominal constituents in each semantic category, we clustered the modified nouns automatically. This clustering was based on sets of similar adnominal constituents which represent the content of the referent of the modified noun.",
244
+ "cite_spans": [],
245
+ "ref_spans": [],
246
+ "eq_spans": [],
247
+ "section": "How to Extract the Necessary Information",
248
+ "sec_num": "3.2"
249
+ },
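The four extraction steps above amount to a small corpus pipeline: find head nouns that occur after "TOIU", collect their adnominal modifiers, filter by hand, then cluster. Below is a minimal sketch of Steps 1-2 and the resulting co-occurrence table; it assumes the corpus has already been segmented into (surface, pos) pairs by a Japanese morphological analyser, and the POS tag names, the toy corpus and the helper functions are illustrative stand-ins rather than the tools actually used in the paper.

from collections import defaultdict

# Hypothetical input: each sentence is a list of (surface, pos) pairs from a
# Japanese morphological analyser; the POS tag names are stand-ins.

def collect_target_nouns(corpus):
    """Step 1: nouns that occur immediately after the expression 'TOIU'."""
    targets = set()
    for sent in corpus:
        for i in range(1, len(sent)):
            if sent[i][1] == "noun" and sent[i - 1][0] == "TOIU":
                targets.add(sent[i][0])
    return targets

def collect_adnominal_modifiers(corpus, targets):
    """Step 2: for every occurrence of a target noun, record an immediately
    preceding adjective / nominal adjectival, or a 'noun + NO' sequence."""
    table = defaultdict(set)
    for sent in corpus:
        for i, (surface, pos) in enumerate(sent):
            if pos != "noun" or surface not in targets:
                continue
            if i >= 1 and sent[i - 1][1] in ("adjective", "nominal-adjectival"):
                table[surface].add(sent[i - 1][0])
            if i >= 2 and sent[i - 1][0] == "NO" and sent[i - 2][1] == "noun":
                table[surface].add(sent[i - 2][0] + " NO")
    return table

if __name__ == "__main__":
    corpus = [
        [("KANASHII", "adjective"), ("TOIU", "particle"), ("KIMOCHI", "noun")],
        [("YOROKOBI", "noun"), ("NO", "particle"), ("KIMOCHI", "noun")],
        [("KANASHII", "adjective"), ("KIMOCHI", "noun")],
    ]
    targets = collect_target_nouns(corpus)                 # {'KIMOCHI'}
    print(dict(collect_adnominal_modifiers(corpus, targets)))
    # Step 3 (manual filtering) and Step 4 (clustering the modified nouns by
    # their modifier sets) operate on this table.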
250
+ {
251
+ "text": "Modified Nouns Constructed by the Self-Organizing System of the Neural Network Model",
252
+ "cite_spans": [],
253
+ "ref_spans": [],
254
+ "eq_spans": [],
255
+ "section": "The Semantic Map of the",
256
+ "sec_num": "4"
257
+ },
258
+ {
259
+ "text": "We can gather similar modified nouns when we classify the modified nouns according to the similarities of the adnominal constituents, because in our data both adnominal constituents and their modified nouns have the same semantic elements in common that we mentioned above.",
260
+ "cite_spans": [],
261
+ "ref_spans": [],
262
+ "eq_spans": [],
263
+ "section": "The Semantic Map of the",
264
+ "sec_num": "4"
265
+ },
266
+ {
267
+ "text": "We attempted to construct the Semantic Map of the modified nouns gathered by the abovementioned method by using the self-organizing system of the neural network model (Ma et al., 2000) . We suppose that both modified nouns and adnominal constituents have common semantic elements when adnominal constituents represent the concrete content of their head nouns. If this is true, nouns with similar meanings are located near each other on the semantic map, self-organized by the similarities of semantic elements among the adnominal constituents. The result of our experiment verified this supposition ( Figure I ). The nouns with a similar meaning are located near each other on the map and we could divide the distribution of the modified nouns into seven categories ( Figure 2) .",
268
+ "cite_spans": [
269
+ {
270
+ "start": 167,
271
+ "end": 184,
272
+ "text": "(Ma et al., 2000)",
273
+ "ref_id": "BIBREF3"
274
+ }
275
+ ],
276
+ "ref_spans": [
277
+ {
278
+ "start": 603,
279
+ "end": 611,
280
+ "text": "Figure I",
281
+ "ref_id": null
282
+ },
283
+ {
284
+ "start": 770,
285
+ "end": 779,
286
+ "text": "Figure 2)",
287
+ "ref_id": null
288
+ }
289
+ ],
290
+ "eq_spans": [],
291
+ "section": "The Semantic Map of the",
292
+ "sec_num": "4"
293
+ },
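A self-organizing map of this kind can be approximated with a short Kohonen-style implementation. The sketch below is a generic SOM, not the exact model of Ma et al. (2000): each modified noun is encoded as a binary vector over the adnominal constituents observed with it, and nouns with similar vectors are pulled toward nearby grid cells. The grid size, learning-rate schedule and toy vectors are all assumptions for illustration.

import numpy as np

def train_som(vectors, grid=(6, 6), epochs=200, lr0=0.5, radius0=3.0, seed=0):
    """Minimal Kohonen self-organizing map; returns the trained codebook."""
    rng = np.random.default_rng(seed)
    h, w = grid
    dim = vectors.shape[1]
    codebook = rng.random((h, w, dim))
    coords = np.dstack(np.meshgrid(np.arange(h), np.arange(w), indexing="ij"))
    for t in range(epochs):
        lr = lr0 * (1.0 - t / epochs)                  # decaying learning rate
        radius = max(radius0 * (1.0 - t / epochs), 0.5)
        for v in vectors[rng.permutation(len(vectors))]:
            dists = np.linalg.norm(codebook - v, axis=2)
            bmu = np.unravel_index(np.argmin(dists), (h, w))   # best-matching unit
            d2 = np.sum((coords - np.array(bmu)) ** 2, axis=2)
            influence = np.exp(-d2 / (2 * radius ** 2))[..., None]
            codebook += lr * influence * (v - codebook)
    return codebook

def map_position(codebook, v):
    """Grid cell whose codebook vector is closest to v."""
    dists = np.linalg.norm(codebook - v, axis=2)
    return np.unravel_index(np.argmin(dists), dists.shape)

if __name__ == "__main__":
    # Toy data: rows = modified nouns, columns = adnominal constituents
    # (1 = the constituent was observed with that noun in the corpus).
    nouns = ["KIMOCHI", "KANJI", "JOUTAI", "DANKAI"]
    X = np.array([[1, 1, 0, 0],    # KIMOCHI: e.g. KANASHII, YOROKOBI NO
                  [1, 1, 0, 0],    # KANJI:   e.g. KANASHII, YOROKOBI NO
                  [0, 0, 1, 1],    # JOUTAI:  e.g. KIKEN_NA, MIKETTEI NO
                  [0, 0, 1, 1]],   # DANKAI:  e.g. KIKEN_NA, UWASA NO
                 dtype=float)
    codebook = train_som(X)
    for noun, v in zip(nouns, X):
        print(noun, map_position(codebook, v))   # similar nouns map to nearby cells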
294
+ {
295
+ "text": "Each group, i.e. the \"mental state\" group, \"state/ situation\" group, \"characteristics\" group, \"range/ area\" group, \"viewpoint/ standpoint\" group, \"aspect\" group, and \"others,\" represents a meaning held in common by nouns in the group. Mental state can be further divided into the state of emotion, mood and intention. As we analyze the adnominal constituents in each category of modified nouns, we can find the possibility of the co-occurrence of an adnominal constituent with a head noun. Table 1 shows examples of adjectives and nouns in \"noun + NO\" structures in each group. In the mental state, state/situation, aspect and characteristics groups~ adjectives appear more frequently than \"noun + NO\" constructions. These are simple adjectives. Ill the range/area and viewpoint/standpoint groups, \"noun + NO\" structures appear more frequently than simple adjectives. Nominal adjectivals derived from nouns plus the suffix \"TEKIna\" appear often with these noun groups. Most nouns in the groups \"mental state: emotion\", \"state/situation\" and \"characteristics\", contain abstract nouns which represent emotions, situations or characteristics. There are few concrete nouns. However, in the groups \"range/area\" and \"viewpoint/standpoint', there are many concrete nouns which represent natural phenomena, organizations or professional domains and few abstract nouns. We can find differences among \"noun + NO\" structures, that is, there are adjectives which behave like nouns semantically and there are nouns which behave semantically like adjectives.",
296
+ "cite_spans": [],
297
+ "ref_spans": [
298
+ {
299
+ "start": 490,
300
+ "end": 497,
301
+ "text": "Table 1",
302
+ "ref_id": "TABREF3"
303
+ }
304
+ ],
305
+ "eq_spans": [],
306
+ "section": "The Semantic Map of the",
307
+ "sec_num": "4"
308
+ },
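The frequency contrast described here (adjectives dominating some noun groups, "noun + NO" dominating others) is straightforward to tabulate once every modifier token is labelled with its type and every head noun with its map group. A hypothetical sketch with invented counts:

from collections import Counter, defaultdict

# (head-noun group, modifier type) observations; in practice these come from
# the extraction table of section 3 joined with the map groups of Figure 2.
# The counts here are invented purely for illustration.
observations = [
    ("mental state", "adjective"), ("mental state", "adjective"),
    ("mental state", "noun+NO"),
    ("range/area", "noun+NO"), ("range/area", "noun+NO"),
    ("range/area", "adjective"),
]

by_group = defaultdict(Counter)
for group, modifier_type in observations:
    by_group[group][modifier_type] += 1

for group, counts in by_group.items():
    total = sum(counts.values())
    print(group, {t: round(n / total, 2) for t, n in counts.items()})
# mental state {'adjective': 0.67, 'noun+NO': 0.33}
# range/area {'noun+NO': 0.67, 'adjective': 0.33}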
309
+ {
310
+ "text": "5 The semantic behavior of the \"noun + NO\" structure which is similar to that of adjectives 5.1 Types of nouns in the \"noun + NO\" structure As we mentioned in section 3, we extracted the \"noun + NO\" structures which have the same semantic element, along with similar adjectives, from large corpora. For example, KIKEN_NA JOUTAI (dangerous) (situation) dangerous situation",
311
+ "cite_spans": [],
312
+ "ref_spans": [],
313
+ "eq_spans": [],
314
+ "section": "UT~ \u00ae",
315
+ "sec_num": null
316
+ },
317
+ {
318
+ "text": "In this case \"dangerous\" represents the state concretely.",
319
+ "cite_spans": [],
320
+ "ref_spans": [],
321
+ "eq_spans": [],
322
+ "section": "UT~ \u00ae",
323
+ "sec_num": null
324
+ },
325
+ {
326
+ "text": "In this case, the \"MIKETTEI NO (of indecision)\" also represents the state concretely. Here, both \"KIKEN_NA (dangerous)\" and \"MIKETTEI NO (of indecision)\" have the same semantic element \"state\" in common. We find that a \"situation\" can be represented by both an adjective and the \"noun + NO\" structure. When \"MIKETTEI NO (of indecision)\" cooccurs with modified nouns other than \"situation\", it mostly represents the semantic notion, e.g. \"MIKETTEI NO MONDAI (a problem of indecision)\", and so on. That is, \"MIKETTEI NO (of indecision)\" represents the situation of a problem. So we see that \"MIKETTEI NO (of indecision)\" is in itself like an adjective.",
327
+ "cite_spans": [],
328
+ "ref_spans": [],
329
+ "eq_spans": [],
330
+ "section": "MIKETTEI NO JOUTAI (indecision) (of) (situation) a situation of indecision",
331
+ "sec_num": null
332
+ },
333
+ {
334
+ "text": "On the other hand, \"KUMORI NO (cloudiness)\" behaves sometimes like an adjective and sometimes not.",
335
+ "cite_spans": [],
336
+ "ref_spans": [],
337
+ "eq_spans": [],
338
+ "section": "MIKETTEI NO JOUTAI (indecision) (of) (situation) a situation of indecision",
339
+ "sec_num": null
340
+ },
341
+ {
342
+ "text": "NO JOUTAI (cloudiness) (of) (state) a state of cloudiness The semantic behavior of \"KUMORI NO (of cloudiness)\" is like the behavior of adjectives in that the cloudiness represents the state as \"KIKEN_NA (dangerous),\" however, \"KUMORI NO (of cloudiness)\" does not always represent the state of the referent of the modified noun though \"MIKETTEI NO (of indecision)\" always represents that.",
343
+ "cite_spans": [],
344
+ "ref_spans": [],
345
+ "eq_spans": [],
346
+ "section": "KUMORI",
347
+ "sec_num": null
348
+ },
349
+ {
350
+ "text": "\"KUMORI (cloudiness)\" is a natural phenomenon which can be pointed to concretely. For example,",
351
+ "cite_spans": [],
352
+ "ref_spans": [],
353
+ "eq_spans": [],
354
+ "section": "KUMORI",
355
+ "sec_num": null
356
+ },
357
+ {
358
+ "text": "KUMORI NO NISSU (cloudiness) (of) (amount) WA 4 GATU NI SITEWA IJOU DA.",
359
+ "cite_spans": [],
360
+ "ref_spans": [],
361
+ "eq_spans": [],
362
+ "section": "KUMORI",
363
+ "sec_num": null
364
+ },
365
+ {
366
+ "text": "The amount of cloudiness is unusual for April.",
367
+ "cite_spans": [],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "KUMORI",
371
+ "sec_num": null
372
+ },
373
+ {
374
+ "text": "In this example, \"KUMORI NO (of cloudiness)\" modifies \"NISSU (the amount),\" and does not represent a state but the possessor of the amount.",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "KUMORI",
379
+ "sec_num": null
380
+ },
381
+ {
382
+ "text": "As the examples of \"MIKETTEI NO (of indecision)\" and \"KUMORI NO (of cloudiness)\" show, there are nouns which have the same properties as adjectives intrinsically (e.g. \"MIKETTEI (indecision)\"), and other nouns which intrinsically have different properties from adjectives (e.g. \"KUMORI (cloudiness)\"). So, it is important to consider the properties of the noun in \"noun + NO\" when we analyze the \"noun + NO\" which behaves semantically like an adjective. Such an analysis enables us to find the situation in which they act like adjectives. We classified nouns in \"noun + NO\" structures into three types based on what the nouns refer to. Nouns from the last category, 3), are similar to adjectives semantically. As adjectives do not represent concrete objects or verb-like notions, nouns from these categories only occasionally resemble adjectives.",
383
+ "cite_spans": [],
384
+ "ref_spans": [],
385
+ "eq_spans": [],
386
+ "section": "KUMORI",
387
+ "sec_num": null
388
+ },
389
+ {
390
+ "text": "Noun Categories: 1) nouns which refer to concrete objects. (like rain, book, science, and so on)",
391
+ "cite_spans": [],
392
+ "ref_spans": [],
393
+ "eq_spans": [],
394
+ "section": "KUMORI",
395
+ "sec_num": null
396
+ },
397
+ {
398
+ "text": "2) nominalizations (like decision, work, and so on)",
399
+ "cite_spans": [],
400
+ "ref_spans": [],
401
+ "eq_spans": [],
402
+ "section": "KUMORI",
403
+ "sec_num": null
404
+ },
405
+ {
406
+ "text": "3) nouns which belong to neither 1) nor 2), e.g. abstract nouns and so on.",
407
+ "cite_spans": [],
408
+ "ref_spans": [],
409
+ "eq_spans": [],
410
+ "section": "KUMORI",
411
+ "sec_num": null
412
+ },
413
+ {
414
+ "text": "As our corpora contain mainly newspaper articles, many compound nouns appear. Since the last word in a compound noun determines the properties of the whole word, we focus on the last word in classifying them. Table 2 contains examples of the noun categories. \"KOUGYOU TOSHI (industry city)\" is an example of a compound noun where the last word \"TOSHI (city)\" determines the properties. ",
415
+ "cite_spans": [],
416
+ "ref_spans": [
417
+ {
418
+ "start": 209,
419
+ "end": 216,
420
+ "text": "Table 2",
421
+ "ref_id": "TABREF4"
422
+ }
423
+ ],
424
+ "eq_spans": [],
425
+ "section": "KUMORI",
426
+ "sec_num": null
427
+ },
428
+ {
429
+ "text": "3) nouns which belong to neither 1) nor 2) MUTONTYAKU, JAKUSHOU (carelessness) (weakness)",
430
+ "cite_spans": [],
431
+ "ref_spans": [],
432
+ "eq_spans": [],
433
+ "section": "KUMORI",
434
+ "sec_num": null
435
+ },
436
+ {
437
+ "text": "In the following section, we analyze the similarities and differences of the semantic behavior of \"noun + NO\" structures and adjectives. Firstly, we describe the case in which the semantic behavior of \"noun + NO\" is similar to that of adjectives and then we mention the case in which the semantic behavior of \"noun + NO\" is different from that of adjectives. Secondly, we analyze several types of nouns in \"noun + NO\" which behave like adjectives, even though nouns in \"noun + NO\" are not intrinsically similar to adjective types.",
438
+ "cite_spans": [],
439
+ "ref_spans": [],
440
+ "eq_spans": [],
441
+ "section": "KUMORI",
442
+ "sec_num": null
443
+ },
444
+ {
445
+ "text": "behavior between nouns in \"noun + NO\" and adjectives For example, \"KANASHII (sad)\", \"URESHII (pleasurable)\", \"ZANNEN_NA (regrettable)\", \"KANASHIMI NO (of sadness)\", \"YOROKOBI NO (of delight)\" and so on, modify nouns such as \"OMOI (thought)\", \"KANJI (emotion)\" and so on. Using a set of adnominal constituents, such as \"KANASHII (sad)\", \"URESHII (pleasurable)\", \"ZANNEN_NA (regrettable)\", as keys for classification, we can classify the modified nouns, \"OMOI (thought)\", \"KANJI (feeling)\" and so on, into the same group. Then we can find a semantic relationship between these adnominal constituents and their head nouns, in this case, <emotion>. In the following, we describe the similar and differing semantic behaviors of \"noun + NO\" and other adjectives in the same semantic category.",
446
+ "cite_spans": [],
447
+ "ref_spans": [],
448
+ "eq_spans": [],
449
+ "section": "The differences of semantic",
450
+ "sec_num": "5.2"
451
+ },
452
+ {
453
+ "text": "As we described in the previous section, we extract sets of \"noun + NO\" structures and adjectives from data which was sorted semantically. Words in each set represent the semantic substance of the similar nouns which they modify. Therefore, their semantic categories are similar. Examples of modified nouns of a similar semantic category and their modifiers which have a semantic category similar to that of the nouns are listed in Table 3 . Included are some \"noun \u00f7 NO\" examples which though cooccurring with <mental state> nouns are not classified as such themselves. There are many adjectives and nominal adjectivals which can modify nouns in Table 3 , such as \"AWARENA (poor)\", \"IJIRASHII (moving)\" and \"HOKO-RASHII (triumphant).\" Some \"noun \u00f7 NO\" structures are semantically similar to these adjectives since they represent the contents of the emotion, e.g. \"FUKAI NO KAN (sensation of displeasure)\" and \"YOROKOBI NO KIMOCHI (feeling of delight).\" Most nouns in these \"noun + NO\" structures in Table 3 are classified into \"mental activity by humans\" by the \"Word List Classified by Semantic Principles3. '' \"Noun + NO\" structures, which have this kind of semantic; category, are similar to adjectives and nominal adjectivals, as both represent the content of the human mind. We call this semantic cat-'~This list was compiled by The Natural Language Research Institute, Tokyo. egory created by these adnominal constituents and their modified nouns \"Feeling.\" On the other hand, some adnominal relationships concerning a mental state can only be represented by \"noun + NO\" structures, such as \"HOSHIN NO KIMOCHI (desire of defending one's own interest),\" \"CHIKUZAI NO NEN (thought of moneymaking)\" and \"INTAI NO KIMOCHI (idea of retirement).\" Event nouns are mainly used in these \"noun + NO\" structures. Adnominal modifying relations of \"nominalization + NO + mental state_noun\" structures represent an intentional mental state. This kind of intentional mental state cannot be expressed by adjectives. We call this semantic category \"Intentional mental state.\"",
454
+ "cite_spans": [],
455
+ "ref_spans": [
456
+ {
457
+ "start": 432,
458
+ "end": 439,
459
+ "text": "Table 3",
460
+ "ref_id": "TABREF5"
461
+ },
462
+ {
463
+ "start": 647,
464
+ "end": 654,
465
+ "text": "Table 3",
466
+ "ref_id": "TABREF5"
467
+ },
468
+ {
469
+ "start": 1000,
470
+ "end": 1007,
471
+ "text": "Table 3",
472
+ "ref_id": "TABREF5"
473
+ }
474
+ ],
475
+ "eq_spans": [],
476
+ "section": "The differences of semantic",
477
+ "sec_num": "5.2"
478
+ },
479
+ {
480
+ "text": "We discussed two types of semantic representations above, i.e. Feeling and Intentional mental state. Feeling can be represented by adjectives and \"noun + NO\" structures. However, Intentional mental state can be represented only by \"noun + NO\" structures. From the standpoint of the characteristics of the modified nouns (they represent human mental states), these two mental activities (Feeling and Intentional mental state) are similar, even though there are .differences in whether the activity is intentional or not. However, from the standpoint of the selection of an adnominal relationship in the surface structure, whether the activity has active intention or not will be the deciding factor for the selection between adjectives and \"noun + NO\" structures.",
481
+ "cite_spans": [],
482
+ "ref_spans": [],
483
+ "eq_spans": [],
484
+ "section": "The differences of semantic",
485
+ "sec_num": "5.2"
486
+ },
487
+ {
488
+ "text": "5.3 The case where the semantic behavior of \"noun + NO\" structures is similar to that of adjectives",
489
+ "cite_spans": [],
490
+ "ref_spans": [],
491
+ "eq_spans": [],
492
+ "section": "The differences of semantic",
493
+ "sec_num": "5.2"
494
+ },
495
+ {
496
+ "text": "Here we focus on nouns whose properties are unlike those of adjectives, i.e. the nouns which refer to concrete objects, verbal notions and so on.",
497
+ "cite_spans": [],
498
+ "ref_spans": [],
499
+ "eq_spans": [],
500
+ "section": "The differences of semantic",
501
+ "sec_num": "5.2"
502
+ },
503
+ {
504
+ "text": "(1) In the case where \"noun + NO\" represents characteristics, there is some overlap between the semantic behavior of adjectives and \"noun + NO\" structures.",
505
+ "cite_spans": [],
506
+ "ref_spans": [],
507
+ "eq_spans": [],
508
+ "section": "The differences of semantic",
509
+ "sec_num": "5.2"
510
+ },
511
+ {
512
+ "text": "I) The case where the noun in \"noun + NO\" is a compound noun In the previous two examples, the differences between \"noun + NO\" and adjectives depend only on whether the nouns they modify represent a person or a city where both head nouns have characteristics in common. However, \"KOUGYOUTOSHI (industry city)\" does not always have the same semantic relation to the modified noun, as seen in the following example:",
513
+ "cite_spans": [],
514
+ "ref_spans": [],
515
+ "eq_spans": [],
516
+ "section": "The differences of semantic",
517
+ "sec_num": "5.2"
518
+ },
519
+ {
520
+ "text": "KOUGYOUTOSHI NO YUKYUTI (industry city) (of) (vacant land) NI TYAKUMOKU. They noticed the vacant land in the industrial city.",
521
+ "cite_spans": [],
522
+ "ref_spans": [],
523
+ "eq_spans": [],
524
+ "section": "The differences of semantic",
525
+ "sec_num": "5.2"
526
+ },
527
+ {
528
+ "text": "In this example, the semantic relation between \"KOUGYOUTOSHI NO (of industry city)\" and \"YUKYUTI (the vacant land)\" indicate the relation of possession so that it is not a semantic relation that adjectives can represent. When the modified nouns are abstract nouns that represent the property (\"INSHOU (impression)\" or \"SEIKAKU (characteristics)\" etc.), or instances of the concrete nouns in compound nouns (\"KAWASAKI SHI (KAWASAKI city)\"), the semantic function of compound nouns in \"noun + NO\" constructions represent the characteristics of the referent of the modified nouns as adjectives do. When the modified noun is an instance of the last word of the modifying compound noun, the semantic function of the whole compound noun is similar to that of adjectives because, in this type of compound, we focus on the adjectival semantic element. For example, \"KOUGYOU (industry)\" in \"KOUGYOUTOSHI (industry city)\", \"KOUKYUU (high-quality)\" in \"KOUKYU-UHIN (high quality item)\", and \"YUURYOU (excellent)\" in \"YUURYOUGAISHA (excellent company)\" are adjectival. II) the nouns that refer to the concrete object in \"noun + NO\"",
529
+ "cite_spans": [],
530
+ "ref_spans": [],
531
+ "eq_spans": [],
532
+ "section": "The differences of semantic",
533
+ "sec_num": "5.2"
534
+ },
535
+ {
536
+ "text": "Originally the nouns that refer to a concrete object or event do not have the same meaning as adjectives, however, they have similar semantic behavior to that of adjectives in the following case. KARE WA OTONASHII KIHUU (mild)",
537
+ "cite_spans": [],
538
+ "ref_spans": [],
539
+ "eq_spans": [],
540
+ "section": "The differences of semantic",
541
+ "sec_num": "5.2"
542
+ },
543
+ {
544
+ "text": "NO MOTINUSHI DA. He has a mild disposition.",
545
+ "cite_spans": [],
546
+ "ref_spans": [],
547
+ "eq_spans": [],
548
+ "section": "The differences of semantic",
549
+ "sec_num": "5.2"
550
+ },
551
+ {
552
+ "text": "The \"mild\" represents the characteristic (disposition). In the following examples the \"noun + NO\" also indicate the characteristics of something. Ordinary people have a strong impression of environmental pollution from the chemical company.",
553
+ "cite_spans": [],
554
+ "ref_spans": [],
555
+ "eq_spans": [],
556
+ "section": "The differences of semantic",
557
+ "sec_num": "5.2"
558
+ },
559
+ {
560
+ "text": "The impression the children make is of a \"HINOKI (HINOKI-tree)\" and the impression the chemical company makes is of \"KANKYOUOSEN (environmental pollution)\". These \"noun + NO\" structures represent the characteristics of children and a company in the same manner that the adjective \"mild\" indicates his characteristic.",
561
+ "cite_spans": [],
562
+ "ref_spans": [],
563
+ "eq_spans": [],
564
+ "section": "The differences of semantic",
565
+ "sec_num": "5.2"
566
+ },
567
+ {
568
+ "text": "In these examples, nouns in \"noun + NO\" represent objects and events and so on, i.e. \"HINOKI-tree\" and \"environmental pollution\" these nouns ordinarily do not behave like adjectives. That is, the adjective \"mild\" can represent a characteristic directly, however, these nouns in \"noun + NO\" cannot represent the characteristics of something directly. We cannot say \"that children are HINOKI-tree\" and \"the company is the environmental pollution\" while we can say \"He is mild.\" That is, in this case, \"noun + NO\" cannot appear in the predicative position with this meaning. When we show the characteristics of something by using nouns that refer to concrete objects and events, we need to specify the modified nouns which indicate the characteristics like \"impression, .... disposition\" and so on.",
569
+ "cite_spans": [],
570
+ "ref_spans": [],
571
+ "eq_spans": [],
572
+ "section": "The differences of semantic",
573
+ "sec_num": "5.2"
574
+ },
575
+ {
576
+ "text": "(2) \"Noun + NO\" can represent quantification. The rate of debt has reached a dangerous level for the household budget.",
577
+ "cite_spans": [],
578
+ "ref_spans": [],
579
+ "eq_spans": [],
580
+ "section": "The differences of semantic",
581
+ "sec_num": "5.2"
582
+ },
583
+ {
584
+ "text": "The suggestion of the Japanese prime minister is at an \"abstract\" level on the \"concreteabstract\" scale and the rate of debt is at a \"dangerous\" level on the \"safety-dangerous\" scale. The level of concreteness and safety is represented by adjectives. On the other hand, the nouns that refer to concrete objects and verbal notions also represent a level by inference from the context. We can infer the scale from the contextual situation. For example, KOUNIN KOUHO WA UWASA NO DANKAI (rumor) (of) (stage) the stage of rumor DA GA BORUGA SHI Though it is completely at the stage of rumor, the candidate for the succession is Mr. Borgar ... SHUSHOU GAWA WA \"\" (the prime minister and his staff) ENZETU NO IKI (speech) (of) (level) WO KOERARENAKATTA. Though the prime minister and his staff said \"we will specify the guidelines of the government proposal during the election\", after all it was still at the level of speech.",
585
+ "cite_spans": [],
586
+ "ref_spans": [],
587
+ "eq_spans": [],
588
+ "section": "The differences of semantic",
589
+ "sec_num": "5.2"
590
+ },
591
+ {
592
+ "text": "KANSEI NO IKI (completeness) (of) (level) NI TASSHITEITA. It reached a level of completeness, technically.",
593
+ "cite_spans": [],
594
+ "ref_spans": [],
595
+ "eq_spans": [],
596
+ "section": "GIJUTUTEKINIWA",
597
+ "sec_num": null
598
+ },
599
+ {
600
+ "text": "In the above case, we do not have a semantic element of actual \"talk\" in the \"rumor\" or \"speech\" meaning nor a semantic element \"event\" in the \"completeness\" meaning, but we have the level of \"rumor\" on the \"truth-rumor\" scale, the level of \"speech\" on the \"statementspeech\" scale and the level of \"completeness\" on the \"incompleteness-completeness\" scale. The nouns that refer to concrete objects and verbal actions are similar to adjectives when they represent a level in context.",
601
+ "cite_spans": [],
602
+ "ref_spans": [],
603
+ "eq_spans": [],
604
+ "section": "GIJUTUTEKINIWA",
605
+ "sec_num": null
606
+ },
607
+ {
608
+ "text": "In this paper, we discussed the similarities and differences among adnominal constituents, i.e. adjectives and \"noun + NO\" structures which have a broad range of semantic functions. Nouns and adjectives differ in part of speech, but they sometimes have similarities when used adnominally. In such a case, we need not distinguish them from each other semantically. We investigated explicit criteria to detect similarities and differences between nouns and adjectives in adnominal usage. This research was verified by using large corpora and a self-organizing mapping system based on the neural network model. In future work, we will attempt to systematically classify words used adnominally according to the semantic behavior of adnominal constituents following the theoretical insights of Pustejovsky.",
609
+ "cite_spans": [],
610
+ "ref_spans": [],
611
+ "eq_spans": [],
612
+ "section": "Conclusion",
613
+ "sec_num": "6"
614
+ }
615
+ ],
616
+ "back_matter": [
617
+ {
618
+ "text": "We would like to thank Catherine Macleod of New York University and Kiyotaka Uchimoto of the Communications Research Laboratory for their invaluable help in writing this paper.",
619
+ "cite_spans": [],
620
+ "ref_spans": [],
621
+ "eq_spans": [],
622
+ "section": "Acknowledgment",
623
+ "sec_num": null
624
+ }
625
+ ],
626
+ "bib_entries": {
627
+ "BIBREF0": {
628
+ "ref_id": "b0",
629
+ "title": "Mental State Adjectives: the Perspective of Generative Lexicon",
630
+ "authors": [
631
+ {
632
+ "first": "P",
633
+ "middle": [],
634
+ "last": "Bouillon",
635
+ "suffix": ""
636
+ }
637
+ ],
638
+ "year": 1996,
639
+ "venue": "Proc. of COLING96",
640
+ "volume": "",
641
+ "issue": "",
642
+ "pages": "",
643
+ "other_ids": {},
644
+ "num": null,
645
+ "urls": [],
646
+ "raw_text": "P. Bouillon. 1996. Mental State Adjectives: the Perspective of Generative Lexicon. In Proc. of COLING96.",
647
+ "links": null
648
+ },
649
+ "BIBREF1": {
650
+ "ref_id": "b1",
651
+ "title": "Corpus-Derived First, Second and Third-Order Word Affinities",
652
+ "authors": [
653
+ {
654
+ "first": "G",
655
+ "middle": [],
656
+ "last": "Grefenstette",
657
+ "suffix": ""
658
+ }
659
+ ],
660
+ "year": 1994,
661
+ "venue": "Proc. off the EURALEX '9~",
662
+ "volume": "",
663
+ "venue": "Proc. of the EURALEX '94",
664
+ "pages": "",
665
+ "other_ids": {},
666
+ "num": null,
667
+ "urls": [],
668
+ "raw_text": "G. Grefenstette. 1994. Corpus-Derived First, Second and Third-Order Word Affinities. In Proc. of the EURALEX '94.",
669
+ "links": null
670
+ },
671
+ "BIBREF2": {
672
+ "ref_id": "b2",
673
+ "title": "Lexical Semantics to Disambiguate Polysemous Phenomena of Japanese Adnominal Constituents",
674
+ "authors": [
675
+ {
676
+ "first": "H",
677
+ "middle": [],
678
+ "last": "Isahara",
679
+ "suffix": ""
680
+ },
681
+ {
682
+ "first": "K",
683
+ "middle": [],
684
+ "last": "Kanzaki",
685
+ "suffix": ""
686
+ }
687
+ ],
688
+ "year": 1999,
689
+ "venue": "Proc. of A CL99",
690
+ "volume": "",
691
+ "issue": "",
692
+ "pages": "",
693
+ "other_ids": {},
694
+ "num": null,
695
+ "urls": [],
696
+ "raw_text": "H. Isahara and K. Kanzaki. 1999. Lexical Se- mantics to Disambiguate Polysemous Phe- nomena of Japanese Adnominal Constituents. In Proc. of A CL99.",
697
+ "links": null
698
+ },
699
+ "BIBREF3": {
700
+ "ref_id": "b3",
701
+ "title": "Construction of a Japanese Semantic Map using Self-Organizing Neural Network Model",
702
+ "authors": [
703
+ {
704
+ "first": "Q",
705
+ "middle": [],
706
+ "last": "Ma",
707
+ "suffix": ""
708
+ },
709
+ {
710
+ "first": "K",
711
+ "middle": [],
712
+ "last": "Kanzaki",
713
+ "suffix": ""
714
+ },
715
+ {
716
+ "first": "M",
717
+ "middle": [],
718
+ "last": "Murata",
719
+ "suffix": ""
720
+ },
721
+ {
722
+ "first": "K",
723
+ "middle": [],
724
+ "last": "Uchimoto",
725
+ "suffix": ""
726
+ },
727
+ {
728
+ "first": "H",
729
+ "middle": [],
730
+ "last": "Isahara",
731
+ "suffix": ""
732
+ }
733
+ ],
734
+ "year": 2000,
735
+ "venue": "6th Annual Meeting of the Association for Natural Language Processing",
736
+ "volume": "",
737
+ "issue": "",
738
+ "pages": "",
739
+ "other_ids": {},
740
+ "num": null,
741
+ "urls": [],
742
+ "raw_text": "Q. Ma, K. Kanzaki, M. Murata, K. Uchi- moto, and H. Isahara. 2000. Construction of a Japanese Semantic Map using Self- Organizing Neural Network Model. In 6th Annual Meeting of the Association for Nat- ural Language Processing, Japan. (will ap- pear).",
743
+ "links": null
744
+ },
745
+ "BIBREF4": {
746
+ "ref_id": "b4",
747
+ "title": "The Generative Lexicon",
748
+ "authors": [
749
+ {
750
+ "first": "J",
751
+ "middle": [],
752
+ "last": "Pustejovsky",
753
+ "suffix": ""
754
+ }
755
+ ],
756
+ "year": 1995,
757
+ "venue": "",
758
+ "volume": "",
759
+ "issue": "",
760
+ "pages": "",
761
+ "other_ids": {},
762
+ "num": null,
763
+ "urls": [],
764
+ "raw_text": "J. Pustejovsky. 1995. The Generative Lexicon. The MIT Press.",
765
+ "links": null
766
+ },
767
+ "BIBREF5": {
768
+ "ref_id": "b5",
769
+ "title": "A Generative Lexicon Perspective for Adjectival Modification",
770
+ "authors": [
771
+ {
772
+ "first": "P",
773
+ "middle": [],
774
+ "last": "Saint-Dizier",
775
+ "suffix": ""
776
+ }
777
+ ],
778
+ "year": 1998,
779
+ "venue": "Proc. of the Conference volume2 in 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics(COLING-A CL '98)",
780
+ "volume": "",
781
+ "issue": "",
782
+ "pages": "",
783
+ "other_ids": {},
784
+ "num": null,
785
+ "urls": [],
786
+ "raw_text": "P. Saint-Dizier. 1998. A Generative Lex- icon Perspective for Adjectival Modifica- tion. In Proc. of the Conference volume2 in 36th Annual Meeting of the Associa- tion for Computational Linguistics and 17th International Conference on Computational Linguistics(COLING-A CL '98).",
787
+ "links": null
788
+ },
789
+ "BIBREF6": {
790
+ "ref_id": "b6",
791
+ "title": "Analysis of semantic relations between nouns",
792
+ "authors": [
793
+ {
794
+ "first": "A",
795
+ "middle": [],
796
+ "last": "Shimazu",
797
+ "suffix": ""
798
+ },
799
+ {
800
+ "first": "S",
801
+ "middle": [],
802
+ "last": "Naito",
803
+ "suffix": ""
804
+ },
805
+ {
806
+ "first": "H",
807
+ "middle": [],
808
+ "last": "Nomura",
809
+ "suffix": ""
810
+ }
811
+ ],
812
+ "year": 1986,
813
+ "venue": "",
814
+ "volume": "",
815
+ "issue": "",
816
+ "pages": "",
817
+ "other_ids": {},
818
+ "num": null,
819
+ "urls": [],
820
+ "raw_text": "A. Shimazu, S. Naito, and H. Nomura. 1986. Analysis of semantic relations between nouns connected by a Japanese particle \"no\".",
821
+ "links": null
822
+ }
823
+ },
824
+ "ref_entries": {
825
+ "FIGREF0": {
826
+ "uris": null,
827
+ "text": "Semantic Map 1 Semantic Map 2",
828
+ "type_str": "figure",
829
+ "num": null
830
+ },
831
+ "TABREF3": {
832
+ "text": "",
833
+ "html": null,
834
+ "num": null,
835
+ "type_str": "table",
836
+ "content": "<table><tr><td colspan=\"2\">List of adjectives and \"noun + NO\"</td></tr><tr><td colspan=\"2\">Structures</td></tr><tr><td colspan=\"2\">&lt;mental state: emotion&gt;</td></tr><tr><td>Adj:</td><td>KANASHII (sad), URESHII</td></tr><tr><td/><td>(pleasurable)</td></tr><tr><td colspan=\"2\">noun+no: KANASHIMI (sadness),</td></tr><tr><td/><td>YOROKOBI (delight)</td></tr><tr><td colspan=\"2\">&lt;state/situation&gt;</td></tr><tr><td>Adj:</td><td>ISOGASHII (busy),</td></tr><tr><td/><td>MUTITUJONA (disorderly)</td></tr><tr><td colspan=\"2\">noun+no: KURAYAMI (darkness),</td></tr><tr><td/><td>MUISHIKI (unconscious)</td></tr><tr><td>&lt;aspect&gt;</td><td/></tr><tr><td>Adj:</td><td>YUUMOUNA (brave),</td></tr><tr><td/><td>HIGEKITEKINA (tragic)</td></tr><tr><td colspan=\"2\">noun+no: KONTON (chaos), TAIHAI</td></tr><tr><td/><td>(decadence)</td></tr><tr><td colspan=\"2\">&lt;characteristic&gt;</td></tr><tr><td>Adj:</td><td>NONKINA (carefree),</td></tr><tr><td/><td>KISAKUNA (open-hearted)</td></tr><tr><td colspan=\"2\">noun+no: IJIPPARI (stubbornness),</td></tr><tr><td/><td>GOUMANNA (arrogance)</td></tr><tr><td colspan=\"2\">&lt;range/area&gt;</td></tr><tr><td>Adj:</td><td>JOUSHIKITEKINA (comnmnsense),</td></tr><tr><td/><td>KOUTEKINA (official)</td></tr><tr><td colspan=\"2\">noun+no: GAKUMON (studies), GYOUMU</td></tr><tr><td/><td>(duty)</td></tr><tr><td colspan=\"2\">&lt;viewpoint/standpoint&gt;</td></tr><tr><td>Adj:</td><td>KYOUIKUTEKINA (educational),</td></tr><tr><td/><td>SHOUGYOUTEKINA (economic)</td></tr><tr><td colspan=\"2\">noun+no: KYOUIKU (education), EISEI</td></tr><tr><td/><td>(hygiene)</td></tr></table>"
837
+ },
838
+ "TABREF4": {
839
+ "text": "",
840
+ "html": null,
841
+ "num": null,
842
+ "type_str": "table",
843
+ "content": "<table><tr><td colspan=\"2\">Some \"noun + NO\" constructions with</td></tr><tr><td>\"impression\"</td><td/></tr><tr><td colspan=\"2\">1) nouns which refer to concrete objects</td></tr><tr><td colspan=\"2\">KOUGYOU TOSHI, HINOKI</td></tr><tr><td>(industry city)</td><td>(cypress)</td></tr><tr><td>2) nominalizations</td><td/></tr><tr><td>SOKUBAKU,</td><td>KOUTEN</td></tr><tr><td>(restriction)</td><td/></tr></table>"
844
+ },
845
+ "TABREF5": {
846
+ "text": "The modified nouns and adjectives, nominal adjectivals, and \"noun + NO\" collected in the semantic category, <mental state>",
847
+ "html": null,
848
+ "num": null,
849
+ "type_str": "table",
850
+ "content": "<table><tr><td>Modified nouns</td></tr><tr><td>KANJI (feeling), KAN (sensation),</td></tr><tr><td>OMOI (thought), KI (intention),</td></tr><tr><td>NEN (inclination), KIMOCHI (mind),</td></tr><tr><td>KIBUN (mood), KANJO (emotion),</td></tr><tr><td>JO (passion)</td></tr><tr><td>Adjectives and nominal adjectivals</td></tr><tr><td>AWARE_NA (poor), IJIRASHII (moving),</td></tr><tr><td>HOKORASHII (triumphant),</td></tr><tr><td>KINODOKU_NA (unfortunate),</td></tr><tr><td>SHIAWASE_NA (happy),</td></tr><tr><td>ZANNEN_NA (disappointing),</td></tr><tr><td>URESHII (pleasurable), ...and so on.</td></tr><tr><td>\"Nouns\" in the \"noun + NO\" structure</td></tr><tr><td>a) mental activity</td></tr><tr><td>KANASHIMI (sadness), FUKAI (displeasure),</td></tr><tr><td>SHITASHIMI (familiarity),</td></tr><tr><td>ZOUO (abhorrence), GAMAN (endurance),</td></tr><tr><td>KOUKAI (regret), YOROKOBI (joy),</td></tr><tr><td>MANZOKU (satisfaction),</td></tr><tr><td>RAKUTAN (disappointment),</td></tr><tr><td>IGAI (unexpected), ...and so on.</td></tr><tr><td>b) nominalizations</td></tr><tr><td>HOSHIN (self-defense),</td></tr><tr><td>CHIKUZAI (moneymaking),</td></tr><tr><td>INTAI (retirement), HIHAN (criticism),</td></tr><tr><td>HIYAKU (rapid progress), ...and so on</td></tr></table>"
851
+ }
852
+ }
853
+ }
854
+ }
Full_text_JSON/prefixW/json/W00/W00-0200.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "paper_id": "W00-0200",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:46.439323Z"
6
+ },
7
+ "title": "",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0200",
15
+ "_pdf_hash": "",
16
+ "abstract": [],
17
+ "body_text": [],
18
+ "back_matter": [],
19
+ "bib_entries": {},
20
+ "ref_entries": {}
21
+ }
22
+ }
Full_text_JSON/prefixW/json/W00/W00-0201.json ADDED
@@ -0,0 +1,1236 @@
1
+ {
2
+ "paper_id": "W00-0201",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:28.418048Z"
6
+ },
7
+ "title": "An Interlingual-based Approach to Reference Resolution",
8
+ "authors": [
9
+ {
10
+ "first": "David",
11
+ "middle": [],
12
+ "last": "Farwell",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "Computing Research Laboratory",
16
+ "institution": "New Mexico State University Las Cruces",
17
+ "location": {
18
+ "postBox": "Box 30001/3CRL",
19
+ "postCode": "88003",
20
+ "region": "New Mexico"
21
+ }
22
+ },
23
+ "email": ""
24
+ }
25
+ ],
26
+ "year": "",
27
+ "venue": null,
28
+ "identifiers": {},
29
+ "abstract": "",
30
+ "pdf_parse": {
31
+ "paper_id": "W00-0201",
32
+ "_pdf_hash": "",
33
+ "abstract": [],
34
+ "body_text": [
35
+ {
36
+ "text": "In this paper we outline an interlingualbased procedure for resolving reference and suggest a practical approach to implementing it. We assume a two-stage language analysis system. First, a syntactic analysis of an input text results in a functional structure in which certain cases of pronominal reference are resolved. Second, the f-structure is mapped onto an interlingual representation. As part of this mapping, the reference of the various f-structure elements is resolved resulting in the addition of information to certain existing IL objects (coreference) or in the creation of new IL objects which are added to the domain of discourse (initial reference).",
37
+ "cite_spans": [],
38
+ "ref_spans": [],
39
+ "eq_spans": [],
40
+ "section": "",
41
+ "sec_num": null
42
+ },
43
+ {
44
+ "text": "For this effort, we adopt Text Meaning Representation for our IL and rely on the ONTOS ontology (Mahesh & Nirenburg, 1995) as a general knowledge base. Since the central barrier to developing such a system today is the incompleteness of the knowledge base, we outline a strategy starting with the implementation of a series of form-based resolution algorithms that are applied directly to the referring expressions of the input text. These are initially supplemented by a knowledge-based resolution procedure which, as the knowledge base grows and the adequacy of the f-structure and IL-representation increases, takes on more and more of the processing load.",
45
+ "cite_spans": [
46
+ {
47
+ "start": 96,
48
+ "end": 122,
49
+ "text": "(Mahesh & Nirenburg, 1995)",
50
+ "ref_id": "BIBREF7"
51
+ }
52
+ ],
53
+ "ref_spans": [],
54
+ "eq_spans": [],
55
+ "section": "",
56
+ "sec_num": null
57
+ },
58
+ {
59
+ "text": "We examine the operation of the formbased algorithms on a sample Spanish text and show their limitations. We then demonstrate how an IL-based approach can be used to resolve the problematic cases of reference. This research effort is part of the CREST project at the CRL funded by DARPA 1.",
60
+ "cite_spans": [],
61
+ "ref_spans": [],
62
+ "eq_spans": [],
63
+ "section": "",
64
+ "sec_num": null
65
+ },
66
+ {
67
+ "text": "This paper describes a practical approach to implementing an interlingual-based reference resolution procedure. This effort is part of a recently initiated research project at the Computing Research Laboratory at New Mexico State University which aims to develop a system that will automatically construct an epistemic knowledge base from text. The procedure will also support machine translation (e.g., Mikrokosmos, see Onyshkevych & Nirenburg, 1994; Carlson & Nirenburg, 1990) as well as other multilingual NLP tasks. Since the system itself is in the early stages of development, descriptions of performance are projected rather than actual.",
68
+ "cite_spans": [
69
+ {
70
+ "start": 421,
71
+ "end": 451,
72
+ "text": "Onyshkevych & Nirenburg, 1994;",
73
+ "ref_id": "BIBREF9"
74
+ },
75
+ {
76
+ "start": 452,
77
+ "end": 478,
78
+ "text": "Carlson & Nirenburg, 1990)",
79
+ "ref_id": "BIBREF1"
80
+ }
81
+ ],
82
+ "ref_spans": [],
83
+ "eq_spans": [],
84
+ "section": "Introduction and Background",
85
+ "sec_num": "1"
86
+ },
87
+ {
88
+ "text": "Current approaches to reference resolution focus on surface forms (see Sundheim & Grishman, 1995, and MUC-7, 1998 , for a general introduction). They include string match algorithms for proper noun phrases (PNs) and common noun phrases (NPs) (e.g., Bagga & Baldwin, 1998) and syntactically constrained morphological matching for pronominals and deictic NPs (Lappin & Leass, 1994) . In some cases, they include constraints related to clause structure or larger textual units (e.g., Grosz, Joshi, and Weinstein, 1995) .",
89
+ "cite_spans": [
90
+ {
91
+ "start": 71,
92
+ "end": 81,
93
+ "text": "Sundheim &",
94
+ "ref_id": null
95
+ },
96
+ {
97
+ "start": 82,
98
+ "end": 101,
99
+ "text": "Grishman, 1995, and",
100
+ "ref_id": null
101
+ },
102
+ {
103
+ "start": 102,
104
+ "end": 113,
105
+ "text": "MUC-7, 1998",
106
+ "ref_id": null
107
+ },
108
+ {
109
+ "start": 249,
110
+ "end": 271,
111
+ "text": "Bagga & Baldwin, 1998)",
112
+ "ref_id": "BIBREF0"
113
+ },
114
+ {
115
+ "start": 357,
116
+ "end": 379,
117
+ "text": "(Lappin & Leass, 1994)",
118
+ "ref_id": "BIBREF6"
119
+ },
120
+ {
121
+ "start": 481,
122
+ "end": 515,
123
+ "text": "Grosz, Joshi, and Weinstein, 1995)",
124
+ "ref_id": "BIBREF3"
125
+ }
126
+ ],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "Introduction and Background",
130
+ "sec_num": "1"
131
+ },
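The form-based algorithms cited here are simple to prototype. The sketch below covers only the string-matching half for proper-name coreference (a normalise-and-subset test); it is not the Lappin-and-Leass salience algorithm, and the normalisation rules, stop tokens and example names are invented for illustration.

import re

def normalize(name):
    """Crude proper-name normalisation: case-fold, drop punctuation and a few
    designators so that 'Telefónica de España S.A.' matches 'Telefónica'."""
    name = re.sub(r"[.,]", " ", name.lower())
    stop = {"s", "a", "sa", "inc", "corp", "ltd", "de", "la", "el"}
    return {t for t in name.split() if t not in stop}

def same_entity(name_a, name_b):
    """String-match heuristic: the shorter name's tokens are contained
    in the longer name's tokens (order ignored)."""
    a, b = normalize(name_a), normalize(name_b)
    return bool(a) and bool(b) and (a <= b or b <= a)

def link_proper_names(mentions):
    """Greedily group proper-name mentions into coreference chains."""
    chains = []                          # each chain is a list of mentions
    for mention in mentions:
        for chain in chains:
            if same_entity(mention, chain[0]):
                chain.append(mention)
                break
        else:
            chains.append([mention])
    return chains

if __name__ == "__main__":
    print(link_proper_names(["Telefónica de España S.A.", "Telefónica", "Iberia"]))
    # [['Telefónica de España S.A.', 'Telefónica'], ['Iberia']]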
132
+ {
133
+ "text": "1 DOD contract N 66001-99-1-8915 They do not generally consider implicit references or, in the case of Spanish (e.g., Ferrhndez et al, 1998) , references to contextually clear possessors using determiners rather than possessive adjectives. All such approaches are supplemented, if not entirely determined, by heuristics which, more recently, have been induced statistically from corpora (e.g., Hirsehman et al, 1998 , Popescu-Belis, 1998 . The only approaches of which the authors are aware that attempted to account for implicit referents or implicit references are those developed within AI over two decades .ago (e.g., Hobbs, 1979 , or DeJong, 1979 .",
134
+ "cite_spans": [
135
+ {
136
+ "start": 103,
137
+ "end": 140,
138
+ "text": "Spanish (e.g., Ferrhndez et al, 1998)",
139
+ "ref_id": null
140
+ },
141
+ {
142
+ "start": 387,
143
+ "end": 415,
144
+ "text": "(e.g., Hirsehman et al, 1998",
145
+ "ref_id": null
146
+ },
147
+ {
148
+ "start": 416,
149
+ "end": 437,
150
+ "text": ", Popescu-Belis, 1998",
151
+ "ref_id": "BIBREF9"
152
+ },
153
+ {
154
+ "start": 622,
155
+ "end": 633,
156
+ "text": "Hobbs, 1979",
157
+ "ref_id": "BIBREF5"
158
+ },
159
+ {
160
+ "start": 634,
161
+ "end": 651,
162
+ "text": ", or DeJong, 1979",
163
+ "ref_id": null
164
+ }
165
+ ],
166
+ "ref_spans": [],
167
+ "eq_spans": [],
168
+ "section": "Introduction and Background",
169
+ "sec_num": "1"
170
+ },
171
+ {
172
+ "text": "The approach suggested here differs radically in that reference resolution is triggered by elements of interlingual (IL) representation rather than surface text expressions. The referents in the domain of discourse consist of elements of IL representation as well. Thus, implicit references and implicit referents are accounted for and, at the same time, empty references are ignored.",
173
+ "cite_spans": [],
174
+ "ref_spans": [],
175
+ "eq_spans": [],
176
+ "section": "Introduction and Background",
177
+ "sec_num": "1"
178
+ },
179
+ {
180
+ "text": "Below, in Section 2, we outline a proposal for practically implementing an IL-based approach, beginning with a description of a target procedure which resolves the reference of each new IL element as it is being produced, clause by clause. We then present a series of form-based procedures, which are to be gradually replaced (or, in some cases, transmogrified) as the IL analysis system and supporting knowledge bases are extended. In Section 3, we briefly describe the relevant aspects of the sample Spanish used as a basis for the presentation. In Section 4, we examine the operation of the procedures in greater detail and demonstrate how the IL-based approach can resolve those problematic references that are beyond the scope of the form-based approaches.",
181
+ "cite_spans": [],
182
+ "ref_spans": [],
183
+ "eq_spans": [],
184
+ "section": "Introduction and Background",
185
+ "sec_num": "1"
186
+ },
187
+ {
188
+ "text": "The proposed approach relies on the capability of a system to provide a reasonably adequate interlingual representation for a text. Here the interlingua we rely on is a variant of Text Meaning Representation (TMR) (see http://crl.nmsu.edu/Research/Projects/mikro/in dex.html) and we focus on a sample Spanish text and its interlingual analysis, which is known to be reproducible automatically. The first step of the analysis process is to produce a functional (syntactic) structure for the text. As part of the process of establishing the f-structure, various structurally governed, clause and sentence internal anaphorie relations will be resolved, the relevant anaphors being coindexed (or assigned differing indexes) as determined by the syntax. Thus some syntactic co-reference relationships such as those related to clitics and relative pronouns will be identified before the IL procedure begins.",
189
+ "cite_spans": [],
190
+ "ref_spans": [],
191
+ "eq_spans": [],
192
+ "section": "IL-based Resolution Procedure",
193
+ "sec_num": "2.1"
194
+ },
195
+ {
196
+ "text": "The second step is to map from the fstructure to the TMR. A TMR includes, among other representational objects, instantiations of object types, relation types and property types. These are constructed from ontological concepts which are associated with the lexical items in f-structure and which are filled out on the basis of the surrounding f-structure.",
197
+ "cite_spans": [],
198
+ "ref_spans": [],
199
+ "eq_spans": [],
200
+ "section": "IL-based Resolution Procedure",
201
+ "sec_num": "2.1"
202
+ },
203
+ {
204
+ "text": "For instance, the Spanish verb comprar (to buy) might be associated with the ontological concept named PURCHASE which is a generic frame structure corresponding to purchasing events. It might in part look like: These instantiated representational objects are, in turn, referents in the discourse context when the next sentence is processed. As part of this mapping, the reference of the various fstructure elements is resolved resulting in the addition of information to certain existing IL objects (coreference) or in the creation of new IL objects which are added to the domain of discourse (initial reference). Which occurs depends on whether a connection can be inferred between the current IL object and an already-existing IL object on the basis of ontological or epistemie information. Note that reference resolution is driven by the process of instantiating TMR objects rather than by linguistic forms. At the same time, various aspects of reference resolution are being done on the basis of form. Certain anaphors are coindexed or differently indexed on the basis of morphosyntax as the f-structure is being constructed. Ontological concepts of similar or related type may be called forth during the mapping of similar or related lexical items onto TMR. Articles and other determiners affect the form of the instantiated TMR objects corresponding to the NPs containing them. Finally, aspects of the instantiated TMR objects refer to literals such as the NAME attribute of a COMPANY. These can be used to implement stringmatching algorithms.",
205
+ "cite_spans": [],
206
+ "ref_spans": [],
207
+ "eq_spans": [],
208
+ "section": "IL-based Resolution Procedure",
209
+ "sec_num": "2.1"
210
+ },
211
+ {
212
+ "text": "TIME: T [",
213
+ "cite_spans": [],
214
+ "ref_spans": [],
215
+ "eq_spans": [],
216
+ "section": "IL-based Resolution Procedure",
217
+ "sec_num": "2.1"
218
+ },
219
+ {
220
+ "text": "Because the target procedure relies on a highly sophisticated, and as yet incomplete, language analysis system, the approach to implementing it begins by assuming that none of the IL apparatus is available for processing. Instead, we implement an initial set of standard, formbased algorithms which vary according to the syntactic category of referring expression.",
221
+ "cite_spans": [],
222
+ "ref_spans": [],
223
+ "eq_spans": [],
224
+ "section": "Approach to Implementation",
225
+ "sec_num": "2.2"
226
+ },
227
+ {
228
+ "text": "Generally, if the referring expression is a proper noun phrase, a full or partial string match with each prior PN is used to establish a eoreference link. Otherwise, the PN is assumed to refer to a new referent. For standard pronominals, a recency algorithm is used which checks the morphosyntactic constraints (gender, number, etc.) and, if possible, the semantic class to filter potential coreferents. When a match is found, the coreference link is established. Common noun phrases follow a bifurcated algorithm. If the noun phrase is indefinite or has no article, then it is assumed to refer to a new referent. If it is a definite noun phrase, then the head noun string is matched against that of previous NPs. If the heads of two NPs match and the complement strings do not mismatch, the two NPs are assumed to corefer.",
229
+ "cite_spans": [],
230
+ "ref_spans": [],
231
+ "eq_spans": [],
232
+ "section": "Approach to Implementation",
233
+ "sec_num": "2.2"
234
+ },
235
+ {
236
+ "text": "As the IL analysis system and supporting knowledge bases grow and the ability to produce appropriate f-structures and TMRs is extended, the target procedures will bear increasingly more of the resolution task.",
237
+ "cite_spans": [],
238
+ "ref_spans": [],
239
+ "eq_spans": [],
240
+ "section": "Approach to Implementation",
241
+ "sec_num": "2.2"
242
+ },
243
+ {
244
+ "text": "For this presentation, we focus the discussion on a single Spanish text, a newswire article concerning a corporate buyout (see Appendix I for the original text and its translation into English). It is taken from the ARPA Machine Translation Evaluation corpus (White et al, 1994) and contains 347 words, 17 sentences and 11 paragraphs. There are 144 referring expressions altogether including 14 proper noun phrases (9.7%), 76 common noun phrases (52.8%), 20 pronominal-like expressions (13.9%), 31 verbal expressions (21.5%) and 3 prepositional phrases (2.1%). Of the common noun phrases (NP), 43 were definite NPs, 8 were indefinite NPs and 25 NPs had no explicit determiner. Of the pronominal expressions, 12 were pronouns (Pron) or deicties such as hey (today), aqui (here) or ahora (now), 3 were ellipted (subject) pronouns (PRO) and 5 were definite articles which function as possessive adjectives (Det [= Pron]) as in: However, there are in addition 129 implicit references made which need to be resolved as well. Implicit references are those that are implied by the slots of TMR objects. They may be of unexpressed participants of an event (say, a seller in a PURCHASE event), or unexpressed times or locations of events. Altogether, then, there are 273 references of which 52.75% are explicit and an almost equal amount, 47.25%, are implicit. Taking the implicit references into account, the proper noun phrases (PN) represent 5.1% of the referring expressions, the common noun phrases NP about 27.8%, pronominal-like expressions 7.3%, verbal expressions 11.4% and prepositional phrases about 1.1%. These results are summarized in Tables I and 2. As for referents, there are 138 altogether. Of these 108 (78.25%) are referred to explicitly on at least one occasion while 30 (21.75%) are referred to implicitly only. There are 40 (29%) referents that are referred to more than once, of Thus, there were 135 eoreferences altogether (273 total references-138 referents), 41 explicit coreferences and 94 implicit coreferences. Of the explicit coreferences, 9 were made by PNs, 10 by NPs, 20 by pronominal-like expressions (12 by Prons, 3 by PROs, 5 by Dets [= Pron]) and 2 by verbal expressions.",
245
+ "cite_spans": [
246
+ {
247
+ "start": 259,
248
+ "end": 278,
249
+ "text": "(White et al, 1994)",
250
+ "ref_id": "BIBREF14"
251
+ }
252
+ ],
253
+ "ref_spans": [
254
+ {
255
+ "start": 1640,
256
+ "end": 1655,
257
+ "text": "Tables I and 2.",
258
+ "ref_id": null
259
+ }
260
+ ],
261
+ "eq_spans": [],
262
+ "section": "The Data",
263
+ "sec_num": "3"
264
+ },
265
+ {
266
+ "text": "It is perhaps of some interest that very few of these references were figurative, that is, metonymic or metaphorical. There was one clearly metonymic reference, and one or two other possible metonymie references.",
267
+ "cite_spans": [],
268
+ "ref_spans": [],
269
+ "eq_spans": [],
270
+ "section": "The Data",
271
+ "sec_num": "3"
272
+ },
273
+ {
274
+ "text": "What follows here is a discussion of what is needed to resolve the 273 references made in the example Spanish text. In each section, the cases that could be handled by a form-based method are discussed, then cases that would require an IL-based approach for resolution.",
275
+ "cite_spans": [],
276
+ "ref_spans": [],
277
+ "eq_spans": [],
278
+ "section": "Reference Resolution",
279
+ "sec_num": null
280
+ },
281
+ {
282
+ "text": "As mentioned, there are 14 PNs in the sample text, 9.7% of the explicit referring expressions or 5.1% of all references. In addition, there were 4 cases of common noun phrases which were in apposition with proper noun phrases and which will be considered here as well. For PNs, the basic resolution strategy is to match the form of the expression with that of each of the PNs used previously. If there is a match, assume the current PN is being used to corefer to the referent of the matching PN. This applies in 4 of 18 cases. If no match is found, attempt a partial positive form match. That is, if any part of either form for which there is a partial match does not match a corresponding substring in the other form, then the match fails. If no positive partial match can be found, it is assumed that the PN is not being used to corefer to an existing referent and introduces a new referent. This takes care of 9 additional cases.",
283
+ "cite_spans": [],
284
+ "ref_spans": [],
285
+ "eq_spans": [],
286
+ "section": "Resolving Proper Noun Phrases",
287
+ "sec_num": "4.1"
288
+ },
289
+ {
290
+ "text": "This basic PN resolution procedure, then, handles 16 of the 18 PNs (89%). That leaves 2 cases which it will not handle, Doctor Andreu coreferring to Docteur Andreu and Productos Roche SA coreferring to su compa~ia en Espa~a",
291
+ "cite_spans": [],
292
+ "ref_spans": [],
293
+ "eq_spans": [],
294
+ "section": "Resolving Proper Noun Phrases",
295
+ "sec_num": "4.1"
296
+ },
297
+ {
298
+ "text": "The second problem is not all that rare. Here, the PN is being used to corefer to a referent that was initially introduced by a common noun phrase. The first step is to identify the semantic class of the PN, possibly through some independent PN classifying procedure or possibly by looking at any semantic constraints that arise from the context. In the case of Productos Roche SA, for instance, it might be classed as a COMPANY by some independent PN classifying procedure, say, on the basis of the SA, or by inspecting its context.",
299
+ "cite_spans": [],
300
+ "ref_spans": [],
301
+ "eq_spans": [],
302
+ "section": "Resolving Proper Noun Phrases",
303
+ "sec_num": "4.1"
304
+ },
305
+ {
306
+ "text": "... la operaci6n realizada entre ... the transaction carried out between Productos Roche SA y Uni6n Explosivos Productos Roche SA and Uni6n Explosivo Rio... Rio ... In example (1), the expression la operaci6n is used to refer to some transaction that, as the text goes on to report, was carried out between Productos Roche and Uni6n Explosivos Rio Tinto. If transactions are carried out by companies (general semantic knowledge), then Productos Roche and Uni6n Explosivos must be companies.",
307
+ "cite_spans": [],
308
+ "ref_spans": [],
309
+ "eq_spans": [],
310
+ "section": "Resolving Proper Noun Phrases",
311
+ "sec_num": "4.1"
312
+ },
313
+ {
314
+ "text": "Having established the semantic category of Produetos Roche, the next step is to establish a plausible connection between Productos Roche and an established referent of the same semantic category. That is, the procedure is now to inspect all the established referents of the category COMPANY (i.e., the Roche group, Doctor Andreu and Roche's company in Spain). We know from prior text that Roche bought Doctor Andreu and that Roche acquired Doctor Andreu through its subsidiary in Spain (epistemie knowledge). From the current text, we know that Productos Roche and Uni6n Explosivos were actually involved in the transaction and that Uni6n Explosive had been a majority shareholder but, by implication, no longer is (epistemic knowledge). Thus, Uni6n Explosivos appears to be the seller and, by implication, Productos Roche could be the buyer, i.e., Roche's company in Spain.",
315
+ "cite_spans": [],
316
+ "ref_spans": [],
317
+ "eq_spans": [],
318
+ "section": "Resolving Proper Noun Phrases",
319
+ "sec_num": "4.1"
320
+ },
321
+ {
322
+ "text": "The same procedure can be used to establishing that the reference of Doctor Andreu is the same as that ofDocteur Andreu: establish the semantic class of Doctor Andreu, inspect each existing referent of that class to see whether or not a plausible connection can be established.",
323
+ "cite_spans": [],
324
+ "ref_spans": [],
325
+ "eq_spans": [],
326
+ "section": "Resolving Proper Noun Phrases",
327
+ "sec_num": "4.1"
328
+ },
329
+ {
330
+ "text": "In the sample Spanish text, there are 19 pronominal expressions, 13.9% of the explicit referring expressions or 7.3% of all references. Of these, there are 15 explicit forms and 4 ellipted forms. The 15 explicit forms include 6 possessive pronouns, 4 deictie adverbials and 5 definite articles. The 4 ellipted forms include 3 eilipted subjects of finite verbs and 1 ellipted head of a relative complementizer.",
331
+ "cite_spans": [],
332
+ "ref_spans": [],
333
+ "eq_spans": [],
334
+ "section": "Resolving Pronominals",
335
+ "sec_num": "4.2"
336
+ },
337
+ {
338
+ "text": "The basic form-based strategy for resolving pronominal reference is to begin by inspecting in reverse order of mention those referring expressions whose forms are compatible with the morphological constraints imposed by the pronominal. This strategy is usually constrained by various syntactic heuristics such as that a non-reflexive pronoun in object position cannot corefer to the subject or a pronominal complement of a noun cannot corefer to the head (e.g., Ferrfindez et al, 1998) . Such a resolution procedure will account for 4 of the 6 cases (66%) in the sample text.",
339
+ "cite_spans": [
340
+ {
341
+ "start": 462,
342
+ "end": 485,
343
+ "text": "Ferrfindez et al, 1998)",
344
+ "ref_id": "BIBREF2"
345
+ }
346
+ ],
347
+ "ref_spans": [],
348
+ "eq_spans": [],
349
+ "section": "#.2.1 Explicit pronouns",
350
+ "sec_num": null
351
+ },
352
+ {
353
+ "text": "To resolve the remaining cases, it is necessary to check the referent of the antecedent to see whether it is semantically compatible with the contextual function of the anaphor. So, for instance, in resolving the reference ofsu (its, his, her or their) in:",
354
+ "cite_spans": [],
355
+ "ref_spans": [],
356
+ "eq_spans": [],
357
+ "section": "#.2.1 Explicit pronouns",
358
+ "sec_num": null
359
+ },
360
+ {
361
+ "text": "El beneficio neto -el mejor de su The[=its] net profits the best in its historia-se elev6 a 641,5 millones de history increased to 641.5 million",
362
+ "cite_spans": [],
363
+ "ref_spans": [],
364
+ "eq_spans": [],
365
+ "section": "#.2.1 Explicit pronouns",
366
+ "sec_num": null
367
+ },
368
+ {
369
+ "text": "j'~'anco3 ... Francs ...",
370
+ "cite_spans": [],
371
+ "ref_spans": [],
372
+ "eq_spans": [],
373
+ "section": "#.2.1 Explicit pronouns",
374
+ "sec_num": null
375
+ },
376
+ {
377
+ "text": "the procedure is to first shuffle back through the referring expressions until a third person form is encountered. Here, the first third person referring expression is El beneficio neto (some company's net profits). The procedure next needs to establish through infereneing that the referent of El beneficio neto can serve the function of the anaphor su, that is, can have a history. In this case, a plausible inference cannot be established, and so the procedure moves on to consider the next most recently mentioned referent, the Roche group which is being referred to by the E1 of El beneficio neto which is being used to express a possessor relation. Here, on the basis of ontological knowledge about what companies can or cannot have, su is understood as coreferring to the referent of El.",
378
+ "cite_spans": [],
379
+ "ref_spans": [],
380
+ "eq_spans": [],
381
+ "section": "#.2.1 Explicit pronouns",
382
+ "sec_num": null
383
+ },
384
+ {
385
+ "text": "For the ellipted pronominals in the sample Spanish text, syntax will have to identify such ellipted elements in order to trigger the reference resolution process. However, once identified, the basic strategy described above for explicit pronouns should apply unaltered although, unlike possessive pronouns, the morphological constraints of the anaphor must be extracted from the morphosyntactic context. So, for instance, the ellipted subject of:",
386
+ "cite_spans": [],
387
+ "ref_spans": [],
388
+ "eq_spans": [],
389
+ "section": "Ellipted pronominals",
390
+ "sec_num": "4.2.2"
391
+ },
392
+ {
393
+ "text": "... cuenta PRO con compa~las en m6s ... has companies in more de 50 paises ... than 50 countries ... must be a third person singular referent (given the conjugation of the verb cuenta). In this case, the basic resolution procedure correctly resolves 2 of the 4 eases (50%).",
394
+ "cite_spans": [],
395
+ "ref_spans": [],
396
+ "eq_spans": [],
397
+ "section": "Ellipted pronominals",
398
+ "sec_num": "4.2.2"
399
+ },
400
+ {
401
+ "text": "For the remainder, the semantic function of the ellipted element is also extracted from context, i.e., it functions as the subject of contar con compa~ias (has companies). Thus, in the example above, it must be something that can own companies. Among the third singular candidate expressions, in reverse order of mention: el diagn6stico (diagnosis), la comercializaci6n",
402
+ "cite_spans": [],
403
+ "ref_spans": [],
404
+ "eq_spans": [],
405
+ "section": "Ellipted pronominals",
406
+ "sec_num": "4.2.2"
407
+ },
408
+ {
409
+ "text": "(the marketing), la producci6n (the manufacture), el desarrollo (the development), Basilea (Suiza) (Basel, Switzerland) and sede central (home office), none are potential owners of companies. The next most remote referring expression to be inspected is el grupo Roche (the Roche Group) which, as it turns out, is something that can own companies and, therefore, the PRO is identified as coreferring to the same referent.",
410
+ "cite_spans": [],
411
+ "ref_spans": [],
412
+ "eq_spans": [],
413
+ "section": "Ellipted pronominals",
414
+ "sec_num": "4.2.2"
415
+ },
416
+ {
417
+ "text": "Deictic elements, such as the adverbs hey (today), aqui (here), ahora (now), and so on, are resolved directly to properties of the utterance context: the day of utterance, the place of utterance, the time of utterance, and so on. There were 4 such pronouns in the sample text, 3 referring (hey, aqul, ahora\") and 1 coreferring (hey). It should be noted that all these elements are, in fact, coreferent with implicit temporal and spatial references of various finite verbs that are used to report certain events or states of affairs.",
418
+ "cite_spans": [],
419
+ "ref_spans": [],
420
+ "eq_spans": [],
421
+ "section": "Deictic Elements",
422
+ "sec_num": "4.2.3"
423
+ },
424
+ {
425
+ "text": "Perhaps the most contentious of the pronominal elements to be discussed here are the definite articles of noun phrases which Can be contextually interpreted as having the force of possessive adjectives. There are 6 examples of this in the sample text. However, these are by no means all the definite articles found and, in addition, of these six example, four were translated as possessive adjectives, one as a definite article, the, and one was omitted altogether in translation. In other words, not only is an ambiguity introduced for a very common lexical item, but even when resolved in favor of the possessor interpretation, it may not be translated as a possessive adjective. On the other hand, the major reason for assuming a distinction is that it is very important to establish such relationships in order to understand or translate a document. For instance, if it is not established that the cash flow referred to in:",
426
+ "cite_spans": [],
427
+ "ref_spans": [],
428
+ "eq_spans": [],
429
+ "section": "Articles with pronominal force",
430
+ "sec_num": "4.2.4"
431
+ },
432
+ {
433
+ "text": "El \"cash flow\" se increraent6 en un 21 its cash flow increased by about 21 per ciento ... per cent ... is that of company X, then it will be impossible to determine whether the cash flow referred to later in the text in:",
434
+ "cite_spans": [],
435
+ "ref_spans": [],
436
+ "eq_spans": [],
437
+ "section": "Articles with pronominal force",
438
+ "sec_num": "4.2.4"
439
+ },
440
+ {
441
+ "text": "... y el \"cash flow\" de ... ... and its cash flow ...",
442
+ "cite_spans": [],
443
+ "ref_spans": [],
444
+ "eq_spans": [],
445
+ "section": "Articles with pronominal force",
446
+ "sec_num": "4.2.4"
447
+ },
448
+ {
449
+ "text": "is that of company X or of some other company. That means that such information will not only be unavailable for use during translation (e.g., selecting a possessive adjective in the target language) but for any other purpose that might come along (e.g., information extraction). In any case, the procedure for resolving the reference of definite articles is the basic pronominal resolution procedure except that no morphological constraints can be placed on the antecedent expression. Still, syntactic constraints on the antecedent may be applied. Following this strategy, 5 of the 6 cases (83%) are resolved correctly although it is not clear whether it leads to false positives. Otherwise, potential referents are considered until one is found which can serve the appropriate possessor function. Since a positive connection must be inferred, the likelihood of false positives is greatly decreased.",
450
+ "cite_spans": [],
451
+ "ref_spans": [],
452
+ "eq_spans": [],
453
+ "section": "Articles with pronominal force",
454
+ "sec_num": "4.2.4"
455
+ },
456
+ {
457
+ "text": "If proper noun phrases and pronominals were the only type of referring expressions, formbased resolution techniques might prove sufficient. They account, however, for only 23.6% of the explicit referring expression or 12.4% of all references. It is for the resolution of common noun phrases, clauses and implicit references that an interlingual-based procedure will eventually prove necessary.",
458
+ "cite_spans": [],
459
+ "ref_spans": [],
460
+ "eq_spans": [],
461
+ "section": "Resolving Common Noun Phrases",
462
+ "sec_num": "4.3"
463
+ },
464
+ {
465
+ "text": "There are 76 common noun expressions in sample Spanish text. Of these 42 are definite noun phrases (32 referring, 10 coreferring), 9 are indefinite noun phrases (all 9 referring), and 25 are noun phrases having no article (all 25 referring).",
466
+ "cite_spans": [],
467
+ "ref_spans": [],
468
+ "eq_spans": [],
469
+ "section": "Resolving Common Noun Phrases",
470
+ "sec_num": "4.3"
471
+ },
472
+ {
473
+ "text": "Since none of the indefinite noun phrases or the noun phrases without articles are used to corefer, an initial basic resolution strategy for common noun phrases begins by inspecting the form of the referring expression. If it is an indefinite noun phrase or noun phrase without article, it is assumed to refer to something new and a new referent is added to the referents in the domain of discourse. That successfully resolves the reference of 34 of the 76 common noun phrases (45%) in the text.",
474
+ "cite_spans": [],
475
+ "ref_spans": [],
476
+ "eq_spans": [],
477
+ "section": "Resolving Common Noun Phrases",
478
+ "sec_num": "4.3"
479
+ },
480
+ {
481
+ "text": "Of the 42 definite noun phrases, 36 are used to refer (or corefer) to specific individuals or stuff or are inferrably unique given a general knowledge of individuals, stuff or situations that were being discussed. Of these 36, 24 were used to refer to particular individuals or stuff, 8 to processes, 2 to particular groups of objects and 2 to logically unique objects. Of the remaining 6 definite noun phrases, 4 were used to refer to portions (percentages) of stuff and 2 were used to refer to generic classes.",
482
+ "cite_spans": [],
483
+ "ref_spans": [],
484
+ "eq_spans": [],
485
+ "section": "Resolving Common Noun Phrases",
486
+ "sec_num": "4.3"
487
+ },
488
+ {
489
+ "text": "In regard to the resolution of definite noun phrases, then, the basic strategy is to first identify whether the expression is being used to refer to specific individuals or stuff, to portions (percentages) of stuff or, if possible, to a generic class. So, for instance, given the expression el 7, 4 por ciento in: la rentabilidad sobre las ventas aument6 its profit \"over sales increased del 6,3 al 7,4 por ciento. from 6.3 to 7.4 per cent it is sufficient to identify that the expression is being used to refer to a percentage (of sales) in order to assume the the expression is used to refer to something new. Identifying a generic reference on the basis of form is less obvious but, in any case, this will resolve 4 to 6 of the 42 cases.",
490
+ "cite_spans": [],
491
+ "ref_spans": [],
492
+ "eq_spans": [],
493
+ "section": "Resolving Common Noun Phrases",
494
+ "sec_num": "4.3"
495
+ },
496
+ {
497
+ "text": "Second, for the 36 definite noun phrases used to refer to a particular individual or stuff, the basic procedure is to match the head noun expression against each of the common noun expressions that have been used previously. If one is found, the complement expressions are then matched. If these are compatible, the NP under consideration is assumed to eorefer to the referent of the matching NP. This will successfully resolve 27 of the 36 eases (75%) but leaves 9 incorrectly resolved, 5 setting up new referents when in fact they are coreferring and 4 false positive eases ofcoreference.",
498
+ "cite_spans": [],
499
+ "ref_spans": [],
500
+ "eq_spans": [],
501
+ "section": "Resolving Common Noun Phrases",
502
+ "sec_num": "4.3"
503
+ },
504
+ {
505
+ "text": "The third step, then, is to inspect each referent for semantic compatibility. Semantic information is established on the basis of the expression's form and context. For instance, in looking for a possible referent for la rentabilidad sobre las ventas (its profit over sales ratio) above, it is first necessary to establish that the potential referent must be some measurement of financial performance which has increased during some particular period of time for some particular company.",
506
+ "cite_spans": [],
507
+ "ref_spans": [],
508
+ "eq_spans": [],
509
+ "section": "Resolving Common Noun Phrases",
510
+ "sec_num": "4.3"
511
+ },
512
+ {
513
+ "text": "At the time a reference for la rentabilidad sobre las ventas is sought, there are some 98 existing referents (53 objects, 34 events, 11 implicit objects). The more recent of these include \"641.5 million Swiss Francs\", \"Roche Group's net profit\", \"Roche Group's pharmaceutical division\", \"41% of Roche Group's total sales\", \"8.69 billion Swiss Francs\", \"Roche Group's total sales\", \"1988\" and so on. Of these, only \"Roche Group's net profit\" and \"Roche Group's total sales\" are possible measurements of a company's financial performance. However, both these measurements should be ontologically distinct from a company's profit over sales as well as from each other. Thus, they fail to satisfy the semantic requirements of a potential referent for the expression. In the end, no semantically appropriate referent will be found among the pool of existing referents and so a new referent is introduced to the pool.",
514
+ "cite_spans": [],
515
+ "ref_spans": [],
516
+ "eq_spans": [],
517
+ "section": "Resolving Common Noun Phrases",
518
+ "sec_num": "4.3"
519
+ },
520
+ {
521
+ "text": "If an existing referent meets the informational constraints on potential referent of the expression being processed, the expression is assumed to refer to that referent. If no existing referent satisfies those constraints, the expression is assumed to refer to something new and a new referent is added to the pool of referents.",
522
+ "cite_spans": [],
523
+ "ref_spans": [],
524
+ "eq_spans": [],
525
+ "section": "Resolving Common Noun Phrases",
526
+ "sec_num": "4.3"
527
+ },
528
+ {
529
+ "text": "Noun phrases, of course, are not the only type of constituent that is used to refer to things in the world. Clauses may be used to refer (or corefer) to particular events or states-of-affairs or to classes of events or states-of-affairs. These may be finite (main, relative, complement or adverbial clauses), participials (present or passive), infinitival or absolutive. Of the 45 events referred to in the text, there were 3 events that were referred to on more than one occasion. The first, the purchasing of Doctor Andreu, was coreferred to 4 times. But of these, only one was by way of a (finite) clause. The other three were all by way of NPs, 2 explicit and 1 impli~zit. The second, the announcing of the purchase, was coreferred to only once by way of a NP. The third, Roche's investing in R&D, was coreferred to once by way of an implicit pro-verb introduced for syntactic reasons in the context of a parallel, conjoined structure.",
530
+ "cite_spans": [],
531
+ "ref_spans": [],
532
+ "eq_spans": [],
533
+ "section": "Resolving Clauses",
534
+ "sec_num": "4.2.4"
535
+ },
536
+ {
537
+ "text": "The only form-based resolution procedure for resolving clausal reference would be to look for prior verbs having the same form and then inspecting the complements for contradictions. This procedure might be extended by inspecting prior verb forms that are related by, say, Spanish WordNet (Rodrfguez, 1998) or an on-line Spanish thesaurus (if any should exist). However, this extension could also open the door to many false positives.",
538
+ "cite_spans": [
539
+ {
540
+ "start": 289,
541
+ "end": 306,
542
+ "text": "(Rodrfguez, 1998)",
543
+ "ref_id": "BIBREF10"
544
+ }
545
+ ],
546
+ "ref_spans": [],
547
+ "eq_spans": [],
548
+ "section": "Resolving Clauses",
549
+ "sec_num": "4.2.4"
550
+ },
551
+ {
552
+ "text": "Such an approach might possibly resolve as many as 26 of the 27 cases of clausal refei'ences to events or states of affairs correctly. In any case, an IL-based approach has the advantage of having the events mentioned in prior text already represented formally aid in a language neutral form. Thus, the need for additional on-line resources for each language is assuaged.",
553
+ "cite_spans": [],
554
+ "ref_spans": [],
555
+ "eq_spans": [],
556
+ "section": "Resolving Clauses",
557
+ "sec_num": "4.2.4"
558
+ },
559
+ {
560
+ "text": "As mentioned, there were 45 events or statesof-affairs referred to in the sample Spanish text which introduce an additional 30 implied referents (5 times, 22 places and 3 actors). These events, and the implicit referents they introduce, need to be identified for successfully carrying out the coreference task. They may act as referents that are later referred to in the text or they may serve to assist in constraining or establishing coreference between later expressions and other existing referents. For instance, of the 22 implied locations, 6 are later referred to in the text and, of the 5 implied times, 3 are later referred to.",
561
+ "cite_spans": [],
562
+ "ref_spans": [],
563
+ "eq_spans": [],
564
+ "section": "Resolving Implicit References",
565
+ "sec_num": "4.2.5"
566
+ },
567
+ {
568
+ "text": "Clearly, there is no obvious form-based resolution procedure for such elements, since they have no explicit form. Thus, in order to resolve these implicitly introduced referents, the basic procedure is to treat them as pronominals. That is, every event or state-ofaffairs in the TMR has an implicit \"at that time\" and \"at that place\" associated with it which has to be resolved as part of the reference task. Beyond the fact that the potential referents must times and locations respectively, any further constraints will have to be derived from what is known about the event and about its relative (temporal or local) status with respect to the other .events which have previously been mentioned. A primary source of information for dealing with such issues will be scripts (Schank & Ableson, 1977) . Similarly, the identification of implied actors will be dependent on the ontological (not lexical) definitions of the class of event or state-of-affairs referred to and any additional information that may be extracted from the particular events or states-of-affairs that have been previously mentioned.",
569
+ "cite_spans": [
570
+ {
571
+ "start": 775,
572
+ "end": 799,
573
+ "text": "(Schank & Ableson, 1977)",
574
+ "ref_id": null
575
+ }
576
+ ],
577
+ "ref_spans": [],
578
+ "eq_spans": [],
579
+ "section": "Resolving Implicit References",
580
+ "sec_num": "4.2.5"
581
+ },
582
+ {
583
+ "text": "Given the informational constraints gathered, the procedure is then to inspect referents of like type (time, location, actor type) in reverse order of mention until one is found which is compatible with those additional informational constraints.",
584
+ "cite_spans": [],
585
+ "ref_spans": [],
586
+ "eq_spans": [],
587
+ "section": "Resolving Implicit References",
588
+ "sec_num": "4.2.5"
589
+ },
590
+ {
591
+ "text": "The advantages of the interlingual approach to reference resolution include the following: \u2022 only expressions related to actual referents are processed for coreference (i.e., no pleonastic pronouns, no clitic pronouns, no relative pronouns, etc.), \u2022 implicit as well as explicit referents are processes for coreference, \u2022 knowledge-based inferencing (both ontological and epistemic) is available for resolving (many) problematical cases, \u2022 ontologically connected actors, say, the different participants in a sequence of events making up a script, can be used to establish coreference, \u2022 texts in different languages can be processed in the same way, \u2022 all form-based procedures either are or can be implemented in any ease. The central disadvantages are: \u2022 some surface text level ordering information is lost in the TMR, \u2022 discourse-structural information may be lost in the TMR, . . \u2022 the need for a large and sophisticated knowledge sources, \u2022 the need for sound and appropriatelydirected inferencing.. As a result of the loss of ordering information, strict recency-based resolution procedures cannot be implemented. The referents in the domain are not ordered in terms of when they were introduced. The processing of the different arguments in f-structure does not necessarily correspond to the surface sequence of their mention. This \"defect\" could possibly be overcome by simply indexing each new the TMR object with the prior index plus one (assuming the indexes are integers). The tacit assumption is that, at the level of the clause, first the predicate is processed and then the arguments are processed in left to right order as they appear in f-structure.",
592
+ "cite_spans": [],
593
+ "ref_spans": [],
594
+ "eq_spans": [],
595
+ "section": "Conclusion",
596
+ "sec_num": null
597
+ },
598
+ {
599
+ "text": "As for the lack of information about the discourse structure, it may be the ease that this is a defect of the TMR representation system. That is to say, it is not unreasonable to assume that the larger orgafiisational aspects of a text, the topics, their order of presentation, the structure of the argumentation, etc., should in fact be captured in any adequate representation of the text. It has been, however, the goal of TMR to focus on capturing the information content exclusively and not on how the information is presented.",
600
+ "cite_spans": [],
601
+ "ref_spans": [],
602
+ "eq_spans": [],
603
+ "section": "Conclusion",
604
+ "sec_num": null
605
+ },
606
+ {
607
+ "text": "Roche Compra Docteur Andreu El grupo Roche, a trav6s de su compafila en Espafla, adquiri6 el laboratorio farmac6utico Doctor Andreu, se inform6 hoy aqut.",
608
+ "cite_spans": [],
609
+ "ref_spans": [],
610
+ "eq_spans": [],
611
+ "section": "Spanish Evaluation Text 93-1",
612
+ "sec_num": null
613
+ },
614
+ {
615
+ "text": "La comunicaci6n oficial no precis6 el monto de la operaci6n realizada entre Productos Roche SAy Uni6n Explosivos Rio Tinto SA, hasta ahora mayoritaria en el accionariado.",
616
+ "cite_spans": [],
617
+ "ref_spans": [],
618
+ "eq_spans": [],
619
+ "section": "Spanish Evaluation Text 93-1",
620
+ "sec_num": null
621
+ },
622
+ {
623
+ "text": "Fuentes financieras consultadas cifraron la 0peraci6n en unos I0.000 millones de pesetas. Seg~n el acuerdo firmado hoy en Madrid, los productos del Doctor Andreu continuarin siendo producidos y comercializados con el mismo nombre. Doctor Andreu, cuya fama la obtuvo a partir de las \"pastillitas\" para la tos, estfi bien introducido en las ireas de cardiologla, reumatologla y especialidades publicitarias.",
624
+ "cite_spans": [],
625
+ "ref_spans": [],
626
+ "eq_spans": [],
627
+ "section": "Spanish Evaluation Text 93-1",
628
+ "sec_num": null
629
+ },
630
+ {
631
+ "text": "Las actividades del grupo Roche, con sede central en Basilea (Suiza), incluyen el desarrollo, la producci6n y la comercializaci6n de medicamentos, productos para el diagn6stico, as| como de vitaminas y productos qulmicos.",
632
+ "cite_spans": [],
633
+ "ref_spans": [],
634
+ "eq_spans": [],
635
+ "section": "Spanish Evaluation Text 93-1",
636
+ "sec_num": null
637
+ },
638
+ {
639
+ "text": "A nivel mundial, cuenta con compafiias en mils de 50 pafses con casi 50.000 empleados. Doctor Andreu es una compaflia farmac6utica dedicada a la producci6n y comercializaci6n de ffirmacos y productos veterinarios. Con sede en Barcelona, cuenta con mfiSs de 400 empleados.",
640
+ "cite_spans": [],
641
+ "ref_spans": [],
642
+ "eq_spans": [],
643
+ "section": "Spanish Evaluation Text 93-1",
644
+ "sec_num": null
645
+ },
646
+ {
647
+ "text": "En el ejercicio pasado factur6 unos 3.490 millones de pesetas.",
648
+ "cite_spans": [],
649
+ "ref_spans": [],
650
+ "eq_spans": [],
651
+ "section": "Spanish Evaluation Text 93-1",
652
+ "sec_num": null
653
+ },
654
+ {
655
+ "text": "En 1988, el Grupo Roche alcanz6 unas ventas totales de 8.690 millones de francos suizos, de las que aproximadamente un 41 por ciento correspondieron a su divisi6n farmac6utica. E! beneficio nero -el mejor de su historia-se elev6 a 641,5 millones de francos suizos y la rentabilidad sobre las ventas aument6 del 6,3 al 7,4 por ciento.",
656
+ "cite_spans": [],
657
+ "ref_spans": [],
658
+ "eq_spans": [],
659
+ "section": "Spanish Evaluation Text 93-1",
660
+ "sec_num": null
661
+ },
662
+ {
663
+ "text": "El \"cash flow\" se increment6 en un 21 por ciento, alcanzando 1.179 miilones de francos o el 14 por eiento de las ventas del grupo.",
664
+ "cite_spans": [],
665
+ "ref_spans": [],
666
+ "eq_spans": [],
667
+ "section": "Spanish Evaluation Text 93-1",
668
+ "sec_num": null
669
+ },
670
+ {
671
+ "text": "Las inversiones en investigaci6n y desarrollo (I+D) fueron de 1.210 millones de francos suizos, el 14 pot ciento del total de sus ventas.",
672
+ "cite_spans": [],
673
+ "ref_spans": [],
674
+ "eq_spans": [],
675
+ "section": "Spanish Evaluation Text 93-1",
676
+ "sec_num": null
677
+ },
678
+ {
679
+ "text": "Productos Roche cuenta con una plantilla de 600 personas y alcanz6 unas ventas totales de 9.747 millones de pesetas, un 12,5 por ciento superiores al afio 1987.",
680
+ "cite_spans": [],
681
+ "ref_spans": [],
682
+ "eq_spans": [],
683
+ "section": "Spanish Evaluation Text 93-1",
684
+ "sec_num": null
685
+ },
686
+ {
687
+ "text": "Sus beneficios fueron de 218 millones y el \"cash flow\" de 356 millones. Las inversiones realizadas totalizaron 223 millones de pesetas.",
688
+ "cite_spans": [],
689
+ "ref_spans": [],
690
+ "eq_spans": [],
691
+ "section": "Spanish Evaluation Text 93-1",
692
+ "sec_num": null
693
+ },
694
+ {
695
+ "text": "The Roche Group acquired the pharmaceutical laboratory Doctor Andreu through its company in Spain, it was announced here today.",
696
+ "cite_spans": [],
697
+ "ref_spans": [],
698
+ "eq_spans": [],
699
+ "section": "Roche Buys Docteur Andreu",
700
+ "sec_num": null
701
+ },
702
+ {
703
+ "text": "The official announcement did not specify the exact amount of the transaction which took place between Productos Roche SA and Uni6n Explosivos Rio Tinto SA, which until now had held the mayority of the stock.",
704
+ "cite_spans": [],
705
+ "ref_spans": [],
706
+ "eq_spans": [],
707
+ "section": "Roche Buys Docteur Andreu",
708
+ "sec_num": null
709
+ },
710
+ {
711
+ "text": "Financial sources consulted estimate the transaction value at around 10 million pesetas. According to the agreement signed today in Madrid, Doctor Andreu's products will continue to be produced and marketed under the same name. Doctor Andreu, which became well known for its cough drops, is well established in the areas of cardiology, rheumatology, and advertising specialties.",
712
+ "cite_spans": [],
713
+ "ref_spans": [],
714
+ "eq_spans": [],
715
+ "section": "Roche Buys Docteur Andreu",
716
+ "sec_num": null
717
+ },
718
+ {
719
+ "text": "Activities of the Roche Group, headquartered in Basel (Switzerland), include the development, production, and marketing of drugs, diagnostic products, as well as vitamins and chemical products.",
720
+ "cite_spans": [],
721
+ "ref_spans": [],
722
+ "eq_spans": [],
723
+ "section": "Roche Buys Docteur Andreu",
724
+ "sec_num": null
725
+ },
726
+ {
727
+ "text": "The Roche Group has subsidiaries in more than 50 countries and almost 50,000 employees worldwide. Doctor Andreu is a pharmaceutical company which produces and markets medicines and veterinary products. Headquartered in Barcelona, it has more than 400 employees.",
728
+ "cite_spans": [],
729
+ "ref_spans": [],
730
+ "eq_spans": [],
731
+ "section": "Roche Buys Docteur Andreu",
732
+ "sec_num": null
733
+ },
734
+ {
735
+ "text": "In the last fiscal year its sales totaled some 3.49 billion pesetas.",
736
+ "cite_spans": [],
737
+ "ref_spans": [],
738
+ "eq_spans": [],
739
+ "section": "Roche Buys Docteur Andreu",
740
+ "sec_num": null
741
+ },
742
+ {
743
+ "text": "In 1988, the Roche Group's total sales reached 8.69 billion Swiss francs, of which approximately 41 per cent corresponded to its pharmaceutical division. Its net profits--the best in its history-went up to 641.5 million Swiss francs and the profitability over sales increased from 6.3 to 7.4 per cent.",
744
+ "cite_spans": [],
745
+ "ref_spans": [],
746
+ "eq_spans": [],
747
+ "section": "Roche Buys Docteur Andreu",
748
+ "sec_num": null
749
+ },
750
+ {
751
+ "text": "Its cash flow increased by 21 per cent, to 1.179 billion francs or 14 per cent oft.he group's sales.",
752
+ "cite_spans": [],
753
+ "ref_spans": [],
754
+ "eq_spans": [],
755
+ "section": "Roche Buys Docteur Andreu",
756
+ "sec_num": null
757
+ },
758
+ {
759
+ "text": "Investments in research and development (R&D) totaled 1.210 billion Swiss ffi'ancs or 14 per cent of total sales.",
760
+ "cite_spans": [],
761
+ "ref_spans": [],
762
+ "eq_spans": [],
763
+ "section": "Roche Buys Docteur Andreu",
764
+ "sec_num": null
765
+ },
766
+ {
767
+ "text": "Productos Roche employs 600 people and its total sales reached 9.747 billion pesetas, 12.5 per cent more than in 1987.",
768
+ "cite_spans": [],
769
+ "ref_spans": [],
770
+ "eq_spans": [],
771
+ "section": "Roche Buys Docteur Andreu",
772
+ "sec_num": null
773
+ },
774
+ {
775
+ "text": "Its profits totaled 218 million and its cash flow 356 million.",
776
+ "cite_spans": [],
777
+ "ref_spans": [],
778
+ "eq_spans": [],
779
+ "section": "Roche Buys Docteur Andreu",
780
+ "sec_num": null
781
+ },
782
+ {
783
+ "text": "Actual investments totaled 223 million pesetas.",
784
+ "cite_spans": [],
785
+ "ref_spans": [],
786
+ "eq_spans": [],
787
+ "section": "Roche Buys Docteur Andreu",
788
+ "sec_num": null
789
+ }
790
+ ],
791
+ "back_matter": [],
792
+ "bib_entries": {
793
+ "BIBREF0": {
794
+ "ref_id": "b0",
795
+ "title": "Entity-Based Cross-Document Coreferencing Using the Vector-Space Model",
796
+ "authors": [
797
+ {
798
+ "first": "A",
799
+ "middle": [],
800
+ "last": "Bagga",
801
+ "suffix": ""
802
+ },
803
+ {
804
+ "first": "B",
805
+ "middle": [],
806
+ "last": "Baldwin",
807
+ "suffix": ""
808
+ }
809
+ ],
810
+ "year": 1998,
811
+ "venue": "Proceedings of the 36 ~h Annual Meeting of the Association of Computational Linguistics and the 1 7,~ Conference on Computational Linguistics",
812
+ "volume": "",
813
+ "issue": "",
814
+ "pages": "",
815
+ "other_ids": {},
816
+ "num": null,
817
+ "urls": [],
818
+ "raw_text": "Bagga, A., and B. Baldwin. 1998. Entity-Based Cross-Document Coreferencing Using the Vector-Space Model. Proceedings of the 36 ~h Annual Meeting of the Association of Computational Linguistics and the 1 7,~ Conference on Computational Linguistics, Montreal, Canada.",
819
+ "links": null
820
+ },
821
+ "BIBREF1": {
822
+ "ref_id": "b1",
823
+ "title": "World Modeling for NLP",
824
+ "authors": [
825
+ {
826
+ "first": "L",
827
+ "middle": [],
828
+ "last": "Carlson",
829
+ "suffix": ""
830
+ },
831
+ {
832
+ "first": "S",
833
+ "middle": [],
834
+ "last": "Nirenburg",
835
+ "suffix": ""
836
+ }
837
+ ],
838
+ "year": 1990,
839
+ "venue": "",
840
+ "volume": "",
841
+ "issue": "",
842
+ "pages": "",
843
+ "other_ids": {},
844
+ "num": null,
845
+ "urls": [],
846
+ "raw_text": "Carlson, L., and S. Nirenburg. 1990. World Modeling for NLP. Technical Report 90-121. Center for Machine Translation, Carnegie Mellon University, Pittsburgh, PA.",
847
+ "links": null
848
+ },
849
+ "BIBREF2": {
850
+ "ref_id": "b2",
851
+ "title": "Skimming stories in real time: an experiment in integrated understanding",
852
+ "authors": [
853
+ {
854
+ "first": "A",
855
+ "middle": [],
856
+ "last": "Ferrfindez",
857
+ "suffix": ""
858
+ },
859
+ {
860
+ "first": "M",
861
+ "middle": [],
862
+ "last": "Palomar",
863
+ "suffix": ""
864
+ },
865
+ {
866
+ "first": "L",
867
+ "middle": [],
868
+ "last": "Moreno",
869
+ "suffix": ""
870
+ }
871
+ ],
872
+ "year": 1979,
873
+ "venue": "Proceedings of the 36 th Annual Meeting of the Association of Computational Linguistics and the I 7 ~h Conference on Computational Linguistics",
874
+ "volume": "158",
875
+ "issue": "",
876
+ "pages": "",
877
+ "other_ids": {},
878
+ "num": null,
879
+ "urls": [],
880
+ "raw_text": "Ferrfindez, A., M. Palomar, and L. Moreno. 1998. Anaphora resolution in unrestricted texts with partial parsing. Proceedings of the 36 th Annual Meeting of the Association of Computational Linguistics and the I 7 ~h Conference on Computational Linguistics, Montreal, Canada. DeJong, G. 1979. Skimming stories in real time: an experiment in integrated understanding. Ph.D. dissertation. Research report #158, Dept. of Computer Science, Yale University, New Haven, CT.",
881
+ "links": null
882
+ },
883
+ "BIBREF3": {
884
+ "ref_id": "b3",
885
+ "title": "Centering: A Framework for Modeling the Local Coherence of Discourse",
886
+ "authors": [
887
+ {
888
+ "first": "B",
889
+ "middle": [],
890
+ "last": "Grosz",
891
+ "suffix": ""
892
+ },
893
+ {
894
+ "first": "A",
895
+ "middle": [],
896
+ "last": "Joshi",
897
+ "suffix": ""
898
+ },
899
+ {
900
+ "first": "S",
901
+ "middle": [],
902
+ "last": "Weinstein",
903
+ "suffix": ""
904
+ }
905
+ ],
906
+ "year": 1995,
907
+ "venue": "Computational Linguistics",
908
+ "volume": "21",
909
+ "issue": "2",
910
+ "pages": "203--225",
911
+ "other_ids": {},
912
+ "num": null,
913
+ "urls": [],
914
+ "raw_text": "Grosz, B., A. Joshi, and S. Weinstein. 1995. Centering: A Framework for Modeling the Local Coherence of Discourse. Computational Linguistics, 21(2): 203-225.",
915
+ "links": null
916
+ },
917
+ "BIBREF4": {
918
+ "ref_id": "b4",
919
+ "title": "Automating Coreference: The Role of Annotated Training Data",
920
+ "authors": [
921
+ {
922
+ "first": "L",
923
+ "middle": [],
924
+ "last": "Hirschman",
925
+ "suffix": ""
926
+ },
927
+ {
928
+ "first": "P",
929
+ "middle": [],
930
+ "last": "Robinson",
931
+ "suffix": ""
932
+ },
933
+ {
934
+ "first": "J",
935
+ "middle": [],
936
+ "last": "Burger",
937
+ "suffix": ""
938
+ },
939
+ {
940
+ "first": "M",
941
+ "middle": [],
942
+ "last": "Vilain",
943
+ "suffix": ""
944
+ }
945
+ ],
946
+ "year": 1998,
947
+ "venue": "Proceedings of AAAI 98 Spring Symposium on Applying Machine Learning to Discourse Processing",
948
+ "volume": "",
949
+ "issue": "",
950
+ "pages": "",
951
+ "other_ids": {},
952
+ "num": null,
953
+ "urls": [],
954
+ "raw_text": "Hirschman, L., P. Robinson, J. Burger, and M. Vilain. 1998. Automating Coreference: The Role of Annotated Training Data. Proceedings of AAAI 98 Spring Symposium on Applying Machine Learning to Discourse Processing.",
955
+ "links": null
956
+ },
957
+ "BIBREF5": {
958
+ "ref_id": "b5",
959
+ "title": "Coherence and Coreference",
960
+ "authors": [
961
+ {
962
+ "first": "J",
963
+ "middle": [],
964
+ "last": "Hobbs",
965
+ "suffix": ""
966
+ }
967
+ ],
968
+ "year": 1979,
969
+ "venue": "Cognitive Science",
970
+ "volume": "3",
971
+ "issue": "",
972
+ "pages": "67--90",
973
+ "other_ids": {},
974
+ "num": null,
975
+ "urls": [],
976
+ "raw_text": "Hobbs, J. 1979. Coherence and Coreference. Cognitive Science 3: 67-90.",
977
+ "links": null
978
+ },
979
+ "BIBREF6": {
980
+ "ref_id": "b6",
981
+ "title": "An Algorithm for Pronominal Anaphora Resolution",
982
+ "authors": [
983
+ {
984
+ "first": "S",
985
+ "middle": [],
986
+ "last": "Lappin",
987
+ "suffix": ""
988
+ },
989
+ {
990
+ "first": "H",
991
+ "middle": [],
992
+ "last": "Leass",
993
+ "suffix": ""
994
+ }
995
+ ],
996
+ "year": 1994,
997
+ "venue": "Computational Linguistics",
998
+ "volume": "20",
999
+ "issue": "4",
1000
+ "pages": "535--56",
1001
+ "other_ids": {},
1002
+ "num": null,
1003
+ "urls": [],
1004
+ "raw_text": "Lappin, S., and H. Leass. 1994. An Algorithm for Pronominal Anaphora Resolution. Computational Linguistics, 20(4), 535-56 I.",
1005
+ "links": null
1006
+ },
1007
+ "BIBREF7": {
1008
+ "ref_id": "b7",
1009
+ "title": "A Situated Ontology for Practical NLP",
1010
+ "authors": [
1011
+ {
1012
+ "first": "K",
1013
+ "middle": [],
1014
+ "last": "Mahesh",
1015
+ "suffix": ""
1016
+ },
1017
+ {
1018
+ "first": "S",
1019
+ "middle": [],
1020
+ "last": "Nirenburg",
1021
+ "suffix": ""
1022
+ }
1023
+ ],
1024
+ "year": 1995,
1025
+ "venue": "Proceedings of the Workshop on Basic Ontological lssues in Knowledge Sharing, International Joint Conference on Artificial Intelligence. Montr6al, Canada",
1026
+ "volume": "",
1027
+ "issue": "",
1028
+ "pages": "",
1029
+ "other_ids": {},
1030
+ "num": null,
1031
+ "urls": [],
1032
+ "raw_text": "Mahesh, K., and S. Nirenburg. 1995. A Situated Ontology for Practical NLP. Proceedings of the Workshop on Basic Ontological lssues in Knowledge Sharing, International Joint Conference on Artificial Intelligence. Montr6al, Canada.",
1033
+ "links": null
1034
+ },
1035
+ "BIBREF8": {
1036
+ "ref_id": "b8",
1037
+ "title": "Proceedings of the Seventh Message Understanding Conference",
1038
+ "authors": [],
1039
+ "year": 1998,
1040
+ "venue": "",
1041
+ "volume": "",
1042
+ "issue": "",
1043
+ "pages": "",
1044
+ "other_ids": {},
1045
+ "num": null,
1046
+ "urls": [],
1047
+ "raw_text": "MUC-7. 1998. Proceedings of the Seventh Message Understanding Conference, April 1998.",
1048
+ "links": null
1049
+ },
1050
+ "BIBREF9": {
1051
+ "ref_id": "b9",
1052
+ "title": "The Lexicon in the Scheme of KBMT Things. Memoranda in Cognitive and Computer Science: MCCS 94-277",
1053
+ "authors": [
1054
+ {
1055
+ "first": "B",
1056
+ "middle": [],
1057
+ "last": "Onyshkevych",
1058
+ "suffix": ""
1059
+ },
1060
+ {
1061
+ "first": "S",
1062
+ "middle": [],
1063
+ "last": "Nirenburg",
1064
+ "suffix": ""
1065
+ },
1066
+ {
1067
+ "first": "A",
1068
+ "middle": [],
1069
+ "last": "Popescu-Belis",
1070
+ "suffix": ""
1071
+ }
1072
+ ],
1073
+ "year": 1994,
1074
+ "venue": "Proceedings of the first International Conference on Language Resource and Evaluation",
1075
+ "volume": "",
1076
+ "issue": "",
1077
+ "pages": "567--571",
1078
+ "other_ids": {},
1079
+ "num": null,
1080
+ "urls": [],
1081
+ "raw_text": "Onyshkevych, B., and S. Nirenburg. (1994). The Lexicon in the Scheme of KBMT Things. Memoranda in Cognitive and Computer Science: MCCS 94-277, Computing Research Laboratory, New Mexico State University, Las Cruces, NM. Popescu-Belis, A. 1998. How Corpora with Anotated Coreferenee Links Improve Reference Resolution. In Rubio, A., N. Gallardo, R. Castro & A. Tejada (eds.) Proceedings of the first International Conference on Language Resource and Evaluation, pp. 567-571. Granada, Spain, May 1998.",
1082
+ "links": null
1083
+ },
1084
+ "BIBREF10": {
1085
+ "ref_id": "b10",
1086
+ "title": "The Top-Down Strategy for Building EuroWordNet: Vocabulary Coverage, Base Concepts and Top Ontology",
1087
+ "authors": [
1088
+ {
1089
+ "first": "H",
1090
+ "middle": [],
1091
+ "last": "Rodrfguez",
1092
+ "suffix": ""
1093
+ },
1094
+ {
1095
+ "first": "S",
1096
+ "middle": [],
1097
+ "last": "Climent",
1098
+ "suffix": ""
1099
+ },
1100
+ {
1101
+ "first": "P",
1102
+ "middle": [],
1103
+ "last": "Vossen",
1104
+ "suffix": ""
1105
+ },
1106
+ {
1107
+ "first": "L",
1108
+ "middle": [],
1109
+ "last": "Bloksma",
1110
+ "suffix": ""
1111
+ },
1112
+ {
1113
+ "first": "W",
1114
+ "middle": [],
1115
+ "last": "Peters",
1116
+ "suffix": ""
1117
+ },
1118
+ {
1119
+ "first": "A",
1120
+ "middle": [],
1121
+ "last": "Alonge",
1122
+ "suffix": ""
1123
+ },
1124
+ {
1125
+ "first": "F",
1126
+ "middle": [],
1127
+ "last": "Bertagna",
1128
+ "suffix": ""
1129
+ },
1130
+ {
1131
+ "first": "A",
1132
+ "middle": [],
1133
+ "last": "Roventini",
1134
+ "suffix": ""
1135
+ }
1136
+ ],
1137
+ "year": 1998,
1138
+ "venue": "Computers and the Humanities",
1139
+ "volume": "32",
1140
+ "issue": "2/3",
1141
+ "pages": "117--152",
1142
+ "other_ids": {},
1143
+ "num": null,
1144
+ "urls": [],
1145
+ "raw_text": "Rodrfguez, H., S. Climent, P. Vossen, L. Bloksma, W.Peters, A. Alonge, F. Bertagna, and A. Roventini. 1998. The Top-Down Strategy for Building EuroWordNet: Vocabulary Coverage, Base Concepts and Top Ontology. Computers and the Humanities, 32(2/3): 117-152",
1146
+ "links": null
1147
+ },
1148
+ "BIBREF11": {
1149
+ "ref_id": "b11",
1150
+ "title": "Scripts, plans, goals, and understanding",
1151
+ "authors": [
1152
+ {
1153
+ "first": "R",
1154
+ "middle": [],
1155
+ "last": "Sehank",
1156
+ "suffix": ""
1157
+ },
1158
+ {
1159
+ "first": "R",
1160
+ "middle": [],
1161
+ "last": "Abelson",
1162
+ "suffix": ""
1163
+ }
1164
+ ],
1165
+ "year": 1977,
1166
+ "venue": "",
1167
+ "volume": "",
1168
+ "issue": "",
1169
+ "pages": "",
1170
+ "other_ids": {},
1171
+ "num": null,
1172
+ "urls": [],
1173
+ "raw_text": "Sehank, R., and R. Abelson. 1977. Scripts, plans, goals, and understanding. Lawrence Erlbaum Associates: Hillsdale, NJ.",
1174
+ "links": null
1175
+ },
1176
+ "BIBREF12": {
1177
+ "ref_id": "b12",
1178
+ "title": "Sixth Message Understanding Conference (MUC-6)",
1179
+ "authors": [],
1180
+ "year": 1995,
1181
+ "venue": "",
1182
+ "volume": "",
1183
+ "issue": "",
1184
+ "pages": "",
1185
+ "other_ids": {},
1186
+ "num": null,
1187
+ "urls": [],
1188
+ "raw_text": "Sundheim, B., and R. Grishman (eds.). 1995. Sixth Message Understanding Conference (MUC-6).",
1189
+ "links": null
1190
+ },
1191
+ "BIBREF14": {
1192
+ "ref_id": "b14",
1193
+ "title": "The ARPA MT Evaluation Methodologies: Evolution, Lessons, and Future Approaches",
1194
+ "authors": [
1195
+ {
1196
+ "first": "J",
1197
+ "middle": [],
1198
+ "last": "White",
1199
+ "suffix": ""
1200
+ },
1201
+ {
1202
+ "first": "T",
1203
+ "middle": [],
1204
+ "last": "O'connell",
1205
+ "suffix": ""
1206
+ },
1207
+ {
1208
+ "first": "F",
1209
+ "middle": [],
1210
+ "last": "O'mara",
1211
+ "suffix": ""
1212
+ }
1213
+ ],
1214
+ "year": 1994,
1215
+ "venue": "Technology Partnerships for Crossing the Language Barrier: Proceedings of the First Conference of the Association for Machine Translation in the Americas",
1216
+ "volume": "",
1217
+ "issue": "",
1218
+ "pages": "193--205",
1219
+ "other_ids": {},
1220
+ "num": null,
1221
+ "urls": [],
1222
+ "raw_text": "White, J., T. O'Connell and F. O'Mara. 1994. The ARPA MT Evaluation Methodologies: Evolution, Lessons, and Future Approaches, in Technology Partnerships for Crossing the Language Barrier: Proceedings of the First Conference of the Association for Machine Translation in the Americas, Columbia, MD, pp. 193-205.",
1223
+ "links": null
1224
+ }
1225
+ },
1226
+ "ref_entries": {
1227
+ "TABREF3": {
1228
+ "content": "<table/>",
1229
+ "html": null,
1230
+ "text": "Thus, El grupo Roche matches Roche and Productos Roche SA also matches Roche because there is no substring of the phrase Roche that does not match with El \u2022 grupo or Productos. On the other hand, Productos Roche SA does NOT match El grupo Roche because Productos does not match El grupo. This handles 3 further cases of the 18 (assuming that having matched El grupo Roche with Roche, Productos Roche SA will not match because it does not match with El grupo Roche).",
1231
+ "num": null,
1232
+ "type_str": "table"
1233
+ }
1234
+ }
1235
+ }
1236
+ }
Full_text_JSON/prefixW/json/W00/W00-0202.json ADDED
@@ -0,0 +1,1092 @@
1
+ {
2
+ "paper_id": "W00-0202",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:59.179855Z"
6
+ },
7
+ "title": "Representation of Actions as an Interlingua",
8
+ "authors": [
9
+ {
10
+ "first": "Karin",
11
+ "middle": [],
12
+ "last": "Kipper",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "University of Pennsylvania",
17
+ "location": {
18
+ "addrLine": "200 South 33rd Street Philadelphia",
19
+ "postCode": "19104",
20
+ "region": "PA",
21
+ "country": "USA"
22
+ }
23
+ },
24
+ "email": "fkipper@linc.cis.upenn.edu"
25
+ },
26
+ {
27
+ "first": "Martha",
28
+ "middle": [],
29
+ "last": "Palmer",
30
+ "suffix": "",
31
+ "affiliation": {
32
+ "laboratory": "",
33
+ "institution": "University of Pennsylvania",
34
+ "location": {
35
+ "addrLine": "200 South 33rd Street Philadelphia",
36
+ "postCode": "19104",
37
+ "region": "PA",
38
+ "country": "USA"
39
+ }
40
+ },
41
+ "email": "mpalmerg@linc.cis.upenn.edu"
42
+ }
43
+ ],
44
+ "year": "",
45
+ "venue": null,
46
+ "identifiers": {},
47
+ "abstract": "We present a Parameterized Action Representation (PAR) that provides a conceptual representation of di erent t ypes of actions used to animate virtual human agents in a simulated 3D environment. These actions involve c hanges of state, changes of location (kinematic) and exertion of force (dynamic). PARs are hierarchical, parameterized structures that facilitate both visual and verbal expressions. In order to support the animation of the actions, PARs have t o m a k e explicit many details that are often underspeci ed in the language. This detailed level of representation also provides a suitable pivot representation for generation in other natural languages, i.e., a form of interlingua. We show examples of how certain divergences in machine translation can be solved by our approach focusing speci cally on how v erb-framed and satellite-framed languages can use our representation.",
48
+ "pdf_parse": {
49
+ "paper_id": "W00-0202",
50
+ "_pdf_hash": "",
51
+ "abstract": [
52
+ {
53
+ "text": "We present a Parameterized Action Representation (PAR) that provides a conceptual representation of di erent t ypes of actions used to animate virtual human agents in a simulated 3D environment. These actions involve c hanges of state, changes of location (kinematic) and exertion of force (dynamic). PARs are hierarchical, parameterized structures that facilitate both visual and verbal expressions. In order to support the animation of the actions, PARs have t o m a k e explicit many details that are often underspeci ed in the language. This detailed level of representation also provides a suitable pivot representation for generation in other natural languages, i.e., a form of interlingua. We show examples of how certain divergences in machine translation can be solved by our approach focusing speci cally on how v erb-framed and satellite-framed languages can use our representation.",
54
+ "cite_spans": [],
55
+ "ref_spans": [],
56
+ "eq_spans": [],
57
+ "section": "Abstract",
58
+ "sec_num": null
59
+ }
60
+ ],
61
+ "body_text": [
62
+ {
63
+ "text": "In this paper, we describe a Parameterized Action Representation (PAR) (Badler et al., 1999) that provides a conceptual representation of di erent t ypes of actions used to animate virtual human agents in a simulated 3D environment. These actions involve changes of state, changes of location (kinematic) and exertion of force (dynamic). PARs are hierarchical, parameterized structures that facilitate both visual and verbal expressions (Badler et al., 2000) . In order to support the animation of the actions, PARs have to make explicit many d etails that are often underspeci ed in the language. This detailed level of representation is well suited for an interlingua for machine translation applications, since the animations of actions { and therefore the PARs that control them { will be equivalent for the same actions described in di erent l a nguages. These representations can be incorporated into a system which u s e s P AR-based animations as a workbench for creating accurate conceptual representations, which can map to seeral di erent l a nguages as well as produce faithful animations.",
64
+ "cite_spans": [
65
+ {
66
+ "start": 71,
67
+ "end": 92,
68
+ "text": "(Badler et al., 1999)",
69
+ "ref_id": "BIBREF0"
70
+ },
71
+ {
72
+ "start": 437,
73
+ "end": 458,
74
+ "text": "(Badler et al., 2000)",
75
+ "ref_id": "BIBREF1"
76
+ }
77
+ ],
78
+ "ref_spans": [],
79
+ "eq_spans": [],
80
+ "section": "Introduction",
81
+ "sec_num": "1"
82
+ },
83
+ {
84
+ "text": "The verb classes we are currently considering in this light involve explicit physical actions such as those expressed in the motion verb class and contact verb class (Levin, 1993 ). Since we are employing PAR as an interlingual representation, we will show examples of how it can handle certain divergences in machine translation, focusing speci cally on how verb-framed and satellite-framed languages (Talmy, 1991) can yield equivalent actions in this representation.",
85
+ "cite_spans": [
86
+ {
87
+ "start": 166,
88
+ "end": 178,
89
+ "text": "(Levin, 1993",
90
+ "ref_id": "BIBREF11"
91
+ },
92
+ {
93
+ "start": 402,
94
+ "end": 415,
95
+ "text": "(Talmy, 1991)",
96
+ "ref_id": "BIBREF17"
97
+ }
98
+ ],
99
+ "ref_spans": [],
100
+ "eq_spans": [],
101
+ "section": "Introduction",
102
+ "sec_num": "1"
103
+ },
104
+ {
105
+ "text": "We u s e parameterized a c t i o n r epresentations to animate the actions of virtual human agents. The PAR for an action includes the action's participants (its agent and objects), 1 as well as kinematic properties such a s i t s path, manner and duration, and dynamic properties, such as its speed and force (see Fig. 1 ). The representation also allows for traditional statespace properties of actions, such a s applicability conditions and preparatory actions that have to be satised before the action can be executed, and termination conditions and post assertions which determine when an action is concluded and what changes it makes to the environment state.",
106
+ "cite_spans": [],
107
+ "ref_spans": [
108
+ {
109
+ "start": 315,
110
+ "end": 321,
111
+ "text": "Fig. 1",
112
+ "ref_id": null
113
+ }
114
+ ],
115
+ "eq_spans": [],
116
+ "section": "PAR representation",
117
+ "sec_num": "2"
118
+ },
119
+ {
120
+ "text": "We created a hierarchy of actions, exploiting the idea that verbs can be represented in a lattice that allows semantically similar verbs, such as motion verbs or verbs of contact, to be closely associated with each other under a common parent that captures the properties these verbs all share (Dang et al., 1998) . The highest nodes in the hierarchy are occupied by generalized PAR schemas which represent the basic predicate-argument structures for entire groups of subordinate actions. The lower nodes are occupied by progressively more speci c schemas that inherit information from the generalized PARs, and can be instantiated with arguments from natural language to represent a speci c action such as John hit the ball with his bat. The example in Figure 1 is a generalized PAR schema for contact ac-1 Objects and agents are stored in a hierarchy a n d have a n umber of properties associated with them. Properties of the objects may include their location and status. Agents have capabilities, such as the ability t o w alk or swim, and properties such as their strength and height. path duration motion force manner : MANNER 3 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 5",
121
+ "cite_spans": [
122
+ {
123
+ "start": 294,
124
+ "end": 313,
125
+ "text": "(Dang et al., 1998)",
126
+ "ref_id": "BIBREF4"
127
+ }
128
+ ],
129
+ "ref_spans": [
130
+ {
131
+ "start": 754,
132
+ "end": 760,
133
+ "text": "Figure",
134
+ "ref_id": null
135
+ }
136
+ ],
137
+ "eq_spans": [],
138
+ "section": "PAR representation",
139
+ "sec_num": "2"
140
+ },
141
+ {
142
+ "text": "Figure 1: A PAR schema for actions of contact tions between two objects. This schema speci es that the`contact' action h a s a n a g e n t a n d two o bjects, and that the action is concluded when the two objects come together. 2 The preparatory speci cation of getting the second object is tested and carried out if the object is not possessed. In order to describe a speci c action, say hammer, we would combine all of its ancestor representations in the action hierarchy, as shown in Figure 2 , and add the information speci c to that action. Since hammer inherits from the PAR for hit, and ultimately from the PAR for contact, its representation would use the generalized`contact' PAR, with a forceful manner, and a hammer as the instrument. The action hit does not specify any instrument, but inherits the forceful manner and generalized contact PAR from its ancestors, and the action contact leaves both the instrument and the manner unspeci ed, and is associated only with the generalized contact PAR.",
143
+ "cite_spans": [
144
+ {
145
+ "start": 228,
146
+ "end": 229,
147
+ "text": "2",
148
+ "ref_id": null
149
+ }
150
+ ],
151
+ "ref_spans": [
152
+ {
153
+ "start": 487,
154
+ "end": 495,
155
+ "text": "Figure 2",
156
+ "ref_id": null
157
+ }
158
+ ],
159
+ "eq_spans": [],
160
+ "section": "PAR representation",
161
+ "sec_num": "2"
162
+ },
163
+ {
164
+ "text": "The PAR is intended to provide slots for information that is typically conveyed in modi ers or adjuncts in addition to internal verb arguments. As such, it is often the case that several di erent s y ntactic realizations can all map to the same PAR schema. For example, John hit the ball, John hit the ball with a bat and John swung mightily and his bat hit the ball with a resounding crack would all map to the same schema. 3",
165
+ "cite_spans": [],
166
+ "ref_spans": [],
167
+ "eq_spans": [],
168
+ "section": "PAR representation",
169
+ "sec_num": "2"
170
+ },
171
+ {
172
+ "text": "The main components of our animation system are: a natural language interface, a planner and a graphical animation (see Figure 3 ). The PARs are used as intermediate representations of the actions between components. An instruction in natural language starts the process. We use a Synchronous Tree Adjoining Grammar Schabes, 1990 Shieber, 1994) for parsing natural language instructions into derivations containing predicate-argument dependencies (Schuler, 1999) . The synchronous parser extracts these predicate-argument structures by rst associating each word in an input sentence with one or more elementary trees, which are combined into a single derivation tree for the entire input sentence using the constrained operations of substitution and adjunction in the Tree Adjoining Grammar formalism (Joshi, 1985 Joshi, 1987 . As the parser assembles these elementary tree predicates into a predicate-argument structure, it simultaneously selects and assembles the corresponding schemas. It lls in the participants and modi ers, and outputs the PAR schema for the instruction. These schemas may be underspeci ed for actions such a s e n ter' or put' and thus not provide enough information for the animation to be produced directly.",
173
+ "cite_spans": [
174
+ {
175
+ "start": 316,
176
+ "end": 344,
177
+ "text": "Schabes, 1990 Shieber, 1994)",
178
+ "ref_id": null
179
+ },
180
+ {
181
+ "start": 447,
182
+ "end": 462,
183
+ "text": "(Schuler, 1999)",
184
+ "ref_id": "BIBREF14"
185
+ },
186
+ {
187
+ "start": 801,
188
+ "end": 813,
189
+ "text": "(Joshi, 1985",
190
+ "ref_id": "BIBREF8"
191
+ },
192
+ {
193
+ "start": 814,
194
+ "end": 825,
195
+ "text": "Joshi, 1987",
196
+ "ref_id": "BIBREF9"
197
+ }
198
+ ],
199
+ "ref_spans": [
200
+ {
201
+ "start": 120,
202
+ "end": 128,
203
+ "text": "Figure 3",
204
+ "ref_id": null
205
+ }
206
+ ],
207
+ "eq_spans": [],
208
+ "section": "Generating Animations",
209
+ "sec_num": "3"
210
+ },
211
+ {
212
+ "text": "Natural Language ~-~I Planner PAR graphics Figure 3 : General architecture of the animation system",
213
+ "cite_spans": [],
214
+ "ref_spans": [
215
+ {
216
+ "start": 43,
217
+ "end": 51,
218
+ "text": "Figure 3",
219
+ "ref_id": null
220
+ }
221
+ ],
222
+ "eq_spans": [],
223
+ "section": "Generating Animations",
224
+ "sec_num": "3"
225
+ },
226
+ {
227
+ "text": "The planner uses information from the general schema, such as pre-conditions and post-assertions, as well as information derived from the agents' capabilities and the objects properties to fill in these gaps in several ways:",
228
+ "cite_spans": [],
229
+ "ref_spans": [],
230
+ "eq_spans": [],
231
+ "section": "Generating Animations",
232
+ "sec_num": "3"
233
+ },
234
+ {
235
+ "text": "\u2022 to select the way (activity) in which the instruction is performed (enter by walking, by swimming, etc.);",
236
+ "cite_spans": [],
237
+ "ref_spans": [],
238
+ "eq_spans": [],
239
+ "section": "Generating Animations",
240
+ "sec_num": "3"
241
+ },
242
+ {
243
+ "text": "\u2022 to determine the prepartory actions that must be completed before the instruction is carried out, (for example, in order for an agent to open the door, the door has to be reachable and that may involve a locomotion process);",
244
+ "cite_spans": [],
245
+ "ref_spans": [],
246
+ "eq_spans": [],
247
+ "section": "Generating Animations",
248
+ "sec_num": "3"
249
+ },
250
+ {
251
+ "text": "\u2022 to decompose the action into smaller units (put the glass on the table, involves getting the glass, planning a route to the table, etc.)",
252
+ "cite_spans": [],
253
+ "ref_spans": [],
254
+ "eq_spans": [],
255
+ "section": "Generating Animations",
256
+ "sec_num": "3"
257
+ },
258
+ {
259
+ "text": "The output of the planner for the input instruction is a complete description of the actions involved, including participants, preparatory specifications, termination conditions, manner, duration, etc. Participants bring with them a list of inherent properties of the agent (e.g. agent capabilities) or physical objects (e.g., object configurations) and other characteristics, such as 'how to open' for an object such as a door. This complete description refers to a set of animation PARS which can be immediately animated.",
260
+ "cite_spans": [],
261
+ "ref_spans": [],
262
+ "eq_spans": [],
263
+ "section": "Generating Animations",
264
+ "sec_num": "3"
265
+ },
266
+ {
267
+ "text": "In this way, a PAR schema for the action enter may actually translate into an animation PAR for walking into a certain area. One way to differentiate between action PAR schemas and instantiated animation PARs is to consider what it is possible to motion capture 4 (by attaching sensors to a moving human figure). For example, the enter action and the put action are quite general and underspecified and could not be motion captured. However, characteristic activities such as walking and swimming could be. For further details about the animation PARs and the animation system see (Badler et al., 1999) and (Bindiganavaie et al., 2000) .",
268
+ "cite_spans": [
269
+ {
270
+ "start": 581,
271
+ "end": 602,
272
+ "text": "(Badler et al., 1999)",
273
+ "ref_id": "BIBREF0"
274
+ },
275
+ {
276
+ "start": 607,
277
+ "end": 635,
278
+ "text": "(Bindiganavaie et al., 2000)",
279
+ "ref_id": null
280
+ }
281
+ ],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "Generating Animations",
285
+ "sec_num": "3"
286
+ },
287
+ {
288
+ "text": "The PAR representation for an action can be seen as a general template. PAR schemas include, as part of the basic sub-categorization frame, properties of 4There are several other ways to generate motions, for example, through inverse kinematics, dynamics and keyframing.",
289
+ "cite_spans": [],
290
+ "ref_spans": [],
291
+ "eq_spans": [],
292
+ "section": "PAR as anIL",
293
+ "sec_num": "4"
294
+ },
295
+ {
296
+ "text": "the action that can occur linguistically either as the main verb or as adjuncts to the main verb phrase. This captures problems of divergences, such as the ones described by Talmy (Talmy, 1991) , for verbframed versus satellite-framed languages.",
297
+ "cite_spans": [
298
+ {
299
+ "start": 180,
300
+ "end": 193,
301
+ "text": "(Talmy, 1991)",
302
+ "ref_id": "BIBREF17"
303
+ }
304
+ ],
305
+ "ref_spans": [],
306
+ "eq_spans": [],
307
+ "section": "PAR as anIL",
308
+ "sec_num": "4"
309
+ },
310
+ {
311
+ "text": "New information may come from a sentence in natural language that modifies the action's inherent properties, such as in John hit the ball slowly, where 'slowly' is not part of the initial representation of the action 'hit'. This new information is added to the PAR schema.",
312
+ "cite_spans": [],
313
+ "ref_spans": [],
314
+ "eq_spans": [],
315
+ "section": "PAR as anIL",
316
+ "sec_num": "4"
317
+ },
318
+ {
319
+ "text": "Verb-versus Satellite-framed languages Verb-Framed Languages (VFL) map the motion (path or path + ground location) onto the verb, and the manner either onto a satellite or an adjunct, while Satellite-Framed Languages (SFL) map the motion into the satellite, and the manner onto the main verb.",
320
+ "cite_spans": [],
321
+ "ref_spans": [],
322
+ "eq_spans": [],
323
+ "section": "PAR as anIL",
324
+ "sec_num": "4"
325
+ },
326
+ {
327
+ "text": "English and other Germanic languages are considered satellite-framed languages, expressing the path in the satellite; Spanish, among other Romance languages, is a verb-framed language and expresses the path in the main verb. The pairs of sentences (1) and (2) from Talmy (1991) show examples of these divergences. In (1), in English, the exit of the bottle is expressed by the preposition out, in Spanish the same concept is incorporated in the main verb salir (to exit). In (2), the concept of blowing out the candle is represented differently in English and Spanish.",
328
+ "cite_spans": [
329
+ {
330
+ "start": 265,
331
+ "end": 277,
332
+ "text": "Talmy (1991)",
333
+ "ref_id": "BIBREF17"
334
+ }
335
+ ],
336
+ "ref_spans": [],
337
+ "eq_spans": [],
338
+ "section": "PAR as anIL",
339
+ "sec_num": "4"
340
+ },
341
+ {
342
+ "text": "(1) The bottle .floated out La boteUa sali6 flotando (the bottle exited floating)",
343
+ "cite_spans": [],
344
+ "ref_spans": [],
345
+ "eq_spans": [],
346
+ "section": "PAR as anIL",
347
+ "sec_num": "4"
348
+ },
349
+ {
350
+ "text": "(2) I blew out the candle Apagud la vela sopldndola (I extinguish the candle blowing)",
351
+ "cite_spans": [],
352
+ "ref_spans": [],
353
+ "eq_spans": [],
354
+ "section": "PAR as anIL",
355
+ "sec_num": "4"
356
+ },
357
+ {
358
+ "text": "In order to capture generalizations about motion actions, we have a generalized PAR schema for motion, and our hierarchy includes different types of motion actions such as inherently directed motion and manner of motion actions that inherit from the more general schema, as shown in Figure 4 . Directed motion actions, such as enter and exit, don't bring with them the manner by which the action is carried out but they have a inherent termination condition. For example, 'enter a room' may be done by walking, crawling or flying depending on the agents' ca-motion/(par: motion) directed_motion manner_raotion enter/(term: in (0B J) ) exit/(term: out (0B J) ) crawl/(act : crawl) float/(act ::float) Figure 4 : PAR schema hierarchy for motion actions pabilities, but it should end when the agent is in the room. In contrast, manner of motion verbs express the action explicitly and don't have an intrinsic termination condition.",
359
+ "cite_spans": [],
360
+ "ref_spans": [
361
+ {
362
+ "start": 283,
363
+ "end": 291,
364
+ "text": "Figure 4",
365
+ "ref_id": null
366
+ },
367
+ {
368
+ "start": 700,
369
+ "end": 708,
370
+ "text": "Figure 4",
371
+ "ref_id": null
372
+ }
373
+ ],
374
+ "eq_spans": [],
375
+ "section": "Motion",
376
+ "sec_num": "4.1"
377
+ },
378
+ {
379
+ "text": "Motion is a type of framing event where the path is in the main verb for VFLs and in the satellite for SFLs. In 3, we see the English sentence expressing the 'enter' idea in the preposition into whereas the Spanish sentence expresses it in the main verb entrar (to enter).",
380
+ "cite_spans": [],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "Motion",
384
+ "sec_num": "4.1"
385
+ },
386
+ {
387
+ "text": "(3) The bottle floated into the cave La botella entr5 flotando a la cueva (the bottle entered floating the cave)",
388
+ "cite_spans": [],
389
+ "ref_spans": [],
390
+ "eq_spans": [],
391
+ "section": "Motion",
392
+ "sec_num": "4.1"
393
+ },
394
+ {
395
+ "text": "The PAR schemas don't distinguish the representation for these sentences, because there is a single schema which includes both the manner and the path without specifying how they are realiized linguistically. Mappings from the lexical items to the schemas or to constraints in the schemas can be seen in Figure 5 . 5 Independent of which is the source language, the PAR schema selected is motion, the activity field, which determines how the action is performed (in this case, by floating), is filled by float (the main verb in English, or the adjunct in Spanish). The termination condition, which says that action ends when the agent is in the object, is added from the preposition in English and is part of the semantics of the main verb to enter in Spanish. Because all of the necessary elements for a translation are specified in this representation, it is up 5A lexical item may have several mappings to reflect its semantics: For instance, float in English can be used also in the non-motion sense, in which case there will be two entries to capture that distinction. to the language specific component to transform it into a surface structure that satisfies the grammatical principles of the destination language.",
396
+ "cite_spans": [],
397
+ "ref_spans": [
398
+ {
399
+ "start": 304,
400
+ "end": 312,
401
+ "text": "Figure 5",
402
+ "ref_id": "FIGREF1"
403
+ }
404
+ ],
405
+ "eq_spans": [],
406
+ "section": "Motion",
407
+ "sec_num": "4.1"
408
+ },
409
+ {
410
+ "text": "Our approach now diverges considerably from the approach outlined in which discusses the use of Feature-Based Tree Adjoining Grammars, (Joshi, 1985; Vijay-Shanker and Joshi, 1991) to capture generalizations about manner-ofmotion verbs. They do not propose an interlingua but use a transfer-based mechanism expressed in Synchronous Tree Adjoining Grammars to capture divergences of VFL and SFL through the use of semantic features and links between the grammars. The problem of whether or not a prepositional phrase constitutes an argument to a verb or an adjunct (described by Palmer et al.) does not constitute a problem in our representation, since all the information is recovered in the same template for the action to be animated. The PAR approach is much more similar to the Lexical Conceptual Structures (LCS) approach, (Jackendoff, 1972; Jackendoff, 1990) , used as an interlingua representation (Doff, 1993) . Based on the assumption that motion and manner of motion are conflated in a matrix verb like swim, the use of LCS allows separation of the concepts of motion, direction, and manner of motion in the sentence John swam across the lake. Each one of these concepts is represented separately in the interlingua representation, as GO, PATH and MANNER, respectively. Our approach allows for a similar representation and the end result is the same, namely that the event of swimming across the lake is characterized by separate semantic components, which can be expressed by the main schema and by the activity field. In addition, our representation also incorporates details about the action such as applicability conditions, preparatory specifications, termination conditions, and adverbial modifiers. It is not clear to us how the LCS approach could be used to effect the same commonality of representation.",
411
+ "cite_spans": [
412
+ {
413
+ "start": 135,
414
+ "end": 148,
415
+ "text": "(Joshi, 1985;",
416
+ "ref_id": "BIBREF8"
417
+ },
418
+ {
419
+ "start": 149,
420
+ "end": 179,
421
+ "text": "Vijay-Shanker and Joshi, 1991)",
422
+ "ref_id": "BIBREF18"
423
+ },
424
+ {
425
+ "start": 827,
426
+ "end": 845,
427
+ "text": "(Jackendoff, 1972;",
428
+ "ref_id": "BIBREF6"
429
+ },
430
+ {
431
+ "start": 846,
432
+ "end": 863,
433
+ "text": "Jackendoff, 1990)",
434
+ "ref_id": "BIBREF7"
435
+ },
436
+ {
437
+ "start": 904,
438
+ "end": 916,
439
+ "text": "(Doff, 1993)",
440
+ "ref_id": null
441
+ }
442
+ ],
443
+ "ref_spans": [],
444
+ "eq_spans": [],
445
+ "section": "Comparison with other work",
446
+ "sec_num": null
447
+ },
448
+ {
449
+ "text": "The importance of the additional information such as the termination conditions can be more clearly illustrated with a different set of examples. Another class of actions that presents interesting divergences involves instruments where the instrument is used as the main verb or as an adjunct depending on the language. The sentence pair in (4) shows this divergence for English and Portuguese. Because Portuguese does not have a verb for to spoon, it uses a more general verb colocar (to put) as the main verb and expresses the instrument in a prepositional phrase. Unlike directed motion actions, a put with hand-held instrument action (e.g., spoon, scoop, ladle, etc.) leaves the activity field unspecified in both languages. The specific action is generated by taking the instrument into account. A simplified schema is shown in Figure 7 . termination_cond : over(chocolate, icecream) Notice that the only connection between to spoon and its Portuguese translation would be the termination condition where the object of the verb, chocolate, has a new location which is over the ice cream.",
450
+ "cite_spans": [],
451
+ "ref_spans": [
452
+ {
453
+ "start": 833,
454
+ "end": 841,
455
+ "text": "Figure 7",
456
+ "ref_id": "FIGREF3"
457
+ }
458
+ ],
459
+ "eq_spans": [],
460
+ "section": "Instrument",
461
+ "sec_num": "4.2"
462
+ },
463
+ {
464
+ "text": "Conclusion We have discussed a parameterized representation of actions grounded by the needs of animation of instructions in a simulated environment. In order to support the animation of these instructions, our representation makes explicit many details that are often underspecified in the language, such as start and end states and changes in the environment that happen as a result of the action.",
465
+ "cite_spans": [],
466
+ "ref_spans": [],
467
+ "eq_spans": [],
468
+ "section": "5",
469
+ "sec_num": null
470
+ },
471
+ {
472
+ "text": "Sometimes the start and end state information provides critical information for accurate translation but it is not always necessary. Machine translation can often simply preserve ambiguities in the translation without resolving them. In our application we cannot afford this luxury. An interesting question to pursue for future work will be whether or not we can determine which PAR slots are not needed for machine translation purposes.",
473
+ "cite_spans": [],
474
+ "ref_spans": [],
475
+ "eq_spans": [],
476
+ "section": "5",
477
+ "sec_num": null
478
+ },
479
+ {
480
+ "text": "Generalizations based on action classes provide the basis for an interlingua approach that captures the semantics of actions without committing to any language-dependent specification. This framework offers a strong foundation for handling the range of phenomena presented by the machine translation task.",
481
+ "cite_spans": [],
482
+ "ref_spans": [],
483
+ "eq_spans": [],
484
+ "section": "5",
485
+ "sec_num": null
486
+ },
487
+ {
488
+ "text": "The structure of our PAR schemas incorporate into a single template the kind of divergence presented in verb-framed and satellite-framed languages. Although not shown in this paper, this representation can also capture idioms and noncompositional constructions since the animations of actions -and therefore the PARs that control them must be equivalent for the same actions described in different languages.",
489
+ "cite_spans": [],
490
+ "ref_spans": [],
491
+ "eq_spans": [],
492
+ "section": "5",
493
+ "sec_num": null
494
+ },
495
+ {
496
+ "text": "Currently, we are also investigating the possibility of building these action representations from a classbased verb lexicon which has explicit syntactic and semantic information (Kipper et al., 2000) .",
497
+ "cite_spans": [
498
+ {
499
+ "start": 179,
500
+ "end": 200,
501
+ "text": "(Kipper et al., 2000)",
502
+ "ref_id": "BIBREF10"
503
+ }
504
+ ],
505
+ "ref_spans": [],
506
+ "eq_spans": [],
507
+ "section": "5",
508
+ "sec_num": null
509
+ },
510
+ {
511
+ "text": "In this example, the second object is the instrument with which the action is performed.",
512
+ "cite_spans": [],
513
+ "ref_spans": [],
514
+ "eq_spans": [],
515
+ "section": "",
516
+ "sec_num": null
517
+ },
518
+ {
519
+ "text": "The relationship between PARs and alternations may b ecome much more complicated when we consider other verb classes such a s c hange of state verbs.",
520
+ "cite_spans": [],
521
+ "ref_spans": [],
522
+ "eq_spans": [],
523
+ "section": "",
524
+ "sec_num": null
525
+ }
526
+ ],
527
+ "back_matter": [
528
+ {
529
+ "text": "The authors would like to thank the Actionary group, Hoa Trang Dang, and the anonymous reviewers for their valuable comments. This work was partially supported by NSF Grant 9900297.",
530
+ "cite_spans": [],
531
+ "ref_spans": [],
532
+ "eq_spans": [],
533
+ "section": "Acknowledgments",
534
+ "sec_num": null
535
+ }
536
+ ],
537
+ "bib_entries": {
538
+ "BIBREF0": {
539
+ "ref_id": "b0",
540
+ "title": "Animation control for real-time virtual humans",
541
+ "authors": [
542
+ {
543
+ "first": "Norman",
544
+ "middle": [
545
+ "I"
546
+ ],
547
+ "last": "Badler",
548
+ "suffix": ""
549
+ },
550
+ {
551
+ "first": "Martha",
552
+ "middle": [],
553
+ "last": "Palmer",
554
+ "suffix": ""
555
+ },
556
+ {
557
+ "first": "Rama",
558
+ "middle": [],
559
+ "last": "Bindiganavale",
560
+ "suffix": ""
561
+ }
562
+ ],
563
+ "year": 1999,
564
+ "venue": "Communications off the ACM",
565
+ "volume": "42",
566
+ "issue": "7",
567
+ "pages": "65--73",
568
+ "other_ids": {},
569
+ "num": null,
570
+ "urls": [],
571
+ "raw_text": "Norman I. Badler, Martha Palmer, and Rama Bindi- ganavale. 1999. Animation control for real-time virtual humans. Communications off the ACM, 42(7):65-73.",
572
+ "links": null
573
+ },
574
+ "BIBREF1": {
575
+ "ref_id": "b1",
576
+ "title": "Embodied Conversational Agents, chapter Parameterized Action Representation for Virtual Human Agents",
577
+ "authors": [
578
+ {
579
+ "first": "Norman",
580
+ "middle": [
581
+ "I"
582
+ ],
583
+ "last": "Badler",
584
+ "suffix": ""
585
+ },
586
+ {
587
+ "first": "Rarna",
588
+ "middle": [],
589
+ "last": "Bindiganavale",
590
+ "suffix": ""
591
+ },
592
+ {
593
+ "first": "Jan",
594
+ "middle": [],
595
+ "last": "Allbeck",
596
+ "suffix": ""
597
+ },
598
+ {
599
+ "first": "William",
600
+ "middle": [],
601
+ "last": "Schuler",
602
+ "suffix": ""
603
+ },
604
+ {
605
+ "first": "Liwei",
606
+ "middle": [],
607
+ "last": "Zhao",
608
+ "suffix": ""
609
+ },
610
+ {
611
+ "first": "Martha",
612
+ "middle": [],
613
+ "last": "Palmer",
614
+ "suffix": ""
615
+ }
616
+ ],
617
+ "year": 2000,
618
+ "venue": "",
619
+ "volume": "",
620
+ "issue": "",
621
+ "pages": "",
622
+ "other_ids": {},
623
+ "num": null,
624
+ "urls": [],
625
+ "raw_text": "Norman I. Badler, Rarna Bindiganavale, Jan All- beck, William Schuler, Liwei Zhao, and Martha Palmer, 2000. Embodied Conversational Agents, chapter Parameterized Action Representation for Virtual Human Agents. MIT Press. to appear.",
626
+ "links": null
627
+ },
628
+ "BIBREF3": {
629
+ "ref_id": "b3",
630
+ "title": "Dynamically altering agent behaviors using natural language instructions",
631
+ "authors": [
632
+ {
633
+ "first": "Martha",
634
+ "middle": [],
635
+ "last": "Palmer",
636
+ "suffix": ""
637
+ }
638
+ ],
639
+ "year": 2000,
640
+ "venue": "Fourth International Conference on Autonomous Agents",
641
+ "volume": "",
642
+ "issue": "",
643
+ "pages": "",
644
+ "other_ids": {},
645
+ "num": null,
646
+ "urls": [],
647
+ "raw_text": "Martha Palmer. 2000. Dynamically altering agent behaviors using natural language instructions. Fourth International Conference on Autonomous Agents, June.",
648
+ "links": null
649
+ },
650
+ "BIBREF4": {
651
+ "ref_id": "b4",
652
+ "title": "Investigating regular sense extensions based on intersective levin classes",
653
+ "authors": [
654
+ {
655
+ "first": "Hoa",
656
+ "middle": [
657
+ "Trang"
658
+ ],
659
+ "last": "Dang",
660
+ "suffix": ""
661
+ },
662
+ {
663
+ "first": "Karin",
664
+ "middle": [],
665
+ "last": "Kipper",
666
+ "suffix": ""
667
+ },
668
+ {
669
+ "first": "Martha",
670
+ "middle": [],
671
+ "last": "Palmer",
672
+ "suffix": ""
673
+ },
674
+ {
675
+ "first": "Joseph",
676
+ "middle": [],
677
+ "last": "Rosenzweig",
678
+ "suffix": ""
679
+ }
680
+ ],
681
+ "year": 1998,
682
+ "venue": "Proceedings of COLING-A CL98",
683
+ "volume": "",
684
+ "issue": "",
685
+ "pages": "293--299",
686
+ "other_ids": {},
687
+ "num": null,
688
+ "urls": [],
689
+ "raw_text": "Hoa Trang Dang, Karin Kipper, Martha Palmer, and Joseph Rosenzweig. 1998. Investigating reg- ular sense extensions based on intersective levin classes. In Proceedings of COLING-A CL98, pages 293-299, Montreal, CA, August.",
690
+ "links": null
691
+ },
692
+ "BIBREF5": {
693
+ "ref_id": "b5",
694
+ "title": "Machine Translation: A View from the Lexicon",
695
+ "authors": [
696
+ {
697
+ "first": "Bonnie",
698
+ "middle": [
699
+ "J"
700
+ ],
701
+ "last": "Dorr",
702
+ "suffix": ""
703
+ }
704
+ ],
705
+ "year": 1993,
706
+ "venue": "",
707
+ "volume": "",
708
+ "issue": "",
709
+ "pages": "",
710
+ "other_ids": {},
711
+ "num": null,
712
+ "urls": [],
713
+ "raw_text": "Bonnie J. Dorr. 1993. Machine Translation: A View from the Lexicon. MIT Press, Boston, MA.",
714
+ "links": null
715
+ },
716
+ "BIBREF6": {
717
+ "ref_id": "b6",
718
+ "title": "Semantic Interpretation in Generative Grammar",
719
+ "authors": [
720
+ {
721
+ "first": "R",
722
+ "middle": [],
723
+ "last": "Jackendoff",
724
+ "suffix": ""
725
+ }
726
+ ],
727
+ "year": 1972,
728
+ "venue": "",
729
+ "volume": "",
730
+ "issue": "",
731
+ "pages": "",
732
+ "other_ids": {},
733
+ "num": null,
734
+ "urls": [],
735
+ "raw_text": "R. Jackendoff. 1972. Semantic Interpretation in Generative Grammar. MIT Press, Cambridge, Massachusetts.",
736
+ "links": null
737
+ },
738
+ "BIBREF7": {
739
+ "ref_id": "b7",
740
+ "title": "Semantic Structures",
741
+ "authors": [
742
+ {
743
+ "first": "R",
744
+ "middle": [],
745
+ "last": "Jackendoff",
746
+ "suffix": ""
747
+ }
748
+ ],
749
+ "year": 1990,
750
+ "venue": "",
751
+ "volume": "",
752
+ "issue": "",
753
+ "pages": "",
754
+ "other_ids": {},
755
+ "num": null,
756
+ "urls": [],
757
+ "raw_text": "R. Jackendoff. 1990. Semantic Structures. MIT Press, Boston, Mass.",
758
+ "links": null
759
+ },
760
+ "BIBREF8": {
761
+ "ref_id": "b8",
762
+ "title": "How much context sensitivity is necessary for characterizing structural descriptions:. Tree adjoining grammars",
763
+ "authors": [
764
+ {
765
+ "first": "K",
766
+ "middle": [],
767
+ "last": "Aravind",
768
+ "suffix": ""
769
+ },
770
+ {
771
+ "first": "",
772
+ "middle": [],
773
+ "last": "Joshi",
774
+ "suffix": ""
775
+ }
776
+ ],
777
+ "year": 1985,
778
+ "venue": "Natural language parsing: Psychological, computational and theoretical perspectives",
779
+ "volume": "",
780
+ "issue": "",
781
+ "pages": "206--250",
782
+ "other_ids": {},
783
+ "num": null,
784
+ "urls": [],
785
+ "raw_text": "Aravind K. Joshi. 1985. How much context sensi- tivity is necessary for characterizing structural de- scriptions:. Tree adjoining grammars. In L. Kart- tunen D. Dowry and A. Zwicky, editors, Nat- ural language parsing: Psychological, computa- tional and theoretical perspectives, pages 206-250. Cambridge University Press, Cambridge, U.K.",
786
+ "links": null
787
+ },
788
+ "BIBREF9": {
789
+ "ref_id": "b9",
790
+ "title": "An introduction to tree adjoining grammars",
791
+ "authors": [
792
+ {
793
+ "first": "K",
794
+ "middle": [],
795
+ "last": "Aravind",
796
+ "suffix": ""
797
+ },
798
+ {
799
+ "first": "",
800
+ "middle": [],
801
+ "last": "Joshi",
802
+ "suffix": ""
803
+ }
804
+ ],
805
+ "year": 1987,
806
+ "venue": "Mathematics of Language. John Benjamins",
807
+ "volume": "",
808
+ "issue": "",
809
+ "pages": "",
810
+ "other_ids": {},
811
+ "num": null,
812
+ "urls": [],
813
+ "raw_text": "Aravind K. Joshi. 1987. An introduction to tree ad- joining grammars. In A. Manaster-Ramer, editor, Mathematics of Language. John Benjamins, Ams- terdam.",
814
+ "links": null
815
+ },
816
+ "BIBREF10": {
817
+ "ref_id": "b10",
818
+ "title": "Class-based construction of a verb lexicon",
819
+ "authors": [
820
+ {
821
+ "first": "Karin",
822
+ "middle": [],
823
+ "last": "Kipper",
824
+ "suffix": ""
825
+ },
826
+ {
827
+ "first": "Hoa",
828
+ "middle": [
829
+ "Trang"
830
+ ],
831
+ "last": "Dang",
832
+ "suffix": ""
833
+ },
834
+ {
835
+ "first": "Martha",
836
+ "middle": [],
837
+ "last": "Palmer",
838
+ "suffix": ""
839
+ }
840
+ ],
841
+ "year": 2000,
842
+ "venue": "",
843
+ "volume": "",
844
+ "issue": "",
845
+ "pages": "",
846
+ "other_ids": {},
847
+ "num": null,
848
+ "urls": [],
849
+ "raw_text": "Karin Kipper, Hoa Trang Dang, and Martha Palmer. 2000. Class-based construction of a verb lexicon. In submitted to AAAL",
850
+ "links": null
851
+ },
852
+ "BIBREF11": {
853
+ "ref_id": "b11",
854
+ "title": "English Verb Classes and Alternation, A Preliminary Investigation. The University of",
855
+ "authors": [
856
+ {
857
+ "first": "Beth",
858
+ "middle": [],
859
+ "last": "Levin",
860
+ "suffix": ""
861
+ }
862
+ ],
863
+ "year": 1993,
864
+ "venue": "",
865
+ "volume": "",
866
+ "issue": "",
867
+ "pages": "",
868
+ "other_ids": {},
869
+ "num": null,
870
+ "urls": [],
871
+ "raw_text": "Beth Levin. 1993. English Verb Classes and Alter- nation, A Preliminary Investigation. The Univer- sity of Chicago Press.",
872
+ "links": null
873
+ },
874
+ "BIBREF12": {
875
+ "ref_id": "b12",
876
+ "title": "Capturing Motion Verb Generalizations with Synchronous TAG",
877
+ "authors": [
878
+ {
879
+ "first": "Martha",
880
+ "middle": [],
881
+ "last": "Palmer",
882
+ "suffix": ""
883
+ },
884
+ {
885
+ "first": "Joseph",
886
+ "middle": [],
887
+ "last": "Rosenzweig",
888
+ "suffix": ""
889
+ },
890
+ {
891
+ "first": "William",
892
+ "middle": [],
893
+ "last": "Schuler",
894
+ "suffix": ""
895
+ }
896
+ ],
897
+ "year": 1998,
898
+ "venue": "",
899
+ "volume": "",
900
+ "issue": "",
901
+ "pages": "",
902
+ "other_ids": {},
903
+ "num": null,
904
+ "urls": [],
905
+ "raw_text": "Martha Palmer, Joseph Rosenzweig, and William Schuler. 1998. Capturing Motion Verb General- izations with Synchronous TAG. In Patrick St.",
906
+ "links": null
907
+ },
908
+ "BIBREF13": {
909
+ "ref_id": "b13",
910
+ "title": "Predicative Forms in NLP",
911
+ "authors": [
912
+ {
913
+ "first": "",
914
+ "middle": [],
915
+ "last": "Dizier",
916
+ "suffix": ""
917
+ }
918
+ ],
919
+ "year": null,
920
+ "venue": "",
921
+ "volume": "",
922
+ "issue": "",
923
+ "pages": "",
924
+ "other_ids": {},
925
+ "num": null,
926
+ "urls": [],
927
+ "raw_text": "Dizier, editor, Predicative Forms in NLP. Kluwer Press.",
928
+ "links": null
929
+ },
930
+ "BIBREF14": {
931
+ "ref_id": "b14",
932
+ "title": "Preserving semantic dependencies in synchronous tree adjoining grammar",
933
+ "authors": [
934
+ {
935
+ "first": "William",
936
+ "middle": [],
937
+ "last": "Schuler",
938
+ "suffix": ""
939
+ }
940
+ ],
941
+ "year": 1999,
942
+ "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL '99)",
943
+ "volume": "",
944
+ "issue": "",
945
+ "pages": "",
946
+ "other_ids": {},
947
+ "num": null,
948
+ "urls": [],
949
+ "raw_text": "William Schuler. 1999. Preserving semantic depen- dencies in synchronous tree adjoining grammar. Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL '99).",
950
+ "links": null
951
+ },
952
+ "BIBREF15": {
953
+ "ref_id": "b15",
954
+ "title": "Synchronous tree adjoining grammars",
955
+ "authors": [
956
+ {
957
+ "first": "M",
958
+ "middle": [],
959
+ "last": "Stuart",
960
+ "suffix": ""
961
+ },
962
+ {
963
+ "first": "Yves",
964
+ "middle": [],
965
+ "last": "Shieber",
966
+ "suffix": ""
967
+ },
968
+ {
969
+ "first": "",
970
+ "middle": [],
971
+ "last": "Schabes",
972
+ "suffix": ""
973
+ }
974
+ ],
975
+ "year": 1990,
976
+ "venue": "Proceedings of the 13th International Conference on Computational Linguistics (COLING '90)",
977
+ "volume": "",
978
+ "issue": "",
979
+ "pages": "",
980
+ "other_ids": {},
981
+ "num": null,
982
+ "urls": [],
983
+ "raw_text": "Stuart M. Shieber and Yves Schabes. 1990. Syn- chronous tree adjoining grammars. In Proceedings of the 13th International Conference on Compu- tational Linguistics (COLING '90), Helsinki, Fin- land, August.",
984
+ "links": null
985
+ },
986
+ "BIBREF16": {
987
+ "ref_id": "b16",
988
+ "title": "Restricting the weakgenerative capability of synchronous tree adjoining grammars",
989
+ "authors": [
990
+ {
991
+ "first": "M",
992
+ "middle": [],
993
+ "last": "Stuart",
994
+ "suffix": ""
995
+ },
996
+ {
997
+ "first": "",
998
+ "middle": [],
999
+ "last": "Shieber",
1000
+ "suffix": ""
1001
+ }
1002
+ ],
1003
+ "year": 1994,
1004
+ "venue": "Computational Intelligence",
1005
+ "volume": "10",
1006
+ "issue": "4",
1007
+ "pages": "",
1008
+ "other_ids": {},
1009
+ "num": null,
1010
+ "urls": [],
1011
+ "raw_text": "Stuart M. Shieber. 1994. Restricting the weak- generative capability of synchronous tree adjoin- ing grammars. Computational Intelligence, 10(4).",
1012
+ "links": null
1013
+ },
1014
+ "BIBREF17": {
1015
+ "ref_id": "b17",
1016
+ "title": "Path to realization-via aspect and result",
1017
+ "authors": [
1018
+ {
1019
+ "first": "Leonard",
1020
+ "middle": [],
1021
+ "last": "Talmy",
1022
+ "suffix": ""
1023
+ }
1024
+ ],
1025
+ "year": 1991,
1026
+ "venue": "Proceedings of the 17th Annual Meeting of the Berkeley Linguistic Society",
1027
+ "volume": "",
1028
+ "issue": "",
1029
+ "pages": "480--519",
1030
+ "other_ids": {},
1031
+ "num": null,
1032
+ "urls": [],
1033
+ "raw_text": "Leonard Talmy. 1991. Path to realization-via as- pect and result. In Proceedings of the 17th Annual Meeting of the Berkeley Linguistic Society, pages 480-519.",
1034
+ "links": null
1035
+ },
1036
+ "BIBREF18": {
1037
+ "ref_id": "b18",
1038
+ "title": "Unification based tree adjoining grammars",
1039
+ "authors": [
1040
+ {
1041
+ "first": "K",
1042
+ "middle": [],
1043
+ "last": "Vijay-Shanker",
1044
+ "suffix": ""
1045
+ },
1046
+ {
1047
+ "first": "Aravind",
1048
+ "middle": [],
1049
+ "last": "Joshi",
1050
+ "suffix": ""
1051
+ }
1052
+ ],
1053
+ "year": 1991,
1054
+ "venue": "",
1055
+ "volume": "",
1056
+ "issue": "",
1057
+ "pages": "",
1058
+ "other_ids": {},
1059
+ "num": null,
1060
+ "urls": [],
1061
+ "raw_text": "K. Vijay-Shanker and Aravind Joshi. 1991. Uni- fication based tree adjoining grammars. In J. Wedekind, editor, Unification-based Grammars. MIT Press, Cambridge, Massachusetts.",
1062
+ "links": null
1063
+ }
1064
+ },
1065
+ "ref_entries": {
1066
+ "FIGREF0": {
1067
+ "num": null,
1068
+ "uris": null,
1069
+ "type_str": "figure",
1070
+ "text": "EN float/[par:motion,activity:float] into/[term:in(AG,OBJ)] SP entrar/[par:motion,term:in(AG,OBJ)] flotar/[activity :float]"
1071
+ },
1072
+ "FIGREF1": {
1073
+ "num": null,
1074
+ "uris": null,
1075
+ "type_str": "figure",
1076
+ "text": "Entries for the example sentences in (3)"
1077
+ },
1078
+ "FIGREF2": {
1079
+ "num": null,
1080
+ "uris": null,
1081
+ "type_str": "figure",
1082
+ "text": "Figure 6: A (simplified) PAR schema for the sentences in (3)"
1083
+ },
1084
+ "FIGREF3": {
1085
+ "num": null,
1086
+ "uris": null,
1087
+ "type_str": "figure",
1088
+ "text": "Representation of the sentences in (4)"
1089
+ }
1090
+ }
1091
+ }
1092
+ }
Full_text_JSON/prefixW/json/W00/W00-0203.json ADDED
@@ -0,0 +1,644 @@
1
+ {
2
+ "paper_id": "W00-0203",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:29.819687Z"
6
+ },
7
+ "title": "Evaluation of a Practical Interlingua for Task-Oriented Dialogue",
8
+ "authors": [
9
+ {
10
+ "first": "Lori",
11
+ "middle": [],
12
+ "last": "Levin",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "IRST ITC",
16
+ "institution": "Carnegie Mellon University",
17
+ "location": {
18
+ "settlement": "Trento",
19
+ "country": "Italy"
20
+ }
21
+ },
22
+ "email": ""
23
+ },
24
+ {
25
+ "first": "Donna",
26
+ "middle": [],
27
+ "last": "Gates",
28
+ "suffix": "",
29
+ "affiliation": {
30
+ "laboratory": "IRST ITC",
31
+ "institution": "Carnegie Mellon University",
32
+ "location": {
33
+ "settlement": "Trento",
34
+ "country": "Italy"
35
+ }
36
+ },
37
+ "email": ""
38
+ },
39
+ {
40
+ "first": "Alon",
41
+ "middle": [],
42
+ "last": "Lavie",
43
+ "suffix": "",
44
+ "affiliation": {
45
+ "laboratory": "IRST ITC",
46
+ "institution": "Carnegie Mellon University",
47
+ "location": {
48
+ "settlement": "Trento",
49
+ "country": "Italy"
50
+ }
51
+ },
52
+ "email": ""
53
+ },
54
+ {
55
+ "first": "Fabio",
56
+ "middle": [],
57
+ "last": "Pianesi",
58
+ "suffix": "",
59
+ "affiliation": {
60
+ "laboratory": "IRST ITC",
61
+ "institution": "Carnegie Mellon University",
62
+ "location": {
63
+ "settlement": "Trento",
64
+ "country": "Italy"
65
+ }
66
+ },
67
+ "email": ""
68
+ },
69
+ {
70
+ "first": "Dorcas",
71
+ "middle": [],
72
+ "last": "Wallace",
73
+ "suffix": "",
74
+ "affiliation": {
75
+ "laboratory": "IRST ITC",
76
+ "institution": "Carnegie Mellon University",
77
+ "location": {
78
+ "settlement": "Trento",
79
+ "country": "Italy"
80
+ }
81
+ },
82
+ "email": ""
83
+ },
84
+ {
85
+ "first": "Taro",
86
+ "middle": [],
87
+ "last": "Watanabe",
88
+ "suffix": "",
89
+ "affiliation": {
90
+ "laboratory": "IRST ITC",
91
+ "institution": "Carnegie Mellon University",
92
+ "location": {
93
+ "settlement": "Trento",
94
+ "country": "Italy"
95
+ }
96
+ },
97
+ "email": ""
98
+ },
99
+ {
100
+ "first": "Monika",
101
+ "middle": [],
102
+ "last": "Woszczyna",
103
+ "suffix": "",
104
+ "affiliation": {
105
+ "laboratory": "IRST ITC",
106
+ "institution": "Carnegie Mellon University",
107
+ "location": {
108
+ "settlement": "Trento",
109
+ "country": "Italy"
110
+ }
111
+ },
112
+ "email": ""
113
+ }
114
+ ],
115
+ "year": "",
116
+ "venue": null,
117
+ "identifiers": {},
118
+ "abstract": "IF (Interchange Format), the interlingua used by the C-STAR consortium, is a speech-act based interlingua for task-oriented dialogue. IF was designed as a practical interlingua that could strike a balance between expressivity and simplicity. If it is too simple, components of meaning will be lost and coverage of unseen data will be low. On the other hand, if it is too complex, it cannot be used with a high degree of consistency by collaborators on different continents. In this paper, we suggest methods for evaluating the coverage of IF and the consistency with which it was used in the C-STAR consortium.",
119
+ "pdf_parse": {
120
+ "paper_id": "W00-0203",
121
+ "_pdf_hash": "",
122
+ "abstract": [
123
+ {
124
+ "text": "IF (Interchange Format), the interlingua used by the C-STAR consortium, is a speech-act based interlingua for task-oriented dialogue. IF was designed as a practical interlingua that could strike a balance between expressivity and simplicity. If it is too simple, components of meaning will be lost and coverage of unseen data will be low. On the other hand, if it is too complex, it cannot be used with a high degree of consistency by collaborators on different continents. In this paper, we suggest methods for evaluating the coverage of IF and the consistency with which it was used in the C-STAR consortium.",
125
+ "cite_spans": [],
126
+ "ref_spans": [],
127
+ "eq_spans": [],
128
+ "section": "Abstract",
129
+ "sec_num": null
130
+ }
131
+ ],
132
+ "body_text": [
133
+ {
134
+ "text": "IF (Interchange Format) is an interlingua used by the C-STAR consortium 1 for task-oriented dialogues. Because it is used in five different countries for six different languages, it had to achieve a careful balance between being expressive ehough and being simple enough to be used consistently. If it was not expressive enough, components of meaning would be lost and coverage of unseen data would be low. On the other hand, if was not simple enough, different system developers would use it inconsistently and the wrong meanings would be translated. IF is described in our previous papers ([PT98, LGLW98, LLW+] ).",
135
+ "cite_spans": [
136
+ {
137
+ "start": 591,
138
+ "end": 598,
139
+ "text": "([PT98,",
140
+ "ref_id": "BIBREF3"
141
+ },
142
+ {
143
+ "start": 599,
144
+ "end": 606,
145
+ "text": "LGLW98,",
146
+ "ref_id": null
147
+ },
148
+ {
149
+ "start": 607,
150
+ "end": 612,
151
+ "text": "LLW+]",
152
+ "ref_id": "BIBREF3"
153
+ }
154
+ ],
155
+ "ref_spans": [],
156
+ "eq_spans": [],
157
+ "section": "Introduction",
158
+ "sec_num": null
159
+ },
160
+ {
161
+ "text": "For this paper, we have proposed methods for evaluating the coverage of IF and the degree to which it can be used consistently across C-STAR sites. Coverage was measured by having human IF specialists annotate unseen data. Consistency was measured by two means. The first was inter-coder agreement among IF specialists at Carnegie Mel-lonUniversity and ITC-irst (Centre per la ricerca lhttp://www.c-star.org scientifica e tecnologica). The second, less direct method, was a cross-site end-to-end evaluation of English-to-Italian translation where the Englishto-IF analysis grammars were written at CMU and IF-to-Italian generation was developed at IRST. If the English and Italian grammar writers did not agree on the meaning of the IF, wrong translations will be produced. In this way, the cross-site evaluation can be an indirect indicator of whether the CMU and IRST IF specialists agreed on the meaning of IF representations. For comparison, we also present within-site end-to-end evaluations of English-to-German, English-to-Japanese, and English-to-IF-to-English, where all of the analysis and generation grammars were written at CMU.",
162
+ "cite_spans": [],
163
+ "ref_spans": [],
164
+ "eq_spans": [],
165
+ "section": "Introduction",
166
+ "sec_num": null
167
+ },
168
+ {
169
+ "text": "Because we are working with task-oriented dialogues, adequate rendering of the speech act in the target language often overshadows the need for literal translation of the words. IF is therefore based on domain actions (DAs), which consist of on speech acts plus domain-specific concepts. An example of a DA is give-information+price+room (giving information about the price of a room). DAs are composed from 45 general speech acts (e.g., acknowledge, give-information, accept)",
170
+ "cite_spans": [],
171
+ "ref_spans": [],
172
+ "eq_spans": [],
173
+ "section": "The Interchange Format",
174
+ "sec_num": null
175
+ },
176
+ {
177
+ "text": "and about 96 domain-specific concepts (e.g, price, temporal, room, flight, availability). In addition to the DA, IF representations can contain arguments such as room-type, destination, and price. There are about 119 argument types.",
178
+ "cite_spans": [],
179
+ "ref_spans": [],
180
+ "eq_spans": [],
181
+ "section": "The Interchange Format",
182
+ "sec_num": null
183
+ },
184
+ {
185
+ "text": "In the following example, the DA consists of a speaker tag (a: for agent), the speechact give-information, and two main concepts, +price and +room. The DA is followed by a list of arguments: room-type= and price=. The arguments have values that represent-information for the type of room double and the cost repre- ",
186
+ "cite_spans": [],
187
+ "ref_spans": [],
188
+ "eq_spans": [],
189
+ "section": "The Interchange Format",
190
+ "sec_num": null
191
+ },
192
+ {
193
+ "text": "In this section, we address the coverage of IF for task-oriented dialogues about travel planning. We want to know whether a very simple interlingua like IF can have good coverage. We are using a rather subjective measure of coverage: IF experts hand-tagged unseen data with IF representations and counted the percentage of utterances to which no IF could be assigned. (When they tagged the unseen data, they were not told that the IF was being tested for coverage. The tagging was done for system development purposes.) Our end-toend evaluation described in the following sections can be taken as a less subjective measure of cov-2When we add anaphora resolution, we will need to know whether a verb (cost) or a noun (price) was used. This will be an issue our new project, NESPOLEI (http://nespole. itc. it/). The development portion of the coverage experiment proceeded as follows. Over a period of two years, a database of travel planning dialogues was collected by C-STAR partners in the U.S., Italy, and Korea. The dialogues were role-playing dialogues between a person pretending to be a traveller and a person pretending to be a travel agent. For the English and Italian dialogues, the traveller and agent were talking face-to-face in the same language --both speaking English or both speaking Italian. The Korean dialogues were also role playing dialogues, but one participant was speaking Korean and the other was speaking English. From these dialogues, only the Korean utterances are included in the database. Each utterance in the database is annotated with an English translation and an IF representation. The evaluation portion of the coverage experiment was carried out on 124 dialogues (6069 dialogue act units) that were collected at ATR, Japan. One participant in each dialogue was speaking Japanese and the other was speaking English. Both Japanese and English utterances are included in the data. The 124 Japanese-English dialogues were not examined closely by system developers during IF development. After the IF design was finalized and frozen in Summer 1999, the Japanese-English data was tagged with IFs. No further IF development took place at this point except that values for arguments were added. For example, Miyako could be added as a hotel name, but no new speech acts, concepts, or argument types could be added. Sentences were tagged as no-tag if the IF did not cover them. Figures 3 and 4 show the cumulative cover-age of the top twenty DAs and speech acts in the Japanese-English data, including the percent of no-tag sentences.",
194
+ "cite_spans": [],
195
+ "ref_spans": [
196
+ {
197
+ "start": 2407,
198
+ "end": 2422,
199
+ "text": "Figures 3 and 4",
200
+ "ref_id": "FIGREF1"
201
+ }
202
+ ],
203
+ "eq_spans": [],
204
+ "section": "Coverage and Distribution of Dialogue Acts",
205
+ "sec_num": null
206
+ },
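The cumulative coverage figures referred to here (Figures 1 to 4) are running sums over a frequency table of dialogue acts. A generic sketch is given below; it is not the authors' scoring script, and the counts used in the example are hypothetical, so the percentages only match the paper's figures when the full distribution over all dialogue-act units is supplied.

    from collections import Counter

    def cumulative_coverage(da_counts, top_n=20):
        """Per-DA and cumulative percent coverage, as in the Figure 1 / Figure 3 tables."""
        total = sum(da_counts.values())
        rows, covered = [], 0
        for da, count in da_counts.most_common(top_n):
            covered += count
            rows.append((da, count, 100.0 * count / total, 100.0 * covered / total))
        return rows

    # Hypothetical counts for illustration only.
    counts = Counter({"give-information": 1454, "acknowledge": 916,
                      "request-information": 677, "thank": 260})
    for da, count, pct, cum in cumulative_coverage(counts):
        print(f"{da:22s} {count:5d} {pct:6.1f}% {cum:7.1f}%")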
207
+ {
208
+ "text": "Notice that the percentage of no-tag was lower in our test data than in our development data. This is because the role playing instructions for the test data were more restrictive than the role playing instructions for the development data. Figures 1 and 3 show that slightly more of the test data is covered by slightly fewer DAs.",
209
+ "cite_spans": [],
210
+ "ref_spans": [
211
+ {
212
+ "start": 241,
213
+ "end": 256,
214
+ "text": "Figures 1 and 3",
215
+ "ref_id": null
216
+ }
217
+ ],
218
+ "eq_spans": [],
219
+ "section": "Coverage and Distribution of Dialogue Acts",
220
+ "sec_num": null
221
+ },
222
+ {
223
+ "text": "In this section we attempt to measure how reliably IF is used by researchers at different sites. Recall that one of the design criteria of IF was consistency of use by researchers who are separated by oceans. This criterion limits the complexity of IF. Two measures of consistency are used -inter-coder agreement and a cross-site end-to-end evaluation.",
224
+ "cite_spans": [],
225
+ "ref_spans": [],
226
+ "eq_spans": [],
227
+ "section": "Representations",
228
+ "sec_num": null
229
+ },
230
+ {
231
+ "text": "Inter-Coder Agreement: Inter-coder agreement is a direct measure of consistency among Table 2 : Inter-coder Agreement between CMU and IRST C-STAR partners. We used 84 DA units from the Japanese-English data described above. The 84 DA units consisted of some coherent dialogue fragments and and some isolated sentences. The data was coded at CMU and at IRST. We counted agreement on ~he components of the IF separately. Table 2 shows agreement on speech acts, dialogue acts (speech act plus concepts), concepts, and arguments. The results are reported in Table 2 in terms of percent agreement. Further work might include some other calculation of agreement such as Kappa or precision and recall of the coders against each other. Figure 5 shows a fragment of a dialogue coded by CMU and IRST. The coders disagreed on the IF middle sentence, I'd like a twin room please. One coded it as an acceptance of a twin room, the other coded it as a preference for a twin room.",
232
+ "cite_spans": [],
233
+ "ref_spans": [
234
+ {
235
+ "start": 86,
236
+ "end": 93,
237
+ "text": "Table 2",
238
+ "ref_id": null
239
+ },
240
+ {
241
+ "start": 419,
242
+ "end": 426,
243
+ "text": "Table 2",
244
+ "ref_id": null
245
+ },
246
+ {
247
+ "start": 554,
248
+ "end": 561,
249
+ "text": "Table 2",
250
+ "ref_id": null
251
+ },
252
+ {
253
+ "start": 728,
254
+ "end": 736,
255
+ "text": "Figure 5",
256
+ "ref_id": null
257
+ }
258
+ ],
259
+ "eq_spans": [],
260
+ "section": "Representations",
261
+ "sec_num": null
262
+ },
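The entry above reports raw percent agreement and mentions Kappa as possible further work. The sketch below computes both statistics for two coders' label sequences; the labels are invented for illustration and are not the CMU/IRST annotations.

    from collections import Counter

    def percent_agreement(coder_a, coder_b):
        """Fraction of dialogue-act units labelled identically by the two coders."""
        matches = sum(1 for a, b in zip(coder_a, coder_b) if a == b)
        return matches / len(coder_a)

    def cohens_kappa(coder_a, coder_b):
        """Chance-corrected agreement: (p_o - p_e) / (1 - p_e)."""
        n = len(coder_a)
        p_o = percent_agreement(coder_a, coder_b)
        freq_a, freq_b = Counter(coder_a), Counter(coder_b)
        p_e = sum(freq_a[label] * freq_b[label] for label in freq_a) / (n * n)
        return (p_o - p_e) / (1 - p_e)

    # Invented speech-act labels for six units, with one disagreement (cf. Figure 5).
    cmu  = ["give-information", "accept", "thank", "acknowledge", "give-information", "request-information"]
    irst = ["give-information", "give-information", "thank", "acknowledge", "give-information", "request-information"]
    print(percent_agreement(cmu, irst), round(cohens_kappa(cmu, irst), 3))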
263
+ {
264
+ "text": "Cross-Site Evaluation: As an approximate and indirect measure of consistency, we have compared intra-site end-to-end evaluation with cross-site end-to-end evaluation. An end-to-end evaluation includes an analyzer, which maps the source language input into IF and a generator, which maps IF into target language sentences. The intra-site evaluation was carried out on English-German, English-Japanese, and English-IF-English translation. The English analyzer and the German, Japanese, and English generators were all written at CMU by IF experts who worked closely with each other. The cross-site evaluation was carried out on English-Italian translation, involving an English analyzer written at CMU and an Italian generator written at IRST. The IF experts at CMU and IRST were in occasional contact with each other by email, and met in person two or three times between 1997 and 1999.",
265
+ "cite_spans": [],
266
+ "ref_spans": [],
267
+ "eq_spans": [],
268
+ "section": "Representations",
269
+ "sec_num": null
270
+ },
271
+ {
272
+ "text": "A number of factors contribute to the success of an inter-site evaluation, just one of which is that the sites used IF consistently with each other. Another factor is that the two sites used similar development data and have approximately the same coverage. If the inter-site evaluation results are about as good as the intra-site results, we can con-clude that all factors are handled acceptably, including consistency of IF usage. If the inter-site results are worse than the intra-site results, consistency of IF use or some other factor may be to blame. Before conducting this evaluation, we already knew that there was some degree of crosssite consistency in IF usage because we conducted successful inter-continental demos with speech translation and video conferencing in Summer 1999. (The demos and some of the press coverage are reported on the C-STAR web site.) The demos included dialogues in English-Italian, English-German, English-Japanese, English-Korean, and English-French. At a later date, an Italian-Korean demo was produced with no additional work, thus illustrating the well-cited advantage of an interlingual approach in a multi-lingual situation. The end-to-end evaluation reported here goes beyond the demo situation to include data that was unseen by system developers.",
273
+ "cite_spans": [],
274
+ "ref_spans": [],
275
+ "eq_spans": [],
276
+ "section": "Representations",
277
+ "sec_num": null
278
+ },
279
+ {
280
+ "text": "Evaluation Data: The Summer 1999 intra-site evaluation was conducted on about 130 utterances from a CMU user study. The traveller was played by a second time user --someone who had participated in one previous user study, but had no other experience with our MT system. The travel agent was played by a system developer. Both people were speaking English, but they were in different rooms, and their utterances were paraphrased using IF. The end-to-end procedure was that (1) an English utterance was spoken and decoded by the JANUS speech recognizer, (2) the output of the recognizer was parsed into an IF representation, and (3) a different English utterance (supposedly with the same meaning) was generated from the IF representation. The speakers had no other means of communication with each other.",
281
+ "cite_spans": [],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "Representations",
285
+ "sec_num": null
286
+ },
287
+ {
288
+ "text": "In order to evaluate English-German and English-Japanese translation, the IFs of the 130 test sentences were fed into German and Japanese generation components at CMU. The data used in the evaluation was unseen by system developers at the time of the evaluation. For English-Italian translation, the IF representations produced by the English analysis component were sent to IRST to be generated in Italian. Figure 6: Translation Grades for English to English, Japanese, German, and Italian score of perfect is assigned if, in addition to the previous criteria, the translation is fluent in the target language. A score of bad is assigned if the target language sentence is incomprehensible or some element of meaning has been added, deleted, or changed. The evaluation procedure is described in detail in [GLL+96] . In Figure 6 , acceptable is the sum of perfect and ok scores, s Figure 6 shows the results of the intra-site and inter-site evaluations. The first row grades the speech recognition output against a humanproduced transcript of what was said. This gives us a ceiling for how well we could do if translation were perfect, given speech recognition errors. Rows 2 through 7 show the results of the intra-site evaluation. All analyzers and generators were written at CMU, and the results were graded by CMU researchers. (The German results are a lower than the English and Japanese results because a shorter time was spent on grammar development.) Rows 8 and 9 report on CMU's intra~site evaluation of English-German transla~ Sin another paper ([LBL+00]), we describe a taskbased evaluation which focuses on success of communicative goals and how long it takes to achieve them. tion (the same system as in Rows 6 and 7), but the results were graded by researchers at IRST. Comparing Rows 6 and 7 with Rows 8 and 9, we can check that CMU and IRST graders were using roughly the same grading criteria: a difference of up to ten percent among graders is normal in our experience. Rows 10 and 11 show the results of the inter-site English-Italian evaluation. The CMU English analyzer produced IF representations which were sent to IRST and were fed into IRST's Italian generator. The results were graded by IRST researchers.",
289
+ "cite_spans": [
290
+ {
291
+ "start": 806,
292
+ "end": 814,
293
+ "text": "[GLL+96]",
294
+ "ref_id": null
295
+ }
296
+ ],
297
+ "ref_spans": [
298
+ {
299
+ "start": 820,
300
+ "end": 828,
301
+ "text": "Figure 6",
302
+ "ref_id": null
303
+ },
304
+ {
305
+ "start": 881,
306
+ "end": 889,
307
+ "text": "Figure 6",
308
+ "ref_id": null
309
+ }
310
+ ],
311
+ "eq_spans": [],
312
+ "section": "Representations",
313
+ "sec_num": null
314
+ },
315
+ {
316
+ "text": "Since the inter-site evaluation results are comparable to the intra-site results, we conclude that researchers at IRST and CMU are using IF at least as consistently as researchers within CMU.",
317
+ "cite_spans": [],
318
+ "ref_spans": [],
319
+ "eq_spans": [],
320
+ "section": "Conclusions drawn from the inter-site evaluation:",
321
+ "sec_num": null
322
+ },
323
+ {
324
+ "text": "In the next phase of C-STAR, we will cover descriptive sentences (e.g., The castle was built in the thirteenth century and someone was imprisoned in the tower) as well as task-oriented sentences. Descriptive sentences will be represented in a more traditional frame-based interlingua focusing on lexical meaning and grammatical features of the sentences. We are working on disambiguating literal from task-oriented meanings in context. For example That's great could be an acceptance (like I'll take it) (task oriented) or could just express appreciation. Sentences may also contain a combination of task oriented (e.g., Can you tell me) and descriptive (how long the castle has been standing) components.",
325
+ "cite_spans": [],
326
+ "ref_spans": [],
327
+ "eq_spans": [],
328
+ "section": "Future Plans",
329
+ "sec_num": null
330
+ },
331
+ {
332
+ "text": "[GLL+96]",
333
+ "cite_spans": [],
334
+ "ref_spans": [],
335
+ "eq_spans": [],
336
+ "section": "Future Plans",
337
+ "sec_num": null
338
+ },
339
+ {
340
+ "text": "[LBL+O0]",
341
+ "cite_spans": [],
342
+ "ref_spans": [],
343
+ "eq_spans": [],
344
+ "section": "Future Plans",
345
+ "sec_num": null
346
+ },
347
+ {
348
+ "text": "[LGLW98]",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "Future Plans",
353
+ "sec_num": null
354
+ }
355
+ ],
356
+ "back_matter": [
357
+ {
358
+ "text": "We have singles, and t,ins and also Japanese rooms available on the eleventh. CMU a:give-information+availability+room (room-type=(single ~ twin ~ japanese_style), time=mdll) IRST a:give-in2ormation+availability+room (room-type=(single ~ twin & japanese_style), time=mdll) I'd like a twin room, please. CMU c:accept+features+room (room-typeffitwin) IBST c:give-information+preference+features+room (room-type=twin) A twin room is fourteen thousand yen. CMU a:give-information+price+room (room-type=twin, price=(currency=yen, quantity=f4000)) IRST a:give-in.formation+price+room (room-type=twin, price=(currency=yen, quantity=f4000)) ",
359
+ "cite_spans": [],
360
+ "ref_spans": [],
361
+ "eq_spans": [],
362
+ "section": "annex",
363
+ "sec_num": null
364
+ }
365
+ ],
366
+ "bib_entries": {
367
+ "BIBREF0": {
368
+ "ref_id": "b0",
369
+ "title": "End-to-End Evaluation in JANUS: A Speechto-Speech Translation System",
370
+ "authors": [
371
+ {
372
+ "first": "Donna",
373
+ "middle": [],
374
+ "last": "Gates",
375
+ "suffix": ""
376
+ },
377
+ {
378
+ "first": "A",
379
+ "middle": [],
380
+ "last": "Lavie",
381
+ "suffix": ""
382
+ },
383
+ {
384
+ "first": "L",
385
+ "middle": [],
386
+ "last": "Levin",
387
+ "suffix": ""
388
+ },
389
+ {
390
+ "first": "A",
391
+ "middle": [],
392
+ "last": "Waibel",
393
+ "suffix": ""
394
+ },
395
+ {
396
+ "first": "M",
397
+ "middle": [],
398
+ "last": "Gavald~",
399
+ "suffix": ""
400
+ },
401
+ {
402
+ "first": "L",
403
+ "middle": [],
404
+ "last": "Mayfield",
405
+ "suffix": ""
406
+ },
407
+ {
408
+ "first": "M",
409
+ "middle": [],
410
+ "last": ":-Woszczyna",
411
+ "suffix": ""
412
+ },
413
+ {
414
+ "first": "P",
415
+ "middle": [],
416
+ "last": "Zhan",
417
+ "suffix": ""
418
+ }
419
+ ],
420
+ "year": 1996,
421
+ "venue": "Proceedings of ECAI-96",
422
+ "volume": "",
423
+ "issue": "",
424
+ "pages": "",
425
+ "other_ids": {},
426
+ "num": null,
427
+ "urls": [],
428
+ "raw_text": "Donna Gates, A. Lavie, L. Levin, A. Waibel, M. Gavald~, L. Mayfield, M:-Woszczyna, and P. Zhan. End-to- End Evaluation in JANUS: A Speech- to-Speech Translation System. In Pro- ceedings of ECAI-96, Budapest, Hun- gary, 1996.",
429
+ "links": null
430
+ },
431
+ "BIBREF1": {
432
+ "ref_id": "b1",
433
+ "title": "Lessons Learned from a Task-Based Evaluation of Speech-to-Speech MT",
434
+ "authors": [
435
+ {
436
+ "first": "Lori",
437
+ "middle": [],
438
+ "last": "Levin",
439
+ "suffix": ""
440
+ },
441
+ {
442
+ "first": "Boris",
443
+ "middle": [],
444
+ "last": "Bartlog",
445
+ "suffix": ""
446
+ },
447
+ {
448
+ "first": "Ariadna",
449
+ "middle": [],
450
+ "last": "Font Llitjos",
451
+ "suffix": ""
452
+ },
453
+ {
454
+ "first": "Donna",
455
+ "middle": [],
456
+ "last": "Gates",
457
+ "suffix": ""
458
+ },
459
+ {
460
+ "first": "Alon",
461
+ "middle": [],
462
+ "last": "Lavie",
463
+ "suffix": ""
464
+ },
465
+ {
466
+ "first": "Dorcas",
467
+ "middle": [],
468
+ "last": "Wallace",
469
+ "suffix": ""
470
+ },
471
+ {
472
+ "first": "Taro",
473
+ "middle": [],
474
+ "last": "Watanabe",
475
+ "suffix": ""
476
+ },
477
+ {
478
+ "first": "Monika",
479
+ "middle": [],
480
+ "last": "Woszczyna",
481
+ "suffix": ""
482
+ }
483
+ ],
484
+ "year": 2000,
485
+ "venue": "Proceedings of LREC",
486
+ "volume": "",
487
+ "issue": "",
488
+ "pages": "",
489
+ "other_ids": {},
490
+ "num": null,
491
+ "urls": [],
492
+ "raw_text": "Lori Levin, Boris Bartlog, Ari- adna Font Llitjos, Donna Gates, Alon Lavie, Dorcas Wallace, Taro Watan- abe, and Monika Woszczyna. Lessons Learned from a Task-Based Evaluation of Speech-to-Speech MT. In Proceed- ings of LREC 2000, Athens, Greece, June to appear, 2000.",
493
+ "links": null
494
+ },
495
+ "BIBREF2": {
496
+ "ref_id": "b2",
497
+ "title": "An Interlingua Based on Domain Actions for Machine Translation of Task-Oriented Dialogues",
498
+ "authors": [
499
+ {
500
+ "first": "Lori",
501
+ "middle": [],
502
+ "last": "Levin",
503
+ "suffix": ""
504
+ },
505
+ {
506
+ "first": "D",
507
+ "middle": [],
508
+ "last": "Gates",
509
+ "suffix": ""
510
+ },
511
+ {
512
+ "first": "A",
513
+ "middle": [],
514
+ "last": "Lavie",
515
+ "suffix": ""
516
+ },
517
+ {
518
+ "first": "A",
519
+ "middle": [],
520
+ "last": "Waibel",
521
+ "suffix": ""
522
+ }
523
+ ],
524
+ "year": 1998,
525
+ "venue": "Proceedings of the International Conference on Spoken Language Processing (ICSLP'98)",
526
+ "volume": "",
527
+ "issue": "",
528
+ "pages": "",
529
+ "other_ids": {},
530
+ "num": null,
531
+ "urls": [],
532
+ "raw_text": "Lori Levin, D. Gates, A. Lavie, and A. Waibel. An Interlingua Based on Domain Actions for Machine Transla- tion of Task-Oriented Dialogues. In Proceedings of the International Con- ference on Spoken Language Process- ing (ICSLP'98), Sydney, Australia, 1998.",
533
+ "links": null
534
+ },
535
+ "BIBREF3": {
536
+ "ref_id": "b3",
537
+ "title": "The Janus-III Translation System. Machine Translation",
538
+ "authors": [
539
+ {
540
+ "first": "Lori",
541
+ "middle": [],
542
+ "last": "Levin",
543
+ "suffix": ""
544
+ },
545
+ {
546
+ "first": "A",
547
+ "middle": [],
548
+ "last": "Lavie",
549
+ "suffix": ""
550
+ },
551
+ {
552
+ "first": "M",
553
+ "middle": [],
554
+ "last": "Woszczyna",
555
+ "suffix": ""
556
+ },
557
+ {
558
+ "first": "D",
559
+ "middle": [],
560
+ "last": "Gates",
561
+ "suffix": ""
562
+ },
563
+ {
564
+ "first": "M",
565
+ "middle": [],
566
+ "last": "Gavald~",
567
+ "suffix": ""
568
+ },
569
+ {
570
+ "first": "D",
571
+ "middle": [],
572
+ "last": "Koll",
573
+ "suffix": ""
574
+ },
575
+ {
576
+ "first": "A",
577
+ "middle": [],
578
+ "last": "Waibel",
579
+ "suffix": ""
580
+ }
581
+ ],
582
+ "year": null,
583
+ "venue": "",
584
+ "volume": "",
585
+ "issue": "",
586
+ "pages": "",
587
+ "other_ids": {},
588
+ "num": null,
589
+ "urls": [],
590
+ "raw_text": "Lori Levin, A. Lavie, M. Woszczyna, D. Gates, M. Gavald~, D. Koll, and A. Waibel. The Janus-III Translation System. Machine Translation. To ap- pear.",
591
+ "links": null
592
+ },
593
+ "BIBREF4": {
594
+ "ref_id": "b4",
595
+ "title": "Using the Interchange Format for Encoding Spoken Dialogue",
596
+ "authors": [
597
+ {
598
+ "first": "Fabio",
599
+ "middle": [],
600
+ "last": "Pianesi",
601
+ "suffix": ""
602
+ },
603
+ {
604
+ "first": "Lucia",
605
+ "middle": [],
606
+ "last": "Tovena",
607
+ "suffix": ""
608
+ }
609
+ ],
610
+ "year": 1998,
611
+ "venue": "Proceedings of SIG-IL Workshop",
612
+ "volume": "",
613
+ "issue": "",
614
+ "pages": "",
615
+ "other_ids": {},
616
+ "num": null,
617
+ "urls": [],
618
+ "raw_text": "Fabio Pianesi and Lucia Tovena. Us- ing the Interchange Format for Encod- ing Spoken Dialogue. In Proceedings of SIG-IL Workshop, 1998.",
619
+ "links": null
620
+ }
621
+ },
622
+ "ref_entries": {
623
+ "FIGREF0": {
624
+ "uris": null,
625
+ "text": "-acts in development erage. However, the score of an end-to-end evaluation encompasses grammar coverage problems as well as IF coverage problems.",
626
+ "type_str": "figure",
627
+ "num": null
628
+ },
629
+ "FIGREF1": {
630
+ "uris": null,
631
+ "text": "Coverage of Top 20 SAs in test data second column shows the percent of the development data covered by each DA or speech act. The third column shows the number of times each DA or speech act occurs in the development data.",
632
+ "type_str": "figure",
633
+ "num": null
634
+ },
635
+ "TABREF2": {
636
+ "text": "summarizes the amount of data in each language. The English, Italian, and Korean data was used for IF development.The development database contains over 4000 dialogue act units, which are covered by a total of about 542 distinct DAs (346 agent DAs and 278 client DAs).Figures 1 and 2show the cumulative coverage of the top twenty DA's and speech acts in the development data.Figure 1also shows the percentage of no-tag utterances (the ones we decided not to cover) in the development data. The first column shows the percent of the development data that is covered cumulatively by the DA's or speech acts from the top of the table to the current line. For example, acknowledg e and affirm together account for 19.8 percent of the data. The",
637
+ "type_str": "table",
638
+ "num": null,
639
+ "content": "<table><tr><td/><td colspan=\"3\">Language(s)</td><td>Type of Dialogue</td><td/><td colspan=\"3\">Number of DA Units</td></tr><tr><td/><td colspan=\"3\">D'evelopment Data:</td><td/><td/><td/><td/></tr><tr><td/><td colspan=\"2\">English</td><td/><td>monolingual</td><td/><td>2698</td><td/></tr><tr><td/><td colspan=\"2\">Italian</td><td/><td>monolingual</td><td/><td/><td/></tr><tr><td/><td colspan=\"3\">Korean-English</td><td colspan=\"2\">biiingual (only 'Korean utterances are included)</td><td>1142</td><td/></tr><tr><td/><td colspan=\"2\">Test Data:</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">bilingual (Japanese and</td><td/><td/></tr><tr><td/><td colspan=\"3\">Japanese-English</td><td colspan=\"2\">English utterances are</td><td>6069</td><td/></tr><tr><td/><td/><td/><td/><td>included)</td><td/><td/><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">Table 1: The IF Database</td><td/><td/></tr><tr><td>Percent ' Cumulative</td><td>Percent</td><td>Count</td><td>DA</td><td colspan=\"2\">Percent Cumulative</td><td>Percent</td><td>Count</td><td>DA</td></tr><tr><td>Cover~,--= -\"</td><td>4.6</td><td>263</td><td>no-tag</td><td colspan=\"2\">Coverage 25.6</td><td>25.6</td><td>1454</td><td>give-information</td></tr><tr><td>15.6</td><td colspan=\"2\">15.6 \u2022 -885</td><td>acknowledge</td><td/><td/><td/><td/></tr><tr><td>20.2</td><td>4,6</td><td>260</td><td>thank</td><td/><td>41.7</td><td>16.1</td><td>916</td><td>acknowledge</td></tr><tr><td>23.7</td><td>3.5</td><td>200</td><td>introduce-self</td><td/><td>53.6</td><td>11.9</td><td>677</td><td>request-information</td></tr><tr><td>27.0 29.7</td><td>3.4 2.7</td><td>191 153</td><td>affirm apologize</td><td/><td>58.2 62,0</td><td>4.6 3.7</td><td>260 213</td><td>thank request-verification-give-inform</td></tr><tr><td>32.3</td><td>2.6</td><td>147</td><td>greeting</td><td/><td>65.5</td><td>3.5</td><td>200</td><td>introduce-self</td></tr><tr><td>34.6</td><td>2.3</td><td>128</td><td>closing</td><td/><td>68.8</td><td>3.4</td><td>191</td><td>affirm</td></tr><tr><td>36.3</td><td>1.7</td><td>98</td><td colspan=\"2\">give-information+personal-data</td><td>72.0</td><td>3.2</td><td>181</td><td>request-action</td></tr><tr><td>38.0</td><td>1.7</td><td>95</td><td colspan=\"2\">glve-inform ation +t em poraI</td><td>74.8</td><td>2.8</td><td>159</td><td>accept</td></tr><tr><td>39.5 41.1</td><td>1.6 1.5</td><td>89 88</td><td colspan=\"2\">give-in formation + price please-wait</td><td>77.5 80.1</td><td>2.7 2.6</td><td>153 147</td><td>apologize greeting</td></tr><tr><td>42.5</td><td>1.4</td><td>82</td><td colspan=\"2\">give-inform+telephone-number</td><td>82.4</td><td>2.3</td><td>130</td><td>closing</td></tr><tr><td>43.8</td><td>1.3</td><td>75</td><td colspan=\"2\">give-information+features+room</td><td>84.4</td><td>2.1</td><td>117</td><td>suggest</td></tr><tr><td>45.0</td><td>I.I</td><td>65</td><td colspan=\"2\">request-inform+personal-data</td><td>86.3</td><td>1.8</td><td>104</td><td>verlfy-give-information</td></tr><tr><td>46.0 47.0</td><td>1.0 1.0</td><td>59 55</td><td colspan=\"2\">give-in for m \u00f7temp oral-.{-arrival accept</td><td>87.9 89.5</td><td>1.7 1.5</td><td>94 88</td><td>offer please-wait</td></tr><tr><td>48.0</td><td>l.O</td><td>55</td><td colspan=\"2\">give-infor m +availability + room</td><td>90.6</td><td>I.I</td><td>65</td><td>negate-glve-lnformation</td></tr><tr><td>48.9</td><td>1.0</td><td>55</td><td 
colspan=\"2\">give-information+price-broom</td><td>91.5</td><td>0.9</td><td>50</td><td>verify</td></tr><tr><td>49.8</td><td>0.9</td><td>50</td><td>verify</td><td/><td>92.0</td><td>0.5</td><td>30</td><td>negate</td></tr><tr><td>50.7</td><td>0.9</td><td>49</td><td colspan=\"2\">request-in form +temporal+arrival</td><td>92.5</td><td>0.5 .</td><td>26</td><td>request-affirmatlon</td></tr><tr><td colspan=\"5\">Figure 3: Coverage of Top 20 DAs and No-tag in</td><td/><td/><td/></tr><tr><td>test data</td><td/><td/><td/><td/><td/><td/><td/></tr></table>",
640
+ "html": null
641
+ }
642
+ }
643
+ }
644
+ }
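Each file added in this commit uses the same S2ORC layout that closes above: a top-level paper_id, and a pdf_parse object whose body_text entries carry text and section fields. A minimal sketch for reading the next file in the diff follows, assuming the script runs from the repository root; the title field is read defensively since not every parse is guaranteed to carry it.

    import json

    # Read one of the S2ORC parses added in this commit and list its sections.
    # Field names (paper_id, pdf_parse, body_text, section, text) are the ones visible in the diff.
    with open("Full_text_JSON/prefixW/json/W00/W00-0204.json", encoding="utf-8") as handle:
        paper = json.load(handle)

    print(paper["paper_id"], "-", paper.get("title", ""))
    for passage in paper["pdf_parse"]["body_text"]:
        heading = passage["section"] or "(no section)"
        print(f"[{heading}] {passage['text'][:60]}...")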
Full_text_JSON/prefixW/json/W00/W00-0204.json ADDED
@@ -0,0 +1,656 @@
1
+ {
2
+ "paper_id": "W00-0204",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:40.833657Z"
6
+ },
7
+ "title": "An interlingua aiming at communication on the Web: How language-independent can it be?",
8
+ "authors": [
9
+ {
10
+ "first": "Ronaldo",
11
+ "middle": [],
12
+ "last": "Teixeira Martins",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ },
17
+ {
18
+ "first": "Lucia",
19
+ "middle": [],
20
+ "last": "Helena",
21
+ "suffix": "",
22
+ "affiliation": {},
23
+ "email": ""
24
+ },
25
+ {
26
+ "first": "Machado",
27
+ "middle": [],
28
+ "last": "Rino",
29
+ "suffix": "",
30
+ "affiliation": {},
31
+ "email": ""
32
+ },
33
+ {
34
+ "first": "Maria",
35
+ "middle": [],
36
+ "last": "Das Graqas",
37
+ "suffix": "",
38
+ "affiliation": {},
39
+ "email": ""
40
+ },
41
+ {
42
+ "first": "Volpe",
43
+ "middle": [],
44
+ "last": "Nunes",
45
+ "suffix": "",
46
+ "affiliation": {},
47
+ "email": ""
48
+ },
49
+ {
50
+ "first": "Gisele",
51
+ "middle": [],
52
+ "last": "Montilha",
53
+ "suffix": "",
54
+ "affiliation": {},
55
+ "email": "gisele@nilc.icmsc@sc.usp.br"
56
+ }
57
+ ],
58
+ "year": "",
59
+ "venue": null,
60
+ "identifiers": {},
61
+ "abstract": "In this paper, we describe the Universal Networking Language, an interlingua to be plugged in a Web environment aiming at allowing for many-to-many information exchange, 'many' here referring to many natural languages. The interlingua is embedded in a Knowledge-Base MT system whose languagedependent modules comprise an encoder, a decoder, and linguistic resources that have been developed by native speakers of each language involved in the project. Issues concerning both the interlingua formalism and its foundational issues are discussed.",
62
+ "pdf_parse": {
63
+ "paper_id": "W00-0204",
64
+ "_pdf_hash": "",
65
+ "abstract": [
66
+ {
67
+ "text": "In this paper, we describe the Universal Networking Language, an interlingua to be plugged in a Web environment aiming at allowing for many-to-many information exchange, 'many' here referring to many natural languages. The interlingua is embedded in a Knowledge-Base MT system whose languagedependent modules comprise an encoder, a decoder, and linguistic resources that have been developed by native speakers of each language involved in the project. Issues concerning both the interlingua formalism and its foundational issues are discussed.",
68
+ "cite_spans": [],
69
+ "ref_spans": [],
70
+ "eq_spans": [],
71
+ "section": "Abstract",
72
+ "sec_num": null
73
+ }
74
+ ],
75
+ "body_text": [
76
+ {
77
+ "text": "The widespread use of the Web and the growing Intemet facilities have sparked enormous interest in improving the ways people use to communicate. In this context multilingual Machine Translation systems become prominent, for they allow for a huge information flow. To date, MT systems have been built under limited conditions, of which we highlight two: i) in general, they mirror one-to-many(languages) or many(languages)to-one approaches, often involving English at the \"one\" end; ii) communication is reduced to basic information exchange, ignoring richness and flexibility implied by human mind. The first limitation has been seldom overcome, since it requires a robust environment and research teams that can cope with knowledge of several languages 1, to derive precise automatic language analyzers and synthesizers. The second limitation follows up the first: adding up communicative issues to linguistic processing/modeling makes still harder to overcome MT limitations.",
78
+ "cite_spans": [],
79
+ "ref_spans": [],
80
+ "eq_spans": [],
81
+ "section": "Introduction",
82
+ "sec_num": "1."
83
+ },
84
+ {
85
+ "text": "In this article, we elaborate on work using an interlingua conceived to overcome the first limitation, i.e., to allow for a many-to-many information exchange environment, which shall be plugged in a nontraditional Internet platform. The goal is to allow interlocutors to entangle communication even if they do not share the same mother tongue or the English Standing, most often, for natural language, or NL. language, unlike MT systems that have just one language at one of their edges. As the main component of a Knowledge-Base MT system (hereafter, KBMT), the interlingua approach has been developed under the Universal Networking Language Project, or simply UNL Project. What makes the interlingua UNL special is its intended use: as an electronic language for networks, it has to allow for high quality 2 conversation systems involving many languages. As the main component of a KBMT system, it has to be sufficiently robust to ground research and development (R&D) of the language-specific modules to be attached to the system. It is this latter perspective that is undertaken here: from the viewpoint of R&D, we discuss how broad, or language-independent, the interlingua UNL is, especially focusing on its syntax and coverage. In addition to being consistent and complete to represent meaning, we also consider its sharing by researchers all around the world, which is an important bottleneck of the UNL Project, since information exchange by researchers during R&D brings about the problems introduced by the interlingua UNL itself, concerning both its formalism and foundational issues. Before discussing this topic in Section 5, we present an overview of the UNL Project (Section 2) and describe the main features of the interlingua UNL (Section 3). In Section 4, we describe the UNL system architecture. Hereafter, 'interlingua UNL' will be simply referred to as UNL, the acronym for Universal Networking Language. Also, the viewpoint presented here is that of interlingua users who experience R&D for a given NL, and not of its authors.",
86
+ "cite_spans": [],
87
+ "ref_spans": [],
88
+ "eq_spans": [],
89
+ "section": "Introduction",
90
+ "sec_num": "1."
91
+ },
92
+ {
93
+ "text": "The UNL Project 3 has been launched by the United Nations University to foster and ease international web communication by means of NLP systems. Its main strength lies on the development of the UNL, as a unique semantic (or meaning) representation that can be interchanged with the various languages to be integrated in the KBMT system. In the UNL Project, plug-in software to encode NL texts onto UNL ones (NL-UNL encoders) and to decode UNL into NL texts (UNL-NL decoders) have been developed by R&D groups in their own native languages. The modules to process Brazilian Portuguese 4, for example, have been developed by a team of Portuguese native speakers that comprises linguists, computational linguists, and computer experts. Such packages will be made available in WWW servers and will be accessible by browsing through Internet, thus overcoming the need for people all around the world tO learn the language of their interlocutors. Several linguistic groups have signed to the. Project, namely: the Indo-European (Portuguese, Spanish, French, Italian, English, German, Russian, Latvian and Hindi), the Semitic (Arabic), the Sino-Tibetan (Chinese), the Ural-Altaic (Mongolian), the Malayan-Polynesian (Indonesian), and the Japanese.",
94
+ "cite_spans": [],
95
+ "ref_spans": [],
96
+ "eq_spans": [],
97
+ "section": "The UNL Project",
98
+ "sec_num": "2."
99
+ },
100
+ {
101
+ "text": "On the one hand, the main strength of the Project is that knowledgeable specialists address language-dependent issues of their mother tongue, most of which are related to R&D of the encoding and decoding modules and to the specification of the NL-UNL lexicon. On the other hand, this also represents a crucial problem faced by the project participants, for distinct groups may interpret the interlingua specification differently. There is thus the need for a consensus about the UNL formalism, bringing about an assessment of its coverage, completeness, and consistency, all features that will be discussed shortly.",
102
+ "cite_spans": [],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "The UNL Project",
106
+ "sec_num": "2."
107
+ },
108
+ {
109
+ "text": "The UNL is a formal language designed for rendering automatic multilingual information exchange. It is intended to be a cross-linguistic semantic representation of NL sentence meaning, being the core of the UNL System, the KBMT system developed by H. Uchida (1996) at the Institute of Advanced-Studies, United Nations University, Tokyo; Japan.",
110
+ "cite_spans": [
111
+ {
112
+ "start": 251,
113
+ "end": 264,
114
+ "text": "Uchida (1996)",
115
+ "ref_id": "BIBREF6"
116
+ }
117
+ ],
118
+ "ref_spans": [],
119
+ "eq_spans": [],
120
+ "section": "The Universal Networking Language",
121
+ "sec_num": "3."
122
+ },
123
+ {
124
+ "text": "UNL subsumes a tridimensional theory of (sentence) meaning, whose components are defined according to one of the following sets (Martins et al., 1998a) : concepts (e.g., \"cat\", \"sit\", \"on\", or \"mat\"), concept relations (e.g., \"agent\", \"place\", or \"object\"), and concept predicates (e.g., \"past\" or \"definite\"). Such components are formally and correspondingly represented by three different kinds of entities, namely: Universal Words (UWs), Relation Labels (RLs), and Attribute Labels (ALs). According to the UNL syntax, information conveyed by each sentence can be represented by a hypergraph whose nodes represent UWs and whose arcs represent RLs. To make symbol processing simpler, hypergraphs are often reduced to lists of ordered binary relations between concepts, as it is shown in Figure 1 for the sentence (1) The cat sat on the mat. 5 agt(sit. @entry. @past,cat. @def) plc(sit. @entry. @past,on) obj(on,mat. @def) UWs are labels for concept-like information, roughly corresponding to the lexical level in the sentence structure. They comprise an open large inventory, virtually capable of denoting every non-compositional meaning to be conveyed by any speaker of any language. For the sake of representation, these atomic semantic contents are associated to English words and expressions, which play the role of semantic labels. However, there is no one-to-one mapping between the English vocabulary and the UNL lexicon, for UNL, as a multilingual representation code, is larger than the English vocabulary. To avoid unnecessary proliferation of the UNL vocabulary and to certify that standards be observed by UNL teams, control over the specification of the UW set is centered at the UNL Center, in Japan.",
125
+ "cite_spans": [
126
+ {
127
+ "start": 128,
128
+ "end": 151,
129
+ "text": "(Martins et al., 1998a)",
130
+ "ref_id": "BIBREF4"
131
+ }
132
+ ],
133
+ "ref_spans": [
134
+ {
135
+ "start": 788,
136
+ "end": 796,
137
+ "text": "Figure 1",
138
+ "ref_id": null
139
+ }
140
+ ],
141
+ "eq_spans": [],
142
+ "section": "The Universal Networking Language",
143
+ "sec_num": "3."
144
+ },
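The linear UNL form quoted above for sentence (1) is a list of binary relations over UWs, where each UW may carry '.@' attribute labels (the stray spaces inside the quoted examples are extraction artifacts). The sketch below, written for this listing rather than taken from the UNL Center's tooling, parses that notation into (relation, node, node) triples.

    import re

    # Parse linear UNL binary relations such as agt(sit.@entry.@past,cat.@def).
    UNL_LINE = re.compile(r"^(?P<rel>[a-z]{2,3})\((?P<arg1>[^,]+),(?P<arg2>.+)\)$")

    def parse_node(token):
        """Split a UW token such as 'sit.@entry.@past' into the UW and its attribute labels."""
        uw, *attributes = token.strip().split(".@")
        return {"uw": uw, "attributes": attributes}

    def parse_unl(lines):
        triples = []
        for line in lines:
            match = UNL_LINE.match(line.strip())
            if match is None:
                raise ValueError("not a UNL binary relation: %r" % line)
            triples.append((match.group("rel"),
                            parse_node(match.group("arg1")),
                            parse_node(match.group("arg2"))))
        return triples

    # The three relations given in Figure 1 for "The cat sat on the mat."
    for triple in parse_unl(["agt(sit.@entry.@past,cat.@def)",
                             "plc(sit.@entry.@past,on)",
                             "obj(on,mat.@def)"]):
        print(triple)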
145
+ {
146
+ "text": "Several semantic relationships hold between UWs, namely synonymy, antonymy, hyponymy, hypemymy and meronymy, which compose the UNL Ontology. Steady semantic valencies (such as agent and object features) can also be represented, forming the UNL Knowledge-Base. Both Ontology and Knowledge-Base aim at constraining the scope of UW labels, whenever ambiguity is to be avoided. The. UNL representation of sentence (1), for example, can be ambiguous in Romance languages, for the translation of 'cat' should make explicit the animal sex: if male, it would be \"gato\" (Portuguese and Spanish), \"gatto\" (Italian), \"chat\" (French), whereas different names would have to be used for the female cat. Instead of having a unique UW 'cat', it is thus quite feasible to have a whole structure in which 'cat' is only the hyper-ordinate option.",
147
+ "cite_spans": [],
148
+ "ref_spans": [],
149
+ "eq_spans": [],
150
+ "section": "The Universal Networking Language",
151
+ "sec_num": "3."
152
+ },
153
+ {
154
+ "text": "For the English-UNL association not to undermine the intended universality of the UW inventory, its semantic-orthograpical correspondence has to be considered rather incidental, or even. approximated. It is not always the case that extensions 6 of a UW label and of its corresponding English word coincide. The extension of the English word \"mat\", for example, does not exactly coincide with the extension of any Portuguese word, although we can find many overlaps between \"mat\" and, e.g., \"capacho\" (Portuguese). Portuguese speakers, however, would not say \"capacho\" for the ornamental dishmat, as would not English speakers use the word \"mat\" for a fawner (still \"capacho\" in Portuguese). Since each language categorizes the world in a very idiosyncratic way, it would be misleading to impose a straightforward correspondence between lexical items of two different languages. In UNL, this problem has been overcome by proposing a rather analogic lexicon, instead of a digital one. Although discrete, UWs convey continuous entities, in the sense that semantic gaps between concepts are fulfilled by the UNL Knowledge-Base, as it is shown for the UW 'mat' in Figure 2 . Granularity thus plays an important role in UNL lexical organization and brings flexibility into crosslinguistic lexical matching.",
155
+ "cite_spans": [],
156
+ "ref_spans": [
157
+ {
158
+ "start": 1159,
159
+ "end": 1167,
160
+ "text": "Figure 2",
161
+ "ref_id": null
162
+ }
163
+ ],
164
+ "eq_spans": [],
165
+ "section": "The Universal Networking Language",
166
+ "sec_num": "3."
167
+ },
168
+ {
169
+ "text": "Cf. (Frege, 1892) , extension here is used to establish the relationship between a word and the world, opposed to intension, referring to the relationship between aword and its meaning. icl Figure 2a : UNL hypergraph partial representation for the meaning denoted by the English word \"mat\"",
170
+ "cite_spans": [
171
+ {
172
+ "start": 4,
173
+ "end": 17,
174
+ "text": "(Frege, 1892)",
175
+ "ref_id": "BIBREF3"
176
+ }
177
+ ],
178
+ "ref_spans": [
179
+ {
180
+ "start": 190,
181
+ "end": 199,
182
+ "text": "Figure 2a",
183
+ "ref_id": null
184
+ }
185
+ ],
186
+ "eq_spans": [],
187
+ "section": "The Universal Networking Language",
188
+ "sec_num": "3."
189
+ },
190
+ {
191
+ "text": "\"mat\" \"mat(aoj>entity)\" \"mat(icl>event)\" \"mat(icl>frame)\" \"mat(icl>rug)\" \"mat(icl>state)\" \"mat(obi>entitv)\"",
192
+ "cite_spans": [],
193
+ "ref_spans": [],
194
+ "eq_spans": [],
195
+ "section": "The Universal Networking Language",
196
+ "sec_num": "3."
197
+ },
198
+ {
199
+ "text": "Figure 2b: UNL partial linear representation for the meaning denoted by the English word \"mat\"",
200
+ "cite_spans": [],
201
+ "ref_spans": [],
202
+ "eq_spans": [],
203
+ "section": "The Universal Networking Language",
204
+ "sec_num": "3."
205
+ },
206
+ {
207
+ "text": "While lexical representation in UNL comprises a set of universal concepts signaled by UWs, the cross-lexical level involves a set of ordered binary relations between UWs, which are the Relation Labels (RLs). RLs specification are similar to Fillmore's semantic cases (1968), with RLs corresponding to semantic-value relations linking concept-like information. There are currently 44 RLs, but this set has been continuously modified by empirical evidence of lack, or redundancy, of relations. The inventory of RLs can be divided into three parts, according to the functional aspects of the related concepts: ontological, event-like and logical relations. Ontological relations are used as UW constraints in reducing lexical granularity or avoiding ambiguity as shown above, and they help positioning UWs in a UNL lexical structure. Five different labels are used to convey ontological relations: icl (hyponymy), equ (synonymy), ant (antonymy), pof (meronymy), and fld (semantic field).",
208
+ "cite_spans": [],
209
+ "ref_spans": [],
210
+ "eq_spans": [],
211
+ "section": "The Universal Networking Language",
212
+ "sec_num": "3."
213
+ },
214
+ {
215
+ "text": "UNL depicts sentence meaning as a fact composed by either a simple or a complex event, which is considered here the starting point of a UNL representation, i.e., its minimal complete semantic unit. Event-like relations are assigned by an event external or internal structure, or by both. An event external structure has to do nearly always with time and space boundaries. It can be referred to by a set of RLs signaling the event co-occurrent meanings, such as 7 its environment (scn); starting place (pl0, finishing p!ace (pit), or, simply, place (plc); range (fmt); starting time (tmf), finishing time (tmt), or, simply, time (tim); and duration (dur). Action modifiers, such as manner (man) and method (met) can also qualify this structure. An event internal structure is associated to one of the following simple frames: action, activity, movement, state, and process, each expressing different RLs in the event itself, including its actors and circumstances.",
216
+ "cite_spans": [],
217
+ "ref_spans": [],
218
+ "eq_spans": [],
219
+ "section": "The Universal Networking Language",
220
+ "sec_num": "3."
221
+ },
222
+ {
223
+ "text": "Event actors are any animate or inanimate character playing any role in events, which can be the main or the coadjutant actors. There can be up to eight actors, signaled by the following RLs: agent (agt), co-agent (cag), object (obj), co-object (cob), object place (opl), beneficiary (ben), partner (ptn) and instrument (ins). They can also be coordinated through the RLs conjunction (and) and disjunction (or), or subordinated to each other by possession (pos), content (cnt), naming (nam), comparison (bas), proportion (per), and modification (mod). They can still be quantified (qua) or qualified by the RLs \"property attribution\" (aoj) and co-attribution (cao). It is possible to refer to an \"initial actor\" (src), a \"final actor\" (gol), or an \"intermediary actor\" (via). Finally, spatial relationships can also hold between actors: current place (plc), origin (firm), destination (to), and path (via). Besides single events, there can still be complex cross-event relationships which express either paralleled events -co-occurrence (coo), conjunction (and), and disjunction (or) -or hierarchically posed events -purpose (pur), reason (rsn), condition (con), and sequence (seq). They can all be referred to as logical relations, since they are often isomorphic to first-order logic predicates.",
224
+ "cite_spans": [],
225
+ "ref_spans": [],
226
+ "eq_spans": [],
227
+ "section": "The Universal Networking Language",
228
+ "sec_num": "3."
229
+ },
230
+ {
231
+ "text": "According to the UNL authors, it is possible to codify any sentence written in any NL into a corresponding UNL text expressing the sentence meaning through the use of the above RLs. This is still a claim to be verified, since cases of superposition and competition between different RLs have been observed, as it is discussed in Section 5.",
232
+ "cite_spans": [],
233
+ "ref_spans": [],
234
+ "eq_spans": [],
235
+ "section": "The Universal Networking Language",
236
+ "sec_num": "3."
237
+ },
238
+ {
239
+ "text": "In addition to UWs and RLs, UNL makes use of predicate-like information, or Attribute Labels (ALs), which are names for event and concept \"transformations\", in a sense very close to that intended by Chomsky (1957 Chomsky ( , 1965 . They are not explicitly represented in a UNL hypergraph, although they are used to modify its nodes. ALs can convey information about concept intensions and extensions. In the former case, ALs name information about utterers' intensions over either specific parts of a sentence (focus, topic, emphasis, theme) or the whole structure (exclamation, interrogation, invitation, recommendation, obligation, etc.). In the latter case, ALs refer to spatial (definite, indefinite, generic, plural) or temporal (past, present, future)information, or still, temporal external (begin-soon, beginjust, end-soon, end-just) or intemal (perfecfive, progressive, imperfective, iterative) structures. To differentiate ALs from UWs, ALs are attached to UWs by the symbol \".@\". The cOncept expressed by the UW 'sit' in \"sit. @entry. @past\", for example, is taken as the starting point (. @entry) of the corresponding hypergraph and it is to be modified by temporal information (. @past).",
240
+ "cite_spans": [
241
+ {
242
+ "start": 199,
243
+ "end": 212,
244
+ "text": "Chomsky (1957",
245
+ "ref_id": "BIBREF0"
246
+ },
247
+ {
248
+ "start": 213,
249
+ "end": 229,
250
+ "text": "Chomsky ( , 1965",
251
+ "ref_id": "BIBREF1"
252
+ }
253
+ ],
254
+ "ref_spans": [],
255
+ "eq_spans": [],
256
+ "section": "The Universal Networking Language",
257
+ "sec_num": "3."
258
+ },
259
+ {
260
+ "text": "The UNL system architecture consists of two main processes, the encoder and decoder, and several linguistic resources, each group of these corresponding to a NL embedded in the system, as depicted in \u2022 s~tCNL e language-to-~ grammar J Figure 3 : The UNL System Architecture A source document (SLD) conveys written text on any subject, in any of the NLs considered. There is no constraint in the domain or structure of the SLD, but there is necessarily a loss of semantic expressiveness during NL-UNL encoding. The goal of the UNL is not, in principle, to fully preserve text meaning, but only its main components, i.e., those considered to be essential. However, there is no measurable account as to what is essential in the UNL Project. By convention, this is linked to what has been called the literal meaning, whi.ch is directly derived from interpreting the sentence surface structure. Therefore, there is no room to represent content that is not directly mapped onto the NL syntactic-semantic licensed structures.",
261
+ "cite_spans": [],
262
+ "ref_spans": [
263
+ {
264
+ "start": 235,
265
+ "end": 243,
266
+ "text": "Figure 3",
267
+ "ref_id": null
268
+ }
269
+ ],
270
+ "eq_spans": [],
271
+ "section": "The UNL System",
272
+ "sec_num": "4."
273
+ },
274
+ {
275
+ "text": "The NL-UNL encoding tool, or UNL Encoder, is generic enough to handle all the languages included in the Project. Apart from the (supposedly) universal knowledge-base, used to fill-in possible interlexical gaps when mapping is not precise, all other linguistic resources are language-dependent. The source grammar essentially guides the elicitation of the sentence semantic structure into its corresponding UNL structure, by determining RLs and ALs, always giving priority to information content.",
276
+ "cite_spans": [],
277
+ "ref_spans": [],
278
+ "eq_spans": [],
279
+ "section": "The UNL System",
280
+ "sec_num": "4."
281
+ },
282
+ {
283
+ "text": "The UNL-NL decoding tool, or UNL Decoder, works in the opposite way to the Encoder. Besides the lexicon and the grammar, a cooccurrence dictionary is also used at this stage, to disentangle lexical choice. The target grammar is responsible for the semantic-syntactic mapping, now resolving semantic organization by making syntactic and dependence choices between UWs, taking RLs and ALs into account.",
284
+ "cite_spans": [],
285
+ "ref_spans": [],
286
+ "eq_spans": [],
287
+ "section": "The UNL System",
288
+ "sec_num": "4."
289
+ },
290
+ {
291
+ "text": "The main strength of the UNL Project rests on human expertise: language-specific aspects to be included in the multilingual KBMT system are handled by native speakers of that language, in an attempt to overcome the need of representing knowledge across several languages or cultures. It has been successful in developing NL-driven resources and processes by researchers all around the world. For example, the BP UNL lexicon has over 65,000 entries that are categorized according to grammatical and some semantic features, and this will be extended considerably in the future to cover the Portuguese vocabulary to a greater extent. Up to the present time, only decoding systems customized to each NL have been plugged into a general decoder skeleton (provided by the UNL Center) and have already been assessed, producing promising results. The BP decoder, for example, is able to produce outputs whose literal meaning is preserved in most cases (Martins et al., 1998b) , using handcoded UNL expressions. Actually, to decode any UNL text, NL-UNL encoding has to be handmade, since customization of the UNL Encoder to each NL has not yet been undertaken in the project. In spite of the promising decoding results, a) output quality varies enormously with UNL sentences encoding, which can be different across distinct research groups; b) communicative aspects of information exchange on the web are not explored in depth, as it can be seen through the list of RLs or ALs. UNL is not knowledge intensive and there are no guidelines as to consistently recognize or extract such kind of information from the surface of the source texts.",
292
+ "cite_spans": [
293
+ {
294
+ "start": 944,
295
+ "end": 967,
296
+ "text": "(Martins et al., 1998b)",
297
+ "ref_id": "BIBREF5"
298
+ }
299
+ ],
300
+ "ref_spans": [],
301
+ "eq_spans": [],
302
+ "section": "Remarks on language-independence",
303
+ "sec_num": "5."
304
+ },
305
+ {
306
+ "text": "There are several reasons why interpretation and use of the UNL among the various teams are not uniform, including cultural aspects and syntax differences of the languages involved. Using English as the lingua franca for communication and cooperation among the research groups and as the knowledge representation language has also brought limitations into the Project, since it implies a non-desirable level of language-dependence. This is inevitable, however, for limitations definitely come along with the choice made. For example, attaching a NL word to a UW may be difficult, owing to the cross-references introduced by using English to convey UNL symbols. Resuming the example shown in Figure 1 , this is the case of the UW \"on\" in (lb): the preposition 'on' fills in the position feature of the verb 'sit' and, thus, is represented in UNL correspondingly as the second term of the binary relation 'plc' and the first term of 'obj'. This, undoubtedly, is critical, for 'sit' can be juxtaposed to other prepositions leading to different meanings, which, in turn, may introduce different sets of binary relations, implying a high-level complexity in the UNL representation. As a result, languages whose syntactic structures deeply differ from the English ones may present an additional level of complexity that makes mapping to/from UNL impossible or unrealistic. In this respect, we have not been facing many problems in fitting Portuguese structures with UNL ones, since Portuguese, like English, is an inflectional language that also employs prepositional constructions. However, prepositions in Portuguese may play considerably different roles compared to English. Various extensions of the English spatial prepositions \"on\", \"over\" and \"above\", for example, are subsumed in Portuguese by a single form \"sobre\" (which may also mean ..about). Therefore, in Portuguese, cats could be, at the same time, not only \"on\" but also \"over\" and \"above\" mats. Only world knowledge, associated to contextual indexes, both absent in the referred UNL hypergraph, could avoid the unsuited encodings The cat sat over the mat. or The cat sat above the mat. from the Portuguese sentence \"O gato sentou sobre o tapete\".",
307
+ "cite_spans": [],
308
+ "ref_spans": [
309
+ {
310
+ "start": 691,
311
+ "end": 699,
312
+ "text": "Figure 1",
313
+ "ref_id": null
314
+ }
315
+ ],
316
+ "eq_spans": [],
317
+ "section": "Remarks on language-independence",
318
+ "sec_num": "5."
319
+ },
320
+ {
321
+ "text": "Another problem related to the sentence The cat sat on the mat. refers to the existence of competing analyses: it is quite plausible that a UNL representation suggesting a noun phrase instead of a full sentence holds for this sentence. It so happens when the arc between 'sitting' and 'cat' concepts are labeled by the RL 'obj', instead of the RL 'agt' in (1), as it is shown in Figure 1 obj(sit. @entry. @past,cat. @def) plc(sit. @ entry. @ past,on) obj(on,mat. @def) Both analyses are equally accurate and can lead to good NL surface expressions, although they refer to different semantic facts. Indeed, to define an object relationship between \"sitting\" and \"cat\" is to say that the cat was already sat before the beginning of the event (e,g., The cat sat on the mat ate the fish.). In this case, the animal does not actually perform the action, but is conditioned to it, the main performer position being empty, thus yielding the referred noun phrase. In Figure 1 , instead, the cat on its own has taken the sitting position, therefore introducing an agent relationship. These two different semantic facts may correspond, in English, to a single surface structure. Indeed, (1) is orthographically identical to (1'). However, other languages (e.g., Portuguese) do behave differently.",
322
+ "cite_spans": [],
323
+ "ref_spans": [
324
+ {
325
+ "start": 379,
326
+ "end": 387,
327
+ "text": "Figure 1",
328
+ "ref_id": null
329
+ },
330
+ {
331
+ "start": 959,
332
+ "end": 967,
333
+ "text": "Figure 1",
334
+ "ref_id": null
335
+ }
336
+ ],
337
+ "eq_spans": [],
338
+ "section": "Remarks on language-independence",
339
+ "sec_num": "5."
340
+ },
341
+ {
342
+ "text": "Although it is also possible to have, in Portuguese, the same surface structure corresponding to both UNL representations (\"sentado no tapete\"), it is more feasible to have, for each case, completely different constructions. In the case depicted by Figure 1 , the UW \"sit\" would be associated to the verb \"sentar\" (corresponding to \"to sit\"). Thus, the generation result should be something like \"O gato sentou no tapete\" or \"O gato sentado no tapete\". On the other hand, for Figure 1 ', the same UW 'sit' would be generated in a completely different way, corresponding to the passive form of the Portuguese expression \"colocar sentado\" (to be put in a sitted position), for which there is no adequate English surface expression.",
343
+ "cite_spans": [],
344
+ "ref_spans": [
345
+ {
346
+ "start": 249,
347
+ "end": 258,
348
+ "text": "Figure 1",
349
+ "ref_id": null
350
+ },
351
+ {
352
+ "start": 477,
353
+ "end": 485,
354
+ "text": "Figure 1",
355
+ "ref_id": null
356
+ }
357
+ ],
358
+ "eq_spans": [],
359
+ "section": "Remarks on language-independence",
360
+ "sec_num": "5."
361
+ },
362
+ {
363
+ "text": "Distinguishing such situations to cope with syntactic-semantic troublesome mappings, though interesting, is a highly context-sensitive task, often surpassing sentence boundaries. UNL descriptions do not address such fine-grained level of meaning representation, being limited to meanings derived from context-free source sentences, even when context-freeness implies insufficient information. When this is not possible, UNL offers a default analysis for semantically ambiguous sentences, in which case we can say that the UNL representation is probabilistic, rather than deterministic.",
364
+ "cite_spans": [],
365
+ "ref_spans": [],
366
+ "eq_spans": [],
367
+ "section": "Remarks on language-independence",
368
+ "sec_num": "5."
369
+ },
370
+ {
371
+ "text": "The _way we believe some of UNL limitations can-be overcome and/or minimized is by designing a fully-fledged testing procedure to assess outputs of both decoder and encoder for the various languages. Since the same encoding and decoding procedures have been delivered to the UNL teams, it is possible that part of the set of rules or translation strategies of a given team may be interchangeable with another one from a different language. In this way, sharing procedures may become a warranty for common ground assessment of the varied models, in which case it may be possible to make eligible concurrent strategies equally available for the languages involved.",
372
+ "cite_spans": [],
373
+ "ref_spans": [],
374
+ "eq_spans": [],
375
+ "section": "Remarks on language-independence",
376
+ "sec_num": "5."
377
+ },
378
+ {
379
+ "text": "Concerning the UNL means to disambiguate or proceed to reference resolution or other discourse figures, most of the troublesome occurrences are enclosed in the treatment issued by specialists and, thus, they are constrained to, and handled by, at the level of native speakers use. This measure can be somewhat fruitful, provided that each signatory of the Project finds a way to trace a UNL text back onto its own NL text or viceversa, making a proper use of the UNL syntax or symbols. This, in fact, can be a good method to evaluate (de)coding: once a UNL code has been produced from any NL text, this code can be the source to decoding into the same NL, in order to compare the original NL text with the automatically generated one. Evaluation, in this case, can be carried out by the same research group responsible for both processes.",
380
+ "cite_spans": [],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "Remarks on language-independence",
384
+ "sec_num": "5."
385
+ },
386
+ {
387
+ "text": "Compared to other interlingua approaches (e.g., Mikrokosmos, Gazelle, or Kant), the UNL Project is in a much earlier stage -most of those are over 10 years old, while the UNL one is about 3 years old -but it is much more ambitious than most of the current systems under construction. For UNL is actually a front-end to a many-to-many communication system, with no constraints that are normally inherent in MT systems. Since knowledge is specified by native speakers for each NL module, grammar, semantics and world knowledge can be well founded. Its limitations, from a conceptual viewpoint, are shared by most of its counterparts, as in treating text at the sentence level only. In addition, by no means is the UNL system committed to event replication as it is the case of human translation. Automatic strategies have no psychological motivation whatsoever and are solely based upon computer efficiency principles, namely time and space.",
388
+ "cite_spans": [],
389
+ "ref_spans": [],
390
+ "eq_spans": [],
391
+ "section": "Remarks on language-independence",
392
+ "sec_num": "5."
393
+ },
394
+ {
395
+ "text": "By 'high quality' we mean 'at least allowing for readability and understandability by any user'.3 A description of both, the Project and the UNL itself, can be found in http://www.unl.ias.unu.edu/. 4 Hereafter referred to as Portuguese or by its acronym, BP.",
396
+ "cite_spans": [],
397
+ "ref_spans": [],
398
+ "eq_spans": [],
399
+ "section": "",
400
+ "sec_num": null
401
+ },
402
+ {
403
+ "text": "'sit', 'cat', 'on' and 'mat' are UWs; 'agt' (agent), 'pie' (place) and 'obj' (object) are RLs; '@def, '@entry' and '@past' are ALs.",
404
+ "cite_spans": [],
405
+ "ref_spans": [],
406
+ "eq_spans": [],
407
+ "section": "",
408
+ "sec_num": null
409
+ },
410
+ {
411
+ "text": "RLs names are bracketed.",
412
+ "cite_spans": [],
413
+ "ref_spans": [],
414
+ "eq_spans": [],
415
+ "section": "",
416
+ "sec_num": null
417
+ }
418
+ ],
419
+ "back_matter": [
420
+ {
421
+ "text": "The development of resources for Brazilian Portuguese in the UNL Project has been sponsored by the Institute of Advanced Studies of the United Nations University. The authors are also grateful to CNPq and Finep (Brazil) for the financial support and to Mr. Tadao Takahashi, the coordinator of the Brazilian branch in the UNL Project.",
422
+ "cite_spans": [],
423
+ "ref_spans": [],
424
+ "eq_spans": [],
425
+ "section": "Acknowledgments",
426
+ "sec_num": null
427
+ }
428
+ ],
429
+ "bib_entries": {
430
+ "BIBREF0": {
431
+ "ref_id": "b0",
432
+ "title": "Syntactic Structures. The Hague, Mouton",
433
+ "authors": [
434
+ {
435
+ "first": "N",
436
+ "middle": [],
437
+ "last": "Chomsky",
438
+ "suffix": ""
439
+ }
440
+ ],
441
+ "year": 1957,
442
+ "venue": "",
443
+ "volume": "",
444
+ "issue": "",
445
+ "pages": "",
446
+ "other_ids": {},
447
+ "num": null,
448
+ "urls": [],
449
+ "raw_text": "Chomsky, N. (1957). Syntactic Structures. The Hague, Mouton.",
450
+ "links": null
451
+ },
452
+ "BIBREF1": {
453
+ "ref_id": "b1",
454
+ "title": "Aspects of the Theory of Syntax",
455
+ "authors": [
456
+ {
457
+ "first": "N",
458
+ "middle": [],
459
+ "last": "Chomsky",
460
+ "suffix": ""
461
+ }
462
+ ],
463
+ "year": 1965,
464
+ "venue": "",
465
+ "volume": "",
466
+ "issue": "",
467
+ "pages": "",
468
+ "other_ids": {},
469
+ "num": null,
470
+ "urls": [],
471
+ "raw_text": "Chomsky, N. (1965). Aspects of the Theory of Syntax. MIT Press, Cambridge, MA.",
472
+ "links": null
473
+ },
474
+ "BIBREF2": {
475
+ "ref_id": "b2",
476
+ "title": "The case for case",
477
+ "authors": [
478
+ {
479
+ "first": "C",
480
+ "middle": [],
481
+ "last": "Fillmore",
482
+ "suffix": ""
483
+ }
484
+ ],
485
+ "year": 1968,
486
+ "venue": "Universals in linguistic theory",
487
+ "volume": "",
488
+ "issue": "",
489
+ "pages": "1--88",
490
+ "other_ids": {},
491
+ "num": null,
492
+ "urls": [],
493
+ "raw_text": "Fillmore, C. (1968). The case for case. In Bach, E. and Harms, R.T. (orgs.), Universals in linguistic theory, pp. 1-88. Rinehard and Winston, New York.",
494
+ "links": null
495
+ },
496
+ "BIBREF3": {
497
+ "ref_id": "b3",
498
+ "title": "On Sinn and Bedeutung",
499
+ "authors": [
500
+ {
501
+ "first": "G",
502
+ "middle": [],
503
+ "last": "Frege",
504
+ "suffix": ""
505
+ }
506
+ ],
507
+ "year": null,
508
+ "venue": "",
509
+ "volume": "",
510
+ "issue": "",
511
+ "pages": "",
512
+ "other_ids": {},
513
+ "num": null,
514
+ "urls": [],
515
+ "raw_text": "Frege, G. (1892). On Sinn and Bedeutung. In Beaney, M. (ed.), The Frege Reader. Blackwell Publishers, Malden, MA, 1997.",
516
+ "links": null
517
+ },
518
+ "BIBREF4": {
519
+ "ref_id": "b4",
520
+ "title": "As Regras Gramaticais para a Decodtfica~ao UNL-Portugu~s no Projeto UNL. Relat6rio T6cnico 67. Instituto de CiSncias Matem~iticas e da Computa~ao",
521
+ "authors": [
522
+ {
523
+ "first": "R",
524
+ "middle": [
525
+ "T"
526
+ ],
527
+ "last": "Martins",
528
+ "suffix": ""
529
+ },
530
+ {
531
+ "first": "L",
532
+ "middle": [
533
+ "H M"
534
+ ],
535
+ "last": "Rino",
536
+ "suffix": ""
537
+ },
538
+ {
539
+ "first": "M",
540
+ "middle": [
541
+ "G V"
542
+ ],
543
+ "last": "Nunes",
544
+ "suffix": ""
545
+ }
546
+ ],
547
+ "year": 1998,
548
+ "venue": "",
549
+ "volume": "",
550
+ "issue": "",
551
+ "pages": "",
552
+ "other_ids": {},
553
+ "num": null,
554
+ "urls": [],
555
+ "raw_text": "Martins, R.T., Rino, L.H.M., Nunes, M.G.V. (1998a). As Regras Gramaticais para a Decodtfica~ao UNL-Portugu~s no Projeto UNL. Relat6rio T6cnico 67. Instituto de CiSncias Matem~iticas e da Computa~ao. Universidade de S~o Paulo, Sao Carlos.",
556
+ "links": null
557
+ },
558
+ "BIBREF5": {
559
+ "ref_id": "b5",
560
+ "title": "Can the syntactic realization be detached from the syntactic analysis during generation of natural ldnguage sentences? III Encontro para o processamento computacional da lingua portuguesa escrita e falada (PROPOR'98)",
561
+ "authors": [
562
+ {
563
+ "first": "R",
564
+ "middle": [
565
+ "T"
566
+ ],
567
+ "last": "Martins",
568
+ "suffix": ""
569
+ },
570
+ {
571
+ "first": "L",
572
+ "middle": [
573
+ "H M"
574
+ ],
575
+ "last": "Rino",
576
+ "suffix": ""
577
+ },
578
+ {
579
+ "first": "M",
580
+ "middle": [
581
+ "G V"
582
+ ],
583
+ "last": "Nunes",
584
+ "suffix": ""
585
+ },
586
+ {
587
+ "first": "O",
588
+ "middle": [
589
+ "N"
590
+ ],
591
+ "last": "Oliveira",
592
+ "suffix": ""
593
+ }
594
+ ],
595
+ "year": 1998,
596
+ "venue": "",
597
+ "volume": "",
598
+ "issue": "",
599
+ "pages": "",
600
+ "other_ids": {},
601
+ "num": null,
602
+ "urls": [],
603
+ "raw_text": "Martins, R.T.; Rino, L.H.M.; Nunes, M.G.V.; Oliveira Jr., O.N. (1998b). Can the syntactic realization be detached from the syntactic analysis during generation of natural ldnguage sentences? III Encontro para o processamento computacional da lingua portuguesa escrita e falada (PROPOR'98). Porto Alegre -RS. Novembro.",
604
+ "links": null
605
+ },
606
+ "BIBREF6": {
607
+ "ref_id": "b6",
608
+ "title": "UNL: Universal Networking Language -An Electronic Language for Communication, Understanding, and Collaboration",
609
+ "authors": [
610
+ {
611
+ "first": "H",
612
+ "middle": [],
613
+ "last": "Uchida",
614
+ "suffix": ""
615
+ }
616
+ ],
617
+ "year": 1996,
618
+ "venue": "",
619
+ "volume": "",
620
+ "issue": "",
621
+ "pages": "",
622
+ "other_ids": {},
623
+ "num": null,
624
+ "urls": [],
625
+ "raw_text": "Uchida, H. (1996). UNL: Universal Networking Language -An Electronic Language for Communication, Understanding, and Collaboration.",
626
+ "links": null
627
+ }
628
+ },
629
+ "ref_entries": {
630
+ "FIGREF0": {
631
+ "uris": null,
632
+ "type_str": "figure",
633
+ "text": "UNL hypergraph representation of the English sentence \"The cat sat on the mat\"",
634
+ "num": null
635
+ },
636
+ "FIGREF1": {
637
+ "uris": null,
638
+ "type_str": "figure",
639
+ "text": "UNL linear representation of the English sentence \"The cat sat on the mat.\"",
640
+ "num": null
641
+ },
642
+ "FIGREF3": {
643
+ "uris": null,
644
+ "type_str": "figure",
645
+ "text": "a', yielding the UNL text shown in Figure lb'. o Figure la': UNL hypergraph representation of the English sentence \"The cat sat on the mat.\"",
646
+ "num": null
647
+ },
648
+ "FIGREF4": {
649
+ "uris": null,
650
+ "type_str": "figure",
651
+ "text": "lb': UNL linear representation of the English sentence \"The cat sat on the mat.\"",
652
+ "num": null
653
+ }
654
+ }
655
+ }
656
+ }
Full_text_JSON/prefixW/json/W00/W00-0205.json ADDED
@@ -0,0 +1,1260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0205",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:26.482334Z"
6
+ },
7
+ "title": "Telicity as a Cue to Temporal and Discourse Structure in Chinese-English Machine Translation*",
8
+ "authors": [
9
+ {
10
+ "first": "Mari",
11
+ "middle": [],
12
+ "last": "Olsen",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": "molsen@microsoft.com"
16
+ },
17
+ {
18
+ "first": "David",
19
+ "middle": [],
20
+ "last": "Traum",
21
+ "suffix": "",
22
+ "affiliation": {},
23
+ "email": "traum@cs.umd.edu"
24
+ },
25
+ {
26
+ "first": "Microsoft",
27
+ "middle": [
28
+ "U"
29
+ ],
30
+ "last": "Maryland",
31
+ "suffix": "",
32
+ "affiliation": {},
33
+ "email": ""
34
+ },
35
+ {
36
+ "first": "Amy",
37
+ "middle": [],
38
+ "last": "Weinberg",
39
+ "suffix": "",
40
+ "affiliation": {},
41
+ "email": "weinberg@umiacs.umd.edu"
42
+ },
43
+ {
44
+ "first": "U",
45
+ "middle": [],
46
+ "last": "Maryland",
47
+ "suffix": "",
48
+ "affiliation": {},
49
+ "email": ""
50
+ },
51
+ {
52
+ "first": "Ron",
53
+ "middle": [],
54
+ "last": "Dolan",
55
+ "suffix": "",
56
+ "affiliation": {},
57
+ "email": "rdolan@cfar.umd.edu"
58
+ }
59
+ ],
60
+ "year": "",
61
+ "venue": null,
62
+ "identifiers": {},
63
+ "abstract": "Machine translation between any two languages requires the generation of information that is implicit in the source language. In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Moreover, Chinese multiple-clause sentences may contain inter-clausal relations (temporal or otherwise) that must be explicit in English (e.g., by means of a discourse marker). Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is not present in every sentence. We report on a project to use the ]exical aspect features of (a)te]icity reflected in the Lexical Conceptual Structure of the input text to suggest tense and discourse structure in the English translation of a Chinese newspaper corpus.",
64
+ "pdf_parse": {
65
+ "paper_id": "W00-0205",
66
+ "_pdf_hash": "",
67
+ "abstract": [
68
+ {
69
+ "text": "Machine translation between any two languages requires the generation of information that is implicit in the source language. In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. Moreover, Chinese multiple-clause sentences may contain inter-clausal relations (temporal or otherwise) that must be explicit in English (e.g., by means of a discourse marker). Perfective and imperfective grammatical aspect markers can provide cues to temporal structure, but such information is not present in every sentence. We report on a project to use the ]exical aspect features of (a)te]icity reflected in the Lexical Conceptual Structure of the input text to suggest tense and discourse structure in the English translation of a Chinese newspaper corpus.",
70
+ "cite_spans": [],
71
+ "ref_spans": [],
72
+ "eq_spans": [],
73
+ "section": "Abstract",
74
+ "sec_num": null
75
+ }
76
+ ],
77
+ "body_text": [
78
+ {
79
+ "text": "It is commonly held that an appropriate interlingua must allow for the expression of argument relations in many languages. This paper advances the state of the art of designing an interlingua by showing how aspectual distinctions (telic versus atelic) can be derived from verb classifications primarily influenced by considerations of argument structure, and how these aspectual distinctions can be used to fill lexical gaps in the source language that cannot be left unspecified in the target language. Machine translation between any two languages often requires the generation of information that is implicit in the source language. In translating from Chinese to English, tense and other temporal information must be inferred from other grammatical and lexical cues. For example, Chinese verbs do not necessarily specify whether the event described is prior or cotemporaneous with the moment of speaking. While grammatical aspect information can be loosely associated with time, with imperfective aspect (Chinese ~ zaiand ~ .zhe) representing present time and perfectiv e (Chinese Tle) representing past time, (Chu, * We gratefully acknowledge DOD support for this work through contract MDA904-96-R-0738 1998; Li and Thompson, 1981) , verbs in the past do not need to have any aspect marking distinguishing them from present tense verbs. This is unlike English, which much more rigidly distinguishes past from present tense through use of suffixes. Thus, in order to generate an appropriate English sentence from its Chinese counterpart, we need to fill in a potentially unexpressed tense.",
80
+ "cite_spans": [
81
+ {
82
+ "start": 1214,
83
+ "end": 1236,
84
+ "text": "Li and Thompson, 1981)",
85
+ "ref_id": "BIBREF16"
86
+ }
87
+ ],
88
+ "ref_spans": [],
89
+ "eq_spans": [],
90
+ "section": "Introduction",
91
+ "sec_num": "1"
92
+ },
93
+ {
94
+ "text": "Moreover, Chinese multiple-clause sentences may contain implicit relations between clauses (temporal or otherwise) that must be made explicit in English. These multiple-clause sentences are often most naturally translated into English including an overt expression of their relation, e.g., the \"and\" linking the two clauses in (1), or as multiple sentences, as in (2)). This 80,000 tons actually included 517 ships. Ship tonnage was very low.",
95
+ "cite_spans": [],
96
+ "ref_spans": [],
97
+ "eq_spans": [],
98
+ "section": "Introduction",
99
+ "sec_num": "1"
100
+ },
101
+ {
102
+ "text": "In our NLP applications, we use a level of linguistic structure driven by the argument-taking properties of predicates and composed monotonically up to the sentence level. The resulting Lexical Conceptual Structures (LCS) (3ackendoff, 1983) , is a languageneutral representation of the situation (event or state), suitable for use as an interlingua, e.g., for machine translation. The LCS represents predicate argument structure abstracted away from languagespecific properties of semantics and syntax. The primitives of the interlingua provide for monotonic composition that captures both conceptual and syntactic generalities (Dorr et al., 1993) among languages. 1 The strength of the representation derives from the cross-linguistic regularities in the lexical semantics encoded in the LCS. The syntactic hierarchy (subject, object, oblique) is mirrored in the LCS hierarchy: for example THEMES are arguments of the LCS predicate, and AGENTS are arguments of the theme-predicate composition. Syntactic divergences (whether the object precedes or follows the verb, for example) are represented in language specific linearization rules; lexical divergences (whether the location argument is encoded directly in the verb, e.g. the English verb pocket or must be saturated by an exterfial argument) are stated in terms of the pieces of LCS struct-ure in the lexicon. SententiM representations derive from saturating the arguments required by the predicates in the sentence.",
103
+ "cite_spans": [
104
+ {
105
+ "start": 216,
106
+ "end": 240,
107
+ "text": "(LCS) (3ackendoff, 1983)",
108
+ "ref_id": null
109
+ },
110
+ {
111
+ "start": 628,
112
+ "end": 647,
113
+ "text": "(Dorr et al., 1993)",
114
+ "ref_id": "BIBREF7"
115
+ }
116
+ ],
117
+ "ref_spans": [],
118
+ "eq_spans": [],
119
+ "section": "Introduction",
120
+ "sec_num": "1"
121
+ },
122
+ {
123
+ "text": "LCS rePresentations also include temporal information, where available in the source language: recent revisions include, for example (Dorr and Olsen, 1997a) standardizing LCS representations for the aspectual (un)boundedness ((A)TELICITY) of events, either lexically or sententially represented. Although at present the LCS encodes no supra-sentential discourse relations, we show how the lexical aspect information may be used to generate discourse coherence in temporal structure. Relations between clauses as constrained by temporal reference has been examined in an LCS framework by Dorr and Gaasterland (Dorr and Gaasterland, 1995) . They explore how temporal connectives are constrained in interpretation, based on the tense of the clauses they connect. While overt temporal connectives are helpful when they appear, our corpus contains many sentences with neither tense markers nor tense connectives. We must therefore look to a new source of information. We rely on the lexical information of the verbs within a sentence to generate both tense and temporal connectives.",
124
+ "cite_spans": [
125
+ {
126
+ "start": 133,
127
+ "end": 156,
128
+ "text": "(Dorr and Olsen, 1997a)",
129
+ "ref_id": "BIBREF3"
130
+ },
131
+ {
132
+ "start": 587,
133
+ "end": 636,
134
+ "text": "Dorr and Gaasterland (Dorr and Gaasterland, 1995)",
135
+ "ref_id": "BIBREF2"
136
+ }
137
+ ],
138
+ "ref_spans": [],
139
+ "eq_spans": [],
140
+ "section": "Introduction",
141
+ "sec_num": "1"
142
+ },
143
+ {
144
+ "text": "Straightforward LCS analysis of many of the multi-clause sentences in our corpus leads to violations of the wellformedness conditions, which prevent structures with events or states directly modifying other events or states. LCS, as previously conceived, prohibits an event or state from standing in a modifier relationship to another event or state, without mediation of a path or position (i.e., as lexically realized by a preposition). This restriction reflects the insight that (at least in English) when events and states modify each other, the modification is either implicit, with the relevant events and states in separate sentences (and hence separate LCSs), as in the first sentence below, or explicit in a single sentence, as in the second sentence below. Implicit event-state modification (sentence 3) is prohibited. It is exactly these third type that are permitted in standard Chinese and robustly attested in our data. If the LCS is to be truly an interlingua, we must extend the representation to allow these kinds of sentences to be processed. One possibility is to posit an implicit position connecting the situations described by the multiple clauses. In the source language analysis phase, this would amount to positing a disjunction of all possible position relations implicitly realizable in this language. Another option is to relax the wellformedness constraints to allow an event to directly modify another event. This not only fails to recognize the regularities we see in English (and other language) LCS structures, for Chinese it merely pushes the problem back one step, as the set of implicitly realizable relations may vary from language to language and may result in some ungrammatical or misleading translations. The second option can be augmented, however, by factoring out of the interlingua (and into the generation code) language-specific principles for generating connectives using information in the LCS-structure, proper. For the present, this is the approach we take, using lexical aspectual information, as read from the LCS structure, to generate appropriate temporal relations.",
145
+ "cite_spans": [],
146
+ "ref_spans": [],
147
+ "eq_spans": [],
148
+ "section": "Introduction",
149
+ "sec_num": "1"
150
+ },
151
+ {
152
+ "text": "Therefore not only tense, but inter-sentential discourse relations must be considered when generating English from Chinese, even at the sentence level. We report on a project to generate both temporal and discourse relations using the LCS representation. In particular, we focus on the encoding of the lexical aspect feature TELICITY and its complement ATELIG-ITY to generate past and present tense, and corresponding temporal relations for modifying clauses within sentences. While we cannot at present directly capture discourse relations, we can garner aspectual class from LCS verb classification, which in turn can be used to predict the appropriate tense for translations of Chinese verbs into English.",
153
+ "cite_spans": [],
154
+ "ref_spans": [],
155
+ "eq_spans": [],
156
+ "section": "Introduction",
157
+ "sec_num": "1"
158
+ },
159
+ {
160
+ "text": "We begin with a discussion of aspectual features of sentences, and how this information can be used to provide information about the time of the situations presented in a sentence. Such information can be used to help provide clues as to both tense and relationships (and cue words) between connected situations. Aspectual features can be divided into grammatical aspect, which is indicated by lexical or morphological markers in a sentence, and lexical aspect, which is inherent in the meanings of words.",
161
+ "cite_spans": [],
162
+ "ref_spans": [],
163
+ "eq_spans": [],
164
+ "section": "Use of Aspect to Provide Temporal Information",
165
+ "sec_num": "2"
166
+ },
167
+ {
168
+ "text": "Grammatical aspect provides a viewpoint on situation (event or state) structure (Smith, 1997) . Since imperfective aspect, such as the English PROGRES-SIVE construction be VERB-ing, views a situation from within, it is often associated with present or contemporaneous time reference. On the other hand, perfective aspect, such as the English have VERB-ed, Views a situation as a whole; it is therefore often associated with past time reference ((Comrie, 1976; Olsen, 1997; Smith, 1997 ) cf. (Chu, 1998 ). The temporal relations are tendencies, rather than an absolute correlation: although the perfective is found more frequently in past tenses (Comrie, 1976) , both imperfective and perfective cooccur in some language with past, present, and future tense. In some cases, an English verb will specify tense and/or aspect for a complement. For example, continue requires either an infinitive 3 While grammatical aspect and overt temporal cues are clearly helpful in translation, there are many cases in our corpus in which such cues are not present. These are the hard cases, where we must infer tense or grammatical aspectual marking in the target language from a source that looks like it provides no overt cues. We will show however, that Chinese does provide implicit cues through its lexical aspect classes. First, we review what lexical aspect is. Lexical aspect refers to the type of situation denoted by the verb, alone or combined with other sentential constituents. Verbs are assigned to lexical aspect classes based on their behavior in a variety of syntactic and semantic frames that focus on three aspectual features: telicity, dynamicity and durativity. We focus on telicity, also known as BOUNDEDNESS. Verbs that are telic have an inherent end: winning, for example, ends with the finish line. Verbs that are atelic do not name their end: running could end with a distance run a mile or an endpoint run to the store, for example. Olsen (Olsen, 1997) proposed that aspectual interpretation be derived through monotonic composition of marked privative features [\u00f7/0 dynamic], [.4-/0 durative] and [-t-/0 telic], as shown in Table 1 (Olsen, 1997, pp. 32-33) .",
169
+ "cite_spans": [
170
+ {
171
+ "start": 80,
172
+ "end": 93,
173
+ "text": "(Smith, 1997)",
174
+ "ref_id": null
175
+ },
176
+ {
177
+ "start": 444,
178
+ "end": 459,
179
+ "text": "((Comrie, 1976;",
180
+ "ref_id": "BIBREF1"
181
+ },
182
+ {
183
+ "start": 460,
184
+ "end": 472,
185
+ "text": "Olsen, 1997;",
186
+ "ref_id": "BIBREF19"
187
+ },
188
+ {
189
+ "start": 473,
190
+ "end": 484,
191
+ "text": "Smith, 1997",
192
+ "ref_id": null
193
+ },
194
+ {
195
+ "start": 485,
196
+ "end": 501,
197
+ "text": ") cf. (Chu, 1998",
198
+ "ref_id": null
199
+ },
200
+ {
201
+ "start": 645,
202
+ "end": 659,
203
+ "text": "(Comrie, 1976)",
204
+ "ref_id": "BIBREF1"
205
+ },
206
+ {
207
+ "start": 1951,
208
+ "end": 1964,
209
+ "text": "(Olsen, 1997)",
210
+ "ref_id": "BIBREF19"
211
+ },
212
+ {
213
+ "start": 2145,
214
+ "end": 2169,
215
+ "text": "(Olsen, 1997, pp. 32-33)",
216
+ "ref_id": null
217
+ }
218
+ ],
219
+ "ref_spans": [
220
+ {
221
+ "start": 2137,
222
+ "end": 2144,
223
+ "text": "Table 1",
224
+ "ref_id": "TABREF2"
225
+ }
226
+ ],
227
+ "eq_spans": [],
228
+ "section": "Grammatical aspect",
229
+ "sec_num": "2.1"
230
+ },
231
+ {
232
+ "text": "With privative features, other sentential constituents can add to features provided by the verb but not remove them. On this analysis, the [.-I-durative, +dynamic] features of run propagate to the sentence level in run ~o the store; the [\u00f7telic] feature is added by the NP or PP, yielding an accomplishment interpretation. The feature specification of this \u00a2ompositionally derived accomplishment is therefore identical to that of a sentence containing a telic accomplishment verb, such as destroy.",
233
+ "cite_spans": [],
234
+ "ref_spans": [],
235
+ "eq_spans": [],
236
+ "section": "Grammatical aspect",
237
+ "sec_num": "2.1"
238
+ },
239
+ {
240
+ "text": "According to many researchers, knowledge of lexical aspect--how verbs denote situations as developing or holding in time-=may be used to interpret event sequences in discourse (Dowty, 1986; Moens and Steedman, 1988; Passoneau, 1988) . In particular, Dowty suggests that, absent other cues, a relic event is interpreted as completed before the next event or state, as with ran into lhe room in 4a; in contrast, atelic situations, such as run, was hungry in 4b and 4% are interpreted as contemporaneous with the following situations: fell and made a pizza, respectively. Also, these tendencies are heuristic, and not absolute, as shown by the examples in (5). While we get the expected prediction that the jumping occurs after the explosion in (5)(a), we get the reverse prediction in (5)(b). Other factors such as consequences of described situations, discourse context, and stereotypical causal relationships also play a role. (Dowty, 1979; Guerssel et al., 1985) --an augmented form of (Jackendoff, 1983; Jackendoff, 1990 )--permits lexical aspect information to be read directly off the lexical entries for individual verbs, as well-as composed representations for sentences, using uniform processes and representations. The LCS framework consists of primitives (GO, BE, STAY, etc.), types (Event, State, Path, etc.) and fields (Loc(ational), Temp(oral), Foss(essional), Ident(ificational), Perc(eptual), etc.).",
241
+ "cite_spans": [
242
+ {
243
+ "start": 176,
244
+ "end": 189,
245
+ "text": "(Dowty, 1986;",
246
+ "ref_id": "BIBREF9"
247
+ },
248
+ {
249
+ "start": 190,
250
+ "end": 215,
251
+ "text": "Moens and Steedman, 1988;",
252
+ "ref_id": null
253
+ },
254
+ {
255
+ "start": 216,
256
+ "end": 232,
257
+ "text": "Passoneau, 1988)",
258
+ "ref_id": "BIBREF20"
259
+ },
260
+ {
261
+ "start": 927,
262
+ "end": 940,
263
+ "text": "(Dowty, 1979;",
264
+ "ref_id": "BIBREF8"
265
+ },
266
+ {
267
+ "start": 941,
268
+ "end": 963,
269
+ "text": "Guerssel et al., 1985)",
270
+ "ref_id": "BIBREF10"
271
+ },
272
+ {
273
+ "start": 987,
274
+ "end": 1005,
275
+ "text": "(Jackendoff, 1983;",
276
+ "ref_id": "BIBREF10"
277
+ },
278
+ {
279
+ "start": 1006,
280
+ "end": 1022,
281
+ "text": "Jackendoff, 1990",
282
+ "ref_id": "BIBREF11"
283
+ }
284
+ ],
285
+ "ref_spans": [],
286
+ "eq_spans": [],
287
+ "section": "Grammatical aspect",
288
+ "sec_num": "2.1"
289
+ },
290
+ {
291
+ "text": "We adopt a refinement of the LCS representation, incorporating meaning components from the linguistically motivated notion of !ezical semantic template (LST), based on lexical aspect classes, as defined in the work of Levin and Rappaport Hovav (Levin and Rappaport Hovav, 1995; Rappaport lttovav and Levin, 1995) . Verbs that appear in multiple aspectual frames appear in multiple pairings between constants (representing the idiosyncratic meaning of the verb) and structures (the aspectual class). Since the aspectual templates may be realized in a variety of ways, other aspects of the structural meaning contribute to differentiating the verbs from each other. Our current database contains some 400 classes, based on an initial representation of the 213 classes in (Levin, 1993) . Our current working lexicon includes about 10,000 English verbs and 18,000 Chinese verbs spread out into these classes.",
292
+ "cite_spans": [
293
+ {
294
+ "start": 244,
295
+ "end": 277,
296
+ "text": "(Levin and Rappaport Hovav, 1995;",
297
+ "ref_id": "BIBREF14"
298
+ },
299
+ {
300
+ "start": 278,
301
+ "end": 312,
302
+ "text": "Rappaport lttovav and Levin, 1995)",
303
+ "ref_id": null
304
+ },
305
+ {
306
+ "start": 769,
307
+ "end": 782,
308
+ "text": "(Levin, 1993)",
309
+ "ref_id": "BIBREF15"
310
+ }
311
+ ],
312
+ "ref_spans": [],
313
+ "eq_spans": [],
314
+ "section": "Grammatical aspect",
315
+ "sec_num": "2.1"
316
+ },
317
+ {
318
+ "text": "Telic verbs (and sentences) contain certain types of Paths, or a constant, represented by ! !, filled by the verb constant, in the right most leaf-node argument. Some examples are shown below: depart (go foe (* thing 2) (away_from loc (thing 2) (at foe (thing 2) (* thing 4))) (!!+ingly 26)) insert (cause (* thing 1) (go loc (* thing 2) ((* toward 5) loc (thing 2) ([at] loc (thing 2) (thing 6)))) (! !+ingly 26))",
319
+ "cite_spans": [],
320
+ "ref_spans": [],
321
+ "eq_spans": [],
322
+ "section": "Grammatical aspect",
323
+ "sec_num": "2.1"
324
+ },
325
+ {
326
+ "text": "Each of these relic verbs has a potential counterpart with an atelic verb plus the requisite path. Depart, for example, corresponds to move away, or something similar in another language.",
327
+ "cite_spans": [],
328
+ "ref_spans": [],
329
+ "eq_spans": [],
330
+ "section": "Grammatical aspect",
331
+ "sec_num": "2.1"
332
+ },
333
+ {
334
+ "text": "We therefore identify telic sentences by the algorithm, formally specified in in Figure 1 (cf. (Dorr and Olsen, 1997b) This algorithm applies to the structural primitives of the interlingua structure rather than actual verbs in source or target language. The first step initialized the aspectual values as unspecified: atelic f-T], stative (not event: f-D]), and adurative f-R]. First the top node is examined for primitives that indicate telicity: if the top node is CAUSE, LET, GO, telicity is set to [+T], as with the verbs break, destroy, for example. (The node is further checked for dynamicity [+D] and durativity [+R] indicators, not in focus in this paper.)If the top node is not a relic indicator (i.e., the verb is a basically atelic predicate such as love or run, telicity may still be still be indicated by the presence of complement nodes of particular types: e.g. a goal phrase (to primitive) in the case of run. The same algorithm may be used to determine felicity in either individual verbal entries (break but not run) or composed sentences (John ran to ~he store but not John ran.",
335
+ "cite_spans": [
336
+ {
337
+ "start": 95,
338
+ "end": 118,
339
+ "text": "(Dorr and Olsen, 1997b)",
340
+ "ref_id": "BIBREF4"
341
+ }
342
+ ],
343
+ "ref_spans": [
344
+ {
345
+ "start": 81,
346
+ "end": 89,
347
+ "text": "Figure 1",
348
+ "ref_id": "FIGREF5"
349
+ }
350
+ ],
351
+ "eq_spans": [],
352
+ "section": "Grammatical aspect",
353
+ "sec_num": "2.1"
354
+ },
355
+ {
356
+ "text": "Similar mismatches of telicity between representations of particular predicates can occur between languages, although there is remarkable agreement as to the set of templates that verbs with related meanings will fit into (Olsen et al., 1998) . In the Chinese-English interlingual system we describe, the Chinese is first mapped into the LCS, a languageindependent representation, from which the targetlanguage sentence is generated. Since telicity (and other aspects of event structure) are uniformly represented at the lexical and the sentential level, telicity mismatches between verbs of different languages may then be compensated for by combining verbs with other .components.",
357
+ "cite_spans": [
358
+ {
359
+ "start": 222,
360
+ "end": 242,
361
+ "text": "(Olsen et al., 1998)",
362
+ "ref_id": "BIBREF18"
363
+ }
364
+ ],
365
+ "ref_spans": [],
366
+ "eq_spans": [],
367
+ "section": "Return T(L), D(L), R(L).",
368
+ "sec_num": "4."
369
+ },
370
+ {
371
+ "text": ".o",
372
+ "cite_spans": [],
373
+ "ref_spans": [],
374
+ "eq_spans": [],
375
+ "section": "Return T(L), D(L), R(L).",
376
+ "sec_num": "4."
377
+ },
378
+ {
379
+ "text": "Based on (Dowty, 1986) and others, as discussed above, we predict that sentences that have a telic LCS will better translate into English as the past tense, and those that lack telic identifiers will translate as present tense. Moreover, we predict that verbs in the main clause that are telic, will be past with respect to their subordinates (X then Y). Verbs in the main clause that are atelic we predict will temporally overlap (X while Y).",
380
+ "cite_spans": [
381
+ {
382
+ "start": 9,
383
+ "end": 22,
384
+ "text": "(Dowty, 1986)",
385
+ "ref_id": "BIBREF9"
386
+ }
387
+ ],
388
+ "ref_spans": [],
389
+ "eq_spans": [],
390
+ "section": "Predictions",
391
+ "sec_num": "4"
392
+ },
393
+ {
394
+ "text": "LCSes are used as the interlingua for our machine translation efforts. Following the principles in (Dorr, 1993) , lexical information and constraints on wellformed LCSes are used to compose an LCS for a complete sentence from a sentence parse in a source language. This composed LCS (CLCS) is then used as the starting points for generation into the target language, using lexical information and constraints for the target language.",
395
+ "cite_spans": [
396
+ {
397
+ "start": 99,
398
+ "end": 111,
399
+ "text": "(Dorr, 1993)",
400
+ "ref_id": "BIBREF7"
401
+ }
402
+ ],
403
+ "ref_spans": [],
404
+ "eq_spans": [],
405
+ "section": "Implementation",
406
+ "sec_num": "5"
407
+ },
408
+ {
409
+ "text": "The generation component consists of the following subcomponents:",
410
+ "cite_spans": [],
411
+ "ref_spans": [],
412
+ "eq_spans": [],
413
+ "section": "Implementation",
414
+ "sec_num": "5"
415
+ },
416
+ {
417
+ "text": "Decomposition and lexlcal selection First, primitive LCSes for words in the target language are matched against CLCSes, and tree structures of covering words are selected. Ambiguity in the input and analysis represented in the CLCS is maintained (insofar as it is possible to realize particular readings using the target language lexicon), and new ambiguities are introduced when there are different ways of realizing a CLCS in the target language.",
418
+ "cite_spans": [],
419
+ "ref_spans": [],
420
+ "eq_spans": [],
421
+ "section": "Implementation",
422
+ "sec_num": "5"
423
+ },
424
+ {
425
+ "text": "AMR Construction This tree structure is then translated into a representation using the Augmented Meaning Representation (AMR) syntax \u2022 of instances and hierarchical relations (Langkfide and Knight, 1998a) ; however the relations include information present in the CLCS and LCSes for target language words, including theta roles, LCS type, and associated features.",
426
+ "cite_spans": [
427
+ {
428
+ "start": 176,
429
+ "end": 205,
430
+ "text": "(Langkfide and Knight, 1998a)",
431
+ "ref_id": null
432
+ }
433
+ ],
434
+ "ref_spans": [],
435
+ "eq_spans": [],
436
+ "section": "Implementation",
437
+ "sec_num": "5"
438
+ },
439
+ {
440
+ "text": "Realization The AMR structure is then linearized, as described in , and morphological realization is performed. The result is a lattice of possible realizations, representing both the preserved ambiguity from previous processing phases and multiple ways of linearizing the sentence.",
441
+ "cite_spans": [],
442
+ "ref_spans": [],
443
+ "eq_spans": [],
444
+ "section": "Implementation",
445
+ "sec_num": "5"
446
+ },
447
+ {
448
+ "text": "Extraction The final stage uses a statistical bigram extractor to pick an approximation of the most fluentrealization (Langkilde and Knight, 1998b) .",
449
+ "cite_spans": [
450
+ {
451
+ "start": 118,
452
+ "end": 147,
453
+ "text": "(Langkilde and Knight, 1998b)",
454
+ "ref_id": "BIBREF13"
455
+ }
456
+ ],
457
+ "ref_spans": [],
458
+ "eq_spans": [],
459
+ "section": "Implementation",
460
+ "sec_num": "5"
461
+ },
462
+ {
463
+ "text": "While there are several possible ways to address the tense and discourse connective issues mentioned above, such as modifying the LCS primitive elements and/or the composition of the LCS from the source language, we instead have been experimenting for the moment with solutions implemented within the generation component. The only extensions to the LCS language have been loosening of the constraint against direct modification of states and events by other states and events (thus allowing composed LC-Ses to be formed from Chinese with these structures, but creating a challenge for fluent generation into English), and a few added features to cover some of the discourse markers that are present. We are able to calculate telicity of a CLCS, using the algorithm in Figure 1 and encode this information as a binary teli\u00a2 feature in the Augmented Meaning Representation (AMR).",
464
+ "cite_spans": [],
465
+ "ref_spans": [
466
+ {
467
+ "start": 769,
468
+ "end": 777,
469
+ "text": "Figure 1",
470
+ "ref_id": "FIGREF5"
471
+ }
472
+ ],
473
+ "eq_spans": [],
474
+ "section": "Implementation",
475
+ "sec_num": "5"
476
+ },
477
+ {
478
+ "text": "The realization algorithm has been augmented with the rules in (6) (6) a. If there is no tense feature, use telicity to determine the tense: \u2022 abbreviations are used extensively in Chinese newspaper headlines",
479
+ "cite_spans": [],
480
+ "ref_spans": [],
481
+ "eq_spans": [],
482
+ "section": "Implementation",
483
+ "sec_num": "5"
484
+ },
485
+ {
486
+ "text": "However, the presence of multiple events and states in a single sentence, without explicit modificatioia is characteristic of written Chinese in general. In the 80-sentence corpus under consideration, the sentence structure is complex and stylized; with an average of 20 words per sentence. Many sentences, such as (1)and 2, have multiple clauses that are not in a direct complement relationship or indicated with explicit connective words.",
487
+ "cite_spans": [],
488
+ "ref_spans": [],
489
+ "eq_spans": [],
490
+ "section": "Implementation",
491
+ "sec_num": "5"
492
+ },
493
+ {
494
+ "text": "To evaluate the extent to which our Predictions result in an improvement in translation, we have used a database of human translations of the sentences in our corpus as the ground truth, or gold standard. One of the translators is included among our authors.",
495
+ "cite_spans": [],
496
+ "ref_spans": [],
497
+ "eq_spans": [],
498
+ "section": "Ground Truth",
499
+ "sec_num": "7"
500
+ },
501
+ {
502
+ "text": "The ground truth data was created to provide a fluid human translation of the text early in our system development. It therefore includes many complex tenses and multiple sentences combined, both currently beyond the state of our system. Thus, two of the authors and an additional researcher also created a database of temporal relations among the clauses in the sentences that produced illegal event/state modifications. This was used to test predictions of temporal relationships indicated by telicity. In evaluating our results, we concentrate on how well the System did at matching past and present, and on the appropriateness of temporal connectives generated.",
503
+ "cite_spans": [],
504
+ "ref_spans": [],
505
+ "eq_spans": [],
506
+ "section": "Ground Truth",
507
+ "sec_num": "7"
508
+ },
509
+ {
510
+ "text": "We have applied the rules in (6) in generating 80 sentences in the corpus (starting from often ambiguous CLCS analyses). Evaluation is still tricky, since, in many cases, the interlingua analysis is incorrect or ambiguous in ways that affect the appropriateness of the generated translation.",
511
+ "cite_spans": [],
512
+ "ref_spans": [],
513
+ "eq_spans": [],
514
+ "section": "Results",
515
+ "sec_num": "8"
516
+ },
517
+ {
518
+ "text": "As mentioned above, evaluation can be very difficult in a number of cases. Concerning tense, our \"gold standard\" is the set of human translations, generated tense past present human past 134 17 translation present 17 27 Table 2 : Preliminary Tense Results previously constructed for these sentences. In many cases, there is nothing overt in the sentence which would specify tense, so a mismatch might not actually be \"wrong\". Also, there are a number of sentences which were not directly applicable for comparison, such as when the human translator chose a different syntactic structure or a complex tense. The newspaper articles were divided into 80 sentences. Since some of these sentences were conjunctions, this yielded 99 tensed main verbs. These verbs either appeared in simple present, past, present or past perfect('has or had verb-t-ed), present or past imperfective (is verb-l-lag , was verb--I--lag) and their corresponding passive (is being kicked, was being kicked, have been kicked) forms. For cases like the present perfect ('has kicked), we noted the intended meaning ( e.g past activity) expressed by the verb as well as the verb's actual present perfective form. We scored the form as correct if the system translated a present perfective with past tense meaning as a simple past or present perfective. There were 10 instances where a verb in the human translation had no corresponding verb in the machine translation, either due to incorrect omission or correct substitution of the corresponding nominalization. We excluded these forms from consideration. If the system fails to supply a verb for independent reasons, our system clearly can't mark it with tense. The results of our evaluation are summarized in Table 2 .",
519
+ "cite_spans": [],
520
+ "ref_spans": [
521
+ {
522
+ "start": 220,
523
+ "end": 227,
524
+ "text": "Table 2",
525
+ "ref_id": null
526
+ },
527
+ {
528
+ "start": 1730,
529
+ "end": 1737,
530
+ "text": "Table 2",
531
+ "ref_id": null
532
+ }
533
+ ],
534
+ "eq_spans": [],
535
+ "section": "Tense",
536
+ "sec_num": "8.1"
537
+ },
538
+ {
539
+ "text": "These results definitely improve over our previous heuristic, which was to always use past tense (assuming this to be the default mode for newspaper article reporting). Results are also better than always picking present tense. These results seem to indicate that atelicity is a fairly good cue for present tense. We also note that 8 out of the 14 cases where the human translation used the present tense while the system used past tense are headlines. Headlines are written using the historical present in English (\"Man bites Dog\"). These sentences would not be incorrectly translated in the past (\"The Man Bit the Dog\") Therefore, a fairer judgement might leave only remaining 6 incorrect cases in this cell. Using atelicity as a cue for the present yields correct results approximately 65incorrect results 35worst case results because they do not take into account presence or absence of the grammatical perfective and progressive markers referred to in the introduction.",
540
+ "cite_spans": [],
541
+ "ref_spans": [],
542
+ "eq_spans": [],
543
+ "section": "Tense",
544
+ "sec_num": "8.1"
545
+ },
546
+ {
547
+ "text": "Results are more preliminary for the clausal connectives. Of the 80 sentences, 35 of them are flagged as (possibly) containing events or states directly modifying other events or states. However, of this number, some actually do have lexical connectives represented as featural rather than structural elements in the LCS, and can be straightforwardly realized using translated English connectives such as since, after, and if.then. Other apparently \"modifying\" events or states should be treated as a complement relationship (at least according to the preferred reading in ambiguous cases), but are incorrectly analyzed as being in a non-complement relationship, or have other structural problems rendering the interlingua representation and English output not directly related to the original clause structure.",
548
+ "cite_spans": [],
549
+ "ref_spans": [],
550
+ "eq_spans": [],
551
+ "section": "Relationship between clauses",
552
+ "sec_num": "8.2"
553
+ },
554
+ {
555
+ "text": "Of the remaining clear cases, six while relationships were generated according to our heuristics, indicating cotemporaneousness of main and modifying situation, e.g. (7)a,b, in the automated translations of (1) and (2), respectively. None were inappropriate. Of the cases where then was generated, indicating sequential events, there were four cases in which this was appropriate, and three cases in which the situations really should have been cotemporaneous. While these numbers are small, this preliminary data seems to suggest again that atelicity is a good cue for cotemporality, while telicity is not a sufficient cue.",
556
+ "cite_spans": [],
557
+ "ref_spans": [],
558
+ "eq_spans": [],
559
+ "section": "Relationship between clauses",
560
+ "sec_num": "8.2"
561
+ },
562
+ {
563
+ "text": "(7) a. Before 1965, China altogether only have the ability shipbuilding about 300 thousand tons , while the annual output is 80 thousand tons.",
564
+ "cite_spans": [],
565
+ "ref_spans": [],
566
+ "eq_spans": [],
567
+ "section": "Relationship between clauses",
568
+ "sec_num": "8.2"
569
+ },
570
+ {
571
+ "text": "b. this 80 thousand tons actually includes 517 ships, while the ship tonnage is very low.",
572
+ "cite_spans": [],
573
+ "ref_spans": [],
574
+ "eq_spans": [],
575
+ "section": "Relationship between clauses",
576
+ "sec_num": "8.2"
577
+ },
578
+ {
579
+ "text": "We therefore conclude that lexical aspect can serve as a valuable heuristic for suggesting tense, in the absence of tense and other temporal markers. We anticipate incorporation of grammatical aspect information to improve our temporal representation further. In addition, lexical aspect, as represented by the interlingual LCS structure, can serve as the foundation for language specific heuristics. Furthermore, the lexical aspect represented in the LCS can help to provide the beginnings of cross-sentential discourse information. We have suggested applications in the temporal domain while, then. Causality is another possible domain in which relevant pieces encoded in sentence-level LCS structures could be used to provide links between LCSes/sentences. Thus, the interlingual representation may be used to provide not only shared semantic and syntactic structure, but \"also the building blocks for language-specific heuristics for mismatches between languages.",
580
+ "cite_spans": [],
581
+ "ref_spans": [],
582
+ "eq_spans": [],
583
+ "section": "Conclusions",
584
+ "sec_num": "9"
585
+ },
586
+ {
587
+ "text": "There are a number of other directions we intend to pursue in extending this work. First, we will evaluate the role of the grammatical aspect markers mentioned above, in combination with the telicity features. Second, we will also examine the role of the nature of the modifying situation. Third, we will incorporate other lexical information present in the sentence, including adverbial cue words (e.g. now, already and specific dates that have time-related information, and distinguishing reported speech from other sentences. Finally, as mentioned, these results do not take embedded verbs or verbs in adjunct clauses into account. Many adjunct and embedded clauses are tenseless, making evaluation more difficult. For example, is The President believed China to be a threat equivalent to The president believed China is a threat).",
588
+ "cite_spans": [],
589
+ "ref_spans": [],
590
+ "eq_spans": [],
591
+ "section": "Future Research",
592
+ "sec_num": "10"
593
+ },
594
+ {
595
+ "text": "LCS representations in our system have been created for Korean, Spanish and Arabic, as well as for English and Chinese.",
596
+ "cite_spans": [],
597
+ "ref_spans": [],
598
+ "eq_spans": [],
599
+ "section": "",
600
+ "sec_num": null
601
+ }
602
+ ],
603
+ "back_matter": [],
604
+ "bib_entries": {
605
+ "BIBREF0": {
606
+ "ref_id": "b0",
607
+ "title": "A Discourse Grammar of Mandarin Chinese",
608
+ "authors": [
609
+ {
610
+ "first": "C",
611
+ "middle": [],
612
+ "last": "Chauncey",
613
+ "suffix": ""
614
+ },
615
+ {
616
+ "first": "",
617
+ "middle": [],
618
+ "last": "Chu",
619
+ "suffix": ""
620
+ }
621
+ ],
622
+ "year": 1998,
623
+ "venue": "",
624
+ "volume": "",
625
+ "issue": "",
626
+ "pages": "",
627
+ "other_ids": {},
628
+ "num": null,
629
+ "urls": [],
630
+ "raw_text": "Chauncey C. Chu. 1998. A Discourse Grammar of Mandarin Chinese. Peter Lang Publishing, Inc., New York, NY.",
631
+ "links": null
632
+ },
633
+ "BIBREF1": {
634
+ "ref_id": "b1",
635
+ "title": "Aspect. Cambridge University Press",
636
+ "authors": [
637
+ {
638
+ "first": "",
639
+ "middle": [],
640
+ "last": "Bernard Comrie",
641
+ "suffix": ""
642
+ }
643
+ ],
644
+ "year": 1976,
645
+ "venue": "",
646
+ "volume": "",
647
+ "issue": "",
648
+ "pages": "",
649
+ "other_ids": {},
650
+ "num": null,
651
+ "urls": [],
652
+ "raw_text": "Bernard Comrie. 1976. Aspect. Cambridge Univer- sity Press, Cambridge, MA.",
653
+ "links": null
654
+ },
655
+ "BIBREF2": {
656
+ "ref_id": "b2",
657
+ "title": "Selecting Tense, Aspect, and Connecting Words in Language Generation",
658
+ "authors": [
659
+ {
660
+ "first": "Bonnie",
661
+ "middle": [
662
+ "J"
663
+ ],
664
+ "last": "Dorr",
665
+ "suffix": ""
666
+ },
667
+ {
668
+ "first": "Terry",
669
+ "middle": [],
670
+ "last": "Gaasterland",
671
+ "suffix": ""
672
+ }
673
+ ],
674
+ "year": 1995,
675
+ "venue": "Proceedings of IJCAI-95",
676
+ "volume": "",
677
+ "issue": "",
678
+ "pages": "",
679
+ "other_ids": {},
680
+ "num": null,
681
+ "urls": [],
682
+ "raw_text": "Bonnie J. Dorr and Terry Gaasterland. 1995. Se- lecting Tense, Aspect, and Connecting Words in Language Generation. In Proceedings of IJCAI- 95, Montreal, Canada.",
683
+ "links": null
684
+ },
685
+ "BIBREF3": {
686
+ "ref_id": "b3",
687
+ "title": "Aspectual Modifications to a LCS Database for NLP Applications",
688
+ "authors": [
689
+ {
690
+ "first": "Bonnie",
691
+ "middle": [
692
+ "J"
693
+ ],
694
+ "last": "Dorr",
695
+ "suffix": ""
696
+ },
697
+ {
698
+ "first": "",
699
+ "middle": [],
700
+ "last": "Marl Broman Olsen",
701
+ "suffix": ""
702
+ }
703
+ ],
704
+ "year": 1997,
705
+ "venue": "",
706
+ "volume": "",
707
+ "issue": "",
708
+ "pages": "",
709
+ "other_ids": {},
710
+ "num": null,
711
+ "urls": [],
712
+ "raw_text": "Bonnie J. Dorr and Marl Broman Olsen. 1997a. As- pectual Modifications to a LCS Database for NLP Applications. Technical Report LAMP TR 007, UMIACS TR 97-23, CS TR 3763, University of Maryland, College Park, MD.",
713
+ "links": null
714
+ },
715
+ "BIBREF4": {
716
+ "ref_id": "b4",
717
+ "title": "Deriving Verbal and Compositional Lexical Aspect for NLP Applications",
718
+ "authors": [
719
+ {
720
+ "first": "Bonnie",
721
+ "middle": [
722
+ "J"
723
+ ],
724
+ "last": "Dorr",
725
+ "suffix": ""
726
+ },
727
+ {
728
+ "first": "",
729
+ "middle": [],
730
+ "last": "Marl Broman Olsen",
731
+ "suffix": ""
732
+ }
733
+ ],
734
+ "year": 1997,
735
+ "venue": "Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics (ACL-97)",
736
+ "volume": "",
737
+ "issue": "",
738
+ "pages": "151--158",
739
+ "other_ids": {},
740
+ "num": null,
741
+ "urls": [],
742
+ "raw_text": "Bonnie J. Dorr and Marl Broman Olsen. 1997b. Deriving Verbal and Compositional Lexical As- pect for NLP Applications. In Proceedings of the 35th Annual Meeting of the Association for Com- putational Linguistics (ACL-97), pages 151-158, Madrid, SPain , July 7-12.",
743
+ "links": null
744
+ },
745
+ "BIBREF5": {
746
+ "ref_id": "b5",
747
+ "title": "Use of Lexical Conceptual Structure for Intelligent Tutoring",
748
+ "authors": [
749
+ {
750
+ "first": "Bonnie",
751
+ "middle": [
752
+ "J"
753
+ ],
754
+ "last": "Doff",
755
+ "suffix": ""
756
+ },
757
+ {
758
+ "first": "James",
759
+ "middle": [],
760
+ "last": "Hendler",
761
+ "suffix": ""
762
+ },
763
+ {
764
+ "first": "Scott",
765
+ "middle": [],
766
+ "last": "Blanksteen",
767
+ "suffix": ""
768
+ },
769
+ {
770
+ "first": "Barrie",
771
+ "middle": [],
772
+ "last": "Migdaloff",
773
+ "suffix": ""
774
+ }
775
+ ],
776
+ "year": 1993,
777
+ "venue": "",
778
+ "volume": "",
779
+ "issue": "",
780
+ "pages": "",
781
+ "other_ids": {},
782
+ "num": null,
783
+ "urls": [],
784
+ "raw_text": "Bonnie J. Doff, James Hendler, Scott Blanksteen, and Barrie Migdaloff. 1993. Use of Lexical Con- ceptual Structure for Intelligent Tutoring. Tech- nical Report UMIACS TR 93-108, CS TR 3161, University of Maryland.",
785
+ "links": null
786
+ },
787
+ "BIBREF6": {
788
+ "ref_id": "b6",
789
+ "title": "A Thematic HieJfarchy for Efficient Generation from Lexical-ConceptM Structure",
790
+ "authors": [
791
+ {
792
+ "first": "Bonnie",
793
+ "middle": [
794
+ "J"
795
+ ],
796
+ "last": "Dorr",
797
+ "suffix": ""
798
+ },
799
+ {
800
+ "first": "Nizar",
801
+ "middle": [],
802
+ "last": "Habash",
803
+ "suffix": ""
804
+ },
805
+ {
806
+ "first": "David",
807
+ "middle": [],
808
+ "last": "Traum",
809
+ "suffix": ""
810
+ }
811
+ ],
812
+ "year": 1998,
813
+ "venue": "Proceedings of the Third Conference of the Association for Machine Translation in the Americas, AMTA-98",
814
+ "volume": "15",
815
+ "issue": "",
816
+ "pages": "28--31",
817
+ "other_ids": {},
818
+ "num": null,
819
+ "urls": [],
820
+ "raw_text": "Bonnie J. Dorr, Nizar Habash, and David Traum. 1998. A Thematic HieJfarchy for Efficient Gener- ation from Lexical-ConceptM Structure. In Pro- ceedings of the Third Conference of the Associ- ation for Machine Translation in the Americas, AMTA-98, in Lecture Notes in Artificial Intelli- gence, 15~9, pages 333-343, Langhorne, PA, Oc- tober 28-31.",
821
+ "links": null
822
+ },
823
+ "BIBREF7": {
824
+ "ref_id": "b7",
825
+ "title": "Machine Translation: A View from the Lexicon",
826
+ "authors": [
827
+ {
828
+ "first": "Bonnie",
829
+ "middle": [
830
+ "J"
831
+ ],
832
+ "last": "Dorr",
833
+ "suffix": ""
834
+ }
835
+ ],
836
+ "year": 1993,
837
+ "venue": "",
838
+ "volume": "",
839
+ "issue": "",
840
+ "pages": "",
841
+ "other_ids": {},
842
+ "num": null,
843
+ "urls": [],
844
+ "raw_text": "Bonnie J. Dorr. 1993. Machine Translation: A View from the Lexicon. The MIT Press, Cambridge, MA.",
845
+ "links": null
846
+ },
847
+ "BIBREF8": {
848
+ "ref_id": "b8",
849
+ "title": "Word Meaning in Montague Grammar",
850
+ "authors": [
851
+ {
852
+ "first": "David",
853
+ "middle": [],
854
+ "last": "Dowty",
855
+ "suffix": ""
856
+ }
857
+ ],
858
+ "year": 1979,
859
+ "venue": "",
860
+ "volume": "",
861
+ "issue": "",
862
+ "pages": "",
863
+ "other_ids": {},
864
+ "num": null,
865
+ "urls": [],
866
+ "raw_text": "David Dowty. 1979. Word Meaning in Montague Grammar. Reidel, Dordrecht.",
867
+ "links": null
868
+ },
869
+ "BIBREF9": {
870
+ "ref_id": "b9",
871
+ "title": "The Effects of Aspectual Class on the Temporal Structure of Discourse: Semantics or Pragmatics? Linguistics and Philosophy",
872
+ "authors": [
873
+ {
874
+ "first": "David",
875
+ "middle": [],
876
+ "last": "Dowty",
877
+ "suffix": ""
878
+ }
879
+ ],
880
+ "year": 1986,
881
+ "venue": "",
882
+ "volume": "9",
883
+ "issue": "",
884
+ "pages": "37--61",
885
+ "other_ids": {},
886
+ "num": null,
887
+ "urls": [],
888
+ "raw_text": "David Dowty. 1986. The Effects of Aspectual Class on the Temporal Structure of Discourse: Seman- tics or Pragmatics? Linguistics and Philosophy, 9:37-61.",
889
+ "links": null
890
+ },
891
+ "BIBREF10": {
892
+ "ref_id": "b10",
893
+ "title": "A Cross-linguistic Study of Transitivity Alternations",
894
+ "authors": [
895
+ {
896
+ "first": "Mohamed",
897
+ "middle": [],
898
+ "last": "Guerssel",
899
+ "suffix": ""
900
+ },
901
+ {
902
+ "first": "Kenneth",
903
+ "middle": [],
904
+ "last": "Hale",
905
+ "suffix": ""
906
+ },
907
+ {
908
+ "first": "Mary",
909
+ "middle": [],
910
+ "last": "Laughten",
911
+ "suffix": ""
912
+ },
913
+ {
914
+ "first": "Beth",
915
+ "middle": [],
916
+ "last": "Levin",
917
+ "suffix": ""
918
+ },
919
+ {
920
+ "first": "Josie",
921
+ "middle": [
922
+ "White"
923
+ ],
924
+ "last": "Eagle",
925
+ "suffix": ""
926
+ }
927
+ ],
928
+ "year": 1983,
929
+ "venue": "Papers from the Parasession on Causatives and Agentivity at the Twenty.First Regional Meeting, CLS P1, Part P",
930
+ "volume": "",
931
+ "issue": "",
932
+ "pages": "48--63",
933
+ "other_ids": {},
934
+ "num": null,
935
+ "urls": [],
936
+ "raw_text": "Mohamed Guerssel, Kenneth Hale, Mary Laugh- ten, Beth Levin, and Josie White Eagle. 1985. A Cross-linguistic Study of Transitivity Alterna- tions. In W. H. Eilfort, P. D. Kroeber, and K. L. Peterson, editors, Papers from the Parasession on Causatives and Agentivity at the Twenty.First Re- gional Meeting, CLS P1, Part P, pages 48-63. The Chicago Linguistic Society, Chicago, IL, April. Ray Jackendoff. 1983. Semantics and Cognition. The MIT P.r\u00a2ss, Cambridge, MA.",
937
+ "links": null
938
+ },
939
+ "BIBREF11": {
940
+ "ref_id": "b11",
941
+ "title": "Semantic Structures",
942
+ "authors": [
943
+ {
944
+ "first": "Ray",
945
+ "middle": [],
946
+ "last": "Jackendoff",
947
+ "suffix": ""
948
+ }
949
+ ],
950
+ "year": 1990,
951
+ "venue": "",
952
+ "volume": "",
953
+ "issue": "",
954
+ "pages": "",
955
+ "other_ids": {},
956
+ "num": null,
957
+ "urls": [],
958
+ "raw_text": "Ray Jackendoff. 1990.. Semantic Structures. The MIT Press, Cambridge, MA.",
959
+ "links": null
960
+ },
961
+ "BIBREF12": {
962
+ "ref_id": "b12",
963
+ "title": "Generatiort that Exploits Corpus-Based Statistical Knowledge",
964
+ "authors": [
965
+ {
966
+ "first": "Irene",
967
+ "middle": [],
968
+ "last": "Langkilde",
969
+ "suffix": ""
970
+ },
971
+ {
972
+ "first": "Kevin",
973
+ "middle": [],
974
+ "last": "Knight",
975
+ "suffix": ""
976
+ }
977
+ ],
978
+ "year": 1998,
979
+ "venue": "Proceedings of COLING-ACL '98",
980
+ "volume": "",
981
+ "issue": "",
982
+ "pages": "704--710",
983
+ "other_ids": {},
984
+ "num": null,
985
+ "urls": [],
986
+ "raw_text": "Irene Langkilde and Kevin Knight. 1998a. Gen- eratiort that Exploits Corpus-Based Statistical Knowledge. In Proceedings of COLING-ACL '98, pages 704-710.",
987
+ "links": null
988
+ },
989
+ "BIBREF13": {
990
+ "ref_id": "b13",
991
+ "title": "The Practical Value of N-Grams in Generation",
992
+ "authors": [
993
+ {
994
+ "first": "Irene",
995
+ "middle": [],
996
+ "last": "Langkilde",
997
+ "suffix": ""
998
+ },
999
+ {
1000
+ "first": "Kevin",
1001
+ "middle": [],
1002
+ "last": "Knight",
1003
+ "suffix": ""
1004
+ }
1005
+ ],
1006
+ "year": 1998,
1007
+ "venue": "International Natural Language Generation Workshop",
1008
+ "volume": "",
1009
+ "issue": "",
1010
+ "pages": "",
1011
+ "other_ids": {},
1012
+ "num": null,
1013
+ "urls": [],
1014
+ "raw_text": "Irene Langkilde and Kevin Knight. 1998b. The Practical Value of N-Grams in Generation. In In- ternational Natural Language Generation Work- shop.",
1015
+ "links": null
1016
+ },
1017
+ "BIBREF14": {
1018
+ "ref_id": "b14",
1019
+ "title": "Unaccusativity: At the Syntaz-Lexical Semantics Interface",
1020
+ "authors": [
1021
+ {
1022
+ "first": "Beth",
1023
+ "middle": [],
1024
+ "last": "Levin",
1025
+ "suffix": ""
1026
+ },
1027
+ {
1028
+ "first": "Malka",
1029
+ "middle": [
1030
+ "Rappaport"
1031
+ ],
1032
+ "last": "Hovav",
1033
+ "suffix": ""
1034
+ }
1035
+ ],
1036
+ "year": 1926,
1037
+ "venue": "",
1038
+ "volume": "",
1039
+ "issue": "",
1040
+ "pages": "",
1041
+ "other_ids": {},
1042
+ "num": null,
1043
+ "urls": [],
1044
+ "raw_text": "Beth Levin and Malka Rappaport Hovav. 1995. Un- accusativity: At the Syntaz-Lexical Semantics In- terface. The MIT Press, Cambridge, MA. LI Monograph 26.",
1045
+ "links": null
1046
+ },
1047
+ "BIBREF15": {
1048
+ "ref_id": "b15",
1049
+ "title": "English Verb Classes and Alternations: A Preliminary Investigation",
1050
+ "authors": [
1051
+ {
1052
+ "first": "Beth",
1053
+ "middle": [],
1054
+ "last": "Levin",
1055
+ "suffix": ""
1056
+ }
1057
+ ],
1058
+ "year": 1993,
1059
+ "venue": "",
1060
+ "volume": "",
1061
+ "issue": "",
1062
+ "pages": "",
1063
+ "other_ids": {},
1064
+ "num": null,
1065
+ "urls": [],
1066
+ "raw_text": "Beth Levin. 1993. English Verb Classes and Alter- nations: A Preliminary Investigation. University of Chicago Press, Chicago, IL.",
1067
+ "links": null
1068
+ },
1069
+ "BIBREF16": {
1070
+ "ref_id": "b16",
1071
+ "title": "Mandarin Chinese: A functional reference grammar",
1072
+ "authors": [
1073
+ {
1074
+ "first": "Charles",
1075
+ "middle": [],
1076
+ "last": "Li",
1077
+ "suffix": ""
1078
+ },
1079
+ {
1080
+ "first": "Sandra",
1081
+ "middle": [],
1082
+ "last": "Thompson",
1083
+ "suffix": ""
1084
+ }
1085
+ ],
1086
+ "year": 1981,
1087
+ "venue": "",
1088
+ "volume": "",
1089
+ "issue": "",
1090
+ "pages": "",
1091
+ "other_ids": {},
1092
+ "num": null,
1093
+ "urls": [],
1094
+ "raw_text": "Charles Li and Sandra Thompson. 1981. Mandarin Chinese: A functional reference grammar. Uni- versity of California Press, Berkeley, CA.",
1095
+ "links": null
1096
+ },
1097
+ "BIBREF18": {
1098
+ "ref_id": "b18",
1099
+ "title": "Enhancing Automatic Acquisition of Thematic Structure in a Large-Scale Lexicon for Mandarin Chinese",
1100
+ "authors": [
1101
+ {
1102
+ "first": "Bonnie",
1103
+ "middle": [
1104
+ "J"
1105
+ ],
1106
+ "last": "Mart Broman Olsen",
1107
+ "suffix": ""
1108
+ },
1109
+ {
1110
+ "first": "Scott",
1111
+ "middle": [
1112
+ "C"
1113
+ ],
1114
+ "last": "Dorr",
1115
+ "suffix": ""
1116
+ },
1117
+ {
1118
+ "first": "",
1119
+ "middle": [],
1120
+ "last": "Thomas",
1121
+ "suffix": ""
1122
+ }
1123
+ ],
1124
+ "year": 1998,
1125
+ "venue": "Proceedings of the Third Conference of the Association for Machine Translation in the Americas, AMTA-98",
1126
+ "volume": "1529",
1127
+ "issue": "",
1128
+ "pages": "41--50",
1129
+ "other_ids": {},
1130
+ "num": null,
1131
+ "urls": [],
1132
+ "raw_text": "Mart Broman Olsen, Bonnie J. Dorr, and Scott C. Thomas. 1998. Enhancing Automatic Acquisi- tion of Thematic Structure in a Large-Scale Lex- icon for Mandarin Chinese. In Proceedings of the Third Conference of the Association for Machine Translation in the Americas, AMTA-98, in Lec- ture Notes in Artificial Intelligence, 1529, pages 41-50, Langhorne, PA, October 28-31.",
1133
+ "links": null
1134
+ },
1135
+ "BIBREF19": {
1136
+ "ref_id": "b19",
1137
+ "title": "A Semantic and Pragmatic Model of Lexical and Grammatical Aspect",
1138
+ "authors": [
1139
+ {
1140
+ "first": "",
1141
+ "middle": [],
1142
+ "last": "Mart Broman Olsen",
1143
+ "suffix": ""
1144
+ }
1145
+ ],
1146
+ "year": 1997,
1147
+ "venue": "",
1148
+ "volume": "",
1149
+ "issue": "",
1150
+ "pages": "",
1151
+ "other_ids": {},
1152
+ "num": null,
1153
+ "urls": [],
1154
+ "raw_text": "Mart Broman Olsen. 1997. A Semantic and Prag- matic Model of Lexical and Grammatical Aspect. Garland, New York.",
1155
+ "links": null
1156
+ },
1157
+ "BIBREF20": {
1158
+ "ref_id": "b20",
1159
+ "title": "A Computational Model of the Semantics of Tense and Aspect",
1160
+ "authors": [
1161
+ {
1162
+ "first": "Rebecca",
1163
+ "middle": [],
1164
+ "last": "Passoneau",
1165
+ "suffix": ""
1166
+ }
1167
+ ],
1168
+ "year": 1988,
1169
+ "venue": "Computational Linguistics: Special Issue on Tense and Aspect",
1170
+ "volume": "14",
1171
+ "issue": "2",
1172
+ "pages": "44--60",
1173
+ "other_ids": {},
1174
+ "num": null,
1175
+ "urls": [],
1176
+ "raw_text": "Rebecca Passoneau. 1988. A Computational Model of the Semantics of Tense and Aspect. Compu- tational Linguistics: Special Issue on Tense and Aspect, 14(2):44-60.",
1177
+ "links": null
1178
+ },
1179
+ "BIBREF21": {
1180
+ "ref_id": "b21",
1181
+ "title": "The Elasticity of Verb Meaning",
1182
+ "authors": [
1183
+ {
1184
+ "first": "Malka",
1185
+ "middle": [],
1186
+ "last": "P~appaport Hovav",
1187
+ "suffix": ""
1188
+ },
1189
+ {
1190
+ "first": "Beth",
1191
+ "middle": [],
1192
+ "last": "Levin",
1193
+ "suffix": ""
1194
+ }
1195
+ ],
1196
+ "year": 1995,
1197
+ "venue": "Processes in Argument Structure",
1198
+ "volume": "",
1199
+ "issue": "",
1200
+ "pages": "1--13",
1201
+ "other_ids": {},
1202
+ "num": null,
1203
+ "urls": [],
1204
+ "raw_text": "Malka P~appaport Hovav and Beth Levin. 1995. The Elasticity of Verb Meaning. In Processes in Argument Structure, pages 1-13, Germany. SfS- Report-06-95, Seminar fiir Sprachwissenschaft, Eberhard-Karls-Universit~t Tiibingen, Tiibingen. Carlota Smith. 1997. The parameter of aspect. Kluwer, Dordrecht, 2nd edition.",
1205
+ "links": null
1206
+ }
1207
+ },
1208
+ "ref_entries": {
1209
+ "FIGREF0": {
1210
+ "num": null,
1211
+ "uris": null,
1212
+ "text": "had a total of only 300,000 tons of shipbuilding capacity and the annual output was 80,000 ~ons. de tonnage is very low de",
1213
+ "type_str": "figure"
1214
+ },
1215
+ "FIGREF1": {
1216
+ "num": null,
1217
+ "uris": null,
1218
+ "text": "a or progressive complement (3)b (and subject drop), while other verbs like say do not place such restrictions (3)c,d. (3) a. Wolfe continued to publicize the baseless criticism on various occasions b. Wolfe continued publicizing the baseless criticism on various occasions c. Wolfe continued publicizing the baseless criticism on various occasions d. He said the asia-pacific region already became a focal point region e. He said the asia-pacific region already is becoming a focal point region 2.2 Lexical aspect",
1219
+ "type_str": "figure"
1220
+ },
1221
+ "FIGREF2": {
1222
+ "num": null,
1223
+ "uris": null,
1224
+ "text": "a. Mary ran into the room. She turned on her walkman. b. Mary ran. She turned on her walkman. c. Mary was hungry. She made a pizza. Smith similarly suggests that in English all past events are interpreted as telic (Smith, 1997) (but cf. (Olsen, 1997)).",
1225
+ "type_str": "figure"
1226
+ },
1227
+ "FIGREF3": {
1228
+ "num": null,
1229
+ "uris": null,
1230
+ "text": "(5) a. The building exploded. Mary jumped. b. The building exploded. Chunks of concrete flew everywhere.",
1231
+ "type_str": "figure"
1232
+ },
1233
+ "FIGREF4": {
1234
+ "num": null,
1235
+ "uris": null,
1236
+ "text": "[156]).Given an LCS representation L:1. Initialize: T(L):=[\u00a2T], D(L):=[0R], R(L):=[0D] 2. If Top node of L E {CAUSE, LET, GO} Then T(L):=[+T] If Top node of L E {CAUSE, LET} Then D(L):=[+D], R(L):=[+R] If Top node of L E {GO} Then D(L):=[+D] 3. IfTop node of L E {ACT, BE, STAY} Then If Internal node of L E {TO, TOWARD, FORTemp} Then T(L):=[+T] If Top node of L E {BE, STAY} Then R(L):=[+R] If Top node of L E {ACT} Then set D(L):=[+D], R(L):=[+R]",
1237
+ "type_str": "figure"
1238
+ },
1239
+ "FIGREF5": {
1240
+ "num": null,
1241
+ "uris": null,
1242
+ "text": "Algorithm for Aspectual Feature Determination",
1243
+ "type_str": "figure"
1244
+ },
1245
+ "FIGREF6": {
1246
+ "num": null,
1247
+ "uris": null,
1248
+ "text": "an event or state directly modifying another event or state, if there is no other clausal connective (coming from a subordinating conjunction or post-position in the original), then use telicity to pick a connective expressing assumed temporal relation:We have applied this machine translation system to a corpus of Chinese newspaper text from Xinhua and other sources, primarily in the economics domain.The genre is roughly comparable to the American Wall Street Journal. Chinese newspaper genre differs from other Chinese textual sources, in a number of ways, including: \u2022 more complex sentence structure \u2022 more extensive use of acronyms \u2022 less use of Classical Chinese \u2022 more representative grammar \u2022 more constrained vocabulary (limited lexicon)",
1249
+ "type_str": "figure"
1250
+ },
1251
+ "TABREF2": {
1252
+ "content": "<table><tr><td>: Lexical Aspect Features</td></tr></table>",
1253
+ "num": null,
1254
+ "text": "",
1255
+ "type_str": "table",
1256
+ "html": null
1257
+ }
1258
+ }
1259
+ }
1260
+ }
Full_text_JSON/prefixW/json/W00/W00-0206.json ADDED
@@ -0,0 +1,1022 @@
1
+ {
2
+ "paper_id": "W00-0206",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:06.676777Z"
6
+ },
7
+ "title": "An Application of the Interlingua System ISS for Spanish-English Pronominal Anaphora Generation",
8
+ "authors": [
9
+ {
10
+ "first": "Jesfis",
11
+ "middle": [],
12
+ "last": "Peral",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "Research Group on Language Processing and Information Systems",
16
+ "institution": "University of Alicante",
17
+ "location": {
18
+ "postCode": "03690",
19
+ "settlement": "San Vicente del Raspeig. Aiicante",
20
+ "country": "Spain"
21
+ }
22
+ },
23
+ "email": "jperal@dlsi.ua.es"
24
+ },
25
+ {
26
+ "first": "Antonio",
27
+ "middle": [],
28
+ "last": "Ferrfindez",
29
+ "suffix": "",
30
+ "affiliation": {
31
+ "laboratory": "Research Group on Language Processing and Information Systems",
32
+ "institution": "University of Alicante",
33
+ "location": {
34
+ "postCode": "03690",
35
+ "settlement": "San Vicente del Raspeig. Aiicante",
36
+ "country": "Spain"
37
+ }
38
+ },
39
+ "email": "antonio@dlsi.ua.es"
40
+ }
41
+ ],
42
+ "year": "",
43
+ "venue": null,
44
+ "identifiers": {},
45
+ "abstract": "In this ~paper, we present the Interlingua system ISS to generate the pronominal anaphora into the Spanish and English languages. We also describe the main problems in the pronoun generation into both languages such as zero-subject constructions and number, gender and syntactic differences. Our system improves other proposals presented so far due to the fact that we are able to solve and generate intersentential anaphora, to detect coreference chains and to generate Spanish zero-pronouns into English, issues that are hardly considered by other systems. Finally, we provide outstanding results of our system on unrestricted corpora.",
46
+ "pdf_parse": {
47
+ "paper_id": "W00-0206",
48
+ "_pdf_hash": "",
49
+ "abstract": [
50
+ {
51
+ "text": "In this ~paper, we present the Interlingua system ISS to generate the pronominal anaphora into the Spanish and English languages. We also describe the main problems in the pronoun generation into both languages such as zero-subject constructions and number, gender and syntactic differences. Our system improves other proposals presented so far due to the fact that we are able to solve and generate intersentential anaphora, to detect coreference chains and to generate Spanish zero-pronouns into English, issues that are hardly considered by other systems. Finally, we provide outstanding results of our system on unrestricted corpora.",
52
+ "cite_spans": [],
53
+ "ref_spans": [],
54
+ "eq_spans": [],
55
+ "section": "Abstract",
56
+ "sec_num": null
57
+ }
58
+ ],
59
+ "body_text": [
60
+ {
61
+ "text": "One of the main problems of many commercial Machine Translation (MT) and experimental systems is that they do not carry out a correct pronominal anaphora generation. Solving the anaphora and extracting the antecedent are key issues in a correct generation into the target language. Unfortunately, the majority of MT systems do not deal with anaphora resolution and their successful operation usually does not go beyond the sentence level. In this paper, we present a complete approach that allows pronoun resolution and generation into the target language.",
62
+ "cite_spans": [],
63
+ "ref_spans": [],
64
+ "eq_spans": [],
65
+ "section": "Introduction",
66
+ "sec_num": null
67
+ },
68
+ {
69
+ "text": "Our approach works on unrestricted texts unlike other systems, like the KANT interlingua system (Leavitt et al. (1994) ), that are designed for well-defined domains. Although full parsing of these texts could be applied, we have used partial parsing of the texts due to the unavoidable incompleteness of the grammar. This is a main difference with the majority of the interlingua systems such as the DLT system based on a modification of Esperanto (Witkam (1983) ), the Rosetta system which is experimenting with Montague semantics as the basis for an interlingua (Appelo and Landsbergen (1986) ), the KANT system, etc. as they use full parsing of the text.",
70
+ "cite_spans": [
71
+ {
72
+ "start": 96,
73
+ "end": 118,
74
+ "text": "(Leavitt et al. (1994)",
75
+ "ref_id": "BIBREF4"
76
+ },
77
+ {
78
+ "start": 448,
79
+ "end": 462,
80
+ "text": "(Witkam (1983)",
81
+ "ref_id": "BIBREF7"
82
+ },
83
+ {
84
+ "start": 564,
85
+ "end": 594,
86
+ "text": "(Appelo and Landsbergen (1986)",
87
+ "ref_id": "BIBREF0"
88
+ }
89
+ ],
90
+ "ref_spans": [],
91
+ "eq_spans": [],
92
+ "section": "Introduction",
93
+ "sec_num": null
94
+ },
95
+ {
96
+ "text": "After the parsing and solving pronominal anaphora, an interlingua representation of the whole text is obtained. In the interlingua representation no semantic information is used as input, unlike some approaches that have as input semantic information of the constituents (Miyoshi et al. (1997 ), Castell6n et al. (1998 , the DLT system, etc) .",
97
+ "cite_spans": [
98
+ {
99
+ "start": 271,
100
+ "end": 292,
101
+ "text": "(Miyoshi et al. (1997",
102
+ "ref_id": "BIBREF5"
103
+ },
104
+ {
105
+ "start": 293,
106
+ "end": 318,
107
+ "text": "), Castell6n et al. (1998",
108
+ "ref_id": "BIBREF1"
109
+ },
110
+ {
111
+ "start": 325,
112
+ "end": 341,
113
+ "text": "DLT system, etc)",
114
+ "ref_id": null
115
+ }
116
+ ],
117
+ "ref_spans": [],
118
+ "eq_spans": [],
119
+ "section": "Introduction",
120
+ "sec_num": null
121
+ },
122
+ {
123
+ "text": "From this interlingua representation, the generation of anaphora (including intersentential anaphora), the detection of coreference chains of the whole text and the generation of Spanish zero-pronouns into English have been carried out, issues that are hardly considered by other systems. Furthermore, this approach can be used for other different applications, e.g. Information Retrieval, Summarization, etc. The paper is organized as follows: In section 1, the complete approach that includes Analysis, Interlingua and Generation modules will be described. These modules will be explained in detail in the next three sections. In section 5, the Generation module has been evaluated in order ! This paper has been partly financed by the collaborative research project between Spain and The United Kingdom number HB 1998-0068. to measure the efficiency of our proposal. To do so, two experiments have been accomplished: the generation of Spanish zero-pronouns into English (syntactic generation module) and the generation of English pronouns into Spanish ones (morphological generation module). Finally, the conclusions of this work will be presented.",
124
+ "cite_spans": [
125
+ {
126
+ "start": 798,
127
+ "end": 826,
128
+ "text": "Kingdom number HB 1998-0068.",
129
+ "ref_id": null
130
+ }
131
+ ],
132
+ "ref_spans": [],
133
+ "eq_spans": [],
134
+ "section": "Introduction",
135
+ "sec_num": null
136
+ },
137
+ {
138
+ "text": "The complete approach that solves and generates the anaphor is based on the scheme of Figure 1 . Translation is carried out in two stages: from the source language to the Interlingua, and from the Interlingua into the target language. Modules for analysis are independent from modules for generation..In this paper, although we have only studied the Spanish and English languages, our approach is easily extended to other languages, i.e. multilingual system, in the sense that any analysis module can be linked to any generation module. As can be observed in Figure 1 , there are three independent modules in the process of generation: Analysis, Interlingua and Generation modules.",
139
+ "cite_spans": [],
140
+ "ref_spans": [
141
+ {
142
+ "start": 86,
143
+ "end": 94,
144
+ "text": "Figure 1",
145
+ "ref_id": "FIGREF0"
146
+ },
147
+ {
148
+ "start": 559,
149
+ "end": 567,
150
+ "text": "Figure 1",
151
+ "ref_id": "FIGREF0"
152
+ }
153
+ ],
154
+ "eq_spans": [],
155
+ "section": "System Architecture",
156
+ "sec_num": "1"
157
+ },
158
+ {
159
+ "text": "The analysis is carried out by means of SUPAR (Slot Unification Parser for Anaphora resolution) system, presented in Femindez et al. (2000) . SUPAR is a computational system focused on anaphora resolution. It can deal with several kinds of anaphora, such as pronominal anaphora, one-anaphora, surface-count anaphora and definite descriptions. In this paper, we focus on pronominal anaphora resolution and generation into the target language. In pronominal anaphora resolution in both the Spanish and English languages, the system has achieved an accuracy of 84% and 87% respectively. A grammar defined by means of the grammatical formalism SUG (Slot Unification Grammar) is used as input of SUPAR. A translator that transforms SUG rules into Prolog clauses has been developed. This translator will provide a Prolog program that will parse each sentence. SUPAR allows to carry out either a full or a partial parsing of the text, with the same parser and grammar. Here, partial parsing techniques have been used due to the unavoidable incompleteness of the grammar and the use of unrestricted texts (corpora) as inputs.",
160
+ "cite_spans": [
161
+ {
162
+ "start": 117,
163
+ "end": 139,
164
+ "text": "Femindez et al. (2000)",
165
+ "ref_id": null
166
+ }
167
+ ],
168
+ "ref_spans": [],
169
+ "eq_spans": [],
170
+ "section": "Analysis module",
171
+ "sec_num": "2"
172
+ },
173
+ {
174
+ "text": "These unrestricted corpora used as input for the partial parser contain the words tagged with their grammatical categories obtained from the output of a part-of-speech (POS) tagger. The word, as it appears in the corpus, its lemma and its POS tag (with morphological information) is supplied for each word in the corpus. The corpus is split into sentences before applying the parsing.",
175
+ "cite_spans": [],
176
+ "ref_spans": [],
177
+ "eq_spans": [],
178
+ "section": "Analysis module",
179
+ "sec_num": "2"
180
+ },
181
+ {
182
+ "text": "The output of the parsing module will be the Slot Structure (SS) that stores the necessary information 2 for Natural Language Processing (NLP) problem resolution. This SS will be the input for the following module in which NLP problems (anaphora, extraposition, ellipsis, etc.) will be treated and solved.",
183
+ "cite_spans": [],
184
+ "ref_spans": [],
185
+ "eq_spans": [],
186
+ "section": "Analysis module",
187
+ "sec_num": "2"
188
+ },
189
+ {
190
+ "text": "In Fernindez et al. (1998) , a partial parsing 3 strategy that provides all the necessary information for resolving anaphora is presented. This partial parsing shows that only the following constituents are necessary for anaphora resolution: co-ordinated prepositional and noun phrases, pronouns, conjunctions and 2 The SS stores for each constituent the following information: constituent name (NP, PP, etc.), semantic and morphologic information, discourse marker (identifier of the entity or discourse object) and the SS of its subconstituents. 3 It is important to emphasize that the system allows to carry out a full parsing of the text. In this paper, partial parsing with no semantic information is used in the evaluation of our approach. verbs, regardless of the order in which they appear in the text. The free words consist of constituents that are not covered by this partial parsing (e.g. adverbs).",
191
+ "cite_spans": [
192
+ {
193
+ "start": 3,
194
+ "end": 26,
195
+ "text": "Fernindez et al. (1998)",
196
+ "ref_id": null
197
+ }
198
+ ],
199
+ "ref_spans": [],
200
+ "eq_spans": [],
201
+ "section": "Analysis module",
202
+ "sec_num": "2"
203
+ },
204
+ {
205
+ "text": "After applying the anaphora resolution module, a new Slot Structure (SS') is obtained. In this new structure the correct antecedent (chosen from the possible candidates) for each anaphoric expression will be stored together with its morphological and semantic information. SS' will be the input for the lnterlingua system.",
206
+ "cite_spans": [],
207
+ "ref_spans": [],
208
+ "eq_spans": [],
209
+ "section": "Analysis module",
210
+ "sec_num": "2"
211
+ },
212
+ {
213
+ "text": "Interlingua system (ISS)",
214
+ "cite_spans": [],
215
+ "ref_spans": [],
216
+ "eq_spans": [],
217
+ "section": "3",
218
+ "sec_num": null
219
+ },
220
+ {
221
+ "text": "As said before, the Interlingua system takes the SS of the sentence after applying the anaphora resolution module as input. This system, named lnterlingua Slot Structure (1SS), generates an interlingua representation from the SS of the sentence. SUPAR generates one SS for each sentence from the whole text and it solves intrasentential and intersententiai anaphora. Then, 1SS generates the interlingua representation of the whole text. This is one of the main advantages of 1SS because it is possible to generate intersentential pronominal anaphora.",
222
+ "cite_spans": [],
223
+ "ref_spans": [],
224
+ "eq_spans": [],
225
+ "section": "3",
226
+ "sec_num": null
227
+ },
228
+ {
229
+ "text": "To begin with, 1SS splits sentences into clauses 4. To identify a new clause when partial parsing has been carried out, the following heuristic has been applied: H1 Let us assume that the beginning of a new clause has been found when a verb is parsed and a free conjunction is subsequently parsed.",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "3",
234
+ "sec_num": null
235
+ },
236
+ {
237
+ "text": "In this particular case, a free conjunction does not imply conjunctions that join coordinated noun and prepositional phrases. It refers, here, to conjunctions that are parsed in our partial parsing scheme.",
238
+ "cite_spans": [],
239
+ "ref_spans": [],
240
+ "eq_spans": [],
241
+ "section": "3",
242
+ "sec_num": null
243
+ },
244
+ {
245
+ "text": "Once the text has been split into clauses, the next stage is to generate the interlingua representation for clauses. We have used a complex feature structure for each clause. In Figure 2 the information of the first clause of the example (1) is presented:",
246
+ "cite_spans": [],
247
+ "ref_spans": [
248
+ {
249
+ "start": 178,
250
+ "end": 186,
251
+ "text": "Figure 2",
252
+ "ref_id": "FIGREF1"
253
+ }
254
+ ],
255
+ "eq_spans": [],
256
+ "section": "3",
257
+ "sec_num": null
258
+ },
259
+ {
260
+ "text": "(1) The boys of the mountains were in the garden. They were catching flowers. 4 A clause could be defined as \"a group of words containing a verb\". As can be observed in Figure 25 , the interlingua is a frame composed of semantic roles and features extracted from the SS of the clause. Semantic roles that have been used in this approach are the following: ACTION, AGENT, THEME and MODIFIER that correspond to verb, subject, object and prepositional phrases of the clause respectively. The notation we have used is based on the representation used in KANT interlingua. To identify these semantic roles when partial parsing has been carried out and no semantic knowledge is used, the following heuristic has been applied:",
261
+ "cite_spans": [],
262
+ "ref_spans": [
263
+ {
264
+ "start": 169,
265
+ "end": 178,
266
+ "text": "Figure 25",
267
+ "ref_id": "FIGREF1"
268
+ }
269
+ ],
270
+ "eq_spans": [],
271
+ "section": "3",
272
+ "sec_num": null
273
+ },
274
+ {
275
+ "text": "H2 Let us assume that the NP parsed before the verb is the agent of the clause. In the same way, the NP parsed after the verb is the theme of the clause. Finally., all the PP found in the clause are its modifiers.",
276
+ "cite_spans": [],
277
+ "ref_spans": [],
278
+ "eq_spans": [],
279
+ "section": "3",
280
+ "sec_num": null
281
+ },
282
+ {
283
+ "text": "5 Only the relevant attributes of each semantic role appear in a simplified way in the picture. Additional attributes are added to the semantic roles in order to complete all the necessary information for the interlingua representation.",
284
+ "cite_spans": [],
285
+ "ref_spans": [],
286
+ "eq_spans": [],
287
+ "section": "3",
288
+ "sec_num": null
289
+ },
290
+ {
291
+ "text": "In Figure 2 the following elements have been found: ACTION= 'were', AGENT = 'the boys of the mountains', THEME= ~ (it has not been found any NP after the verb) and MODIFIER = 'in the garden'. These elements are represented by a simple feature structure.",
292
+ "cite_spans": [],
293
+ "ref_spans": [
294
+ {
295
+ "start": 3,
296
+ "end": 11,
297
+ "text": "Figure 2",
298
+ "ref_id": "FIGREF1"
299
+ }
300
+ ],
301
+ "eq_spans": [],
302
+ "section": "3",
303
+ "sec_num": null
304
+ },
305
+ {
306
+ "text": "Features are represented as attributes with their corresponding values. MODIFIER that contains all the information about the modifiers (PP) of the NP, and Sem_Ref that contains semantic information about the constituent's head if this information is available. The semantic role THEME has the same attributes as the semantic role AGENT, i.e. the difference is that THEME is the object of the clause and AGENT is the subject. Finally, the semantic role MODIFIER has the following attributes: Cat that contains the syntactic category of the constituent; Identifier with the value of the discourse marker; Prep with the preposition of the constituent and ENTITY, which is the object of the PP and contains the same attributes as the THEME. One clause can have more than one MODIFIER depending on the number of PP that it has. It is important to emphasize that all this information is extracted from the SS of the constituents parsed in the clause. As said before, instead of representing the clauses independently, we are interested in the interlingua representation of the whole input text. With the global representation of the input text we will be able to generate intrasentential and intersentential anaphora. Furthermore, it will be possible to solve and generate coreference chains. Thereby, the scheme of Figure 2 is extended in order to represent all the discourse using the clauses as main units of this representation. In Figure 3 the interlingua representation of the whole text of the example (1) can be observed. On the right side, the CLAUSES of the text are represented in a simplified way. They contain the semantic role. ACTION with its attributes and the semantic roles AGENT, THEME and MODIFIER that have appeared in the clause. These semantic roles are linked to the ENTITIES that they refer to. It also contains the identifier of the sentence in which the CLAUSE appears (Sentence lD) and the Conjunction that joints two or more CLAUSES in a sentence.",
307
+ "cite_spans": [],
308
+ "ref_spans": [
309
+ {
310
+ "start": 1310,
311
+ "end": 1318,
312
+ "text": "Figure 2",
313
+ "ref_id": "FIGREF1"
314
+ },
315
+ {
316
+ "start": 1430,
317
+ "end": 1438,
318
+ "text": "Figure 3",
319
+ "ref_id": "FIGREF2"
320
+ }
321
+ ],
322
+ "eq_spans": [],
323
+ "section": "3",
324
+ "sec_num": null
325
+ },
326
+ {
327
+ "text": "In the picture, four ENTITIES and two CLAUSES can be distinguished. The ENTITIES are as follows: ENTITY 1 ('boy'), ENTITY 2 ('mountain), ENTITY 3 ('garden') and ENTITY 4 ('flower'). Moreover, a relation between two ENTITIES (number 1 and number 2) appears in the picture due to the ENTITY1 (NP) contains a MODIFIER (PP).",
328
+ "cite_spans": [],
329
+ "ref_spans": [],
330
+ "eq_spans": [],
331
+ "section": "3",
332
+ "sec_num": null
333
+ },
334
+ {
335
+ "text": "The CLAUSE 1 contains: Sentence 1D (' 1 '), ACTION ('be'), AGENT ('V', the link to ENTITY 1) and MODIFIER (which is a PP and contains the link to ENTITY 3). The CLAUSE 2 contains: Sentence_lD ('2'), 'ACTION ('catch'), AGENT (which is a PRONOUN and contains the link to ENTITY 1) and THEME ('Z', the link to ENTITY 4).",
336
+ "cite_spans": [],
337
+ "ref_spans": [],
338
+ "eq_spans": [],
339
+ "section": "3",
340
+ "sec_num": null
341
+ },
342
+ {
343
+ "text": "The coreference chain can be identified thanks to AGENTS of CLAUSE 1 and CLAUSE 2 ('the boys' and 'they') have their links to the same ENTITY. As can be seen, these links can occur between constituents of different clauses or different sentences. Then, the global system is able to generate intersentential anaphora and identify the coreferenee chains of the text.",
344
+ "cite_spans": [],
345
+ "ref_spans": [],
346
+ "eq_spans": [],
347
+ "section": "3",
348
+ "sec_num": null
349
+ },
350
+ {
351
+ "text": "The Generation module takes the interlingua representation of the text as input and generates it into the target language. In this paper, we are only describing the generation of pronouns. The generation phase is split into two modules: syntactic generation and morphological generation. In the next two subsections they will be studied in detail. Although the approach presented here is multilingual, we have focused on the generation into the Spanish and English languages.",
352
+ "cite_spans": [],
353
+ "ref_spans": [],
354
+ "eq_spans": [],
355
+ "section": "Generation module",
356
+ "sec_num": "4"
357
+ },
358
+ {
359
+ "text": "In syntactic generation the interlingua representation is converted by 'transformational rules' into an ordered surface-structure tree, with appropriate labeling of the leaves with target language grammatical functions and features. The basic task of syntactic generation is to order constituents in the correct sequence for the target language. However, the aim of this work is only the generation of pronominal anaphora into the target language, so we have only focused on the differences between the Spanish and English languages in the generation of the pronoun. These differences are what we have named discrepancies (a study of Spanish-English-Spanish discrepancies is showed in Peral et aL (1999) ). In syntactic generation the following discrepancies can be found: syntactic discrepancies and Spanish elliptical zero-subject constructions.",
360
+ "cite_spans": [
361
+ {
362
+ "start": 685,
363
+ "end": 703,
364
+ "text": "Peral et aL (1999)",
365
+ "ref_id": "BIBREF6"
366
+ }
367
+ ],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "Syntactic generation",
371
+ "sec_num": "4.1"
372
+ },
373
+ {
374
+ "text": "This discrepancy is due to the fact that the surface structures of the Spanish sentences are more flexible than the English ones. The constituents of the Spanish sentences can appear without a specific order in the sentence. In order to carry out a correct generation into English, we must firstly reorganize the Spanish sentence. Nevertheless, in the English-Spanish translation, in general, this reorganization is not necessary.",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "Syntactic discrepancies",
379
+ "sec_num": "4.1.1"
380
+ },
381
+ {
382
+ "text": "Let us see an example with the Spanish sentence In 2, the object of the verb, A Pedro (to Peter), appears before the verb (in the position of the theoretically subject) and the subject is omitted (this phenomena is usual in Spanish and it will be explained in the next subsection). The PP A Pedro (to Peter) functions as an indirect object of the verb (because it has the preposition A (to)). We can find out the subject since the verb is in first person and singular, so the subject would be the pronoun Yo (1). Moreover, there is a pronoun, lo (him) that functions as complement of the verb vi (saw). This pronoun in Spanish refers to the object of the verb, Peter, when it is moved from its theoretical place after the verb (as it occurs in this sentence).",
383
+ "cite_spans": [],
384
+ "ref_spans": [],
385
+ "eq_spans": [],
386
+ "section": "Syntactic discrepancies",
387
+ "sec_num": "4.1.1"
388
+ },
389
+ {
390
+ "text": "As explained before, it is possible to identify the semantic roles (AGENT, ACTION, etc.) of the previous constituents in the CLAUSE applying a series of heuristics. Once the semantic roles of the constituents have been established, they will be stored in the interlingua representation. The generation into English will be a new clause in which the order of the constituents is the usual in English: AGENT, ACTION, THEME and MODIFIERS.",
391
+ "cite_spans": [],
392
+ "ref_spans": [],
393
+ "eq_spans": [],
394
+ "section": "Syntactic discrepancies",
395
+ "sec_num": "4.1.1"
396
+ },
397
+ {
398
+ "text": "As commented before, the Spanish language allows to omit the pronominal subject of the sentences. These omitted pronouns are usually named zero-pronouns. While in other languages, zero-pronouns may appear in either the subject's or the object's grammatical position, (e.g. Japanese), in Spanish texts, zero-pronouns only appear in the position of the subject. In English texts, this sort of pronoun occurs far less frequently, as the use of them are generally compulsory in the language. Nevertheless, some examples can be found: \"Ross carefully folded his trousers and ~.climbed into bed\". (The symbol ~ shows the position of the omitted pronoun).. Target languages with typical elliptical (zero) constructions corresponding to source English pronouns are Italian, Thai, Chinese or Japanese.",
399
+ "cite_spans": [],
400
+ "ref_spans": [],
401
+ "eq_spans": [],
402
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
403
+ "sec_num": "4.1.2"
404
+ },
405
+ {
406
+ "text": "In order to generate Spanish zero-pronouns into English, they must first be located in the text (ellipsis detection), and then resolved (anaphora resolution). At the ellipsis detection stage, information about the zero-pronoun (e.g. person, gender, and number) must first be obtained from the verb of the clause and then used to identify the antecedent of the zeropronoun (resolution stage). The detection process depends on the knowledge about the structure of the language itself, which gives us clues to the use of each type of zero-pronoun.",
407
+ "cite_spans": [],
408
+ "ref_spans": [],
409
+ "eq_spans": [],
410
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
411
+ "sec_num": "4.1.2"
412
+ },
413
+ {
414
+ "text": "The resolution of zero-pronouns has been implemented in SUPAR. As we may work on unrestricted texts to which partial parsing is applied, zero-pronouns must also be detected when we do not dispose of full syntactic information. Once the input text has been split into clauses after applying the heuristic H1, the next problem consists of the detection of the omission of the subject from each clause.",
415
+ "cite_spans": [],
416
+ "ref_spans": [],
417
+ "eq_spans": [],
418
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
419
+ "sec_num": "4.1.2"
420
+ },
421
+ {
422
+ "text": "If partial parsing techniques have been applied, we can establish the following heuristic to detect the omission of the subject from each clause:",
423
+ "cite_spans": [],
424
+ "ref_spans": [],
425
+ "eq_spans": [],
426
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
427
+ "sec_num": "4.1.2"
428
+ },
429
+ {
430
+ "text": "H3 After the sentence has been divided into clauses, a noun phrase or a pronoun is sought, for each clause, through the clause constituents on the left-hand side of the verb, unless it is imperative or impersonal. Such a noun phrase or pronoun must agree in person and number with the verb of the clause.",
431
+ "cite_spans": [],
432
+ "ref_spans": [],
433
+ "eq_spans": [],
434
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
435
+ "sec_num": "4.1.2"
436
+ },
437
+ {
438
+ "text": "Sometimes, gender information of the pronoun can be obtained when the verb is copulative. For example, in: In this example, the verb estaba (was) is copulative, so that its subject must agree in gender and number with its object whenever the object can have either a masculine or a feminine linguistic form (guapo: masc, guapa: fem). We can therefore get information about its gender from the object, guapa (\"beautiful\" in its feminine form) which automatically assigns it to the feminine gender so the omitted pronoun would have to be she rather than he.",
439
+ "cite_spans": [],
440
+ "ref_spans": [],
441
+ "eq_spans": [],
442
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
443
+ "sec_num": "4.1.2"
444
+ },
445
+ {
446
+ "text": "After the zero-pronoun has been detected, SUPAR inserts the pronoun (with its information of person, gender and number) in the position in which it has been omitted. This pronoun will be detected and resolved in the following module of anaphora resolution. After that, ISS generates the interlingua representation of the text.",
447
+ "cite_spans": [],
448
+ "ref_spans": [],
449
+ "eq_spans": [],
450
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
451
+ "sec_num": "4.1.2"
452
+ },
453
+ {
454
+ "text": "In the example (3), two CLAUSES are identified. In the second CLAUSE the zeropronoun is detected (third person, singular and feminine -she-) and solved (third person, singular and feminine -Ann-). So the AGENT of this CLAUSE is the PRONOUN she and it has a link to the ENTITY Ann (the chosen antecedent). Now, the generation of Spanish zeropronouns into English is easy because all the information that it is needed is located in the interlingua representatio.n. The English pronoun's information is extracted in the following way: number and person information are obtained from the PRONOUN and gender information is obtained from the Head of its antecedent.",
455
+ "cite_spans": [],
456
+ "ref_spans": [],
457
+ "eq_spans": [],
458
+ "section": "Elliptical zero-subject constructions (zero-pronouns)",
459
+ "sec_num": "4.1.2"
460
+ },
461
+ {
462
+ "text": "In the morphological generation we mainly have to treat and solve number and gender discrepancies in the generation of pronouns.",
463
+ "cite_spans": [],
464
+ "ref_spans": [],
465
+ "eq_spans": [],
466
+ "section": "Morphological generation",
467
+ "sec_num": "4.2"
468
+ },
469
+ {
470
+ "text": "This problem is generated by the discrepancy between words of different languages that express the same concept. These words can be referred to a singular pronoun in the source language and to a plural pronoun in the target language. For example, in English the concept .people is plural, whereas in Spanish is singular. In 4, it can be observed that the name people in English has been replaced with the plural pronoun they, whereas in Spanish (5) the name gente has been replaced with the singular pronoun dsta (it) . Gender discrepancies also exist in the translation of other languages such as in the German-English translation.",
471
+ "cite_spans": [
472
+ {
473
+ "start": 513,
474
+ "end": 517,
475
+ "text": "(it)",
476
+ "ref_id": null
477
+ }
478
+ ],
479
+ "ref_spans": [],
480
+ "eq_spans": [],
481
+ "section": "Number discrepancies",
482
+ "sec_num": "4.2.1"
483
+ },
484
+ {
485
+ "text": "In order to take into account number discrepancies in the generation of the pronoun into the target language a set of morphological (number) rules is constructed.",
486
+ "cite_spans": [],
487
+ "ref_spans": [],
488
+ "eq_spans": [],
489
+ "section": "Number discrepancies",
490
+ "sec_num": "4.2.1"
491
+ },
492
+ {
493
+ "text": "In the generation of the pronoun They into Spanish in the example (4), the interlingua representation has a PRONOUN ('they', third person and plural) that it is linked to the ENTITY ('police', plural). For the correct generation into Spanish the following morphological rule is constructed: The left-hand side of the morphological rule contains the interlingua representation of the pronoun and the right-hand side contains the pronoun in the target language.",
494
+ "cite_spans": [],
495
+ "ref_spans": [],
496
+ "eq_spans": [],
497
+ "section": "Number discrepancies",
498
+ "sec_num": "4.2.1"
499
+ },
500
+ {
501
+ "text": "In the same way, a set of morphological rules is constructed in order to generate English pronouns. Next, an example of these rules is shown:",
502
+ "cite_spans": [],
503
+ "ref_spans": [],
504
+ "eq_spans": [],
505
+ "section": "Number discrepancies",
506
+ "sec_num": "4.2.1"
507
+ },
508
+ {
509
+ "text": "pronoun + third_person + singular + antecedent ('policla~ ~ they (pronoun, third person and plural) ",
510
+ "cite_spans": [
511
+ {
512
+ "start": 47,
513
+ "end": 99,
514
+ "text": "('policla~ ~ they (pronoun, third person and plural)",
515
+ "ref_id": null
516
+ }
517
+ ],
518
+ "ref_spans": [],
519
+ "eq_spans": [],
520
+ "section": "Number discrepancies",
521
+ "sec_num": "4.2.1"
522
+ },
523
+ {
524
+ "text": "English has less morphological information than Spanish. With reference to plural personal pronouns, the pronoun we can be translated into nosotros (masculine) or nosotras (feminine), you into ustedes (masculine/feminine), vosotros (masculine) or vosotras (feminine) and they into ellos or elias. Furthermore, the singular personal pronoun it can be translated into dl/dste (masculine) or ella/dsta (feminine). For example:",
525
+ "cite_spans": [],
526
+ "ref_spans": [],
527
+ "eq_spans": [],
528
+ "section": "Gender discrepancies",
529
+ "sec_num": "4.2.2"
530
+ },
531
+ {
532
+ "text": "(6) Women~ were in the shop. They~ were buying gifts for their husbands. In Spanish, the plural name mujeres (women) is feminine and is replaced by the personal pronoun elias (plural feminine) (7), whereas in English they is valid for masculine as well as for feminine (6).",
533
+ "cite_spans": [],
534
+ "ref_spans": [],
535
+ "eq_spans": [],
536
+ "section": "Gender discrepancies",
537
+ "sec_num": "4.2.2"
538
+ },
539
+ {
540
+ "text": "\", These discrepancies do not always mean that Spanish anaphors bear more information than English one. For example, Spanish possessive adjectives ~ casa) do not carry gender information whereas English possessive adjectives do (his~her house).",
541
+ "cite_spans": [],
542
+ "ref_spans": [],
543
+ "eq_spans": [],
544
+ "section": "Gender discrepancies",
545
+ "sec_num": "4.2.2"
546
+ },
547
+ {
548
+ "text": "We can find similar discrepancies among other languages. For example, in the French-German translation, gender is assigned arbitrarily in both languages (although in French is not as arbitrarily as in German). The English-German translation, like English-Spanish, supposes a translation from a language with neutral gender into a language that assigns gender grammatically.",
549
+ "cite_spans": [],
550
+ "ref_spans": [],
551
+ "eq_spans": [],
552
+ "section": "Gender discrepancies",
553
+ "sec_num": "4.2.2"
554
+ },
555
+ {
556
+ "text": "As commented, it is important to emphasize that the omission of the pronominal subject is very usual in Spanish. If we want to stress the subject of a clause or distinguish between different possible subjects, we will have to write the pronominal subject. Otherwise, pronominal subject could be omitted. We are interested, however, in the correct generation of pronouns, and therefore, they will never be omitted.",
557
+ "cite_spans": [],
558
+ "ref_spans": [],
559
+ "eq_spans": [],
560
+ "section": "Gender discrepancies",
561
+ "sec_num": "4.2.2"
562
+ },
563
+ {
564
+ "text": "Thanks to the fact that our system solves only personal pronouns in third person, we have only studied gender discrepancies in the generation of the third person pronouns. The study has been divided into pronouns with subject role and pronouns with complement role. a) Pronouns with subject role. This kind of pronouns can be identified in the interlingua representation because they have the semantic role of AGENT in a CLAUSE. Their antecedents are established with the links to the ENTITIES.",
565
+ "cite_spans": [],
566
+ "ref_spans": [],
567
+ "eq_spans": [],
568
+ "section": "Gender discrepancies",
569
+ "sec_num": "4.2.2"
570
+ },
571
+ {
572
+ "text": "The main problem in the pronoun generation into English consists of the generation of pronoun it. If we have a pronoun with the following attributes: masculine, singular and third person in the interlingua representation, this can be generated into the Spanish pron6uns he or it. If the antecedent of the pronoun refers to a person, we will generate it into he. If the antecedent of the pronoun is an animal or a thing we will generate it into it.",
573
+ "cite_spans": [],
574
+ "ref_spans": [],
575
+ "eq_spans": [],
576
+ "section": "Gender discrepancies",
577
+ "sec_num": "4.2.2"
578
+ },
579
+ {
580
+ "text": "These characteristics of the antecedent can be obtained from the semantic information stored in its attribute Sem_Ref. A similar strategy is used to generate the pronouns she or it. With reference to plural personal pronouns: masculine/feminine, plural and third person, they are generated into the English pronoun they.",
581
+ "cite_spans": [],
582
+ "ref_spans": [],
583
+ "eq_spans": [],
584
+ "section": "Gender discrepancies",
585
+ "sec_num": "4.2.2"
586
+ },
587
+ {
588
+ "text": "In Figure 4 ",
589
+ "cite_spans": [],
590
+ "ref_spans": [
591
+ {
592
+ "start": 3,
593
+ "end": 11,
594
+ "text": "Figure 4",
595
+ "ref_id": "FIGREF4"
596
+ }
597
+ ],
598
+ "eq_spans": [],
599
+ "section": "Gender discrepancies",
600
+ "sec_num": "4.2.2"
601
+ },
602
+ {
603
+ "text": "The syntactic generation and morphological generation modules of our approach have been evaluated. To do so, one experiment for each module has been accomplished. In the first one, the generation of Spanish zero-pronouns into English, using the techniques described above in subsection 4.1.2, has been evaluated 6. In the second one, the generation of English pronouns into Spanish ones has been evaluated. In this experiment number and gender discrepancies and their resolution, described above in section 4.2, have been taken into account.",
604
+ "cite_spans": [],
605
+ "ref_spans": [],
606
+ "eq_spans": [],
607
+ "section": "Evaluation",
608
+ "sec_num": "5"
609
+ },
610
+ {
611
+ "text": "With reference to the first experiment, our computational system has been trained with a 6 Syntactic discrepancies has not been evaluated due to the aim of this work is only the pronominal anaphora generation into the target language, so the evaluation of the generation of the whole sentence into the target language has been omitted. handmade corpus 7 that contains 106 zeropronouns. With this training, we have extracted the degree of importance of the preferences that are used in the anaphora resolution module of the system. Furthermore, we have been able to check and correct the techniques used in the detection and generation of zero-pronouns into English. After that, we have carried out a blind evaluation on unrestricted texts using the set of preferences and the generation techniques learned during the training phase. In this case, partial parsing of the text with no semantic information has been used.",
612
+ "cite_spans": [],
613
+ "ref_spans": [],
614
+ "eq_spans": [],
615
+ "section": "Evaluation",
616
+ "sec_num": "5"
617
+ },
618
+ {
619
+ "text": "With regard to unrestricted texts, our system has been run on two different Spanish corpora: a) a fragment of the Spanish version of The Blue Book corpus (15,571 words), which contains the handbook of the International Telecommunications Union CCITT, and b) a fragment of the Lexesp corpus (9,746 words), which contains ten Spanish texts from different genres and authors. These texts are taken mainly from newspapers. These corpora have been POS-tagged. Having worked with different genres and disparate authors, we feel that the applicability of our proposal to other sorts of texts is assured.",
620
+ "cite_spans": [],
621
+ "ref_spans": [],
622
+ "eq_spans": [],
623
+ "section": "Evaluation",
624
+ "sec_num": "5"
625
+ },
626
+ {
627
+ "text": "To evaluate the generation of Spanish zeropronouns into English three tasks have been accomplished: a) the evaluation of the detection of zero-pronouns, b) the evaluation of anaphora resolution and c) the evaluation of generation. a) Evaluating the detection of zero-pronouns.",
628
+ "cite_spans": [],
629
+ "ref_spans": [],
630
+ "eq_spans": [],
631
+ "section": "Evaluation",
632
+ "sec_num": "5"
633
+ },
634
+ {
635
+ "text": "To do this, verbs have been classified into two categories: 1) verbs whose subjects have been omitted, and 2) verbs whose subjects have not. We have obtained a success rate s of 88% on 1,599 classified verbs, with no significant differences seen between the corpora. We should also remark that a success rate of 98% has been obtained in the detection of verbs whose subjects were omitted, whereas only 80% was achieved for verbs whose subjects were not. This lower success rate is This corpus contains sentences with zero-pronouns made by different researchers of our Research Group. g By \"success rate\", we mean the number of verbs successfully classified, divided by the total number of verbs in the text. justified for several reasons. One important reason is the non-detection of impersonal verbs by the POS tagger. Two other reasons are. the lack of semantic information and the inaccuracy of the grammar used. It is important to note that 46% of the verbs in these corpora have their subjects omitted. It shows quite clearly the importance of this phenomenon in Spanish. b) Evaluating anaphora resolution. In this task, the evaluation of zero-pronoun resolution is accomplished. Of the 1,599 verbs classified in these two corpora, 734 of them have zero-pronouns. Only 228 of them 9, however, are in third person and will be anaphorically resolved. A success rate of 75% was attained for the 228 zeropronouns. By \"successful resolutions\" we mean that the solutions offered by our system agree with the solutions offered by two human experts. c) Evaluating zero-pronoun generation. The generation of the 228 Spanish zeropronouns into English has been evaluated. The following results in the generation have been obtained: a success rate of 70% in Lexesp and a success rate of 899'o in The Blue Book. In general (both corpora) a success rate of 75% has been achieved. The errors are mainly produced by fails in anaphora resolution and fails in the generation of pronouns he/she/it (some heuristics 10, which have failed sometimes, have been applied due to the used corpora do not include semantic information). In the second experiment, we have evaluated the generation of Spanish personal pronouns with subject role into the English ones.",
636
+ "cite_spans": [],
637
+ "ref_spans": [],
638
+ "eq_spans": [],
639
+ "section": "Evaluation",
640
+ "sec_num": "5"
641
+ },
642
+ {
643
+ "text": "A fragment of the English version of The Blue Book corpus (70,319 words) containing 165 9 The remaining pronouns are not in third person or they are cataphoric (the antecedent appears after the anaphor) or exophoric (the antecedent does not appear, linguistically, in the text). J0 For instance: \"all the pronouns in third person and singular whose antecedents are proper nouns have boon translated into he (antecedent with masculine gender) or she (antecedent with feminine gender); otherwise they have been translated into it\". pronouns with subject role has been used in order to carry out a blind evaluation. A success rate of 85.41% has been achieved. The errors are mainly produced by fails in anaphora resolution and in the correct choice of the gender of the antecedent's Head in Spanish. With reference to the choice of the gender of the antecedent's Head, an electronic dictionary has been used in order to translate the original English word into the Spanish one, and subsequently, the gender is extracted from the Spanish word. Several problems have occurred when using this electronic dictionary: 1) ' the word to be translated does not appear in the dic-tionary, and therefore, a heuristic is applied to assign the gender 2) the correct sense of the English word is not chosen, and therefore, the gender could be assigned incorrectly.",
644
+ "cite_spans": [],
645
+ "ref_spans": [],
646
+ "eq_spans": [],
647
+ "section": "Evaluation",
648
+ "sec_num": "5"
649
+ },
650
+ {
651
+ "text": "In this paper a complete approach to solve and generate pronominal anaphora in the Spanish and English languages is presented. The approach works on unrestricted texts to which partial parsing techniques have been applied. After the parsing and solving pronominal anaphora, an interlingua representation (based on semantic roles and features) of the whole text is obtained. The representation of the whole text is one of the main advantages of our system due to several problems, that are hardly solved by the majority of MT systems, can be treated and solved. These problems are the generation of intersentential anaphora, the detection of coreference chains and the generation of Spanish zero-pronouns into English. Generation of zeropronouns and Spanish personal pronouns has been evaluated obtaining a success rate of 75% and 85.41% respectively.",
652
+ "cite_spans": [],
653
+ "ref_spans": [],
654
+ "eq_spans": [],
655
+ "section": "Conclusion",
656
+ "sec_num": null
657
+ }
658
+ ],
659
+ "back_matter": [],
660
+ "bib_entries": {
661
+ "BIBREF0": {
662
+ "ref_id": "b0",
663
+ "title": "The machine translation project Rose",
664
+ "authors": [
665
+ {
666
+ "first": "L",
667
+ "middle": [],
668
+ "last": "Appelo",
669
+ "suffix": ""
670
+ },
671
+ {
672
+ "first": "J",
673
+ "middle": [],
674
+ "last": "Landsbergen",
675
+ "suffix": ""
676
+ }
677
+ ],
678
+ "year": 1986,
679
+ "venue": "Proceedings of I. International Conference on the State of the Art in Machine Translation in America, Asia and Europe, 1A1-MT'86 (Saarbr0cken)",
680
+ "volume": "",
681
+ "issue": "",
682
+ "pages": "34--51",
683
+ "other_ids": {},
684
+ "num": null,
685
+ "urls": [],
686
+ "raw_text": "Appelo, L. and Landsbergen, J. (1986) The machine translation project Rose. In Proceedings of I. International Conference on the State of the Art in Machine Translation in America, Asia and Europe, 1A1-MT'86 (Saarbr0cken). pp. 34-51.",
687
+ "links": null
688
+ },
689
+ "BIBREF1": {
690
+ "ref_id": "b1",
691
+ "title": "An lnterlingua Representation Based on the Lexieo-Semantie Information",
692
+ "authors": [
693
+ {
694
+ "first": "I",
695
+ "middle": [],
696
+ "last": "Castell6n",
697
+ "suffix": ""
698
+ },
699
+ {
700
+ "first": "A",
701
+ "middle": [],
702
+ "last": "Fern~sndez",
703
+ "suffix": ""
704
+ },
705
+ {
706
+ "first": "M",
707
+ "middle": [
708
+ "A"
709
+ ],
710
+ "last": "Mart|",
711
+ "suffix": ""
712
+ },
713
+ {
714
+ "first": "R",
715
+ "middle": [],
716
+ "last": "Morante",
717
+ "suffix": ""
718
+ },
719
+ {
720
+ "first": "G",
721
+ "middle": [],
722
+ "last": "V~zquez",
723
+ "suffix": ""
724
+ }
725
+ ],
726
+ "year": 1998,
727
+ "venue": "Proceedings of the Second AMTA SIG-1L Workshop on lnterlinguas",
728
+ "volume": "",
729
+ "issue": "",
730
+ "pages": "",
731
+ "other_ids": {},
732
+ "num": null,
733
+ "urls": [],
734
+ "raw_text": "Castell6n, I.; Fern~Sndez, A.; Mart|, M.A.; Morante, R. and V~zquez, G. (1998) An lnterlingua Representation Based on the Lexieo-Semantie Information. In Proceedings of the Second AMTA SIG-1L Workshop on lnterlinguas (Philadelphia, USA, 1998).",
735
+ "links": null
736
+ },
737
+ "BIBREF2": {
738
+ "ref_id": "b2",
739
+ "title": "Anaphora resolution in unrestricted texts with partial parsing",
740
+ "authors": [
741
+ {
742
+ "first": "A",
743
+ "middle": [],
744
+ "last": "Ferrfindez",
745
+ "suffix": ""
746
+ },
747
+ {
748
+ "first": "M",
749
+ "middle": [],
750
+ "last": "Palomar",
751
+ "suffix": ""
752
+ },
753
+ {
754
+ "first": "L",
755
+ "middle": [],
756
+ "last": "Moreno",
757
+ "suffix": ""
758
+ }
759
+ ],
760
+ "year": 1998,
761
+ "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics, COLING -ACL'98",
762
+ "volume": "",
763
+ "issue": "",
764
+ "pages": "385--391",
765
+ "other_ids": {},
766
+ "num": null,
767
+ "urls": [],
768
+ "raw_text": "Ferrfindez, A.; Palomar, M. and Moreno, L. (1998) Anaphora resolution in unrestricted texts with partial parsing. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and 17th International Conference on Computational Linguistics, COLING -ACL'98 (Montreal, Canada, 1998). pp. 385-391.",
769
+ "links": null
770
+ },
771
+ "BIBREF3": {
772
+ "ref_id": "b3",
773
+ "title": "An empirical approach to Spanish anaphora resolution. To appear in Machine Translation (Special Issue on anaphora resolution in Machine Translation",
774
+ "authors": [
775
+ {
776
+ "first": "A",
777
+ "middle": [],
778
+ "last": "Ferrfindez",
779
+ "suffix": ""
780
+ },
781
+ {
782
+ "first": "M",
783
+ "middle": [],
784
+ "last": "Palomar",
785
+ "suffix": ""
786
+ },
787
+ {
788
+ "first": "L",
789
+ "middle": [],
790
+ "last": "Moreno",
791
+ "suffix": ""
792
+ }
793
+ ],
794
+ "year": 2000,
795
+ "venue": "",
796
+ "volume": "",
797
+ "issue": "",
798
+ "pages": "",
799
+ "other_ids": {},
800
+ "num": null,
801
+ "urls": [],
802
+ "raw_text": "Ferrfindez, A.; Palomar, M. and Moreno, L. (2000) An empirical approach to Spanish anaphora resolution. To appear in Machine Translation (Special Issue on anaphora resolution in Machine Translation). 2000.",
803
+ "links": null
804
+ },
805
+ "BIBREF4": {
806
+ "ref_id": "b4",
807
+ "title": "A Reasoned Interlingua for knowledge-Based Machine Translation",
808
+ "authors": [
809
+ {
810
+ "first": "J",
811
+ "middle": [
812
+ "R R"
813
+ ],
814
+ "last": "Leavitt",
815
+ "suffix": ""
816
+ },
817
+ {
818
+ "first": "D",
819
+ "middle": [],
820
+ "last": "Lonsdale",
821
+ "suffix": ""
822
+ },
823
+ {
824
+ "first": "A",
825
+ "middle": [],
826
+ "last": "Franz",
827
+ "suffix": ""
828
+ }
829
+ ],
830
+ "year": 1994,
831
+ "venue": "Proceedings",
832
+ "volume": "",
833
+ "issue": "",
834
+ "pages": "1--94",
835
+ "other_ids": {},
836
+ "num": null,
837
+ "urls": [],
838
+ "raw_text": "Leavitt, J.R.R.; Lonsdale, D. and Franz, A. (1994) A Reasoned Interlingua for knowledge-Based Machine Translation. In Proceedings of CSCS1-94.",
839
+ "links": null
840
+ },
841
+ "BIBREF5": {
842
+ "ref_id": "b5",
843
+ "title": "EDR's Concept Classification and Description for Interlingual Representation",
844
+ "authors": [
845
+ {
846
+ "first": "H",
847
+ "middle": [],
848
+ "last": "Miyoshi",
849
+ "suffix": ""
850
+ },
851
+ {
852
+ "first": "T",
853
+ "middle": [],
854
+ "last": "Ogino",
855
+ "suffix": ""
856
+ },
857
+ {
858
+ "first": "K",
859
+ "middle": [],
860
+ "last": "Sugiyarna",
861
+ "suffix": ""
862
+ }
863
+ ],
864
+ "year": 1997,
865
+ "venue": "Proceedings of the First Workshop on lnterlinguas",
866
+ "volume": "",
867
+ "issue": "",
868
+ "pages": "",
869
+ "other_ids": {},
870
+ "num": null,
871
+ "urls": [],
872
+ "raw_text": "Miyoshi, H.; Ogino, T. and Sugiyarna, K. (1997) EDR's Concept Classification and Description for Interlingual Representation. In Proceedings of the First Workshop on lnterlinguas (San Diego, USA, 1997).",
873
+ "links": null
874
+ },
875
+ "BIBREF6": {
876
+ "ref_id": "b6",
877
+ "title": "Coreference-oriented Interlingual Slot Structure and Machine Translation",
878
+ "authors": [
879
+ {
880
+ "first": "J",
881
+ "middle": [],
882
+ "last": "Peral",
883
+ "suffix": ""
884
+ },
885
+ {
886
+ "first": "M",
887
+ "middle": [],
888
+ "last": "Palomar",
889
+ "suffix": ""
890
+ },
891
+ {
892
+ "first": "A",
893
+ "middle": [],
894
+ "last": "Ferffmdez",
895
+ "suffix": ""
896
+ }
897
+ ],
898
+ "year": 1999,
899
+ "venue": "Proceedings of ACL Workshop on Coreference and its Applications",
900
+ "volume": "",
901
+ "issue": "",
902
+ "pages": "69--76",
903
+ "other_ids": {},
904
+ "num": null,
905
+ "urls": [],
906
+ "raw_text": "Peral, J.; Palomar, M. and Ferffmdez, A. (1999) Coreference-oriented Interlingual Slot Structure and Machine Translation. In Proceedings of ACL Workshop on Coreference and its Applications (College Park, Maryland, USA, 1999). pp. 69-76.",
907
+ "links": null
908
+ },
909
+ "BIBREF7": {
910
+ "ref_id": "b7",
911
+ "title": "Distributed language translation: feasibility study of multi!ingual facility for videotex information networks",
912
+ "authors": [
913
+ {
914
+ "first": "A",
915
+ "middle": [
916
+ "P M"
917
+ ],
918
+ "last": "Witkam",
919
+ "suffix": ""
920
+ }
921
+ ],
922
+ "year": 1983,
923
+ "venue": "",
924
+ "volume": "",
925
+ "issue": "",
926
+ "pages": "",
927
+ "other_ids": {},
928
+ "num": null,
929
+ "urls": [],
930
+ "raw_text": "Witkam, A.P.M. (1983) Distributed language translation: feasibility study of multi!ingual facility for videotex information networks. BSO, Utrecht.",
931
+ "links": null
932
+ }
933
+ },
934
+ "ref_entries": {
935
+ "FIGREF0": {
936
+ "uris": null,
937
+ "type_str": "figure",
938
+ "num": null,
939
+ "text": "System architecture."
940
+ },
941
+ "FIGREF1": {
942
+ "uris": null,
943
+ "type_str": "figure",
944
+ "num": null,
945
+ "text": "lnterlingua representation of a clause."
946
+ },
947
+ "FIGREF2": {
948
+ "uris": null,
949
+ "type_str": "figure",
950
+ "num": null,
951
+ "text": "Interlingua representation of example (1).On the left side ofFigure 3the new objects or entities of the discourse are represented. These objects are named ENTITIES and contain the following attributes: Cat, Identifier, Head, Number, Gender, Person and Sem_Ref, due to they can represent an AGENT, a THEME or an object in a MODIFIER."
952
+ },
953
+ "FIGREF3": {
954
+ "uris": null,
955
+ "type_str": "figure",
956
+ "num": null,
957
+ "text": "A Pedro Io vi ayer. (I saw Peter yesterday.)"
958
+ },
959
+ "FIGREF4": {
960
+ "uris": null,
961
+ "type_str": "figure",
962
+ "num": null,
963
+ "text": "Tile stadium was full of peoplej. They~ were very angry withthe referee.(5) El estadio estaba /leno de gentei. ~stai estaba muy enfadada con el ~rbitro."
964
+ },
965
+ "FIGREF5": {
966
+ "uris": null,
967
+ "type_str": "figure",
968
+ "num": null,
969
+ "text": "pronoun + third..person + plural + antecedent (~olice') ~ ~sta(pronoun, third person, feminine and singular)"
970
+ },
971
+ "FIGREF6": {
972
+ "uris": null,
973
+ "type_str": "figure",
974
+ "num": null,
975
+ "text": "Las mujeresl estaban en la tienda. Ellasi estaban comprando regains pard sus maridos."
976
+ },
977
+ "FIGREF7": {
978
+ "uris": null,
979
+ "type_str": "figure",
980
+ "num": null,
981
+ "text": ", the set of morphological rules to treat gender discrepancies in English generation of pronouns is shown: pron + third_person + masculine + sing + antec (person) ~ he pmn + thirdperson + masculine + sing + antec (animal or thing) --) it pron + thirdperson + feminine + sing + antec (person) ..-) she pron + third_person + feminine + sing + antec (animal orthing) ~ it pron + thitrlperson + feminine/masculine + plural ~ they"
982
+ },
983
+ "FIGREF8": {
984
+ "uris": null,
985
+ "type_str": "figure",
986
+ "num": null,
987
+ "text": "In Spanish generation, the main problem consists of the translation of pronoun it The set of morphological rules to treat this case is shown in Figure 5: pron + third_person + sing + antec (animal with masculine gender) ~ ~1 [ pron + third_person + sing + antec (thing with masculine gender) .-) dste ] pron + third_person + sing + antec (animal with feminine gender) ~ ella l pmn + third_person + sing + antec (thing with feminine gender) .-y 6sta"
988
+ },
989
+ "FIGREF9": {
990
+ "uris": null,
991
+ "type_str": "figure",
992
+ "num": null,
993
+ "text": "with complement role. This kind of pronouns can be identified in the interlingua representation because they have the semantic role of THEME or they are in a MODIFIER in a CLAUSE. In the pronoun generation into English, the set of morphological rules of Figure 6 is applied: pron + third_parson + sing + antec (person with masculine gander) ~ him pron + third_person + sing + antec (person with feminine gender) ~ her pron + third_person + sing + antec (animal orthing) ...) it pron \u00f7 thirdperson + plural + antec (person) ~ them"
994
+ },
995
+ "FIGREF10": {
996
+ "uris": null,
997
+ "type_str": "figure",
998
+ "num": null,
999
+ "text": "In the process of generating a pronoun with the semantic role of THEME into Spanish, the set of morphological rules ofFigure 7is applied:pron \u00f7 third_person \u00f7 plural ~ les"
1000
+ },
1001
+ "FIGREF11": {
1002
+ "uris": null,
1003
+ "type_str": "figure",
1004
+ "num": null,
1005
+ "text": "On the other hand, if the pronoun is in a MODIFIER, the rules of Spanish generation will be as shown inFigure 8: pron + third_person + sing + antec (masculine gender) ~ ~1 pron + third_person + sing + antec (feminine gender) ...) ella pron + third.person + plural + antec (masculine gender) ~ ellos pron + third_person + plural + antec (feminine gender) ~ elias"
1006
+ },
1007
+ "FIGREF12": {
1008
+ "uris": null,
1009
+ "type_str": "figure",
1010
+ "num": null,
1011
+ "text": "Figure 8."
1012
+ },
1013
+ "TABREF1": {
1014
+ "content": "<table/>",
1015
+ "type_str": "table",
1016
+ "text": "The semantic role ACTION has the following attributes: Verb with the value of the lemma of the verb; Number, Person and Tense (grammatical features) and Type with the type of the verb: impersonal, transitive, etc.",
1017
+ "num": null,
1018
+ "html": null
1019
+ }
1020
+ }
1021
+ }
1022
+ }
Full_text_JSON/prefixW/json/W00/W00-0207.json ADDED
@@ -0,0 +1,1109 @@
1
+ {
2
+ "paper_id": "W00-0207",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:31.571478Z"
6
+ },
7
+ "title": "Generation from Lexical Conceptual Structures",
8
+ "authors": [
9
+ {
10
+ "first": "David",
11
+ "middle": [],
12
+ "last": "Traum",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "University of Maryland",
17
+ "location": {}
18
+ },
19
+ "email": "traum@umd.edu"
20
+ },
21
+ {
22
+ "first": "Nizar",
23
+ "middle": [],
24
+ "last": "Habash",
25
+ "suffix": "",
26
+ "affiliation": {
27
+ "laboratory": "",
28
+ "institution": "University of Maryland",
29
+ "location": {}
30
+ },
31
+ "email": "habash@cs@umd.edu"
32
+ }
33
+ ],
34
+ "year": "",
35
+ "venue": null,
36
+ "identifiers": {},
37
+ "abstract": "",
38
+ "pdf_parse": {
39
+ "paper_id": "W00-0207",
40
+ "_pdf_hash": "",
41
+ "abstract": [],
42
+ "body_text": [
43
+ {
44
+ "text": "This paper describes a system for generating natural language sentences from an interlingual representation, Lexical Conceptual Structure (LCS). This system has been developed as part of a Chinese-English Machine Translation system, however, it promises to be useful for many other MT language pairs. The generation system has also been used in Cross-Language information retrieval research (Levow et al., 2000) .",
45
+ "cite_spans": [
46
+ {
47
+ "start": 391,
48
+ "end": 411,
49
+ "text": "(Levow et al., 2000)",
50
+ "ref_id": "BIBREF10"
51
+ }
52
+ ],
53
+ "ref_spans": [],
54
+ "eq_spans": [],
55
+ "section": "Introduction",
56
+ "sec_num": "1"
57
+ },
58
+ {
59
+ "text": "One of the big challenges in Natural Language processing efforts is to be able to make use of existing resources, a big difficulty being the sometimes large differences in syntax, semantics, and ontologies of such resources. A case in point is the interlingua representations used for machine translation and cross-language processing. Such representations are becoming fairly popular, yet there are widely different views about what these languages should be composed of, varying from purely conceptual knowledge-representations, having little to do with the structure of language, to very syntactic representations, maintaining most of the idiosyncrasies of the source languages. In our generation system we make use of resources associated with two different (kinds of) interlingua structures: Lexical Conceptual Structure (LCS) , and the Abstract Meaning Representations used at USC/ISI (Langkilde and Knight, 1998a ).",
60
+ "cite_spans": [
61
+ {
62
+ "start": 826,
63
+ "end": 831,
64
+ "text": "(LCS)",
65
+ "ref_id": null
66
+ },
67
+ {
68
+ "start": 891,
69
+ "end": 919,
70
+ "text": "(Langkilde and Knight, 1998a",
71
+ "ref_id": "BIBREF7"
72
+ }
73
+ ],
74
+ "ref_spans": [],
75
+ "eq_spans": [],
76
+ "section": "Introduction",
77
+ "sec_num": "1"
78
+ },
79
+ {
80
+ "text": "Lexical Conceptual Structure is a compositional abstraction with language-independent properties that transcend structural idiosyncrasies (Jackendoff, 1983; Jackendoff, 1990; Jackendoff, 1996) . This representation has been used as the interlingua of several projects such as UNITRAN and MILT (Dorr, 1997) .",
81
+ "cite_spans": [
82
+ {
83
+ "start": 138,
84
+ "end": 156,
85
+ "text": "(Jackendoff, 1983;",
86
+ "ref_id": "BIBREF4"
87
+ },
88
+ {
89
+ "start": 157,
90
+ "end": 174,
91
+ "text": "Jackendoff, 1990;",
92
+ "ref_id": "BIBREF5"
93
+ },
94
+ {
95
+ "start": 175,
96
+ "end": 192,
97
+ "text": "Jackendoff, 1996)",
98
+ "ref_id": "BIBREF6"
99
+ },
100
+ {
101
+ "start": 293,
102
+ "end": 305,
103
+ "text": "(Dorr, 1997)",
104
+ "ref_id": "BIBREF3"
105
+ }
106
+ ],
107
+ "ref_spans": [],
108
+ "eq_spans": [],
109
+ "section": "Lexical Conceptual Structure",
110
+ "sec_num": "2"
111
+ },
112
+ {
113
+ "text": "An LCS is adirected graph with a root. Each node is associated with certain information, including a type, a primitive and a field. The type of an LCS node is one of Event, State, Path, Manner, Property or Thing, loosely correlated with verbs prepositions, adverbs, adjectives and nouns. Within each of these types, there are a number of conceptual primitives of that type, which are the basic building blocks of LCS structures. There are two general classes of primitives: closed class or structural primitive (e.g., CAUSE, GO, BE, TO) and CONSTANTS, corresponding to the primitives for open lexical classes (e.g., reduce+ed, textile+, slash+ingly). I. Examples of fields include Locational, Possessional, Identificational. Children are also designated as to whether they are subject, argument, or modifier position.",
114
+ "cite_spans": [
115
+ {
116
+ "start": 166,
117
+ "end": 202,
118
+ "text": "Event, State, Path, Manner, Property",
119
+ "ref_id": null
120
+ }
121
+ ],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "Lexical Conceptual Structure",
125
+ "sec_num": "2"
126
+ },
127
+ {
128
+ "text": "An LCS captures the semantics of a lexical item through a combination of semantic structure (specified by the shape of the graph and its structural primitives and fields) and semantic content (specified through constants). The semantic structure of a verb is the same for all members of a verb class (Levin and Rappaport Hovav, 1995) whereas the content is specific to the verb itself. So, all the verbs in the \"Cut Verbs -Change of State\" class have the same semantic structure but vary in their semantic content (for example, chip, cut, saw, scrape, slash and scratch).",
129
+ "cite_spans": [
130
+ {
131
+ "start": 300,
132
+ "end": 333,
133
+ "text": "(Levin and Rappaport Hovav, 1995)",
134
+ "ref_id": "BIBREF9"
135
+ }
136
+ ],
137
+ "ref_spans": [],
138
+ "eq_spans": [],
139
+ "section": "Lexical Conceptual Structure",
140
+ "sec_num": "2"
141
+ },
142
+ {
143
+ "text": "The lexicon entry or Root LCS (RLCS) of one sense of the Chinese verb xuel_jian3 is as follows:",
144
+ "cite_spans": [],
145
+ "ref_spans": [],
146
+ "eq_spans": [],
147
+ "section": "Lexical Conceptual Structure",
148
+ "sec_num": "2"
149
+ },
150
+ {
151
+ "text": "(1)",
152
+ "cite_spans": [],
153
+ "ref_spans": [],
154
+ "eq_spans": [],
155
+ "section": "Lexical Conceptual Structure",
156
+ "sec_num": "2"
157
+ },
158
+ {
159
+ "text": "(act_on loc (* thing 1) (* thing 2) ((* [on] 23) loc (*head*) (thing 24)) (cut+ingly 26) (down+/m))",
160
+ "cite_spans": [],
161
+ "ref_spans": [],
162
+ "eq_spans": [],
163
+ "section": "Lexical Conceptual Structure",
164
+ "sec_num": "2"
165
+ },
166
+ {
167
+ "text": "The top node in the. RLCS has the structural primitive ACT_ON in the locational field. Its subject is a star-marked LCS, meaning a subordinate RLCS needs to be filled in here to form a complete event. It also has the restriction that the filler LCS be of the type thing. The number \"1\" in that node specifies the thematic role: in this case, agent. The second child node, in argument position, needs to t Suffixes such as \u00f7, \u00f7ed, +ingly are markers of the open class of primitives, indicating the type be of type thing too. The number \"2\" stands for theme. The last two children specify the manner of the locational act_on, that is \"cutting in a downward manner\". The RLCS for nouns are generally much simpler since they usually include only one root node with a primitive\u2022 For instance (US+) or (quota+).",
168
+ "cite_spans": [],
169
+ "ref_spans": [],
170
+ "eq_spans": [],
171
+ "section": "Lexical Conceptual Structure",
172
+ "sec_num": "2"
173
+ },
174
+ {
175
+ "text": "The meaning of complex phrases is represented as a composed LCS (CLCS). This is constructed \"composed\" from several RLCSes corresponding to individual words. In the composition process, which starts with a parse tree of the input sentence, all the obligatory positions in the root and subordinate RLCS corresponding to lexical items are filled with other RLCSes from appropriately placed items in the parse tree. For example, the three RLCSes we have seen already can compose to give the CLCS in (2), corresponding~o the English sentence: United states cut down (the) quota.",
176
+ "cite_spans": [],
177
+ "ref_spans": [],
178
+ "eq_spans": [],
179
+ "section": "Lexical Conceptual Structure",
180
+ "sec_num": "2"
181
+ },
182
+ {
183
+ "text": "(2)",
184
+ "cite_spans": [],
185
+ "ref_spans": [],
186
+ "eq_spans": [],
187
+ "section": "Lexical Conceptual Structure",
188
+ "sec_num": "2"
189
+ },
190
+ {
191
+ "text": "(act_on loc (us+) (quota+) ((* [on] 23) loc (*head*) (thing 24)) (cut+ingly 26) (dowa+/m))",
192
+ "cite_spans": [],
193
+ "ref_spans": [],
194
+ "eq_spans": [],
195
+ "section": "Lexical Conceptual Structure",
196
+ "sec_num": "2"
197
+ },
198
+ {
199
+ "text": "CLCS structures can be composed of different sorts of RLCS structures, corresponding to different words. A CLCS can also be decomposed on the generation side in different ways depending on the RLCSes of the lexical items in the target language. For example, the CLCS above will match a single verb and two arguments when generated in Chinese (regardless of the input language). But it will match four lexical items in English: cut, US, quota, and down, since the RLCS for the verb \"cut\" in the English lexicon, as shown in (3), does not include the modifier down.",
200
+ "cite_spans": [],
201
+ "ref_spans": [],
202
+ "eq_spans": [],
203
+ "section": "Lexical Conceptual Structure",
204
+ "sec_num": "2"
205
+ },
206
+ {
207
+ "text": "(3)",
208
+ "cite_spans": [],
209
+ "ref_spans": [],
210
+ "eq_spans": [],
211
+ "section": "Lexical Conceptual Structure",
212
+ "sec_num": "2"
213
+ },
214
+ {
215
+ "text": "(act_on ioc (* thing 1) (* thing 2) ((* [on] 23) loc (*head*) (thing 24)) (cut+ingly 26) )",
216
+ "cite_spans": [],
217
+ "ref_spans": [],
218
+ "eq_spans": [],
219
+ "section": "Lexical Conceptual Structure",
220
+ "sec_num": "2"
221
+ },
222
+ {
223
+ "text": "The rest of the examples in this paper will refer to the slightly more complex CLCS shown in (4), corresponding to the English sentence The United States unilaterally reduced the China textile export quota This LCS is presented without all the additional features for sake of clarity. Also, it is actually one of eight possible LCS compositions produced by the analysis component from the input Chinese sentence. 3 The Generation System Since this generation system was developed in tandem with the most recent LCS composition system, and LCS-language and specific lexicon extensions, a premium was put on the ability for experimentation along a number of parameters and rapid adjustment on the basis of intermediate inputs and results to the generation system. This goal encouraged a modular design, and made lisp a convenient language for implementation. We were also able to successfully integrate components from the Nitrogen Generation System (Langkilde and Knight, 1998a; Langkilde and Knight, 1998b) . The architecture of the generation system is shown in Figure 1 , showing the main modules and sub-modules and flow of information between them. The first main component translates, with the use of a language specific lexicon, from the LCS interlingua to a language-specific representation of the sentence in a modified form of the AMR-interlingua, using words and features specific to the target language, but also including syntactic and semantic information from the LCS representation. The second main component produces target language sentences from this intermediate representation. We will now describe each of these components in more detail.",
224
+ "cite_spans": [
225
+ {
226
+ "start": 948,
227
+ "end": 977,
228
+ "text": "(Langkilde and Knight, 1998a;",
229
+ "ref_id": "BIBREF7"
230
+ },
231
+ {
232
+ "start": 978,
233
+ "end": 1006,
234
+ "text": "Langkilde and Knight, 1998b)",
235
+ "ref_id": "BIBREF8"
236
+ }
237
+ ],
238
+ "ref_spans": [
239
+ {
240
+ "start": 1063,
241
+ "end": 1071,
242
+ "text": "Figure 1",
243
+ "ref_id": null
244
+ }
245
+ ],
246
+ "eq_spans": [],
247
+ "section": "Lexical Conceptual Structure",
248
+ "sec_num": "2"
249
+ },
250
+ {
251
+ "text": "The input to the generation component is a textrepresentation of a CLCS, the Lexical Conceptual Structure corresponding to a natural language sentence. The particular format, known as long-hand is equivalent to the form shown in (4), but making certain information more explicit and regular (at the price of increased verbosity). The Longhand CLCS can either be a fully language-neutral interlingua representation, or one which still incorporates some aspects of the source-language interpretation process. This latter may include grammatical features on LCS nodes, but also nodes, known as functional nodes, which correspond to words in the source language but are not LCS-nodes themselves, serving merely as place-holders for feature information. Examples of these nodes include punctuation markers, coordinating conjunctions, grammatical aspec t markers, and determiners. An additional extension of the LCS input language, beyond traditional LCS is the in-place representation of an ambiguous sub-tree as a POSSIBLES node, which has the various possibilities represented as its own children. Thus, for example, the following structure (with some aspects elided for brevity) represents a node that could be one of three possibilities. In the second one, the root of the sub-tree is a functional node, passing its features to its child, COUNTRY+:",
252
+ "cite_spans": [],
253
+ "ref_spans": [],
254
+ "eq_spans": [],
255
+ "section": "Lexical Conceptual Structure",
256
+ "sec_num": "2"
257
+ },
258
+ {
259
+ "text": "(5) (:POSSIBLES -2589104",
260
+ "cite_spans": [],
261
+ "ref_spans": [],
262
+ "eq_spans": [],
263
+ "section": "Lexical Conceptual Structure",
264
+ "sec_num": "2"
265
+ },
266
+ {
267
+ "text": "(MIDDLE+ (COUNTRY+ (DEVELOPING+/P))) (FUNCTIONAL (PDSTPOSITION AMONG) (COUNTRY+ (DEVELOPING+/P))) (CHINA+ (COUNTRY+ (DEVELOPING+/P))) )",
268
+ "cite_spans": [],
269
+ "ref_spans": [],
270
+ "eq_spans": [],
271
+ "section": "Lexical Conceptual Structure",
272
+ "sec_num": "2"
273
+ },
274
+ {
275
+ "text": "The first major component, divided into four pipelined sub-modules, as shown in Figure 1 transforms a CLCS structure to what we call an LCS-AMR structure, using the syntax of the abstract meaning representation (AMR), used in the Nitrogen generation system, but with words already chosen (rather than more abstract Sensus ontology concepts), and also augmented with information from the LCS that is useful for target language realization.",
276
+ "cite_spans": [],
277
+ "ref_spans": [
278
+ {
279
+ "start": 80,
280
+ "end": 88,
281
+ "text": "Figure 1",
282
+ "ref_id": null
283
+ }
284
+ ],
285
+ "eq_spans": [],
286
+ "section": "Lexical Choice",
287
+ "sec_num": "3.1"
288
+ },
289
+ {
290
+ "text": "The pre-processing phase converts the text input format into internal graph representations, for efficient access of components (with links for parents as well as children), also doing away with extraneous sourcelanguage features, converting, for example, (5) to remove the functional node and promote COUNTRY+ to be one of the possible sub-trees. This involves a topdown ,reversal of the tree, including some complexities when functional nodes without children (which then assign features to their parents) are direct children of possibles nodes.",
291
+ "cite_spans": [],
292
+ "ref_spans": [],
293
+ "eq_spans": [],
294
+ "section": "Pre-Processing",
295
+ "sec_num": "3.1.1"
296
+ },
297
+ {
298
+ "text": "The lexical access phase compares the internal CLCS form to the target language lexicon, decorating the CLCS tree with the RLCSes of target language words which are likely to match sub-structures of the CLCS. In an off-line processing phase, the target language lexicon is stored in a hash-table, with each entry keyed on a designated primitive which would be a most distinguishing node in the RLCS. On-line decoration then proceeds in two step process, for each node in the CLCS:",
299
+ "cite_spans": [],
300
+ "ref_spans": [],
301
+ "eq_spans": [],
302
+ "section": "Lexical Access",
303
+ "sec_num": "3.1.2"
304
+ },
305
+ {
306
+ "text": "(6) a. look for RLCSes stored in the lexicon under the CLCS node's primitives b. store retrieved RLCSes at the node in the CLCS that matches the root of this RLCS Figure 2 shows some of the English entries matching the CLCS in (4). For most of these words, the designated primitive is the only node in the corresponding LCS for that entry. For reduce, however, reduce+ed is the designated primitive. While this will be retrieved in step (6) while examining the reduce+ed node from (4), in (6)b, the LCS for \"reduce\" will be stored at the root node of (4) (cause). ",
307
+ "cite_spans": [],
308
+ "ref_spans": [
309
+ {
310
+ "start": 163,
311
+ "end": 171,
312
+ "text": "Figure 2",
313
+ "ref_id": null
314
+ }
315
+ ],
316
+ "eq_spans": [],
317
+ "section": "Lexical Access",
318
+ "sec_num": "3.1.2"
319
+ },
320
+ {
321
+ "text": "The current English lexicon contains over 11000 RLCS entries such as those in Figure 2 , including over 4000 verbs and 6200 unique primitive keys in the hash-table.",
322
+ "cite_spans": [],
323
+ "ref_spans": [
324
+ {
325
+ "start": 78,
326
+ "end": 86,
327
+ "text": "Figure 2",
328
+ "ref_id": null
329
+ }
330
+ ],
331
+ "eq_spans": [],
332
+ "section": "B4",
333
+ "sec_num": null
334
+ },
335
+ {
336
+ "text": "The heart of the lexical access algorithm is the decomposition process. This algorithm attempts to align RLCSes selected by the lexical access portion with parts of the CLCS, to find a complete covering of the CLCS graph. The main algorithm is very similar to that described in (Dorr, 1993) , however with some extensions to be able to also deal with the in-place ambiguity represented by the possibles nodes.",
337
+ "cite_spans": [
338
+ {
339
+ "start": 278,
340
+ "end": 290,
341
+ "text": "(Dorr, 1993)",
342
+ "ref_id": "BIBREF2"
343
+ }
344
+ ],
345
+ "ref_spans": [],
346
+ "eq_spans": [],
347
+ "section": "Alignment/Decomposition",
348
+ "sec_num": "3.1.3"
349
+ },
350
+ {
351
+ "text": "The algorithm recursively checks a CLCS node against corresponding RLCS nodes coming from the lexical entries-retrieved and stored in the previous phase. If significant incompatibilities are found, the lexical entry is discarded. If all (obligatory) nodes in the RLCS match against nodes in the CLCS, then the rest of the CLCS is recursively checked against other lexical entries stored at the remaining unmatched CLCS nodes. Some nodes, indicated with a \"*\", as in Figure 2 , require not just a match against the corresponding CLCS node, but also a match against another lexical entry. Some CLCS nodes must thus match multiple RLCS nodes. A CLCS node matches an RLCS node, if the following conditions hold: Subject and argument children of an RLCS node are obligatory unless specified as optional, whereas modifiers are optional unless specified as obligatory. In the RLCS for \"reduce\" in Figure 2 , the nodes corresponding to agent and theme (numbered 1 and 2, respectively) are obligatory, while the instrument (the node numbered 19) is optional. Thus, even though in (4) there is no matching lexical entry for the node in Figure 2 numbered 20 (\"*\"marked in the RLCS for \"with\"), the main RLCS for ' 'reduce' ' is allowed to match, though without any realization for the instrument.",
352
+ "cite_spans": [],
353
+ "ref_spans": [
354
+ {
355
+ "start": 466,
356
+ "end": 474,
357
+ "text": "Figure 2",
358
+ "ref_id": null
359
+ },
360
+ {
361
+ "start": 890,
362
+ "end": 898,
363
+ "text": "Figure 2",
364
+ "ref_id": null
365
+ },
366
+ {
367
+ "start": 1126,
368
+ "end": 1134,
369
+ "text": "Figure 2",
370
+ "ref_id": null
371
+ }
372
+ ],
373
+ "eq_spans": [],
374
+ "section": "Alignment/Decomposition",
375
+ "sec_num": "3.1.3"
376
+ },
377
+ {
378
+ "text": "A complexity in the algorithm occurs when there are multiple possibilities filling in a position in a CLCS. in this case, only one of these possibilities is requirea to match all the corresponding RLCS nodes in order for a lexical entry to match. In the case where there are some of these possibilities that do not match any RLCS nodes (meaning there are no target-language realizations for these constructs), these possibilities can be pruned at this stage. On the other hand, ambiguity can also be introduced at the decomposition stage, if multiple lexical entries can match a single structure",
379
+ "cite_spans": [],
380
+ "ref_spans": [],
381
+ "eq_spans": [],
382
+ "section": "Alignment/Decomposition",
383
+ "sec_num": "3.1.3"
384
+ },
385
+ {
386
+ "text": "The result of the decomposition process is a match-structure indicating the hierarchical relationship between all lexical entries, which, together cover the input CLCS.",
387
+ "cite_spans": [],
388
+ "ref_spans": [],
389
+ "eq_spans": [],
390
+ "section": "Alignment/Decomposition",
391
+ "sec_num": "3.1.3"
392
+ },
393
+ {
394
+ "text": "The match structure resulting from decomposition is then converted into the appropriate input format used by the Nitrogen generation system. Nitrogen's input, Abstract Meaning Representation (AMR), is a labeled directed graph written using the syntax for the PENMAN Sentence Plan Language (Penman 1989) . the structure of an AMR is basically as in (8).",
395
+ "cite_spans": [
396
+ {
397
+ "start": 289,
398
+ "end": 302,
399
+ "text": "(Penman 1989)",
400
+ "ref_id": null
401
+ }
402
+ ],
403
+ "ref_spans": [],
404
+ "eq_spans": [],
405
+ "section": "LCS-AMR Creation",
406
+ "sec_num": "3.1.4"
407
+ },
408
+ {
409
+ "text": "(8) AMR = <concept> I (<label> {<role> <AMR>}+)",
410
+ "cite_spans": [],
411
+ "ref_spans": [],
412
+ "eq_spans": [],
413
+ "section": "LCS-AMR Creation",
414
+ "sec_num": "3.1.4"
415
+ },
416
+ {
417
+ "text": "Since the roles expected by Nitrogen's English generation grammar do not match well with the thematic roles and features of a CLCS, we have extended the AMR language with LCS-specific relations, calling the result, an LCS-AMR. To distinguish the LCS relations from those used by Nitrogen, we mark most of the new roles with the prefix : LCS-. Figure 3 shows the LCS-AMR corresponding to the CLCS in (4).",
418
+ "cite_spans": [],
419
+ "ref_spans": [
420
+ {
421
+ "start": 343,
422
+ "end": 351,
423
+ "text": "Figure 3",
424
+ "ref_id": "FIGREF3"
425
+ }
426
+ ],
427
+ "eq_spans": [],
428
+ "section": "LCS-AMR Creation",
429
+ "sec_num": "3.1.4"
430
+ },
431
+ {
432
+ "text": "In the above example, the basic role / is used to specify an instance. So, the LCS-AMR can be read as an instance of the concept Ireduce I whose category is a verb and is in the active voice. Moreover, Ireducel has two thematic roles related to it, an agent and a theme; and it is modified by the concept lunilaterally]. The different roles modifying Ireduce I come from different origins. The :LCS-NODE value comes directly from the unique node number in the input CLCS. The category, voice and telicity are derived from features of the LCS entry for the verb Ireduce] in the English lexicon. The specifications of agent and theme come from the LCS representation of the verb reduce in the English lexicon as well, as can be seen by the node numbers 1 and 2, in the lexicon entry in Figure 2 . The role :LCS-MOD-MANNER is derived by combining the fact that the corresponding AMR had a modifier role in the CLCS and because its type is a Manner.",
433
+ "cite_spans": [],
434
+ "ref_spans": [
435
+ {
436
+ "start": 784,
437
+ "end": 792,
438
+ "text": "Figure 2",
439
+ "ref_id": null
440
+ }
441
+ ],
442
+ "eq_spans": [],
443
+ "section": "LCS-AMR Creation",
444
+ "sec_num": "3.1.4"
445
+ },
446
+ {
447
+ "text": "The LCS-AMR representation is then passed to the realization module. The strategy used by Nitrogen is ",
448
+ "cite_spans": [],
449
+ "ref_spans": [],
450
+ "eq_spans": [],
451
+ "section": "Realization",
452
+ "sec_num": "3.2"
453
+ },
454
+ {
455
+ "text": "In this module, we force linear order on the unordered parts of an LCS-AMR. This is done by recursively calling subroutines that create various phrase types (NP, PP, etc.) from aspects of the LCS-AMR. The result of the linearization phase is a word lattice specifying the sequence of words that make up the resulting sentence and the points of ambiguity where different generation paths are taken. (9) shows the word lattice corresponding to the LCS-AMR in (8).",
456
+ "cite_spans": [],
457
+ "ref_spans": [],
458
+ "eq_spans": [],
459
+ "section": "Linearizatlon",
460
+ "sec_num": "3.2.1"
461
+ },
462
+ {
463
+ "text": "(9) (SEQ (WRD \"*start-sentence*\" BOS) (WRD \"united states\" NOUN) (WRD \"unilaterally\" ADJ) (WRD \"reduced\" VERB) (OR (WRD \"the\" ART) (WRD \"a\" ART) (WRD \"an\" ART)) (WRD \"china\" ADJ) (OR (SEQ (WRD \"export\" ADJ) (WRD \"textile\" ADJ)) (SEQ (WRD \"textile\" ADJ) (WRD \"export\" ADJ))) (WRD \"quota\" NOUN) (WRD \".\" PUNC) (WRD \"*end-sentence*\" EOS))",
464
+ "cite_spans": [],
465
+ "ref_spans": [],
466
+ "eq_spans": [],
467
+ "section": "Linearizatlon",
468
+ "sec_num": "3.2.1"
469
+ },
470
+ {
471
+ "text": "The keyword SEQ specifies that what follows it is a list of words in their correct linear order. The keyword OR specifies the existence of different paths for generation. In the above example, the word 'quota' gets all possible determiners since its definiteness is not specified. Also, the relative order of the words 'textile' and 'export' is not resolved so both possibilities are generated.",
472
+ "cite_spans": [],
473
+ "ref_spans": [],
474
+ "eq_spans": [],
475
+ "section": "Linearizatlon",
476
+ "sec_num": "3.2.1"
477
+ },
478
+ {
479
+ "text": "Sentences were realized according to the pattern in (10). That is, first subordinating conjunctions, if any, then modifiers in the temporal field (e.g., \"now\", \"in 1978\"), then the first thematic role, then most other modifiers, the verb (with collocations if any) then spatial modifiers (\"up\", \"down\"), then the second and third thematic roles, followed by prepositional phrases and relative sentences. Nitrogen's morphology component was also used, e.g., to give tense to the head verb. In the example above, since there was no tense specified in the input LCS, past tense was used on the basis of the telicity of the verb.",
480
+ "cite_spans": [],
481
+ "ref_spans": [],
482
+ "eq_spans": [],
483
+ "section": "Linearizatlon",
484
+ "sec_num": "3.2.1"
485
+ },
486
+ {
487
+ "text": "(10) (Sconj ,) (temp-mod)* Whl (Mods)* V (coll) (stood)* (Th2)+ (Th3)+ (PP)* (RelS)*",
488
+ "cite_spans": [],
489
+ "ref_spans": [],
490
+ "eq_spans": [],
491
+ "section": "Linearizatlon",
492
+ "sec_num": "3.2.1"
493
+ },
494
+ {
495
+ "text": "There is no one-to-one mapping between a particular thematic role and an argument position. For example, a theme can be the subject in some cases and it can be the object in others or even an oblique. Observe \"cookie\" in ill). ill) a. John ate a cookie (object) b. the cookie contains chocolate (subject) c. she nibbled at a cookie (oblique) Thematic roles are numbered for their correct realization order, according to the hierarchy for arguments shown in (12).",
496
+ "cite_spans": [],
497
+ "ref_spans": [],
498
+ "eq_spans": [],
499
+ "section": "Linearizatlon",
500
+ "sec_num": "3.2.1"
501
+ },
502
+ {
503
+ "text": "(12) agent > instrument > theme > perceived > ( everythin gel se )",
504
+ "cite_spans": [],
505
+ "ref_spans": [],
506
+ "eq_spans": [],
507
+ "section": "Linearizatlon",
508
+ "sec_num": "3.2.1"
509
+ },
510
+ {
511
+ "text": "So, in the case of the occurrence of theme alone, it is mapped to first argument position. If a theme and an agent occur, the agent is mapped to first argument position and the theme is mapped to second argument position. A more detailed discussion is available in (Doff et al., 1998) . For the LCS-AMR in Figure 3 , the thematic hierarchy is what determined that the lunited statesl is the subject and Iquotal is the object of the verb Ireducel.",
512
+ "cite_spans": [
513
+ {
514
+ "start": 265,
515
+ "end": 284,
516
+ "text": "(Doff et al., 1998)",
517
+ "ref_id": null
518
+ }
519
+ ],
520
+ "ref_spans": [
521
+ {
522
+ "start": 306,
523
+ "end": 314,
524
+ "text": "Figure 3",
525
+ "ref_id": "FIGREF3"
526
+ }
527
+ ],
528
+ "eq_spans": [],
529
+ "section": "Linearizatlon",
530
+ "sec_num": "3.2.1"
531
+ },
532
+ {
533
+ "text": "In our input CLCSs, in most cases little hierarchical information was given about multiple modifiers of a noun. Our initial, brute force, solution was to generate all permutations and depend on statistical extraction to decide. This technique Worked for noun phrases of about 6 words, but was too costly for larger phrases (of which there were several examples in our test corpus). This cost was alleviated to some degree, also providing slightly better results than pure bigram selection by labelling adjectives in the English lexicon as belonging to one of several ordered classes, inspired by the adjective ordering scheme in (Quirk et al., 1985) . This is shown in (13).",
534
+ "cite_spans": [
535
+ {
536
+ "start": 629,
537
+ "end": 649,
538
+ "text": "(Quirk et al., 1985)",
539
+ "ref_id": null
540
+ }
541
+ ],
542
+ "ref_spans": [],
543
+ "eq_spans": [],
544
+ "section": "Linearizatlon",
545
+ "sec_num": "3.2.1"
546
+ },
547
+ {
548
+ "text": "(13) a. Determiner (all, few, several, some, etc.) b. Most Adjectival (important, practical, economic, etc.) c. Age (old, young, etc.) d. Color (black, red, etc.) e. Participle (confusing, adjusted, convincing, decided) f. Provenance (China, southern, etc.) g. Noun (Bank_of_China, difference, memorandum, etc.) h. Denominal (nouns made into adjectives by adding-al, e.g., individual, coastal, annual, etc.)",
549
+ "cite_spans": [
550
+ {
551
+ "start": 19,
552
+ "end": 50,
553
+ "text": "(all, few, several, some, etc.)",
554
+ "ref_id": null
555
+ },
556
+ {
557
+ "start": 70,
558
+ "end": 108,
559
+ "text": "(important, practical, economic, etc.)",
560
+ "ref_id": null
561
+ },
562
+ {
563
+ "start": 116,
564
+ "end": 134,
565
+ "text": "(old, young, etc.)",
566
+ "ref_id": null
567
+ },
568
+ {
569
+ "start": 144,
570
+ "end": 162,
571
+ "text": "(black, red, etc.)",
572
+ "ref_id": null
573
+ },
574
+ {
575
+ "start": 177,
576
+ "end": 219,
577
+ "text": "(confusing, adjusted, convincing, decided)",
578
+ "ref_id": null
579
+ },
580
+ {
581
+ "start": 234,
582
+ "end": 257,
583
+ "text": "(China, southern, etc.)",
584
+ "ref_id": null
585
+ },
586
+ {
587
+ "start": 266,
588
+ "end": 311,
589
+ "text": "(Bank_of_China, difference, memorandum, etc.)",
590
+ "ref_id": null
591
+ }
592
+ ],
593
+ "ref_spans": [],
594
+ "eq_spans": [],
595
+ "section": "Linearizatlon",
596
+ "sec_num": "3.2.1"
597
+ },
598
+ {
599
+ "text": "If multiple words fall within the same group, permutations are generated for them. This situation can be seen for the LCA-AMR in Figure 3 with the ordering of the modifiers of the word I quota]: I chinal, lexportl and Itextilel. Ichinal fell within the Provenance class of modifiers which gives it precedenc e over the other two words. They, on the other hand, fell in the Noun class and therefore both permutations were passed on to the statistical component.",
600
+ "cite_spans": [],
601
+ "ref_spans": [
602
+ {
603
+ "start": 129,
604
+ "end": 137,
605
+ "text": "Figure 3",
606
+ "ref_id": "FIGREF3"
607
+ }
608
+ ],
609
+ "eq_spans": [],
610
+ "section": "Linearizatlon",
611
+ "sec_num": "3.2.1"
612
+ },
613
+ {
614
+ "text": "The final step, extracting a preferred sentence from the word lattice of possibilities is done using Nitrogen's Statistical Extractor without any changes. Sentences are scored using uni and bigram frequencies calculated based on two years of Wall Street Journal (Langkilde and Knight, 1998b) .",
615
+ "cite_spans": [
616
+ {
617
+ "start": 262,
618
+ "end": 291,
619
+ "text": "(Langkilde and Knight, 1998b)",
620
+ "ref_id": "BIBREF8"
621
+ }
622
+ ],
623
+ "ref_spans": [],
624
+ "eq_spans": [],
625
+ "section": "Statistical Preferences",
626
+ "sec_num": "3.2.2"
627
+ },
628
+ {
629
+ "text": "A major issue in sentence generation from an interlingua or conceptual structure, especially as part of a machine translation project, is how and when to deal with ambiguity. There are several different sources of ambiguity in the generation process outlined in the previous section. Some of these include:",
630
+ "cite_spans": [],
631
+ "ref_spans": [],
632
+ "eq_spans": [],
633
+ "section": "Dealing with Ambiguity",
634
+ "sec_num": "4"
635
+ },
636
+ {
637
+ "text": "\u2022 ambiguity in source language analysis (as represented by possibles nodes in the CLCS input to the Generation system). This can include ambiguity between multiple concepts, such as the example in (5), LCS type/structure (e.g., thing or event, which field), or structural ambiguity (subject, argument or modifier). ambiguity introduced in lexical choice (when multiple match structures can cover a single",
638
+ "cite_spans": [],
639
+ "ref_spans": [],
640
+ "eq_spans": [],
641
+ "section": "Dealing with Ambiguity",
642
+ "sec_num": "4"
643
+ },
644
+ {
645
+ "text": "ambiguity introduced in realization (when multiple orderings are possible, also multiple morphological realizations)",
646
+ "cite_spans": [],
647
+ "ref_spans": [],
648
+ "eq_spans": [],
649
+ "section": "CLCS)",
650
+ "sec_num": null
651
+ },
652
+ {
653
+ "text": "There are also several types of strategies for addressing ambiguity at various phases, including:",
654
+ "cite_spans": [],
655
+ "ref_spans": [],
656
+ "eq_spans": [],
657
+ "section": "CLCS)",
658
+ "sec_num": null
659
+ },
660
+ {
661
+ "text": "\u2022 passing all possible structures down for further processing stages to deal with",
662
+ "cite_spans": [],
663
+ "ref_spans": [],
664
+ "eq_spans": [],
665
+ "section": "CLCS)",
666
+ "sec_num": null
667
+ },
668
+ {
669
+ "text": "\u2022 filtering based on \"soft\" preferences (only pass the highest set of candidates, according to some metric)",
670
+ "cite_spans": [],
671
+ "ref_spans": [],
672
+ "eq_spans": [],
673
+ "section": "CLCS)",
674
+ "sec_num": null
675
+ },
676
+ {
677
+ "text": "\u2022 quota-based filtering, passing only the top N candidates",
678
+ "cite_spans": [],
679
+ "ref_spans": [],
680
+ "eq_spans": [],
681
+ "section": "CLCS)",
682
+ "sec_num": null
683
+ },
684
+ {
685
+ "text": "\u2022 threshold filtering, passing only candidates that exceed a fixed threshold (either score or binary test)",
686
+ "cite_spans": [],
687
+ "ref_spans": [],
688
+ "eq_spans": [],
689
+ "section": "CLCS)",
690
+ "sec_num": null
691
+ },
692
+ {
693
+ "text": "The generation system uses a combination of these strategies, at different phases in the processing. Ambiguous CLCS sub-trees are sometimes annotated with scores based on preference of attachment as an argument rather than a modifier. The alignment algorithm can be run in either of two modes, one which selects only the top scoring possibility for which a matching structure can be found, and one in which all possible structures are passed on, regardless of score. The former method is the only one feasible when given very large (e.g., over 1 megabyte text files) CLCS inputs. Also at the decomposition level, soft preferences are used in that missing lexical entries can be hypothesized to cover parts of the CLCS (essentially \"making up\" words in the target language). This is done, however, only when no legitimate matches are found using only the available lexical entries. At the linearization phase, there are often many choices for ordering of modifiers at the same level. As mentioned in the previous section, we are experimenting with separating these into positional classes, but our last resort is to pass along all permutations of elements in each sub-class. The ultimate arbiter is the statistical extractor, which orders and presents the top scoring realizations.",
694
+ "cite_spans": [],
695
+ "ref_spans": [],
696
+ "eq_spans": [],
697
+ "section": "CLCS)",
698
+ "sec_num": null
699
+ },
700
+ {
701
+ "text": "One issue that needs to be confronted in an Interlingua such as LCS is what to do when linguistic structure of languages vary widely, and useful conceptual structure may also diverge from these. A case in point is the representation of numbers. Languages diverge widely as to which numbers are primitive terms, and how larger numbers are built com-positionaUy through modification (e.g., multiplication and addition). One question that immediately comes up is whether an interlingua such as LCS should represent numbers according to the linguistic structure of the source language (or some particular designated natural language) or as some other internal numerical form, (e.g. decimal numerals). Likewise, on generation into a target language, how much of the structure of the source language should be kept, especially when this is not the most natural way to group things in the target language. One might be tempted to always convert to a standard interlingua representation of numbers, however this does los_e some possible classification into groups that might be present in the input (contrast in English: \"12 pair\" with \"2 dozen\". In our Chinese-English efforts, such issues came up, since the natural multiplication points in Chinese were 100, 10,000, and 100,000,000, rather than 100, 1000, and 1,000,000, as in English. Our provisional solution is to propogate the source language modification structure all the way through the LCS-AMR stage, and include special purpose rules looking for the \"Chinese\" numbers and multiplying them together to get numerals, and then divide and realize in the English fashion. E.g., using the words thousand, million, and billion.",
702
+ "cite_spans": [],
703
+ "ref_spans": [],
704
+ "eq_spans": [],
705
+ "section": "Interlingual representation issues",
706
+ "sec_num": "5"
707
+ },
708
+ {
709
+ "text": "So far most of the evaluation has been fairly smallscale and fairly subjective, generating English sentences from CLCSs produced from about 80 sentences. Evaluation in this case is difficult, because the ultimate criteria is translation quality, which can, itself, be difficult to judge, but, moreover, it can be hard to attribute specific deficits to the analysis phase, the lexical resources, or the generation system proper. So far results have been mostly adequate, even for large and fairly complex sentences, taking less than 1 minute for generation up to inputs of about 1 megabyte input CLCS files. Ambiguity and complexity beyond that level tends to overtax the generation system.",
710
+ "cite_spans": [],
711
+ "ref_spans": [],
712
+ "eq_spans": [],
713
+ "section": "Evaluation",
714
+ "sec_num": "6"
715
+ },
716
+ {
717
+ "text": "For the most part, the over-generation strategy of Nitrogen, coupled with the bigram preferences works very well. There are still some difficulties, however. One major one is that, especially with its bias for shorter sentences, fluency is given preference over translation fidelity. Thus, if there are options of whether or not to express some optional information, this will tend to be left out. Also, bigrams are obviously inadequate for capturing long-distance dependencies, and so, if things like agreement are not carefully controlled in the symbolic component, they will be incorrect in some cases.",
718
+ "cite_spans": [],
719
+ "ref_spans": [],
720
+ "eq_spans": [],
721
+ "section": "Evaluation",
722
+ "sec_num": "6"
723
+ },
724
+ {
725
+ "text": "The generation component has also been used on a broader scale, generating thousands of simple sentences -at least one for each verb in the English LCS lexicon, creating sentence templates to be used in a Cross-Language information retrieval system (Levow et al., 2000) .",
726
+ "cite_spans": [
727
+ {
728
+ "start": 249,
729
+ "end": 269,
730
+ "text": "(Levow et al., 2000)",
731
+ "ref_id": "BIBREF10"
732
+ }
733
+ ],
734
+ "ref_spans": [],
735
+ "eq_spans": [],
736
+ "section": "Evaluation",
737
+ "sec_num": "6"
738
+ },
739
+ {
740
+ "text": "The biggest remaining step is a more careful evaluation of different sub-systems and preference strategies to more efficiently process very ambiguous and complex inputs, without substantially sacrificing translation quality. Also a current research topic is how to combine other metrics coming from various points in the generation process with the bigram statistics, to result in better overall outputs.",
741
+ "cite_spans": [],
742
+ "ref_spans": [],
743
+ "eq_spans": [],
744
+ "section": "Future Work",
745
+ "sec_num": "7"
746
+ },
747
+ {
748
+ "text": "Another topic of interest is developing other language outputs. Most of the subcomponents are language-independent. The realization components being an obvious exception. In particular, the pre-processing algorithm is completely languageindependent. The lexical access algorithm is language independent, although it requires a targetlanguage lexicon, which of course is language dependent. The alignment algorithm is also completely language independent. The lcs-amr creation language is mostly language independent, however there may not be sufficient features added to the language and extracted from the LCS-AMR for full generation of some other languages. Some target languages might require some extensions to the output language and new rules to extract this information from the LCS. The realization process is mostly language dependent. The current linearizaton module is very dependent on the structure of English. We are, however working on a future version of this component splitting the linearization task into language independent processes and grammar compilers, and independent language-specific output grammars. Nitrogen's realizer, also, is algorithmically language-independent, however one would need a target language database for realization in another language. veristy of Maryland, especially Ron Dolanl ~ Bonnie Dorr, Gina Levow, Mari Olsen, Wade sti~fi, Amy Weinberg, for helpful input and feedback on the generation system.",
749
+ "cite_spans": [],
750
+ "ref_spans": [],
751
+ "eq_spans": [],
752
+ "section": "Future Work",
753
+ "sec_num": "7"
754
+ }
755
+ ],
756
+ "back_matter": [
757
+ {
758
+ "text": "This work was supported by the US Department of Defense through contract MDA904-96-I:t-0738. The Nitrogen system used in the realization process was provided by USC/ISI, we would like to thank Keven Knight and Irene Langkilde for help and advice in using it. The adjective classifications described in Section 3 were devised by Carol Van Ess-Dykema. David Clark and Noah Smith worked on previous versions of the system, and we are indebted to Some of their ideas for the current implementation. We would also like to thank the CLIP group at Uni-",
759
+ "cite_spans": [],
760
+ "ref_spans": [],
761
+ "eq_spans": [],
762
+ "section": "Acknowledgements",
763
+ "sec_num": null
764
+ }
765
+ ],
766
+ "bib_entries": {
767
+ "BIBREF0": {
768
+ "ref_id": "b0",
769
+ "title": "Use of Lexical Conceptual Structure for Intelligent Tutoring",
770
+ "authors": [
771
+ {
772
+ "first": "Bonnie",
773
+ "middle": [
774
+ "J"
775
+ ],
776
+ "last": "Dorr",
777
+ "suffix": ""
778
+ },
779
+ {
780
+ "first": "James",
781
+ "middle": [],
782
+ "last": "Hendler",
783
+ "suffix": ""
784
+ },
785
+ {
786
+ "first": "Scott",
787
+ "middle": [],
788
+ "last": "Blanksteen",
789
+ "suffix": ""
790
+ },
791
+ {
792
+ "first": "Barrie",
793
+ "middle": [],
794
+ "last": "Migdaloff",
795
+ "suffix": ""
796
+ }
797
+ ],
798
+ "year": 1993,
799
+ "venue": "",
800
+ "volume": "",
801
+ "issue": "",
802
+ "pages": "",
803
+ "other_ids": {},
804
+ "num": null,
805
+ "urls": [],
806
+ "raw_text": "Bonnie J. Dorr, James Hendler, Scott Blanksteen, and Barrie Migdaloff. 1993. Use of Lexical Con- ceptual Structure for Intelligent Tutoring. Tech- nical Report UMIACS TR 93-108, CS TR 3161, University of Maryland.",
807
+ "links": null
808
+ },
809
+ "BIBREF1": {
810
+ "ref_id": "b1",
811
+ "title": "A Thematic Hierarchy for Efficient Generation from Lexical-Conceptal Structure",
812
+ "authors": [
813
+ {
814
+ "first": "Bonnie",
815
+ "middle": [
816
+ "J"
817
+ ],
818
+ "last": "Dorr",
819
+ "suffix": ""
820
+ },
821
+ {
822
+ "first": "Nizar",
823
+ "middle": [],
824
+ "last": "Habash",
825
+ "suffix": ""
826
+ },
827
+ {
828
+ "first": "David",
829
+ "middle": [],
830
+ "last": "Traum",
831
+ "suffix": ""
832
+ }
833
+ ],
834
+ "year": 1998,
835
+ "venue": "Proceedings of the Third Conference of the Associatio.n for Machine Translation in the Americas",
836
+ "volume": "8",
837
+ "issue": "",
838
+ "pages": "28--31",
839
+ "other_ids": {},
840
+ "num": null,
841
+ "urls": [],
842
+ "raw_text": "Bonnie J. Dorr, Nizar Habash, and David Traum. 1998. A Thematic Hierarchy for Efficient Gener- ation from Lexical-Conceptal Structure. In Pro- ceedings of the Third Conference of the Associ- atio.n for Machine Translation in the Americas, AMTA-#8, in Lecture Notes in Artificial Intelli- gence, 15~9, pages 333-343, Langhorne, PA, Oc- tober 28-31.",
843
+ "links": null
844
+ },
845
+ "BIBREF2": {
846
+ "ref_id": "b2",
847
+ "title": "Machine Translation: A View from the Lexicon",
848
+ "authors": [
849
+ {
850
+ "first": "Bonnie",
851
+ "middle": [
852
+ "J"
853
+ ],
854
+ "last": "Dorr",
855
+ "suffix": ""
856
+ }
857
+ ],
858
+ "year": 1993,
859
+ "venue": "",
860
+ "volume": "",
861
+ "issue": "",
862
+ "pages": "",
863
+ "other_ids": {},
864
+ "num": null,
865
+ "urls": [],
866
+ "raw_text": "Bonnie J. Dorr. 1993. Machine Translation: A View from the Lexicon. The MIT Press.",
867
+ "links": null
868
+ },
869
+ "BIBREF3": {
870
+ "ref_id": "b3",
871
+ "title": "Large-Scale Acquisition of LCS-Based Lexicons for Foreign Language Tutoring",
872
+ "authors": [
873
+ {
874
+ "first": "Bonnie",
875
+ "middle": [
876
+ "J"
877
+ ],
878
+ "last": "Dorr",
879
+ "suffix": ""
880
+ }
881
+ ],
882
+ "year": 1997,
883
+ "venue": "Proceedings of the A CL Fifth Conference on Applied Natural Language Processing (ANLP)",
884
+ "volume": "",
885
+ "issue": "",
886
+ "pages": "139--146",
887
+ "other_ids": {},
888
+ "num": null,
889
+ "urls": [],
890
+ "raw_text": "Bonnie J. Dorr. 1997. Large-Scale Acquisition of LCS-Based Lexicons for Foreign Language Tutor- ing. In Proceedings of the A CL Fifth Conference on Applied Natural Language Processing (ANLP), pages 139-146, Washington, DC.",
891
+ "links": null
892
+ },
893
+ "BIBREF4": {
894
+ "ref_id": "b4",
895
+ "title": "Semantics and Cognition",
896
+ "authors": [
897
+ {
898
+ "first": "Ray",
899
+ "middle": [],
900
+ "last": "Jackendoff",
901
+ "suffix": ""
902
+ }
903
+ ],
904
+ "year": 1983,
905
+ "venue": "",
906
+ "volume": "",
907
+ "issue": "",
908
+ "pages": "",
909
+ "other_ids": {},
910
+ "num": null,
911
+ "urls": [],
912
+ "raw_text": "Ray Jackendoff. 1983. Semantics and Cognition. The MIT Press, Cambridge, MA.",
913
+ "links": null
914
+ },
915
+ "BIBREF5": {
916
+ "ref_id": "b5",
917
+ "title": "Semantic Structures. The",
918
+ "authors": [
919
+ {
920
+ "first": "Ray",
921
+ "middle": [],
922
+ "last": "Jackendoff",
923
+ "suffix": ""
924
+ }
925
+ ],
926
+ "year": 1990,
927
+ "venue": "",
928
+ "volume": "",
929
+ "issue": "",
930
+ "pages": "",
931
+ "other_ids": {},
932
+ "num": null,
933
+ "urls": [],
934
+ "raw_text": "Ray Jackendoff. 1990. Semantic Structures. The MIT Press, Cambridge, MA.",
935
+ "links": null
936
+ },
937
+ "BIBREF6": {
938
+ "ref_id": "b6",
939
+ "title": "The Proper Treatment of Measuring Out, Telicity, and Perhaps Even Quantification in English",
940
+ "authors": [
941
+ {
942
+ "first": "Ray",
943
+ "middle": [],
944
+ "last": "Jackendoff",
945
+ "suffix": ""
946
+ }
947
+ ],
948
+ "year": 1996,
949
+ "venue": "Natural Language and Linguistic Theory",
950
+ "volume": "14",
951
+ "issue": "",
952
+ "pages": "305--354",
953
+ "other_ids": {},
954
+ "num": null,
955
+ "urls": [],
956
+ "raw_text": "Ray Jackendoff. 1996. The Proper Treatment of Measuring Out, Telicity, and Perhaps Even Quan- tification in English. Natural Language and Lin- guistic Theory, 14:305-354.",
957
+ "links": null
958
+ },
959
+ "BIBREF7": {
960
+ "ref_id": "b7",
961
+ "title": "Generation that Exploits Corpus-Based Statistical Knowledge",
962
+ "authors": [
963
+ {
964
+ "first": "Irene",
965
+ "middle": [],
966
+ "last": "Langkilde",
967
+ "suffix": ""
968
+ },
969
+ {
970
+ "first": "Kevin",
971
+ "middle": [],
972
+ "last": "Knight",
973
+ "suffix": ""
974
+ }
975
+ ],
976
+ "year": 1998,
977
+ "venue": "Proceedings of COLING-A CL '98",
978
+ "volume": "",
979
+ "issue": "",
980
+ "pages": "704--710",
981
+ "other_ids": {},
982
+ "num": null,
983
+ "urls": [],
984
+ "raw_text": "Irene Langkilde and Kevin Knight. 1998a. Gen- eration that Exploits Corpus-Based Statistical Knowledge. In Proceedings of COLING-A CL '98, pages 704-710.",
985
+ "links": null
986
+ },
987
+ "BIBREF8": {
988
+ "ref_id": "b8",
989
+ "title": "The Practical Value of N-Grams in Generation",
990
+ "authors": [
991
+ {
992
+ "first": "Irene",
993
+ "middle": [],
994
+ "last": "Langkilde",
995
+ "suffix": ""
996
+ },
997
+ {
998
+ "first": "Kevin",
999
+ "middle": [],
1000
+ "last": "Knight",
1001
+ "suffix": ""
1002
+ }
1003
+ ],
1004
+ "year": 1998,
1005
+ "venue": "International Natural Language Generation Workshop",
1006
+ "volume": "",
1007
+ "issue": "",
1008
+ "pages": "",
1009
+ "other_ids": {},
1010
+ "num": null,
1011
+ "urls": [],
1012
+ "raw_text": "Irene Langkilde and Kevin Knight. 1998b. The Practical Value of N-Grams in Generation. In In- ternational Natural Language Generation Work- shop.",
1013
+ "links": null
1014
+ },
1015
+ "BIBREF9": {
1016
+ "ref_id": "b9",
1017
+ "title": "Unaecusativity: At the Syntaz-Lezical Semantics Interface",
1018
+ "authors": [
1019
+ {
1020
+ "first": "Beth",
1021
+ "middle": [],
1022
+ "last": "Levin",
1023
+ "suffix": ""
1024
+ },
1025
+ {
1026
+ "first": "Malka",
1027
+ "middle": [
1028
+ "Rappaport"
1029
+ ],
1030
+ "last": "Hovav",
1031
+ "suffix": ""
1032
+ }
1033
+ ],
1034
+ "year": 1926,
1035
+ "venue": "",
1036
+ "volume": "",
1037
+ "issue": "",
1038
+ "pages": "",
1039
+ "other_ids": {},
1040
+ "num": null,
1041
+ "urls": [],
1042
+ "raw_text": "Beth Levin and Malka Rappaport Hovav. 1995. Un- aecusativity: At the Syntaz-Lezical Semantics In- terface. The MIT Press, Cambridge, MA. LI Monograph 26.",
1043
+ "links": null
1044
+ },
1045
+ "BIBREF10": {
1046
+ "ref_id": "b10",
1047
+ "title": "Construction of chinese-english semantic hierarchy for cross-language retrieval, forthcoming. Randolph Quirk, Sidney Greenbaum, Geoffrey Leech, and Jan Svartvik. 1985. A Comprehensive Grammar of the English Language",
1048
+ "authors": [
1049
+ {
1050
+ "first": "Gina",
1051
+ "middle": [],
1052
+ "last": "Levow",
1053
+ "suffix": ""
1054
+ },
1055
+ {
1056
+ "first": "Bonnie",
1057
+ "middle": [
1058
+ "J"
1059
+ ],
1060
+ "last": "Dorr",
1061
+ "suffix": ""
1062
+ },
1063
+ {
1064
+ "first": "Dekang",
1065
+ "middle": [],
1066
+ "last": "Lin",
1067
+ "suffix": ""
1068
+ }
1069
+ ],
1070
+ "year": 2000,
1071
+ "venue": "",
1072
+ "volume": "",
1073
+ "issue": "",
1074
+ "pages": "",
1075
+ "other_ids": {},
1076
+ "num": null,
1077
+ "urls": [],
1078
+ "raw_text": "Gina Levow, Bonnie J. Dorr, and Dekang Lin. 2000. Construction of chinese-english semantic hierar- chy for cross-language retrieval, forthcoming. Randolph Quirk, Sidney Greenbaum, Geoffrey Leech, and Jan Svartvik. 1985. A Comprehen- sive Grammar of the English Language. Longman, London.",
1079
+ "links": null
1080
+ }
1081
+ },
1082
+ "ref_entries": {
1083
+ "FIGREF0": {
1084
+ "uris": null,
1085
+ "text": "Figure 1: Generation System Architecture",
1086
+ "num": null,
1087
+ "type_str": "figure"
1088
+ },
1089
+ "FIGREF1": {
1090
+ "uris": null,
1091
+ "text": "( : DEF_WORD \"reduce\" : CLASS \"45.4. a\" :THETA_ROLES ( (I \"_ag_th, instr (with)\") ) :LCS (cause (* thing I) (go ident (* Zhing 2) (toward ident (thing 2) (at ident (thing 2) (reduce+ed 9) ) ) ) ((* with 19) instr (*head*) (thin E 20) )) :VAR_SPEC ((1 (animate +)))) (:DEF_WORD .\"US\" :LCS (US+ 0)) (:DEF_WORD \"China\" :LCS (China+ 0)) (:DEF_WORD \"quota\" :5CS (quota+ 0)) (:DEF_WORD \"WITH\" :LCS (with instr (thing 2) (* thing 20))) (: DEF_WORD \"unilaterally\" :LCS (unilaterally+/m 0)) Figure 2: Lexicon entries",
1092
+ "num": null,
1093
+ "type_str": "figure"
1094
+ },
1095
+ "FIGREF2": {
1096
+ "uris": null,
1097
+ "text": "the same (or primitive for one is a wild-card, represented as nil) the types (e.g., thing, event, state, etc.) are the same the fields (e.g., identificational, possessive, locational, etc) are the same the positions (e.g., subject, argument, or modifier) are the same all obligatory children of the RLCS node have corresponding matches to children of the CLCS",
1098
+ "num": null,
1099
+ "type_str": "figure"
1100
+ },
1101
+ "FIGREF3": {
1102
+ "uris": null,
1103
+ "text": "MOD-THING (a7542 / [exportl :LCS-NODE 6112400 :CAT N)) :LCS-MOD-MANNER (a7543 / [unilaterally[ :LCS-NODE 6279392 :CAT ADV)) LCS-AMR to over-generate possible sequences of English from the ambiguous or under-specified AMRs and then decide amongst them based on bigram frequency. The interface between the Linearization module and the Statistical Extraction module is a word lattice of possible renderings. The Nitrogen package offers support for both subtasks, Linearization and Statistical Extraction. Initially, we used the Nitrogen grammar to do Linearization. But complexities in recasting the LCS-AMR roles as standard AMR roles as well as efficiency considerations compelled us to create our own English grammar implemented in Lisp to generate the word lattices.",
1104
+ "num": null,
1105
+ "type_str": "figure"
1106
+ }
1107
+ }
1108
+ }
1109
+ }
Full_text_JSON/prefixW/json/W00/W00-0300.json ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0300",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:11.204230Z"
6
+ },
7
+ "title": "Conversational Systems Organizing Committee",
8
+ "authors": [
9
+ {
10
+ "first": "Candy",
11
+ "middle": [],
12
+ "last": "Sidner",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ },
17
+ {
18
+ "first": "James",
19
+ "middle": [],
20
+ "last": "Allen",
21
+ "suffix": "",
22
+ "affiliation": {},
23
+ "email": ""
24
+ },
25
+ {
26
+ "first": "Harald",
27
+ "middle": [],
28
+ "last": "Aust",
29
+ "suffix": "",
30
+ "affiliation": {},
31
+ "email": ""
32
+ },
33
+ {
34
+ "first": "Phil",
35
+ "middle": [],
36
+ "last": "Cohen",
37
+ "suffix": "",
38
+ "affiliation": {},
39
+ "email": ""
40
+ },
41
+ {
42
+ "first": "Justine",
43
+ "middle": [],
44
+ "last": "Cassell",
45
+ "suffix": "",
46
+ "affiliation": {},
47
+ "email": ""
48
+ },
49
+ {
50
+ "first": "Laila",
51
+ "middle": [],
52
+ "last": "Dybkjaer",
53
+ "suffix": "",
54
+ "affiliation": {},
55
+ "email": ""
56
+ },
57
+ {
58
+ "first": "X",
59
+ "middle": [
60
+ "D"
61
+ ],
62
+ "last": "Huang",
63
+ "suffix": "",
64
+ "affiliation": {},
65
+ "email": ""
66
+ },
67
+ {
68
+ "first": "Masato",
69
+ "middle": [],
70
+ "last": "Ishizaki",
71
+ "suffix": "",
72
+ "affiliation": {},
73
+ "email": ""
74
+ },
75
+ {
76
+ "first": "Candace",
77
+ "middle": [],
78
+ "last": "Kamm",
79
+ "suffix": "",
80
+ "affiliation": {},
81
+ "email": ""
82
+ },
83
+ {
84
+ "first": "Lin-Shan",
85
+ "middle": [],
86
+ "last": "Lee",
87
+ "suffix": "",
88
+ "affiliation": {},
89
+ "email": ""
90
+ },
91
+ {
92
+ "first": "Susann",
93
+ "middle": [],
94
+ "last": "Luperfoy",
95
+ "suffix": "",
96
+ "affiliation": {},
97
+ "email": ""
98
+ },
99
+ {
100
+ "first": "Patti",
101
+ "middle": [],
102
+ "last": "Price",
103
+ "suffix": "",
104
+ "affiliation": {},
105
+ "email": ""
106
+ },
107
+ {
108
+ "first": "Owen",
109
+ "middle": [],
110
+ "last": "Rambow",
111
+ "suffix": "",
112
+ "affiliation": {},
113
+ "email": ""
114
+ },
115
+ {
116
+ "first": "Norbert",
117
+ "middle": [],
118
+ "last": "Reithinger",
119
+ "suffix": "",
120
+ "affiliation": {},
121
+ "email": ""
122
+ },
123
+ {
124
+ "first": "Alexander",
125
+ "middle": [
126
+ "I"
127
+ ],
128
+ "last": "Rudnicky",
129
+ "suffix": "",
130
+ "affiliation": {},
131
+ "email": ""
132
+ },
133
+ {
134
+ "first": "Stephanie",
135
+ "middle": [],
136
+ "last": "Seneff",
137
+ "suffix": "",
138
+ "affiliation": {},
139
+ "email": ""
140
+ },
141
+ {
142
+ "first": "Dave",
143
+ "middle": [],
144
+ "last": "Stallard",
145
+ "suffix": "",
146
+ "affiliation": {},
147
+ "email": ""
148
+ },
149
+ {
150
+ "first": "David",
151
+ "middle": [],
152
+ "last": "Traum",
153
+ "suffix": "",
154
+ "affiliation": {},
155
+ "email": ""
156
+ },
157
+ {
158
+ "first": "Marilyn",
159
+ "middle": [],
160
+ "last": "Walker",
161
+ "suffix": "",
162
+ "affiliation": {},
163
+ "email": ""
164
+ },
165
+ {
166
+ "first": "Wayne",
167
+ "middle": [],
168
+ "last": "Ward",
169
+ "suffix": "",
170
+ "affiliation": {},
171
+ "email": ""
172
+ },
173
+ {
174
+ "first": "Washington",
175
+ "middle": [],
176
+ "last": "Seattle",
177
+ "suffix": "",
178
+ "affiliation": {},
179
+ "email": ""
180
+ },
181
+ {
182
+ "first": "Candace",
183
+ "middle": [
184
+ "L"
185
+ ],
186
+ "last": "Sidner",
187
+ "suffix": "",
188
+ "affiliation": {},
189
+ "email": ""
190
+ },
191
+ {
192
+ "first": "Carolyn",
193
+ "middle": [],
194
+ "last": "Boettner",
195
+ "suffix": "",
196
+ "affiliation": {},
197
+ "email": ""
198
+ },
199
+ {
200
+ "first": "Charles",
201
+ "middle": [],
202
+ "last": "Rich",
203
+ "suffix": "",
204
+ "affiliation": {},
205
+ "email": ""
206
+ },
207
+ {
208
+ "first": "Staffan",
209
+ "middle": [],
210
+ "last": "Larsson",
211
+ "suffix": "",
212
+ "affiliation": {},
213
+ "email": ""
214
+ },
215
+ {
216
+ "first": "Peter",
217
+ "middle": [],
218
+ "last": "Ljungl6f",
219
+ "suffix": "",
220
+ "affiliation": {},
221
+ "email": ""
222
+ },
223
+ {
224
+ "first": "Robin",
225
+ "middle": [],
226
+ "last": "Cooper",
227
+ "suffix": "",
228
+ "affiliation": {},
229
+ "email": ""
230
+ },
231
+ {
232
+ "first": "Elisabet",
233
+ "middle": [],
234
+ "last": "Engdahl",
235
+ "suffix": "",
236
+ "affiliation": {},
237
+ "email": ""
238
+ },
239
+ {
240
+ "first": "Stina",
241
+ "middle": [],
242
+ "last": "Ericsson",
243
+ "suffix": "",
244
+ "affiliation": {},
245
+ "email": ""
246
+ },
247
+ {
248
+ "first": "Joseph",
249
+ "middle": [],
250
+ "last": "Polifroni",
251
+ "suffix": "",
252
+ "affiliation": {},
253
+ "email": ""
254
+ },
255
+ {
256
+ "first": "Diane",
257
+ "middle": [],
258
+ "last": "Litman",
259
+ "suffix": "",
260
+ "affiliation": {},
261
+ "email": ""
262
+ },
263
+ {
264
+ "first": "Satinder",
265
+ "middle": [],
266
+ "last": "Singh",
267
+ "suffix": "",
268
+ "affiliation": {},
269
+ "email": ""
270
+ },
271
+ {
272
+ "first": "Michael",
273
+ "middle": [],
274
+ "last": "Kearns",
275
+ "suffix": "",
276
+ "affiliation": {},
277
+ "email": ""
278
+ },
279
+ {
280
+ "first": "Scott",
281
+ "middle": [],
282
+ "last": "Axelrod",
283
+ "suffix": "",
284
+ "affiliation": {},
285
+ "email": ""
286
+ },
287
+ {
288
+ "first": "Alice",
289
+ "middle": [
290
+ "H"
291
+ ],
292
+ "last": "Oh",
293
+ "suffix": "",
294
+ "affiliation": {},
295
+ "email": ""
296
+ },
297
+ {
298
+ "first": "Donna",
299
+ "middle": [],
300
+ "last": "Byron",
301
+ "suffix": "",
302
+ "affiliation": {},
303
+ "email": ""
304
+ },
305
+ {
306
+ "first": "Dave",
307
+ "middle": [],
308
+ "last": "Costello",
309
+ "suffix": "",
310
+ "affiliation": {},
311
+ "email": ""
312
+ },
313
+ {
314
+ "first": "Myroslava",
315
+ "middle": [],
316
+ "last": "Dzikovska",
317
+ "suffix": "",
318
+ "affiliation": {},
319
+ "email": ""
320
+ },
321
+ {
322
+ "first": "George",
323
+ "middle": [],
324
+ "last": "Ferguson",
325
+ "suffix": "",
326
+ "affiliation": {},
327
+ "email": ""
328
+ },
329
+ {
330
+ "first": "Lucian",
331
+ "middle": [],
332
+ "last": "Galescu",
333
+ "suffix": "",
334
+ "affiliation": {},
335
+ "email": ""
336
+ },
337
+ {
338
+ "first": "Amanda",
339
+ "middle": [],
340
+ "last": "Stent",
341
+ "suffix": "",
342
+ "affiliation": {},
343
+ "email": ""
344
+ },
345
+ {
346
+ "first": "Wei",
347
+ "middle": [],
348
+ "last": "Xu",
349
+ "suffix": "",
350
+ "affiliation": {},
351
+ "email": ""
352
+ },
353
+ {
354
+ "first": "Christine",
355
+ "middle": [
356
+ "H"
357
+ ],
358
+ "last": "Nakatani",
359
+ "suffix": "",
360
+ "affiliation": {},
361
+ "email": ""
362
+ },
363
+ {
364
+ "first": "Jennifer",
365
+ "middle": [],
366
+ "last": "Chu-Carroll",
367
+ "suffix": "",
368
+ "affiliation": {},
369
+ "email": ""
370
+ },
371
+ {
372
+ "first": "Manny",
373
+ "middle": [],
374
+ "last": "Rayner",
375
+ "suffix": "",
376
+ "affiliation": {},
377
+ "email": ""
378
+ },
379
+ {
380
+ "first": "Beth",
381
+ "middle": [
382
+ "Ann"
383
+ ],
384
+ "last": "Hockey",
385
+ "suffix": "",
386
+ "affiliation": {},
387
+ "email": ""
388
+ },
389
+ {
390
+ "first": "Frankie",
391
+ "middle": [],
392
+ "last": "James",
393
+ "suffix": "",
394
+ "affiliation": {},
395
+ "email": ""
396
+ },
397
+ {
398
+ "first": "Sharon",
399
+ "middle": [
400
+ "J"
401
+ ],
402
+ "last": "Goldwater",
403
+ "suffix": "",
404
+ "affiliation": {},
405
+ "email": ""
406
+ },
407
+ {
408
+ "first": "Elizabeth",
409
+ "middle": [
410
+ "Owen"
411
+ ],
412
+ "last": "Bratt",
413
+ "suffix": "",
414
+ "affiliation": {},
415
+ "email": ""
416
+ },
417
+ {
418
+ "first": "Jean",
419
+ "middle": [
420
+ "Mark"
421
+ ],
422
+ "last": "Gawron",
423
+ "suffix": "",
424
+ "affiliation": {},
425
+ "email": ""
426
+ },
427
+ {
428
+ "first": "John",
429
+ "middle": [],
430
+ "last": "Dowding",
431
+ "suffix": "",
432
+ "affiliation": {},
433
+ "email": ""
434
+ }
435
+ ],
436
+ "year": "",
437
+ "venue": null,
438
+ "identifiers": {},
439
+ "abstract": "",
440
+ "pdf_parse": {
441
+ "paper_id": "W00-0300",
442
+ "_pdf_hash": "",
443
+ "abstract": [],
444
+ "body_text": [],
445
+ "back_matter": [],
446
+ "bib_entries": {},
447
+ "ref_entries": {}
448
+ }
449
+ }
Full_text_JSON/prefixW/json/W00/W00-0301.json ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0301",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:24.532564Z"
6
+ },
7
+ "title": "Lessons Learned in Building Spoken Language Collaborative Interface Agents",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "This paper reports on the development of two spoken language collaborative interface agents built with the Collagen system. It presents sample dialogues with the agents working with email applications and meeting planning applications, and discusses how these applications were created. It also discusses limitations and benefits of this approach.",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0301",
15
+ "_pdf_hash": "",
16
+ "abstract": [
17
+ {
18
+ "text": "This paper reports on the development of two spoken language collaborative interface agents built with the Collagen system. It presents sample dialogues with the agents working with email applications and meeting planning applications, and discusses how these applications were created. It also discusses limitations and benefits of this approach.",
19
+ "cite_spans": [],
20
+ "ref_spans": [],
21
+ "eq_spans": [],
22
+ "section": "Abstract",
23
+ "sec_num": null
24
+ }
25
+ ],
26
+ "body_text": [
27
+ {
28
+ "text": "The underlying premise of the Collageff M (for Collaborative agent) project is that software agents, when they interact with people, should be governed by the same principles that govern human-to-human collaboration. To determine the principles governing human collaboration, we have relied on research in computational linguistics on collaborative discourse, specifically within the SharedPlan framework of Sidner (1986, 1990) Kraus, 1996, Lochbaum, 1998) . This work has provided us with a computationally-specified theory that has been empirically validated across a range of human tasks. We have implemented the algorithms and information structures of this theory in the form of a Java middleware component, a collaboration manager called Collagen, which software developers can use to implement a collaborative interface agent for any Java application.",
29
+ "cite_spans": [
30
+ {
31
+ "start": 408,
32
+ "end": 427,
33
+ "text": "Sidner (1986, 1990)",
34
+ "ref_id": null
35
+ },
36
+ {
37
+ "start": 428,
38
+ "end": 456,
39
+ "text": "Kraus, 1996, Lochbaum, 1998)",
40
+ "ref_id": null
41
+ }
42
+ ],
43
+ "ref_spans": [],
44
+ "eq_spans": [],
45
+ "section": "C o l l a b o r a t i v e A g e n t s",
46
+ "sec_num": "1"
47
+ },
48
+ {
49
+ "text": "In the collaborative interface agent paradigm, illustrated abstractly in Figure 1 , a software agent is able to both communicate with and observe the actions of a user on a shared application interface, and vice versa. The software agent in this paradigm takes an active r01e in joint problem solving, including advising the user when he gets stuck, suggesting what to do next when he gets lost, and taking care of lowlevel details after a high-level decision is made.",
50
+ "cite_spans": [],
51
+ "ref_spans": [
52
+ {
53
+ "start": 73,
54
+ "end": 81,
55
+ "text": "Figure 1",
56
+ "ref_id": "FIGREF0"
57
+ }
58
+ ],
59
+ "eq_spans": [],
60
+ "section": "Agent communi cate l Application",
61
+ "sec_num": null
62
+ },
63
+ {
64
+ "text": "The screenshot in Figure 2 shows how the collaborative interface agent paradigm is concretely realized on a user's display. The large window in the background is the shared application, in this case, the Lotus eSuite TM email program. The two smaller overlapping windows in the corners of the screen are the agent's and user's home windows, through which they communicate with each other.",
65
+ "cite_spans": [],
66
+ "ref_spans": [
67
+ {
68
+ "start": 18,
69
+ "end": 26,
70
+ "text": "Figure 2",
71
+ "ref_id": "FIGREF1"
72
+ }
73
+ ],
74
+ "eq_spans": [],
75
+ "section": "Agent communi cate l Application",
76
+ "sec_num": null
77
+ },
78
+ {
79
+ "text": "A key benefit of using Collagen to build an interface agent is that the collaboration manager automatically constructs a structured history of the user's and agent's activities. This segmented interaction history is hierarchically organized according to the goal structure of the application tasks. Among other things, this history can help re-orient the user when he gets confused or after an extended absence. It also supports high-level, task-oriented transformations, such as returning to an earlier goal. Figure 3 shows a sample segmented interaction history for the an email interaction.",
80
+ "cite_spans": [],
81
+ "ref_spans": [
82
+ {
83
+ "start": 510,
84
+ "end": 518,
85
+ "text": "Figure 3",
86
+ "ref_id": "FIGREF2"
87
+ }
88
+ ],
89
+ "eq_spans": [],
90
+ "section": "Agent communi cate l Application",
91
+ "sec_num": null
92
+ },
93
+ {
94
+ "text": "To apply Collagen to a particular application, the application developer must provide an abstract model of the tasks for which the application software will be used. This knowledge is formalized in a recipe library, which is then automatically compiled for use by the interface agent. This approach also allows us to easily vary an agent's level of initiative from very passive to very active, using the same task model. For more details on the internal architecture of Collagen, see (Rich and Sidner, 1998) .",
95
+ "cite_spans": [
96
+ {
97
+ "start": 484,
98
+ "end": 507,
99
+ "text": "(Rich and Sidner, 1998)",
100
+ "ref_id": "BIBREF5"
101
+ }
102
+ ],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "Agent communi cate l Application",
106
+ "sec_num": null
107
+ },
108
+ {
109
+ "text": "We have developed prototype interface agents using Collagen for several applications, including air travel planning (Rich and Sidner, 1998) , resource allocation, industrial control, and common PC desktop activities.",
110
+ "cite_spans": [
111
+ {
112
+ "start": 116,
113
+ "end": 139,
114
+ "text": "(Rich and Sidner, 1998)",
115
+ "ref_id": "BIBREF5"
116
+ }
117
+ ],
118
+ "ref_spans": [],
119
+ "eq_spans": [],
120
+ "section": "Agent communi cate l Application",
121
+ "sec_num": null
122
+ },
123
+ {
124
+ "text": "The email agent (Gruen et al., 1999) is the first Collagen-based agent we have built that supports spoken-language interaction. Our other agents avoided the need for natural language understanding by presenting the user with a dynamically-changing menu of expected utterances, which was generated from the current discourse state according to the predictions of the SharedPlan theory. Sample menus are displayed in Figure 2 . The email agent, however, incorporates a speech and natural language understanding system developed by IBM Research, allowing users to collaborate either entirely in speech or with a mixture of speech and interface actions, such as selecting a message. More recently we have developed the Lotus Notes TM meeting planning agent, which incorporates speech and sentence level understanding using the Java Speech API, as implemented by IBM. The JSAPI toolkit provides a parser, which we use with a vocabulary and grammar we developed for the domain of meeting planning. The tags produced by the Java Speech parser are interpreted with a set of semantic rules that produce internal structures used by the Collagen agent.",
125
+ "cite_spans": [
126
+ {
127
+ "start": 16,
128
+ "end": 36,
129
+ "text": "(Gruen et al., 1999)",
130
+ "ref_id": null
131
+ }
132
+ ],
133
+ "ref_spans": [
134
+ {
135
+ "start": 415,
136
+ "end": 423,
137
+ "text": "Figure 2",
138
+ "ref_id": "FIGREF1"
139
+ }
140
+ ],
141
+ "eq_spans": [],
142
+ "section": "A Collaborative Email Agent",
143
+ "sec_num": "2"
144
+ },
145
+ {
146
+ "text": "With the email application, the user can read, compose and send messages as one typically does with email. The Collagen email agent, called Daffy, performs actions requested by the user with speech and watches user interface actions. It can perform a few email actions on its own (such as opening and closing windows, and filling in the user's signature on email) and can also undertake actions that the user requests in spoken utterances. In the sample session shown in Figure 4 , the agent keeps a todo list for the user, explains how to accomplish email tasks for a user who is new to email, answers user questions about what actions were taken in the interaction and offers suggestions about what to do next in the interaction as well as forming {lser requests.",
147
+ "cite_spans": [],
148
+ "ref_spans": [
149
+ {
150
+ "start": 471,
151
+ "end": 479,
152
+ "text": "Figure 4",
153
+ "ref_id": null
154
+ }
155
+ ],
156
+ "eq_spans": [],
157
+ "section": "A Collaborative Email Agent",
158
+ "sec_num": "2"
159
+ },
160
+ {
161
+ "text": "To create the email agent, we built a recipe library about email, as required for the Collagen architecture, of about 55 actions and 32 recipes for doing those actions; the actions included GUI primitives such as sending a message, and high level actions such as reacting to a message.",
162
+ "cite_spans": [],
163
+ "ref_spans": [],
164
+ "eq_spans": [],
165
+ "section": "A Collaborative Email Agent",
166
+ "sec_num": "2"
167
+ },
168
+ {
169
+ "text": "Our experience with Daffy convinced us that collaborative agents would be more useful if they not only understood what the user was doing in the interface but could undertake more of the user's sub-goals and thus off-load some of the burden from the user. To explore this notion, we built Dotty, a Collagen agent, that works with a user who is planning a meeting with a customer, using Lotus Notes. As the dialogue in Figure 5 demonstrates, Dotty is able to take over many of the details of planning the meeting. Dotty uses a library that is far smaller than Daffy's: 19 actions and 5 recipes. This dialogue begins with an overall goals of managing sales contacts and several sub-goals, :",
170
+ "cite_spans": [],
171
+ "ref_spans": [
172
+ {
173
+ "start": 418,
174
+ "end": 426,
175
+ "text": "Figure 5",
176
+ "ref_id": null
177
+ }
178
+ ],
179
+ "eq_spans": [],
180
+ "section": "A Collaborative Agent for Planning Meetings",
181
+ "sec_num": "3"
182
+ },
183
+ {
184
+ "text": "USER: \"Your turn,\" Done checking our to-do list.",
185
+ "cite_spans": [],
186
+ "ref_spans": [],
187
+ "eq_spans": [],
188
+ "section": "A Collaborative Agent for Planning Meetings",
189
+ "sec_num": "3"
190
+ },
191
+ {
192
+ "text": "AGENT: \"Let's check our to-do list.\" USER: \"Ok.\" AGENT: \"The following items are on our to-do list.\" AGENT: \"Work on email.\" AGENT: \"Prepare a presentation about Java Beans.\" AGENT: \"That's all,\" USER:",
193
+ "cite_spans": [],
194
+ "ref_spans": [],
195
+ "eq_spans": [],
196
+ "section": "A Collaborative Agent for Planning Meetings",
197
+ "sec_num": "3"
198
+ },
199
+ {
200
+ "text": "\"Ok.\" Done working on email.",
201
+ "cite_spans": [],
202
+ "ref_spans": [],
203
+ "eq_spans": [],
204
+ "section": "A Collaborative Agent for Planning Meetings",
205
+ "sec_num": "3"
206
+ },
207
+ {
208
+ "text": "AGENT: \"Let's work on email.\" Done explaining how to work on email. USER: \"What should I do?\" AGENT: \"First, you reed a message.\" USER:",
209
+ "cite_spans": [],
210
+ "ref_spans": [],
211
+ "eq_spans": [],
212
+ "section": "A Collaborative Agent for Planning Meetings",
213
+ "sec_num": "3"
214
+ },
215
+ {
216
+ "text": "\"Ok.\" AGENT: \"Then, you might store, print or delete the message.\" Done reading message from Brian Gorwin <brg@dewey. AGENT: \"Let's schedule event tentatively at a time.\" USER: \"Schedule event tentatively at three P.M. Thursday.\" AGENT: Schedule event tentatively at three P.M. Thursday. AGENT: \"There is a conflict of meeting with Brian at three P.M. Thursday with meeting with Irene Landoz at three P.M. Thursday.\" AGENT: \"Let's remember to reschedule meeting with Irene Landoz at three P.M, Thursday.\" USER:",
217
+ "cite_spans": [],
218
+ "ref_spans": [],
219
+ "eq_spans": [],
220
+ "section": "A Collaborative Agent for Planning Meetings",
221
+ "sec_num": "3"
222
+ },
223
+ {
224
+ "text": "\"Ok.\" AGENT: Finalize meeting with Brian at three P.M. Thursday. AGENT: Close message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH GROUP SEMINAR. Done reacting to message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH ....",
225
+ "cite_spans": [],
226
+ "ref_spans": [],
227
+ "eq_spans": [],
228
+ "section": "A Collaborative Agent for Planning Meetings",
229
+ "sec_num": "3"
230
+ },
231
+ {
232
+ "text": "AGENT: \"Shall we react to message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY ...?\" Done agent forwarding message from Brian Gorwin <brg@dewey.watson.ibm,com> about WEEKLY ... to Steve Krantz. USER: \"Forward message from Brian Gorwin <brg@dewey.watson.ibm.com> about ... to Steve Krantz.\" Done user saying identify the recipient as Steve Krantz.",
233
+ "cite_spans": [],
234
+ "ref_spans": [],
235
+ "eq_spans": [],
236
+ "section": "A Collaborative Agent for Planning Meetings",
237
+ "sec_num": "3"
238
+ },
239
+ {
240
+ "text": "AGENT: \"Who is the recipient?\" USER: \"The recipient is Steve Krantz.\" AGENT: Forward message from Brian Gorwin <brg@dewey.watson.ibm.com> about ... to Steve Krantz. Done filling in message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH GROUP SEMINAR.",
241
+ "cite_spans": [],
242
+ "ref_spans": [],
243
+ "eq_spans": [],
244
+ "section": "A Collaborative Agent for Planning Meetings",
245
+ "sec_num": "3"
246
+ },
247
+ {
248
+ "text": "AGENT: \"Shall we fill in message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY ,..?\" USER: \"Ok.\" AGENT: Add text to message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY .... Stopped ccing message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH ....",
249
+ "cite_spans": [],
250
+ "ref_spans": [],
251
+ "eq_spans": [],
252
+ "section": "A Collaborative Agent for Planning Meetings",
253
+ "sec_num": "3"
254
+ },
255
+ {
256
+ "text": "AGENT: \"Shall we cc message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH GROUP SEMINAR?\" USER: \"No.\" AGENT: \"0k, your turn.\" Done sending, canceling or saving this message as a draft.",
257
+ "cite_spans": [],
258
+ "ref_spans": [],
259
+ "eq_spans": [],
260
+ "section": "A Collaborative Agent for Planning Meetings",
261
+ "sec_num": "3"
262
+ },
263
+ {
264
+ "text": "Done agent sending message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH .... USER: \"Send message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH GROUP .... \" AGENT: Send message from Candy Sidner <csidner@lotus.com> about Fwd:WEEKLY SPEECH .... Done storing, printing or deleting this message.",
265
+ "cite_spans": [],
266
+ "ref_spans": [],
267
+ "eq_spans": [],
268
+ "section": "A Collaborative Agent for Planning Meetings",
269
+ "sec_num": "3"
270
+ },
271
+ {
272
+ "text": "AGENT: \"Shall we store, print or delete this message?\" USER: \"Did I forward message to Steve Krantz.\" AGENT: \"Yes, you did forward message from Brian Gorwin <brg@dewey.watson.ibm,com> about ... to Steve Krantz.\" USER:",
273
+ "cite_spans": [],
274
+ "ref_spans": [],
275
+ "eq_spans": [],
276
+ "section": "A Collaborative Agent for Planning Meetings",
277
+ "sec_num": "3"
278
+ },
279
+ {
280
+ "text": "\"Done.\" Done interruption agent checking history.",
281
+ "cite_spans": [],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "A Collaborative Agent for Planning Meetings",
285
+ "sec_num": "3"
286
+ },
287
+ {
288
+ "text": "USER: \"Please check history,\" USER: \"Done.\" \"Let's manage sales contacts.\" \"Let's create profile for Miles White from Abbott Laboratories.\" \"What is the eemail address?\" \"The emailAddress is mileswhite@abl.com.\" \"What is the workPhone?\" \"The workPhone is 8173329280.\" Goto profile for Abbott Laboratories. Create profile for Miles White. \"Do you want to schedule a presentation for Miles White?\" \"Schedule the meeting for December 9 from 2 to 4 pm.\" Make meeting entry for Miles White on December 9. \"Is the meeting here or at the customer site?\" \"The site is here.\" \"Which building?\" \"The building is Rogers Street.\" Locate room 5003 Center in Rogers Street. \"I'm going to reserve room 5003 Center in Rogers Street.\" \"Ok.\" Reserve room 5003 Center in Rogers Street. \"Do you need any special equipment?\" \"No.\" \"Okay.\" \"Who will attend the meeting?\" \"Invite Fred Smith and Jerry Johnson.\" Invite Fred Smith and Jerry Johnson. \"Will the customer need consulting services?\" \"Ok.\" \"Identifying the consulting staff.\" \"Invite Pam Bridges from Lotus Consulting and Jimmy Barnes from IGS.\" Invite Pam Bridges and Jimmy Barnes. \"The following people: Pam Bridges have a conflict with the meeting time..\" \"Instead invite Julie Knox.\" Invite Julie Knox instead of Pam Bridges. \"I will get back to you to confirm this meeting.\" \"Ok.\" Send invitations to Fred Smith, Jerry Johnson, Julie Knox, and Jimmy Barnes. \"Do you want to see company information?\" \"Show me information about Dover Hospital.\" Goto profile for Dover Hospital. \"You need to plan a presentation for Miles White.\" \"Shall we find presentations about a subject?\" \"No.\" \"Okay.\" \"I'm going to get back to you about meeting.\" \"The following people: Jimmy Barnes have a conflict with the meeting time..\" \"This is message from Jimmy Barnes: I have to be in Cleveland on December 9. \"Invite Jonathan Parks instead.\" Invite Jonathan Parks instead of Jimmy Barnes. \"I will get back to you to confirm this meeting.\"",
289
+ "cite_spans": [],
290
+ "ref_spans": [],
291
+ "eq_spans": [],
292
+ "section": "A Collaborative Agent for Planning Meetings",
293
+ "sec_num": "3"
294
+ },
295
+ {
296
+ "text": "Jonathan Parks is available.\" Figure 5 : Sample session of Collagen agent using Lotus Notes for meeting planning (unsegmented).",
297
+ "cite_spans": [],
298
+ "ref_spans": [
299
+ {
300
+ "start": 30,
301
+ "end": 38,
302
+ "text": "Figure 5",
303
+ "ref_id": null
304
+ }
305
+ ],
306
+ "eq_spans": [],
307
+ "section": "A Collaborative Agent for Planning Meetings",
308
+ "sec_num": "3"
309
+ },
310
+ {
311
+ "text": "including creating a profile for Miles White (which is displayed to the user as a Notes document), scheduling a meeting with Miles White (which the agent undertakes by itself using facilities in Lotus Notes), finding information about Dover Hospital (which is displayed as a Notes document), and a brief discussion about planning a presentation.",
312
+ "cite_spans": [],
313
+ "ref_spans": [],
314
+ "eq_spans": [],
315
+ "section": "A Collaborative Agent for Planning Meetings",
316
+ "sec_num": "3"
317
+ },
318
+ {
319
+ "text": "The spoken interaction of our two Collagen agents is limited by the range of utterances that the utterance understanding components can interpret More significantly, we feel these agents are limited in dealing with spoken conversational errors, i.e. errors that arise either because the recognition system produces an error, or the semantic interpretation is faulty (even given the correct choice of words). Errors resulting from semantic mis-interpretation are especially important because often the content of the faulty interpretation is something that the agent can respond to and does, which results in the conversation going awry. In such cases we have in mind using the history based transformations possible in Collagen (c.f. (Rich and Sidner, 1998) ) to allow the user to turn the conversation back to before where the error occurred. Whether communicating by speech or menus, our agents are limited by their inability to negotiate with their human partner. For example, whenever one of our agents propose an action to perform that the user rejects (as in the email conversation in Figure 4 , where the agent proposes filling in the cclist and the user says no), the agent currently does not have any strategies for responding in the conversation other than to accept the rejection and turn the conversation back to the user. We are in present exploring how to use a set of strategies for negotiation of activities and beliefs that we have identified from corpora of human-human collaborations. Using these strategies in the Collagen system will give interface agents a richer set of negotiation capabilities critical for collaboration.",
320
+ "cite_spans": [
321
+ {
322
+ "start": 734,
323
+ "end": 757,
324
+ "text": "(Rich and Sidner, 1998)",
325
+ "ref_id": "BIBREF5"
326
+ }
327
+ ],
328
+ "ref_spans": [
329
+ {
330
+ "start": 1091,
331
+ "end": 1099,
332
+ "text": "Figure 4",
333
+ "ref_id": null
334
+ }
335
+ ],
336
+ "eq_spans": [],
337
+ "section": "Current Limitations",
338
+ "sec_num": "4"
339
+ },
340
+ {
341
+ "text": "Finally, our agents need a better model of conversational initiative. We have experimented in the Collagen system with three initiative modes, one dominated by the user, one by the agent and one that gives each some control of the conversation. The dialogues presented in this paper are all from agent initiative. None of these modes is quite right. The user dominated mode is characterized by an agent that only acts when specifically directed to or when explicitly told to take a turn in the conversation, while the agent dominated mode has a very chatty agent that constantly offers next possible actions relevant to the collaboration. We are currently investigating additional modes of initiative.",
342
+ "cite_spans": [],
343
+ "ref_spans": [],
344
+ "eq_spans": [],
345
+ "section": "Current Limitations",
346
+ "sec_num": "4"
347
+ },
348
+ {
349
+ "text": "The collaborative agent paradigm that we have implemented has several original features. The conversation and collaboration model is general and does not require tuning or the implementation of special dialogue steps for the agent to participate. The model tracks the interaction and treats both the utterances of both participants and the GUI level actions as communications for the discourse; it relates these to the actions and recipes for actions. The model has facilities for richer interpretation of discourse level phenomena, such as reference and anaphora, through the use of the focus stack. Finally, when we began this research, we were not certain that the Collagen system could be used to create agents that would interact with users for many different applications. Our experience with five different applications indicates that the model has the flexibility and richness to make human and computer collaboration possible in many circumstances.",
350
+ "cite_spans": [],
351
+ "ref_spans": [],
352
+ "eq_spans": [],
353
+ "section": "Current Limitations",
354
+ "sec_num": "4"
355
+ }
356
+ ],
357
+ "back_matter": [],
358
+ "bib_entries": {
359
+ "BIBREF0": {
360
+ "ref_id": "b0",
361
+ "title": "Collaborative plans for complex group action",
362
+ "authors": [
363
+ {
364
+ "first": "B",
365
+ "middle": [
366
+ "J"
367
+ ],
368
+ "last": "Grosz",
369
+ "suffix": ""
370
+ },
371
+ {
372
+ "first": "S",
373
+ "middle": [],
374
+ "last": "Kraus",
375
+ "suffix": ""
376
+ }
377
+ ],
378
+ "year": 1996,
379
+ "venue": "Artificial Intelligence",
380
+ "volume": "86",
381
+ "issue": "2",
382
+ "pages": "269--357",
383
+ "other_ids": {},
384
+ "num": null,
385
+ "urls": [],
386
+ "raw_text": "B. J. Grosz and S. Kraus. 1996. Collaborative plans for complex group action. Artificial Intelligence, 86(2):269-357, October.",
387
+ "links": null
388
+ },
389
+ "BIBREF1": {
390
+ "ref_id": "b1",
391
+ "title": "Attention, intentions, and the structure of discourse",
392
+ "authors": [
393
+ {
394
+ "first": "B",
395
+ "middle": [
396
+ "J"
397
+ ],
398
+ "last": "Grosz",
399
+ "suffix": ""
400
+ },
401
+ {
402
+ "first": "C",
403
+ "middle": [
404
+ "L"
405
+ ],
406
+ "last": "Sidner",
407
+ "suffix": ""
408
+ }
409
+ ],
410
+ "year": 1986,
411
+ "venue": "Computational Linguistics",
412
+ "volume": "12",
413
+ "issue": "3",
414
+ "pages": "175--204",
415
+ "other_ids": {},
416
+ "num": null,
417
+ "urls": [],
418
+ "raw_text": "B. J. Grosz and C. L. Sidner. 1986. Attention, in- tentions, and the structure of discourse. Compu- tational Linguistics, 12(3):175-204.",
419
+ "links": null
420
+ },
421
+ "BIBREF2": {
422
+ "ref_id": "b2",
423
+ "title": "Plans for discourse",
424
+ "authors": [
425
+ {
426
+ "first": "B",
427
+ "middle": [
428
+ "J"
429
+ ],
430
+ "last": "Grosz",
431
+ "suffix": ""
432
+ },
433
+ {
434
+ "first": "C",
435
+ "middle": [
436
+ "L"
437
+ ],
438
+ "last": "Sidner",
439
+ "suffix": ""
440
+ }
441
+ ],
442
+ "year": 1990,
443
+ "venue": "Intentions and Communication",
444
+ "volume": "",
445
+ "issue": "",
446
+ "pages": "417--444",
447
+ "other_ids": {},
448
+ "num": null,
449
+ "urls": [],
450
+ "raw_text": "B. J. Grosz and C. L. Sidner. 1990. Plans for dis- course. In P. R. Cohen, J. L. Morgan, and M. E. Pollack, editors, Intentions and Communication, pages 417-444. MIT Press, Cambridge, MA.",
451
+ "links": null
452
+ },
453
+ "BIBREF3": {
454
+ "ref_id": "b3",
455
+ "title": "A collaborative assistant for email",
456
+ "authors": [
457
+ {
458
+ "first": "D",
459
+ "middle": [],
460
+ "last": "Cruen",
461
+ "suffix": ""
462
+ },
463
+ {
464
+ "first": "C",
465
+ "middle": [],
466
+ "last": "Sidner",
467
+ "suffix": ""
468
+ },
469
+ {
470
+ "first": "C",
471
+ "middle": [],
472
+ "last": "Boettner",
473
+ "suffix": ""
474
+ },
475
+ {
476
+ "first": "C",
477
+ "middle": [],
478
+ "last": "Rich",
479
+ "suffix": ""
480
+ }
481
+ ],
482
+ "year": 1999,
483
+ "venue": "Proc. ACM SIGCHI Conference on Human Factors in Computing Systems",
484
+ "volume": "",
485
+ "issue": "",
486
+ "pages": "",
487
+ "other_ids": {},
488
+ "num": null,
489
+ "urls": [],
490
+ "raw_text": "D. Cruen, C. Sidner, C. Boettner, and C. Rich. 1999. A collaborative assistant for email. In Proc. ACM SIGCHI Conference on Human Factors in Computing Systems, Austin, TX, May.",
491
+ "links": null
492
+ },
493
+ "BIBREF4": {
494
+ "ref_id": "b4",
495
+ "title": "A collaborative planning model of intentional structure",
496
+ "authors": [
497
+ {
498
+ "first": "K",
499
+ "middle": [
500
+ "E"
501
+ ],
502
+ "last": "Lochbaum",
503
+ "suffix": ""
504
+ }
505
+ ],
506
+ "year": 1998,
507
+ "venue": "Computational Linguistics",
508
+ "volume": "24",
509
+ "issue": "4",
510
+ "pages": "",
511
+ "other_ids": {},
512
+ "num": null,
513
+ "urls": [],
514
+ "raw_text": "K. E. Lochbaum. 1998. A collaborative planning model of intentional structure. Computational Linguistics, 24(4), December.",
515
+ "links": null
516
+ },
517
+ "BIBREF5": {
518
+ "ref_id": "b5",
519
+ "title": "COLLAGEN: A collaboration manager for software interface agents",
520
+ "authors": [
521
+ {
522
+ "first": "C",
523
+ "middle": [],
524
+ "last": "Rich",
525
+ "suffix": ""
526
+ },
527
+ {
528
+ "first": "C",
529
+ "middle": [],
530
+ "last": "Sidner",
531
+ "suffix": ""
532
+ }
533
+ ],
534
+ "year": 1998,
535
+ "venue": "User Modeling and User-Adapted Interaction",
536
+ "volume": "8",
537
+ "issue": "3/4",
538
+ "pages": "315--350",
539
+ "other_ids": {},
540
+ "num": null,
541
+ "urls": [],
542
+ "raw_text": "C. Rich and C. Sidner. 1998. COLLAGEN: A col- laboration manager for software interface agents. User Modeling and User-Adapted Interaction, 8(3/4):315-350.",
543
+ "links": null
544
+ }
545
+ },
546
+ "ref_entries": {
547
+ "FIGREF0": {
548
+ "text": "Collaborative interface agent paradigm.",
549
+ "type_str": "figure",
550
+ "num": null,
551
+ "uris": null
552
+ },
553
+ "FIGREF1": {
554
+ "text": "Interface for Collagen email agent.",
555
+ "type_str": "figure",
556
+ "num": null,
557
+ "uris": null
558
+ },
559
+ "FIGREF2": {
560
+ "text": "Sample s\u00a2~gnmnted interaction history for ~m email interactiou.",
561
+ "type_str": "figure",
562
+ "num": null,
563
+ "uris": null
564
+ },
565
+ "TABREF0": {
566
+ "html": null,
567
+ "text": "watson.ibm.com> about WEEKLY SPEECH GROUP .... Done explaining how to read a message. Done agent selecting message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH GROUP .... USER: \"Select message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH GROUP .... \" AGENT: Select message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH GROUP .... AGENT: 0pen ~essage from Brian Gorwin <brg@dewey.watson.ibm.eom> about WEEKLY SPEECH GROUP SEMINAR. Done using the contents of message from Brian Gorwin <brg@dewey.watson.ibm.com> about WEEKLY SPEECH ....",
568
+ "num": null,
569
+ "content": "<table/>",
570
+ "type_str": "table"
571
+ }
572
+ }
573
+ }
574
+ }
Full_text_JSON/prefixW/json/W00/W00-0302.json ADDED
@@ -0,0 +1,514 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0302",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:58.169438Z"
6
+ },
7
+ "title": "GoDiS -An Accommodating Dialogue System",
8
+ "authors": [
9
+ {
10
+ "first": "Staffan",
11
+ "middle": [],
12
+ "last": "Larsson",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "GSteborg University",
17
+ "location": {
18
+ "postBox": "Box 200-295",
19
+ "postCode": "SE-405 30",
20
+ "settlement": "Humanisten, G5teborg",
21
+ "country": "Sweden"
22
+ }
23
+ },
24
+ "email": ""
25
+ },
26
+ {
27
+ "first": "Peter",
28
+ "middle": [],
29
+ "last": "Ljungl6f",
30
+ "suffix": "",
31
+ "affiliation": {
32
+ "laboratory": "",
33
+ "institution": "GSteborg University",
34
+ "location": {
35
+ "postBox": "Box 200-295",
36
+ "postCode": "SE-405 30",
37
+ "settlement": "Humanisten, G5teborg",
38
+ "country": "Sweden"
39
+ }
40
+ },
41
+ "email": ""
42
+ },
43
+ {
44
+ "first": "Robin",
45
+ "middle": [],
46
+ "last": "Cooper",
47
+ "suffix": "",
48
+ "affiliation": {
49
+ "laboratory": "",
50
+ "institution": "GSteborg University",
51
+ "location": {
52
+ "postBox": "Box 200-295",
53
+ "postCode": "SE-405 30",
54
+ "settlement": "Humanisten, G5teborg",
55
+ "country": "Sweden"
56
+ }
57
+ },
58
+ "email": ""
59
+ },
60
+ {
61
+ "first": "Elisabet",
62
+ "middle": [],
63
+ "last": "Engdahl",
64
+ "suffix": "",
65
+ "affiliation": {
66
+ "laboratory": "",
67
+ "institution": "GSteborg University",
68
+ "location": {
69
+ "postBox": "Box 200-295",
70
+ "postCode": "SE-405 30",
71
+ "settlement": "Humanisten, G5teborg",
72
+ "country": "Sweden"
73
+ }
74
+ },
75
+ "email": ""
76
+ },
77
+ {
78
+ "first": "Stina",
79
+ "middle": [],
80
+ "last": "Ericsson",
81
+ "suffix": "",
82
+ "affiliation": {
83
+ "laboratory": "",
84
+ "institution": "GSteborg University",
85
+ "location": {
86
+ "postBox": "Box 200-295",
87
+ "postCode": "SE-405 30",
88
+ "settlement": "Humanisten, G5teborg",
89
+ "country": "Sweden"
90
+ }
91
+ },
92
+ "email": ""
93
+ }
94
+ ],
95
+ "year": "",
96
+ "venue": null,
97
+ "identifiers": {},
98
+ "abstract": "This paper accompanies a demo of the GoDiS system. Work on~hi~ system was reported at IJCAI-99 (Bohlin et-al.~ 1999). GoDiS is a prototype dialogue system for information-seeking dialogue, capable of accommodating questions and tasks to enable the user to present information in any desired order, without explicitly naming the dialogue task. GoDiS is implemented using the TRINDIKIT software package, which enables implementation of these behaviours in a compact and natural way.",
99
+ "pdf_parse": {
100
+ "paper_id": "W00-0302",
101
+ "_pdf_hash": "",
102
+ "abstract": [
103
+ {
104
+ "text": "This paper accompanies a demo of the GoDiS system. Work on~hi~ system was reported at IJCAI-99 (Bohlin et-al.~ 1999). GoDiS is a prototype dialogue system for information-seeking dialogue, capable of accommodating questions and tasks to enable the user to present information in any desired order, without explicitly naming the dialogue task. GoDiS is implemented using the TRINDIKIT software package, which enables implementation of these behaviours in a compact and natural way.",
105
+ "cite_spans": [],
106
+ "ref_spans": [],
107
+ "eq_spans": [],
108
+ "section": "Abstract",
109
+ "sec_num": null
110
+ }
111
+ ],
112
+ "body_text": [
113
+ {
114
+ "text": "This paper accompanies a demo of the GoDiS 1 system reported at IJCAI-99 (Bohlin et al., 1999) . GoDiS is a prototype dialogue system for information-seeking dialogue, capable of accommodating questions and tasks to enable the user to present information in any desired order, without explicitly naming the dialogue task. GoDiS is implemented using the TRINDIKIT 2 software package developed in the TRINDI project. The TRINDIKIT is a toolkit for building and experimenting with dialogue move engines and information states (IS) , We use the term information state to mean, roughly, the information stored internally by an agent, in this case a dialogue system. A dialogue move engine (DME) updates the information state on the basis of observed dialogue moves and selects appropriate moves to be performed.",
115
+ "cite_spans": [
116
+ {
117
+ "start": 73,
118
+ "end": 94,
119
+ "text": "(Bohlin et al., 1999)",
120
+ "ref_id": "BIBREF0"
121
+ },
122
+ {
123
+ "start": 523,
124
+ "end": 527,
125
+ "text": "(IS)",
126
+ "ref_id": null
127
+ }
128
+ ],
129
+ "ref_spans": [],
130
+ "eq_spans": [],
131
+ "section": "Introduction",
132
+ "sec_num": "1"
133
+ },
134
+ {
135
+ "text": "The overall structure of the GoDiS system is illustrated below:",
136
+ "cite_spans": [],
137
+ "ref_spans": [],
138
+ "eq_spans": [],
139
+ "section": "System Description",
140
+ "sec_num": "2"
141
+ },
142
+ {
143
+ "text": "1Work on GoDiS has been supported by the TRINDI (Task Oriented Instructional Dialogue), EC Project LE4-8314, SDS (Swedish Dialogue Systems), NUTEK/HSFR Language Technology Project F1472/1997, and INDI (Information Exchange in Dialogue), Riksbankens Jubileumsfond 1997-0134, projects.",
144
+ "cite_spans": [],
145
+ "ref_spans": [],
146
+ "eq_spans": [],
147
+ "section": "System Description",
148
+ "sec_num": "2"
149
+ },
150
+ {
151
+ "text": "2 .x~.n~. ling, gU. se/research/proJ ects/trlndi/ Like any dialogue system built using the TRINDIKIT, GoDiS consists of a number of modules, an information state, and a number of resources hooked up to the information state.",
152
+ "cite_spans": [],
153
+ "ref_spans": [],
154
+ "eq_spans": [],
155
+ "section": "System Description",
156
+ "sec_num": "2"
157
+ },
158
+ {
159
+ "text": "In addition to the control module, which wires together the other modules, there are six modules in \u2022 GoDiS: input, which receives input3from the user;",
160
+ "cite_spans": [],
161
+ "ref_spans": [],
162
+ "eq_spans": [],
163
+ "section": "System Description",
164
+ "sec_num": "2"
165
+ },
166
+ {
167
+ "text": "interpret, which interprets utterances as dialogue moves with some content; generate, which generates natural language from dialogue moves; output, which produces output to the user; update, which updates the information state based on interpreted moves; and select, which selects the next move(s) to perform 4. The last two are DME roodules, which means that they together make up the 3GoDiS originally accepted written input only, but it is currently being hooked up to a speech recogniser to accept spoken input.",
168
+ "cite_spans": [],
169
+ "ref_spans": [],
170
+ "eq_spans": [],
171
+ "section": "System Description",
172
+ "sec_num": "2"
173
+ },
174
+ {
175
+ "text": "4This is done by updating the part of the information state containing the moves to be performed. DME in GoDiS. DME modules consist of a set of update rules and (optionally) an update algorithm governing the order in which rules are applied. Update rules are rules for updating the information state. They consist of a rule name, a precondition list, and an effect list. The preconditions are conditions on the information state, and the effects are operations on the information state. If the preconditions of a rule are true for the information state, then the effects of that rule can be applied to the information state.",
176
+ "cite_spans": [],
177
+ "ref_spans": [],
178
+ "eq_spans": [],
179
+ "section": "System Description",
180
+ "sec_num": "2"
181
+ },
182
+ {
183
+ "text": "There are three resources in GoDiS: a lexicon, a database and a domain resource containing (among other things) domain-specific dialogue plans. Currently, there are GoDiS resources for a travel agency domain andS-the autoroute domain. Also, for each of these domains there are lexicons in both English and Swedish.",
184
+ "cite_spans": [],
185
+ "ref_spans": [],
186
+ "eq_spans": [],
187
+ "section": "System Description",
188
+ "sec_num": "2"
189
+ },
190
+ {
191
+ "text": "The question about what should be included in the information state is central to any theory of dialogue management. The notion of information state we are putting forward here is basically a simplified version of the dialogue game board which has been proposed by Ginzburg. We are attempting to use as simple a version as possible in order to have a more or less practical system to experiment with.",
192
+ "cite_spans": [],
193
+ "ref_spans": [],
194
+ "eq_spans": [],
195
+ "section": "System Description",
196
+ "sec_num": "2"
197
+ },
198
+ {
199
+ "text": "The main division in the information state is between information which is private to the agent and that which is (assumed to be) shared between the dialogue participants. What we mean by shared information here is that which has been established (i.e. grounded) during the conversation, akin to what Lewis in (Lewis, 1979) called the \"conversational scoreboard\". We represent information states of a dialogue participant as a record of the type shown in figure 1.",
200
+ "cite_spans": [
201
+ {
202
+ "start": 310,
203
+ "end": 323,
204
+ "text": "(Lewis, 1979)",
205
+ "ref_id": "BIBREF2"
206
+ }
207
+ ],
208
+ "ref_spans": [],
209
+ "eq_spans": [],
210
+ "section": "System Description",
211
+ "sec_num": "2"
212
+ },
213
+ {
214
+ "text": "The private part of the information state includes a set of beliefs and a dialogue plan, i.e. is a list of dialogue actions that the agent wishes to carry out. The plan can be changed during the course of the conversation. For example, if a travel agent discovers that his customer wishes to get information about a flight he will adopt a plan to ask her where she wants to go, when she wants to go, what price class she wants and so on. The agenda, on the other hand, contains the short term goals or obligations that the agent has, i.e. what the agent is going to do next. For example, if the other dialogue participant raises a question, then the agent will normally put an action on the agenda to respond to the question. This action may or may not be in the agent's plan.",
215
+ "cite_spans": [],
216
+ "ref_spans": [],
217
+ "eq_spans": [],
218
+ "section": "System Description",
219
+ "sec_num": "2"
220
+ },
221
+ {
222
+ "text": "The private part of the IS also includes \"temporary\" shared information that saves the previously shared information until the latest utterance is grounded, i.e. confirmed as having been understood 8 by the other dialogue participant 5. In this way it is easy to retract the \"optimistic\" assumption that the information was understood if it should turn out that the other dialogue participant does not understand or accept it. If the agent pursues a cautious rather than an optimistic strategy then information will at first only be placed in the \"temporary\" slot until it has been acknowledged by the other dialogue participant whereupon it can be moved to the appropriate shared field.",
223
+ "cite_spans": [],
224
+ "ref_spans": [],
225
+ "eq_spans": [],
226
+ "section": "System Description",
227
+ "sec_num": "2"
228
+ },
229
+ {
230
+ "text": "The (supposedly) shared part of the IS consists of three subparts. One is a set of propositions which the agent assumes for the sake of the conversation and which are established during the dialogue. The second is a stack of questions under discussion (QUD). These are questions that have been raised and are currently under discussion in the dialogue. The third contains information about the latest utterance (speaker, moves and integration status).",
231
+ "cite_spans": [],
232
+ "ref_spans": [],
233
+ "eq_spans": [],
234
+ "section": "System Description",
235
+ "sec_num": "2"
236
+ },
237
+ {
238
+ "text": "Dialogue participants can address questions that have not been explicitly raised in the dialogue. However, it is important that a question be available to the agent who is to interpret it because the utterance may be elliptical. Here is an example from a travel agency dialogue6: The strategy we adopt for interpreting elliptical utterances is to think of them as short answers (in the sense of Ginzburg (Ginzburg, 1998) ) to questions on QUD. A suitable question here is What kind of price does P want for the ticket?. This question is not under discussion at the point when P says \"as cheap as possible\". But it can be figured out since J knows that this is a relevant question. In fact it will be a question which J has as an action in his plan to raise. On our analysis it is this fact which enables A to interpret the ellipsis. He finds the matching question on his plan, accommodates by placing it on QUD and then continues with the integration of the information expressed by as cheap as possible as normal. Note that if such a question is \u2022 not available then the ellipsis cannot be interpreted as in the dialogue below. 5In discussing grounding we will assume that there is just one other dialogue participant.",
239
+ "cite_spans": [
240
+ {
241
+ "start": 404,
242
+ "end": 420,
243
+ "text": "(Ginzburg, 1998)",
244
+ "ref_id": "BIBREF1"
245
+ }
246
+ ],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "Accommodation in GoDiS",
250
+ "sec_num": "3"
251
+ },
252
+ {
253
+ "text": "6This dialogue has been collected by the University of Lund as part of the SDS project. We quote a translation of the transcription done in GSteborg as part of the same project. This dialogue is incoherent if what is being discussed is when the child Maria is going to be picked up from her friend's house (at least under standard dialogue plans-that we might have for such a conversation).",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "Accommodation in GoDiS",
258
+ "sec_num": "3"
259
+ },
260
+ {
261
+ "text": "PRIVATE",
262
+ "cite_spans": [],
263
+ "ref_spans": [],
264
+ "eq_spans": [],
265
+ "section": "IS :",
266
+ "sec_num": null
267
+ },
268
+ {
269
+ "text": "Question accommodation has been implemented in GoDiS using a single information state update rule accommodateQuestion, seen below. When interpreting the latest utterance by the other participant, the system makes the assumption that it was a reply move with content A. This assumption requires accommodating some question Q such that A is a relevant answer to Q. The check operator \"answer-to(A, Q)\" is true if A is a relevant answer to Q given the current information state, according to a (domain-dependent) definition of question-answer relevance. After an initial exchange for establishing contact the first thing that P says to the travel agent in our dialogue is \"flights to paris\". This is again an ellipsis which on our analysis has to be interpreted as the answer to a question (two questions, actually) in order to be understandable and relevant. As no questions have been raised yet in the dialogue (apart from whether the participants have each other's attention) the travel agent cannot find the appropriate question on his plan. Furthermore, as this is the first indication of what the customer wants, the travel agent cannot have a plan with detailed questions. We assume that the travel agent has various plan types in his domain knowledge determining what kind of conversations he is able to have. Each plan is associated with a task. E.g. he is able to book trips by various modes of travel, he is able to handle complaints, book hotels, rental cars etc. What he needs to do is take the customer's utterance and try to match it against questions in his plan types in his domain knowledge. When he finds a suitable match he will accommodate the corresponding task, thereby providing a plan to ask relevant question for flights, e.g. when to travel?, what date? etc. Once he has accommodated this task and retrieved the plan he can proceed as in the previous example. That is, he can accommodate the; QUD with the relevant question and proceed with the interpretation of ellipsis in the normal fashion.",
270
+ "cite_spans": [],
271
+ "ref_spans": [],
272
+ "eq_spans": [],
273
+ "section": "IS :",
274
+ "sec_num": null
275
+ },
276
+ {
277
+ "text": "This example is interesting for a couple of reasons. It provides us with an example of \"recursive\" accommodation. The QUD needs to be accommodated, but in order to do this the dialogue task needs to be accommodated and the plan retrieved. The other interesting aspect of this is that accommodating the dialogue task in this way actually serves to drive the dialogue forward. That is, the mechanism by which the agent interprets this ellipsis, gives him a plan for a substantial part of the rest of the dialogue. This is a way of capturing the intuition that saying flights to Paris to a travel agent immediately makes a number of questions become relevant.",
278
+ "cite_spans": [],
279
+ "ref_spans": [],
280
+ "eq_spans": [],
281
+ "section": "IS :",
282
+ "sec_num": null
283
+ },
284
+ {
285
+ "text": "Task accommodation and retrieval of the relevant plan has been implemented in GoDiS by two information state update rules.",
286
+ "cite_spans": [],
287
+ "ref_spans": [],
288
+ "eq_spans": [],
289
+ "section": "IS :",
290
+ "sec_num": null
291
+ },
292
+ {
293
+ "text": "We will give some examples of CoDiS dialogues in the English travel agency domain. One of the features of the TRINDIKIT is that it provides methods for inspecting the information state at any stage of a dialogue, and also to see which rules are applied. Unfortunately, because of space limitations, we are not able to include examples of this in this paper; however, it is an integral part of our demo.",
294
+ "cite_spans": [],
295
+ "ref_spans": [],
296
+ "eq_spans": [],
297
+ "section": "Sample dialogues",
298
+ "sec_num": "4"
299
+ },
300
+ {
301
+ "text": "The first dialogue shows the typical behaviour of dialogue systems for information-seeking: each question is answered in turn, and the user always gives exactly the information requested. ",
302
+ "cite_spans": [],
303
+ "ref_spans": [],
304
+ "eq_spans": [],
305
+ "section": "Sample dialogues",
306
+ "sec_num": "4"
307
+ },
308
+ {
309
+ "text": "GoDiS is a small-scale prototype and as such it suffers from the familiar drawbacks of many experimental systems: its lexicons and databases are very small, and the domain knowledge is limited. The dialogue plans cover only a small (but important) part of the kind of dialogues that can take place in the travel agency and autoroute domains. We hope to be able to extend both the number of domains and the scale of each domain in the future.",
310
+ "cite_spans": [],
311
+ "ref_spans": [],
312
+ "eq_spans": [],
313
+ "section": "Limitations",
314
+ "sec_num": "5"
315
+ },
316
+ {
317
+ "text": "Also, the focus in the development of GoDiS has been on the dialogue management, i.e. the dialogue move engine and the information state. Less effort has been put into the interpretation and generation modules, which currently use (a slightly extended version of) keyword spotting and partially canned text, respectively. We hope to replace these by plugging in existing interpretation and generation technology to GoDiS.",
318
+ "cite_spans": [],
319
+ "ref_spans": [],
320
+ "eq_spans": [],
321
+ "section": "Limitations",
322
+ "sec_num": "5"
323
+ },
324
+ {
325
+ "text": "For the tasks that GoDiS currently is able to handle, full-blown semantics is not needed. We use a very limited semantics where propositions are essentially feature-value pairs. As a consequence, GoDiS",
326
+ "cite_spans": [],
327
+ "ref_spans": [],
328
+ "eq_spans": [],
329
+ "section": "Limitations",
330
+ "sec_num": "5"
331
+ },
332
+ {
333
+ "text": "Currently, the main contribution of GoDiS is perhaps to show how an extended notion of accommodation can serve to make dialogue systems easier to interact with, by letting the user decide how and in what order to present information to the system. Also, the fact that accommodation can be implemented simply by adding three update rules indicates that information state update rules provide a natural and compact way of implementing dialogue strategies. An important issue for future research is the relation of question and task accommodation to plan recognition approaches to dialogue (Sidner, 1985) .",
334
+ "cite_spans": [
335
+ {
336
+ "start": 587,
337
+ "end": 601,
338
+ "text": "(Sidner, 1985)",
339
+ "ref_id": "BIBREF3"
340
+ }
341
+ ],
342
+ "ref_spans": [],
343
+ "eq_spans": [],
344
+ "section": "Contributions",
345
+ "sec_num": "6"
346
+ },
347
+ {
348
+ "text": "GoDiS also features a simple grounding strategy which is nevertheless sufficient in many cases. The grounding mechanism is implemented by three update rules. It is possible to switch resources in middialogue, e.g. to change language. Also, GoDiS is easily reconfigurable to new information-seeking domains. To adapt GoDiS to a new domain, one needs to supply a database, a lexicon and domain knowledge, including a set of dialogue plans. The GoDiS modules or information state don't need to be changed in any way.",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "Contributions",
353
+ "sec_num": "6"
354
+ },
355
+ {
356
+ "text": "In general, as an example of a dialogue system iraplemented using the TRINDIKIT package, GoDiS shows how the information state approach is useful for clarifying and comparing theories of dialogue, and for exploring new solutions.",
357
+ "cite_spans": [],
358
+ "ref_spans": [],
359
+ "eq_spans": [],
360
+ "section": "Contributions",
361
+ "sec_num": "6"
362
+ },
363
+ {
364
+ "text": "is e.g. not able to handle dialogue with more than one referent; for this, the information state would have to be amended with a set of referents, and propositions would have to include referent information. This is an area where we hope to improve GoDiS in the near future.Speech recognition and synthesis is currently being added to GoDiS, but at the time of writing only written input and output is available.",
365
+ "cite_spans": [],
366
+ "ref_spans": [],
367
+ "eq_spans": [],
368
+ "section": "",
369
+ "sec_num": null
370
+ }
371
+ ],
372
+ "back_matter": [],
373
+ "bib_entries": {
374
+ "BIBREF0": {
375
+ "ref_id": "b0",
376
+ "title": "Information states and dialogue move engines",
377
+ "authors": [
378
+ {
379
+ "first": "P",
380
+ "middle": [],
381
+ "last": "Bohlin",
382
+ "suffix": ""
383
+ },
384
+ {
385
+ "first": "R",
386
+ "middle": [],
387
+ "last": "Cooper",
388
+ "suffix": ""
389
+ },
390
+ {
391
+ "first": "E",
392
+ "middle": [],
393
+ "last": "Engdahl",
394
+ "suffix": ""
395
+ },
396
+ {
397
+ "first": "S",
398
+ "middle": [],
399
+ "last": "Larsson",
400
+ "suffix": ""
401
+ }
402
+ ],
403
+ "year": 1999,
404
+ "venue": "IJCAI-99 Workshop on Knowledge and Reasoning in Practical Dialogue Systems",
405
+ "volume": "",
406
+ "issue": "",
407
+ "pages": "",
408
+ "other_ids": {},
409
+ "num": null,
410
+ "urls": [],
411
+ "raw_text": "P. Bohlin, R. Cooper, E. Engdahl, and S. Lars- son. 1999. Information states and dialogue move engines. In J. Alexandersson, editor, IJCAI-99 Workshop on Knowledge and Reasoning in Prac- tical Dialogue Systems.",
412
+ "links": null
413
+ },
414
+ "BIBREF1": {
415
+ "ref_id": "b1",
416
+ "title": "Clarifying utterances",
417
+ "authors": [
418
+ {
419
+ "first": "\u2022",
420
+ "middle": [
421
+ "J"
422
+ ],
423
+ "last": "Ginzburg",
424
+ "suffix": ""
425
+ }
426
+ ],
427
+ "year": 1998,
428
+ "venue": "Proc. of the Twente Workshop on the Formal Semantics and Pragmatics of Dialogues",
429
+ "volume": "",
430
+ "issue": "",
431
+ "pages": "11--30",
432
+ "other_ids": {},
433
+ "num": null,
434
+ "urls": [],
435
+ "raw_text": "\u2022 J. Ginzburg. 1998. Clarifying utterances. In J. Hul- stijn and A. Niholt, editors, Proc. of the Twente Workshop on the Formal Semantics and Pragmat- ics of Dialogues, pages 11-30, Enschede. Univer- siteit Twente, Faculteit Informatica.",
436
+ "links": null
437
+ },
438
+ "BIBREF2": {
439
+ "ref_id": "b2",
440
+ "title": "Scorekeeping in a language game",
441
+ "authors": [
442
+ {
443
+ "first": "D",
444
+ "middle": [
445
+ "K"
446
+ ],
447
+ "last": "Lewis",
448
+ "suffix": ""
449
+ }
450
+ ],
451
+ "year": 1979,
452
+ "venue": "Journal of Philosophical Logic",
453
+ "volume": "8",
454
+ "issue": "",
455
+ "pages": "339--359",
456
+ "other_ids": {},
457
+ "num": null,
458
+ "urls": [],
459
+ "raw_text": "D. K. Lewis. 1979. Scorekeeping in a language game. Journal of Philosophical Logic, 8:339-359.",
460
+ "links": null
461
+ },
462
+ "BIBREF3": {
463
+ "ref_id": "b3",
464
+ "title": "Plan parsing for intended response recognition in discourse",
465
+ "authors": [
466
+ {
467
+ "first": "C",
468
+ "middle": [
469
+ "L"
470
+ ],
471
+ "last": "Sidner",
472
+ "suffix": ""
473
+ }
474
+ ],
475
+ "year": 1985,
476
+ "venue": "Computational Intelligence",
477
+ "volume": "1",
478
+ "issue": "1",
479
+ "pages": "1--10",
480
+ "other_ids": {},
481
+ "num": null,
482
+ "urls": [],
483
+ "raw_text": "C. L. Sidner. 1985. Plan parsing for intended re- sponse recognition in discourse. Computational Intelligence, 1 (1) :1-10, February.",
484
+ "links": null
485
+ }
486
+ },
487
+ "ref_entries": {
488
+ "FIGREF0": {
489
+ "num": null,
490
+ "type_str": "figure",
491
+ "text": "$J: what month do you want to go SP: well around 3rd 4th april / some time there SP: as cheap as possible",
492
+ "uris": null
493
+ },
494
+ "FIGREF1": {
495
+ "num": null,
496
+ "type_str": "figure",
497
+ "text": ". What time are you coming to pick up Maria? B. Around 6 p.m. As cheap as possible.",
498
+ "uris": null
499
+ },
500
+ "FIGREF2": {
501
+ "num": null,
502
+ "type_str": "figure",
503
+ "text": "The type of information state we are assuming",
504
+ "uris": null
505
+ },
506
+ "FIGREF3": {
507
+ "num": null,
508
+ "type_str": "figure",
509
+ "text": ".LU.MOVES, answer(A), false ) in( PRIVATE.PLAN, raise(Q) ) domain :: relevant_answer(Q, A) del( PRIVATE.PLAN, raise(Q) ) EFF: push( SHARED.QUD, Q )",
510
+ "uris": null
511
+ }
512
+ }
513
+ }
514
+ }
Full_text_JSON/prefixW/json/W00/W00-0303.json ADDED
@@ -0,0 +1,745 @@
1
+ {
2
+ "paper_id": "W00-0303",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:59.720961Z"
6
+ },
7
+ "title": "Dialogue Management in the Mercury Flight Reservation System",
8
+ "authors": [
9
+ {
10
+ "first": "Stephanie",
11
+ "middle": [],
12
+ "last": "Seneff",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "Spoken Language Systems Group Laboratory for Computer Science Massachusetts",
16
+ "institution": "",
17
+ "location": {
18
+ "postCode": "02139",
19
+ "region": "MA"
20
+ }
21
+ },
22
+ "email": "seneff@sls.ics.mit.edu"
23
+ },
24
+ {
25
+ "first": "Joseph",
26
+ "middle": [],
27
+ "last": "Polifroni",
28
+ "suffix": "",
29
+ "affiliation": {
30
+ "laboratory": "Spoken Language Systems Group Laboratory for Computer Science Massachusetts",
31
+ "institution": "",
32
+ "location": {
33
+ "postCode": "02139",
34
+ "region": "MA"
35
+ }
36
+ },
37
+ "email": ""
38
+ }
39
+ ],
40
+ "year": "",
41
+ "venue": null,
42
+ "identifiers": {},
43
+ "abstract": "This paper describes the dialogue module of the Mercury systemewhich has been under development over the past year or two. Mercury provides telephone access to an on-line flight database, and allows users to plan and price itineraries between major airports worldwide. The main focus of this paper is the dialogue control strategy, which is based on a set of ordered rules as a mechanism to manage complex dialogue interactions. The paper also describes the interactions between the dialogue component and the other servers of the system, mediated via a central hub. We evaluated the system on 49 dialogues from users booking real flights, and report on a number of quantitative measures of the dialogue interaction.",
44
+ "pdf_parse": {
45
+ "paper_id": "W00-0303",
46
+ "_pdf_hash": "",
47
+ "abstract": [
48
+ {
49
+ "text": "This paper describes the dialogue module of the Mercury systemewhich has been under development over the past year or two. Mercury provides telephone access to an on-line flight database, and allows users to plan and price itineraries between major airports worldwide. The main focus of this paper is the dialogue control strategy, which is based on a set of ordered rules as a mechanism to manage complex dialogue interactions. The paper also describes the interactions between the dialogue component and the other servers of the system, mediated via a central hub. We evaluated the system on 49 dialogues from users booking real flights, and report on a number of quantitative measures of the dialogue interaction.",
50
+ "cite_spans": [],
51
+ "ref_spans": [],
52
+ "eq_spans": [],
53
+ "section": "Abstract",
54
+ "sec_num": null
55
+ }
56
+ ],
57
+ "body_text": [
58
+ {
59
+ "text": "Dialogue modeling is a critical and challenging aspect of conversational systems, particularly when users are permitted flexibility with regard to defining the constraints of the task. For systems that adopt a strict system-initiated approach, it is feasible to define a set of states and state transitions depending on the usually small number of possible user actions at each state. However, if the user is permitted to say anything within the scope of the recognizer at any time, such a finite-state solution becomes unwieldy. We are interested in the development of mixed-initiative systems, where the system may make specific requests or suggestions, but the user is not required to be compliant. Instead of a finite state dialogue model, we choose to decompose dialogue state into a set of state variables. The activities for a given turn typically involve the sequential execution of a number of specialized routines, each of which performs a specific part of the dialogue requirements and alters the state variables in particular ways. To determine which of the operations should be performed, the system consults a dialogue control table, which is specified in a simple scripting language.",
60
+ "cite_spans": [],
61
+ "ref_spans": [],
62
+ "eq_spans": [],
63
+ "section": "Introduction",
64
+ "sec_num": "1"
65
+ },
66
+ {
67
+ "text": "This paper describes experiments with using this approach to dialogue modeling in the context of our Mercury flight reservation system. Mercury allows users to plan air travel between 226 cities worldwide. Following log-on, the user interacts with the system to select the flights of their trip. When the flight plan is completed, the system takes the initiative to offer to price and email the itinerary. Finally, the system asks the user a few questions to help determine user satisfaction.",
68
+ "cite_spans": [],
69
+ "ref_spans": [],
70
+ "eq_spans": [],
71
+ "section": "Introduction",
72
+ "sec_num": "1"
73
+ },
74
+ {
75
+ "text": "The overall system makes use of the GALAXY architecture [Seneffet al (1999) ], which consists of a number of specialized servers that communicate with one another via a central programmable hub. An audio server captures the user's speech via a Dialogic board, and transmits the waveform to the speech recognizer [Glass et al (1996) ]. The language understanding component [Seneff (1992) ] parses a word graph produced by the recognizer and delivers a semantic frame, encoding the meaning of the utterance, to the discourse component. The output of the discourse component [Seneff (1996) ] is the framein-context, which is transformed into a flattened Eform (electronic form) by the generation server. This E-form is delivered to the turn manager, and provides the initial settings of the dialogue state. The turn manager consults the dialogue control table to decide which operations to perform, and typically engages in a module-to-module subdialogue to retrieve tables from the database. It prepares a response frame, which may or may not include tabular entries. The response frame is sent to the generation component [Glass (1994) ] which transforms it in parallel into both a text string and an annotated string that specifies the input controls for the speech synthesizer. Finally, the speech synthesizer transmits a waveform to the audio server which then relays the spoken response to the user over the telephone. The entire dialogue is recorded in detail in a log file for later examination.",
76
+ "cite_spans": [
77
+ {
78
+ "start": 69,
79
+ "end": 75,
80
+ "text": "(1999)",
81
+ "ref_id": null
82
+ },
83
+ {
84
+ "start": 312,
85
+ "end": 331,
86
+ "text": "[Glass et al (1996)",
87
+ "ref_id": "BIBREF1"
88
+ },
89
+ {
90
+ "start": 372,
91
+ "end": 386,
92
+ "text": "[Seneff (1992)",
93
+ "ref_id": "BIBREF2"
94
+ },
95
+ {
96
+ "start": 572,
97
+ "end": 586,
98
+ "text": "[Seneff (1996)",
99
+ "ref_id": "BIBREF3"
100
+ },
101
+ {
102
+ "start": 1121,
103
+ "end": 1134,
104
+ "text": "[Glass (1994)",
105
+ "ref_id": "BIBREF5"
106
+ }
107
+ ],
108
+ "ref_spans": [],
109
+ "eq_spans": [],
110
+ "section": "Introduction",
111
+ "sec_num": "1"
112
+ },
113
+ {
114
+ "text": "Mercury's dialogue manager manipulates linguistic and world knowledge represented in the form of semantic frames. At each turn it begins with an E-form representing the constraints of the current query as a set of (key: value) pairs. This E-form pro-U: John Doe S: Please speak the month and day you registered as your password, as in, may first. U: april first S:",
115
+ "cite_spans": [],
116
+ "ref_spans": [],
117
+ "eq_spans": [],
118
+ "section": "Knowledge Representations",
119
+ "sec_num": "2"
120
+ },
121
+ {
122
+ "text": "Welcome John. You have successfully logged onto Mercury. How may I help you? U: i want to travel to washington dulles on november fourth S:",
123
+ "cite_spans": [],
124
+ "ref_spans": [],
125
+ "eq_spans": [],
126
+ "section": "Knowledge Representations",
127
+ "sec_num": "2"
128
+ },
129
+ {
130
+ "text": "From Boston to Washington Dulles on November 4. Can you provide the approximate departure time or airline preference? U: i need to arrive at around nine a m I:",
131
+ "cite_spans": [],
132
+ "ref_spans": [],
133
+ "eq_spans": [],
134
+ "section": "Knowledge Representations",
135
+ "sec_num": "2"
136
+ },
137
+ {
138
+ "text": "Okay. Flights from Boston to Washington Dulles arriving at 9:00 am on November 4. One moment please.",
139
+ "cite_spans": [],
140
+ "ref_spans": [],
141
+ "eq_spans": [],
142
+ "section": "Knowledge Representations",
143
+ "sec_num": "2"
144
+ },
145
+ {
146
+ "text": "The best matching flight is a U S Airways flight arriving at 8:59 a.m. I also have a United flight arriving at 8:36 a.m. U: i what time does the united flight leave S:",
147
+ "cite_spans": [],
148
+ "ref_spans": [],
149
+ "eq_spans": [],
150
+ "section": "S:",
151
+ "sec_num": null
152
+ },
153
+ {
154
+ "text": "United flight 1819 departs at 7:00 am from Boston. Shall I book this flight for you? U: yes please S:",
155
+ "cite_spans": [],
156
+ "ref_spans": [],
157
+ "eq_spans": [],
158
+ "section": "S:",
159
+ "sec_num": null
160
+ },
161
+ {
162
+ "text": "United flight 1819 has been added to your itinerary. You depart Boston at 7:00 am and arrive in Washington Dulles at 8:36 am. Where would you like to go next? U: i would like to return to boston S:",
163
+ "cite_spans": [],
164
+ "ref_spans": [],
165
+ "eq_spans": [],
166
+ "section": "S:",
167
+ "sec_num": null
168
+ },
169
+ {
170
+ "text": "What date will you be returning on? U: the same day I:",
171
+ "cite_spans": [],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "S:",
175
+ "sec_num": null
176
+ },
177
+ {
178
+ "text": "Okay. United flights from Washington Dulles to Boston on November 4. One moment please.",
179
+ "cite_spans": [],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "S:",
183
+ "sec_num": null
184
+ },
185
+ {
186
+ "text": "Can you provide a departure or arrival time? The earliest nonstop leaves at 9:30 a.m. and arrives at 10:57 a.m. U: i would like to leave after three thirty S:",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "S:",
191
+ "sec_num": null
192
+ },
193
+ {
194
+ "text": "The best matching flight is a United flight departing at 4:30 p.m. I also have a United flight departing at 5:30 p.m. U: i will take the first one S:",
195
+ "cite_spans": [],
196
+ "ref_spans": [],
197
+ "eq_spans": [],
198
+ "section": "S:",
199
+ "sec_num": null
200
+ },
201
+ {
202
+ "text": "United flight 1278 has been added to your itinerary. You depart Washington Dulles at 4:30 pm and arrive in Boston at 6:07 pm. Shall I price your itinerary? U: yes please I:",
203
+ "cite_spans": [],
204
+ "ref_spans": [],
205
+ "eq_spans": [],
206
+ "section": "S:",
207
+ "sec_num": null
208
+ },
209
+ {
210
+ "text": "Pricing your itinerary will take a while. In the meantime, I'll summarize your itinerary for you.",
211
+ "cite_spans": [],
212
+ "ref_spans": [],
213
+ "eq_spans": [],
214
+ "section": "S:",
215
+ "sec_num": null
216
+ },
217
+ {
218
+ "text": "on November 4, you depart on United flight 1819 from Boston at 7:00 am and arrive in Washington Dulles at 8:36 am. From there, you depart on United flight 1278 at 4:30 pm and arrive in Boston at 6:07 pm. The price for your itinerary is $249.00. Shall I email your itinerary to you? U: yes S:",
219
+ "cite_spans": [],
220
+ "ref_spans": [],
221
+ "eq_spans": [],
222
+ "section": "S:",
223
+ "sec_num": null
224
+ },
225
+ {
226
+ "text": "Your itinerary has been emailed. Is there something else I can do for you? U: no thank you S:",
227
+ "cite_spans": [],
228
+ "ref_spans": [],
229
+ "eq_spans": [],
230
+ "section": "S:",
231
+ "sec_num": null
232
+ },
233
+ {
234
+ "text": "Thank you for calling! Have a nice day! Table 1 : Example dialogue between a user and the system involving a real trip. \"U\" denotes a user utterance, \"S\" a system utterance, and 'T' an intermediate system response spoken prior to database retrieval.",
235
+ "cite_spans": [],
236
+ "ref_spans": [
237
+ {
238
+ "start": 40,
239
+ "end": 47,
240
+ "text": "Table 1",
241
+ "ref_id": null
242
+ }
243
+ ],
244
+ "eq_spans": [],
245
+ "section": "S:",
246
+ "sec_num": null
247
+ },
248
+ {
249
+ "text": "vides the initial values in the dialogue state, which evolves over the course of the turn as rules are executed. In addition to the dialogue state, Mercury also makes use of several other internal representations which retain state. These are packaged up into a turn-manager history frame which is passed to the hub for safe-keeping at the end of each turn, and returned with each subsequent turn. The user model contains a record of the system's current belief about the state of the user's flight plan. For example, if the user's first query were \"I want to go to Denver next Friday morning, returning the fol-lowing Wednesday,\" the system would record that this is a round trip flight and would save the return date (unresolved, in case there was a recognition error on the forward leg date) in the user model. It would retrieve from the user model, as the forward leg source, the default value provided by the user at enrollment. Thus the user model retains both the initial user preferences and all of the details of the current flight plan as they become available. The system also retains internally a list of all the flights that matched the user's constraint specifications, as well as a list of the subset of the flights that it has spoken about (e.g., the nonstop flights or the earliest Figure 1 .\" An (incomplete) state diagram for the example system described in the text. flight). These flights become available for reference in subsequent queries such as \"book the third one,\" or \"where does the united flight stop?\". Whenever the system needs to go to the database, it provides an intermediate reply frame which serves both to ask the user for patience and to confirm a consistent shared model of the dialogue history. The system's final response is also a frame, and it often contains a list of flights to be read back to the user. In other cases, it may be a yes-no question as in \"Shall I email your itinerary to you?\" The latter case also necessitates sending both to the discourse component a system-initiative context for yes/no responses, and to the language understanding component a dialogue context filter that favors confirmations and denials in the N-best selection. A similar process takes place when the system prompts for information such as a date or a departure city. Table 1 shows a dialogue between a user and the system involving a real trip between Boston and Washington D.C. The name and password information have been omitted for privacy reasons. In the figure, \"U\" stands for \"user\", \"S\" for system, and 'T' for intermediate response.",
250
+ "cite_spans": [],
251
+ "ref_spans": [
252
+ {
253
+ "start": 1299,
254
+ "end": 1307,
255
+ "text": "Figure 1",
256
+ "ref_id": null
257
+ },
258
+ {
259
+ "start": 2302,
260
+ "end": 2309,
261
+ "text": "Table 1",
262
+ "ref_id": null
263
+ }
264
+ ],
265
+ "eq_spans": [],
266
+ "section": "S:",
267
+ "sec_num": null
268
+ },
269
+ {
270
+ "text": "A finite state network is a convenient way to formulate dialogue states, as long as the system is able to maintain strong control over the order in which slots are filled, and especially when slots are filled one at a time. However, if the user is permitted the flexibility to fill slots in any order, and to fill more than one slot in a single turn, then it becomes difficult and inefficient to define a finite state network that covers the entire space of possibilities.",
271
+ "cite_spans": [],
272
+ "ref_spans": [],
273
+ "eq_spans": [],
274
+ "section": "Dialogue Management Strategies",
275
+ "sec_num": "3"
276
+ },
277
+ {
278
+ "text": "A finite state solution for a simple dialogue exam-!source --> prompt_source !destination --> prompt_destination !airline --> prompt_airline !date --> prompt_date nprompts > 1 --> mark_multiple nprompts = 0 --> retrieve_flights ple is given in Figure 1 . Here, we assume that the user can fill four slots (source, destination, date, and airline) and that all four must be filled before the system can retrieve flights. The slots can be filled in any order, and the user can fill any number of slots in each turn. For notational convenience, we represent the states filled/unfilled by 1/0, and we order the fields as [source, destination, date, airline], such that state \"1101\" says that only date is missing. At each state the system prompts the user for all missing slots. Thus, state 1000 would speak, \"Can you provide one or more of destination, date, and airline?\" If the user provides more than a single field in a given turn, the system advances by more than one state in the state diagram. To fully specify all possibilities, even for this simple example, requires a large number of states in the state diagram. Figure 2 shows a set of ordered rules which achieve the same goals as the state diagram but with a greatly simplified dialogue control specification. In this case, state is encoded implicitly as a bit map corresponding to the filled/unfilled values of the four variables. The notation \"!\" stands for \" NOT Figure 3 : Selected rules from the Mercury dialogue control table. \"&\" stands for \"AND\" and \"[\" stands for \"OR\".",
279
+ "cite_spans": [
280
+ {
281
+ "start": 1421,
282
+ "end": 1424,
283
+ "text": "NOT",
284
+ "ref_id": null
285
+ }
286
+ ],
287
+ "ref_spans": [
288
+ {
289
+ "start": 244,
290
+ "end": 252,
291
+ "text": "Figure 1",
292
+ "ref_id": null
293
+ },
294
+ {
295
+ "start": 1119,
296
+ "end": 1127,
297
+ "text": "Figure 2",
298
+ "ref_id": "FIGREF1"
299
+ },
300
+ {
301
+ "start": 1425,
302
+ "end": 1433,
303
+ "text": "Figure 3",
304
+ "ref_id": null
305
+ }
306
+ ],
307
+ "eq_spans": [],
308
+ "section": "Dialogue Management Strategies",
309
+ "sec_num": "3"
310
+ },
311
+ {
312
+ "text": "performed when the conditions are met. Each operation is usually a specific function, and is free to augment the variable space by either altering the values of pre-existing variables or introducing new variables. In our simple example the four prompt operations simply add their respective variables to the response frame and increment the variable \"nprompt.\" The final function \"mark_multiple\" fires only if more than one of the preceding functions have fired. Its action is to insert \"one or more of\" into the response.",
313
+ "cite_spans": [],
314
+ "ref_spans": [],
315
+ "eq_spans": [],
316
+ "section": "Dialogue Management Strategies",
317
+ "sec_num": "3"
318
+ },
319
+ {
320
+ "text": "The final \"retrieve\" operation only fires if none of the preceding operations fired. It is significant that state is dispersed not only over the variables but also over the operations. Thus, any single prompt state of the state transition matrix would involve up to two operations in the dialogue control table, and, conversely, the \"mark_multiple\" operation is associated with several states. In addition to possibly altering the values of variables in the dialogue state, operations also have the responsibility to return, upon completion, one of three \"move\" states. The majority of the operations return the state ~'CONTINUE,\" which means simply to continue execution just beyond the rule that just fired. The two other possibilities are \"STOP,\" i.e., exit from the dialogue control table, and \"RESTART\" which means to return to the top of the set of dialogue rules and start over. An (optional) distinguished operation, which handles activities that should occur routinely at every turn, is executed upon exiting the dialogue control process. The tests on variables can be binary, arithmetic, or string matching on values.",
321
+ "cite_spans": [],
322
+ "ref_spans": [],
323
+ "eq_spans": [],
324
+ "section": "Dialogue Management Strategies",
325
+ "sec_num": "3"
326
+ },
327
+ {
328
+ "text": "Our Mercury system makes use of this \"ordered rules\" strategy for dialogue control. However, the rules are dealing with a great deal more complexity than simply deciding what prompts to issue when certain slots are unfilled. There are currently a total of over 200 rules in Mercury's dialogue control table. These rules can be grouped into several distinct categories. Only nine of the rules involve prompts for missing information. Another nine are involved with logging into the system, i.e., acquiring the name and the password, which may have to be entered using the telephone keypad, either upon user request or as a consequence of recognition failure. Eleven rules are concerned with meta-level interactions such as apologies for missing services (no flight status information available), and requests for help or repetition. Several rules have to do with determining whether the current request plausibly involves a reference to a flight in a pre-existing flight list. This could be an obvious reference to the nth flight, or it could be an inquiry about the \"united flight\" or the \"nine a.m. flight.\" Several more rules involve interpreting various references to relative dates and/or times such as \"the following Thursday,\" or \"that same afternoon.\"",
329
+ "cite_spans": [],
330
+ "ref_spans": [],
331
+ "eq_spans": [],
332
+ "section": "Dialogue Management Strategies",
333
+ "sec_num": "3"
334
+ },
335
+ {
336
+ "text": "The largest single category concerns preparing the reply frame, after the database retrieval has already taken place. Twenty six rules are concerned with this task, and they are keyed on a combination of the number and type of flights retrieved and the specific request of the user (e.g., \"where do they connect?\"). The purpose of these functions is to reduce the number of database tuples returned for a given query to a manageable set for a spoken response. This reduction is based on a hierarchy of quality measures for each flight, beginning with any stated or inferred preference (e.g., a particular airport in the case of cities with multiple airports, or a particular airline in the case of a multi-leg booking where one leg has already been established) and including number of stops and length of flight. These functions also consolidate information about the flights to be spoken, combining shared attributes. Our ultimate goal is to produce a response such as \"I have found three nonstop United flights. Flight 100 leaves at 8:45, flight 200 leaves at 9:10, and flight 300 leaves at 9:30.\" The dialogue control table facilitates this interaction by enabling the system developer to encode the constraints of the hierarchy in the rules.",
337
+ "cite_spans": [],
338
+ "ref_spans": [],
339
+ "eq_spans": [],
340
+ "section": "Dialogue Management Strategies",
341
+ "sec_num": "3"
342
+ },
343
+ {
344
+ "text": "Finally, there are a miscellaneous set of rules that have to do with updating the user model, preparing the intermediate reply, pricing or emailing the itinerary, preparing the database query, filtering flights, or updating the itinerary.",
345
+ "cite_spans": [],
346
+ "ref_spans": [],
347
+ "eq_spans": [],
348
+ "section": "Dialogue Management Strategies",
349
+ "sec_num": "3"
350
+ },
351
+ {
352
+ "text": "Since the operations are general functions, it is up to the system developer to decide how to parcel up the computational requirements into the individual operations. With experience, one acquires a set of guidelines to help formalize this process. As a general rule, it is preferrable to limit the use of nested function calls. Instead, an operation can set a vari-11. 5%WER ] words/turn I total turns I total time7 11 229 sec. Nutts: 41 90 55 31 9 226",
353
+ "cite_spans": [],
354
+ "ref_spans": [],
355
+ "eq_spans": [],
356
+ "section": "Dialogue Management Strategies",
357
+ "sec_num": "3"
358
+ },
359
+ {
360
+ "text": "Figure 4: Some easily computed statistics on the 36 successful bookings.",
361
+ "cite_spans": [],
362
+ "ref_spans": [],
363
+ "eq_spans": [],
364
+ "section": "Dialogue Management Strategies",
365
+ "sec_num": "3"
366
+ },
367
+ {
368
+ "text": "able to indicate that another operation needs to be called, and then the intended subroutine gets promoted to the status of a dialogue control operation. This has the effect of exposing more of the internal activities of the system to the dialogue control table, which serves as a very useful outline of system control flow. Another general policy is that each operation should be concerned with a single well-defined task, again in or~er~not to conceal complexity. Figure 3 shows several examples of actual rules in Mercury's dialogue control table.",
369
+ "cite_spans": [],
370
+ "ref_spans": [
371
+ {
372
+ "start": 466,
373
+ "end": 474,
374
+ "text": "Figure 3",
375
+ "ref_id": null
376
+ }
377
+ ],
378
+ "eq_spans": [],
379
+ "section": "Dialogue Management Strategies",
380
+ "sec_num": "3"
381
+ },
382
+ {
383
+ "text": "Mercury first became available for data collection in October '99. Prospective users must first enroll by filling in a simple form on a Web page, where they enter, minimally, their name, email address, and password (a date). Once the user's name has been added to the recognizer and language understanding components, they receive an email message informing them of the telephone number. Users are encouraged to attempt to book real trips. From late October to early December, we collected 49 dialogues involving real flight bookings, and these form the basis for our evaluation studies. Overall, 73% of the bookings were successful (36/49). We used a very strict requirement for success. For example, in one dialogue considered unsuccessful the system did not know the airline that the user requested, and so the user compromised and booked the trip on a different airline. Three of the failures are due to the user simply hanging up in frustration, and three others are due to the system hanging up due to a misrecognized \"good-bye.\" Two failures were due to user inattentiveness. The user believed that the trip was correctly booked, but a misrecognition produced a different itinerary than the one they were specifying. Finally, four of the failures involved completely correct bookings, but the system was unable to follow through with the pricing and/or emailing of the itinerary. Some of these involved inadequacies in the dialogue module, once the user did not provide the expected response to a system request. There was a striking difference in recognition error between the successful and the incomplete bookings (11.5% vs 26% WER). A heavy foreign accent accounted for some of the recognition problems.",
384
+ "cite_spans": [],
385
+ "ref_spans": [],
386
+ "eq_spans": [],
387
+ "section": "Data Collection and Evaluation",
388
+ "sec_num": "4"
389
+ },
390
+ {
391
+ "text": "Some easily measurable statistics for the successes are given in Figure 4 . These numbers were computed on the \"core dialogue,\" defined as the interval subsequent to logging on and up until the itinerary is fully specified, but has not yet been priced. On average users required less than four minutes to complete the core dialogue, although three outliers took more than seven minutes.",
392
+ "cite_spans": [],
393
+ "ref_spans": [
394
+ {
395
+ "start": 65,
396
+ "end": 73,
397
+ "text": "Figure 4",
398
+ "ref_id": null
399
+ }
400
+ ],
401
+ "eq_spans": [],
402
+ "section": "Data Collection and Evaluation",
403
+ "sec_num": "4"
404
+ },
405
+ {
406
+ "text": "We have long been interested in seeking evaluation metrics that are automatic and that can apply on a per-utterance basis but evaluate a significant portion of the system beyond the recognizer. In [Polifroni et al. (1998) ] we proposed an E-form evaluation metric, which compares an E-form obtained by parsing the original orthography against that obtained by parsing the selected recognizer hypothesis. We believe this is a good metric for evaluating how well the recognizer and parser are doing, but it says nothing about the discourse and dialogue components.",
407
+ "cite_spans": [
408
+ {
409
+ "start": 197,
410
+ "end": 221,
411
+ "text": "[Polifroni et al. (1998)",
412
+ "ref_id": "BIBREF6"
413
+ }
414
+ ],
415
+ "ref_spans": [],
416
+ "eq_spans": [],
417
+ "section": "Log File Evaluation",
418
+ "sec_num": "4.1"
419
+ },
420
+ {
421
+ "text": "We recently devised two new evaluation metrics, which we believe are useful measures for assessing the performance of the recognizer, parser, discourse, and dialogue components, collectively. To compute the measures, we must reprocess the log file after the orthographic transcription has been provided for the user queries. Basically, both the recognizer hypothesis and the original orthography are run through the system utterance by utterance, with the discourse and dialogue states being maintained exclusively by the recognizer branch. For both branches, the Eform that is produced after the turn manager has finished processing the query is sent to a special evaluation server. This server maintains a running record of all the attributes that appear in the orthography path, comparing them against their counterparts in the recognizer path.",
422
+ "cite_spans": [],
423
+ "ref_spans": [],
424
+ "eq_spans": [],
425
+ "section": "Log File Evaluation",
426
+ "sec_num": "4.1"
427
+ },
428
+ {
429
+ "text": "The two parameters that emerge from comparing these E-forms we refer to as information bit rate (IBR) and user frustration (UF). IBR measures the average number of new attributes introduced per user query. A subsequent query that reiterates the same attribute is excluded since it did not introduce any new information. Thus if the user said, \"I want to go from Seattle to Chicago on December 27,\" and the system misrecognized the date as \"December 22,\" then a subsequent query, \"I said December 27\" would be registered as contributing a 0 count to the IBR parameter. The UF. parameter tabulates how many turns it took, on average, for an intended attribute to be transmitted successfully to the system. Thus, in the example above, the source and destination each took one turn, but the date took two.",
430
+ "cite_spans": [],
431
+ "ref_spans": [],
432
+ "eq_spans": [],
433
+ "section": "Log File Evaluation",
434
+ "sec_num": "4.1"
435
+ },
436
+ {
437
+ "text": "There are some difficulties with rerunning the dialogue at a later time. Both the system and the database are in a state of flux, and so the dialogue can become incoherent. For example, in one case the user said, \"Book it,\" in response to a single flight being proposed, but due to changes in the flight schedule, the system proposed three flights in the rerun and the dialogue became incoherent from that point on. To help alleviate incoherence, we provide a mechanism to artificially offset the date, at least to assure that the dates they have selected haven't already pasaed. !_n spite of the above problems, we feel that these'evaluation metrics show considerable promise.",
438
+ "cite_spans": [],
439
+ "ref_spans": [],
440
+ "eq_spans": [],
441
+ "section": "Log File Evaluation",
442
+ "sec_num": "4.1"
443
+ },
444
+ {
445
+ "text": "In a pilot study, we processed a subset of our data through this evaluation configuration. We identified a set of 17 attributes that could be monitored. Five percent of the utterances had orthographies that failed to parse. These are unevaluable without human reannotation, and are hence eliminated from the pool in the discussion below, although they clearly are likely to be very problematic. Figure 5 summarizes the results for information bit rate for the remainder of the utterances. A surprisingly large percentage of the utterances introduce no new concepts. Some, but not all, of these are similar to the date misrecognition example given above. Others are cases where the user was confused about the state of the system's knowledge, and decided to simply repeat all the preceding constraints just to make sure. Some are also misfirings of the endpoint detector producing content-free utterances such as \"okay.\" In other cases the user intended an action, but the system's understanding mechanism was not sophisticated enough. For example \"That's good\" meaning \"book it.\" We were pleased with the percentage of sentences that contained more than one attribute. We believe that a typical directed dialogue would have far fewer utterances with more than one attribute.",
446
+ "cite_spans": [],
447
+ "ref_spans": [
448
+ {
449
+ "start": 395,
450
+ "end": 403,
451
+ "text": "Figure 5",
452
+ "ref_id": "FIGREF2"
453
+ }
454
+ ],
455
+ "eq_spans": [],
456
+ "section": "Log File Evaluation",
457
+ "sec_num": "4.1"
458
+ },
459
+ {
460
+ "text": "Excluding the 5% of utterances whose orthography failed to parse, our system achieved a 1.05% user frustration rate. This means that, on average, one out of every 20 attributes had to be entered twice. We were very pleased with this number.",
461
+ "cite_spans": [],
462
+ "ref_spans": [],
463
+ "eq_spans": [],
464
+ "section": "Log File Evaluation",
465
+ "sec_num": "4.1"
466
+ },
467
+ {
468
+ "text": "This paper described our strategy for dialogue management in the Mercury system. Overall, we have found it to be extremely powerful. While the Mercury system is still under active development, we feel that the anticipated extensions of capability will require a straightforward process of expansions in the dialogue control table; i.e., the\"system has not attained an unmanageable degree of complexity in organization. We believe that this is a direct consequence of the use of the dialogue control table.",
469
+ "cite_spans": [],
470
+ "ref_spans": [],
471
+ "eq_spans": [],
472
+ "section": "Summary and Future Work",
473
+ "sec_num": "5"
474
+ },
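The dialogue control table referred to above can be pictured as an ordered list of condition/operation rows matched against the current dialogue frame. The sketch below is purely illustrative (the rule syntax is an assumption, not Mercury's); the two example rows paraphrase entries from the table excerpt reproduced later in this file.

    # Hypothetical sketch of a dialogue control table: the first row whose condition
    # matches the current frame names the turn-manager operation to run.
    RULES = [
        (lambda f: "flight_number" in f and "airline" not in f, "need_airline"),
        (lambda f: f.get("clause") == "book" and f.get("num_found") == 1, "add_flight_to_itinerary"),
    ]

    def dispatch(frame):
        for condition, operation in RULES:
            if condition(frame):
                return operation
        return None

    print(dispatch({"flight_number": 302}))  # -> "need_airline"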
475
+ {
476
+ "text": "Mercury's turn manager deals with some issues, such as the interpretation of dates and times, that are of relevance to many other potential domains. We envision that at some future time we will have available a large library of operations of general utility that can be inserted into a new system to greatly accelerate the time required to bring the new domain up to full operation.",
477
+ "cite_spans": [],
478
+ "ref_spans": [],
479
+ "eq_spans": [],
480
+ "section": "Summary and Future Work",
481
+ "sec_num": "5"
482
+ }
483
+ ],
484
+ "back_matter": [],
485
+ "bib_entries": {
486
+ "BIBREF0": {
487
+ "ref_id": "b0",
488
+ "title": "Organization, Communication, and Control in the GALAXY-II Conversational System",
489
+ "authors": [
490
+ {
491
+ "first": "S",
492
+ "middle": [],
493
+ "last": "Seneff",
494
+ "suffix": ""
495
+ },
496
+ {
497
+ "first": "R",
498
+ "middle": [],
499
+ "last": "Lan",
500
+ "suffix": ""
501
+ },
502
+ {
503
+ "first": "J",
504
+ "middle": [],
505
+ "last": "Polifroni",
506
+ "suffix": ""
507
+ }
508
+ ],
509
+ "year": 1999,
510
+ "venue": "Proc. Eurospeeeh '99",
511
+ "volume": "",
512
+ "issue": "",
513
+ "pages": "1271--1274",
514
+ "other_ids": {},
515
+ "num": null,
516
+ "urls": [],
517
+ "raw_text": "Seneff, S., R. Lan, and J. Polifroni. (1999) \"Or- ganization, Communication, and Control in the GALAXY-II Conversational System,\" Proc. Eu- rospeeeh '99, Budapest, Hungary, pp. 1271-1274.",
518
+ "links": null
519
+ },
520
+ "BIBREF1": {
521
+ "ref_id": "b1",
522
+ "title": "A Probabilistic Framework for Feature-based Speech Recognition",
523
+ "authors": [
524
+ {
525
+ "first": "J",
526
+ "middle": [],
527
+ "last": "Glass",
528
+ "suffix": ""
529
+ },
530
+ {
531
+ "first": "J",
532
+ "middle": [],
533
+ "last": "Chang~",
534
+ "suffix": ""
535
+ },
536
+ {
537
+ "first": "M",
538
+ "middle": [],
539
+ "last": "Mccandless",
540
+ "suffix": ""
541
+ }
542
+ ],
543
+ "year": 1996,
544
+ "venue": "Proc. ICSLP 'Y6",
545
+ "volume": "",
546
+ "issue": "",
547
+ "pages": "2277--2280",
548
+ "other_ids": {},
549
+ "num": null,
550
+ "urls": [],
551
+ "raw_text": "Glass, J., J. Chang~ and M. McCandless. (1996) \"A Probabilistic Framework for Feature-based Speech Recognition,\" Proc. ICSLP 'Y6, Philadelphia, PA, pp. 2277-2280.",
552
+ "links": null
553
+ },
554
+ "BIBREF2": {
555
+ "ref_id": "b2",
556
+ "title": "TINA: a Natural Language System for Spoken Language Applications",
557
+ "authors": [
558
+ {
559
+ "first": "S",
560
+ "middle": [],
561
+ "last": "Seneff",
562
+ "suffix": ""
563
+ }
564
+ ],
565
+ "year": 1992,
566
+ "venue": "Computational Linguistics",
567
+ "volume": "18",
568
+ "issue": "1",
569
+ "pages": "61--86",
570
+ "other_ids": {},
571
+ "num": null,
572
+ "urls": [],
573
+ "raw_text": "Seneff, S. (1992) \"TINA: a Natural Language Sys- tem for Spoken Language Applications,\" Compu- tational Linguistics, 18/1, pp. 61-86.",
574
+ "links": null
575
+ },
576
+ "BIBREF3": {
577
+ "ref_id": "b3",
578
+ "title": "Multimodal Discourse Modelling",
579
+ "authors": [
580
+ {
581
+ "first": "S",
582
+ "middle": [],
583
+ "last": "Seneff",
584
+ "suffix": ""
585
+ },
586
+ {
587
+ "first": "D",
588
+ "middle": [],
589
+ "last": "Goddeau",
590
+ "suffix": ""
591
+ },
592
+ {
593
+ "first": "C",
594
+ "middle": [],
595
+ "last": "Pao",
596
+ "suffix": ""
597
+ },
598
+ {
599
+ "first": "J",
600
+ "middle": [],
601
+ "last": "Polifroni",
602
+ "suffix": ""
603
+ }
604
+ ],
605
+ "year": 1996,
606
+ "venue": "",
607
+ "volume": "",
608
+ "issue": "",
609
+ "pages": "",
610
+ "other_ids": {},
611
+ "num": null,
612
+ "urls": [],
613
+ "raw_text": "Seneff, S, D. Goddeau, C. Pao, and J. Polifroni. (1996) \"Multimodal Discourse Modelling-in a",
614
+ "links": null
615
+ },
616
+ "BIBREF4": {
617
+ "ref_id": "b4",
618
+ "title": "Proceedings, International Conference on Spoken Language Processing '96",
619
+ "authors": [
620
+ {
621
+ "first": "",
622
+ "middle": [],
623
+ "last": "Multi-User Multi-Domain",
624
+ "suffix": ""
625
+ },
626
+ {
627
+ "first": "",
628
+ "middle": [],
629
+ "last": "Environment",
630
+ "suffix": ""
631
+ }
632
+ ],
633
+ "year": null,
634
+ "venue": "",
635
+ "volume": "",
636
+ "issue": "",
637
+ "pages": "3--6",
638
+ "other_ids": {},
639
+ "num": null,
640
+ "urls": [],
641
+ "raw_text": "Multi-user Multi-domain Environment,\" Proceed- ings, International Conference on Spoken Lan- guage Processing '96, pp 192-195, Oct. 3-6.",
642
+ "links": null
643
+ },
644
+ "BIBREF5": {
645
+ "ref_id": "b5",
646
+ "title": "Multilingual Language Generation across Multiple Domains",
647
+ "authors": [
648
+ {
649
+ "first": "J",
650
+ "middle": [],
651
+ "last": "Glass",
652
+ "suffix": ""
653
+ },
654
+ {
655
+ "first": "J",
656
+ "middle": [],
657
+ "last": "Polifroni",
658
+ "suffix": ""
659
+ },
660
+ {
661
+ "first": "&",
662
+ "middle": [
663
+ "S"
664
+ ],
665
+ "last": "Seneff",
666
+ "suffix": ""
667
+ }
668
+ ],
669
+ "year": 1994,
670
+ "venue": "Proc. International Conference on Spoken Language Processing",
671
+ "volume": "",
672
+ "issue": "",
673
+ "pages": "983--986",
674
+ "other_ids": {},
675
+ "num": null,
676
+ "urls": [],
677
+ "raw_text": "Glass, J., J. Polifroni, & S. Seneff. (1994). \"Multi- lingual Language Generation across Multiple Do- mains.\" Proc. International Conference on Spoken Language Processing (pp. 983-986). Yokohama.",
678
+ "links": null
679
+ },
680
+ "BIBREF6": {
681
+ "ref_id": "b6",
682
+ "title": "Evaluation Methodology for a Telephonebased Conversational System",
683
+ "authors": [
684
+ {
685
+ "first": "J",
686
+ "middle": [],
687
+ "last": "Polifroni",
688
+ "suffix": ""
689
+ },
690
+ {
691
+ "first": "S",
692
+ "middle": [],
693
+ "last": "Seneff",
694
+ "suffix": ""
695
+ },
696
+ {
697
+ "first": "J",
698
+ "middle": [],
699
+ "last": "Glass",
700
+ "suffix": ""
701
+ },
702
+ {
703
+ "first": "T",
704
+ "middle": [
705
+ "J"
706
+ ],
707
+ "last": "Hazen",
708
+ "suffix": ""
709
+ }
710
+ ],
711
+ "year": 1998,
712
+ "venue": "Proc. LREC '98",
713
+ "volume": "",
714
+ "issue": "",
715
+ "pages": "43--50",
716
+ "other_ids": {},
717
+ "num": null,
718
+ "urls": [],
719
+ "raw_text": "Polifroni, J., S. Seneff, J. Glass, and T.J. Hazen. (1998) \"Evaluation Methodology for a Telephone- based Conversational System.\" Proc. LREC '98, pp. 43-50, Granada, Spain.",
720
+ "links": null
721
+ }
722
+ },
723
+ "ref_entries": {
724
+ "FIGREF1": {
725
+ "text": "A dialogue control table corresponding to the state diagram inFigure 2.",
726
+ "uris": null,
727
+ "type_str": "figure",
728
+ "num": null
729
+ },
730
+ "FIGREF2": {
731
+ "text": "Distribution of evaluable user utterances in terms of number of new attributes introduced with each dialogue turn. IBR = Information Bit Rate.",
732
+ "uris": null,
733
+ "type_str": "figure",
734
+ "num": null
735
+ },
736
+ "TABREF0": {
737
+ "content": "<table><tr><td>clause request_keypad</td><td>keypad..input</td></tr><tr><td>week[ day [ tel_date</td><td>resolve_relative_date</td></tr><tr><td>flight_number &amp; !airline</td><td>need_airline</td></tr><tr><td>clause price &amp; topic itinerary &amp; truth_value deny</td><td>dont_price_itinerary</td></tr><tr><td>clause book &amp; num_.found =1</td><td>) add..flight_to.Jtinerary</td></tr><tr><td>num_nonstops &gt; 1 &amp; num_nonstops &lt; 4 &amp; arrival_time</td><td>speak_three_arrivals</td></tr></table>",
738
+ "html": null,
739
+ "type_str": "table",
740
+ "num": null,
741
+ "text": ",\" meaning that this variable is not yet filled. Following the arrow is the name of the operation to be"
742
+ }
743
+ }
744
+ }
745
+ }
Full_text_JSON/prefixW/json/W00/W00-0304.json ADDED
@@ -0,0 +1,611 @@
1
+ {
2
+ "paper_id": "W00-0304",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:45.915181Z"
6
+ },
7
+ "title": "NJFun: A Reinforcement Learning Spoken Dialogue System",
8
+ "authors": [
9
+ {
10
+ "first": "Diane",
11
+ "middle": [],
12
+ "last": "Litman",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "AT&T Labs --Research",
17
+ "location": {
18
+ "addrLine": "180 Park Avenue Florham Park",
19
+ "postCode": "07932",
20
+ "region": "NJ",
21
+ "country": "USA"
22
+ }
23
+ },
24
+ "email": ""
25
+ },
26
+ {
27
+ "first": "Satinder",
28
+ "middle": [],
29
+ "last": "Singh",
30
+ "suffix": "",
31
+ "affiliation": {
32
+ "laboratory": "",
33
+ "institution": "AT&T Labs --Research",
34
+ "location": {
35
+ "addrLine": "180 Park Avenue Florham Park",
36
+ "postCode": "07932",
37
+ "region": "NJ",
38
+ "country": "USA"
39
+ }
40
+ },
41
+ "email": ""
42
+ },
43
+ {
44
+ "first": "Michael",
45
+ "middle": [],
46
+ "last": "Kearns",
47
+ "suffix": "",
48
+ "affiliation": {
49
+ "laboratory": "",
50
+ "institution": "AT&T Labs --Research",
51
+ "location": {
52
+ "addrLine": "180 Park Avenue Florham Park",
53
+ "postCode": "07932",
54
+ "region": "NJ",
55
+ "country": "USA"
56
+ }
57
+ },
58
+ "email": "mkearns@research.att.com"
59
+ },
60
+ {
61
+ "first": "Marilyn",
62
+ "middle": [],
63
+ "last": "Walker",
64
+ "suffix": "",
65
+ "affiliation": {
66
+ "laboratory": "",
67
+ "institution": "AT&T Labs --Research",
68
+ "location": {
69
+ "addrLine": "180 Park Avenue Florham Park",
70
+ "postCode": "07932",
71
+ "region": "NJ",
72
+ "country": "USA"
73
+ }
74
+ },
75
+ "email": "walker@research.att.com"
76
+ }
77
+ ],
78
+ "year": "",
79
+ "venue": null,
80
+ "identifiers": {},
81
+ "abstract": "This paper describes NJFun, a real-time spoken dialogue systemthat-provides users with information about things to d~ in New Jersey. NJFun automatically optimizes its dialogue strategy over time, by using a methodology for applying reinforcement learning to a working dialogue system with human users.",
82
+ "pdf_parse": {
83
+ "paper_id": "W00-0304",
84
+ "_pdf_hash": "",
85
+ "abstract": [
86
+ {
87
+ "text": "This paper describes NJFun, a real-time spoken dialogue systemthat-provides users with information about things to d~ in New Jersey. NJFun automatically optimizes its dialogue strategy over time, by using a methodology for applying reinforcement learning to a working dialogue system with human users.",
88
+ "cite_spans": [],
89
+ "ref_spans": [],
90
+ "eq_spans": [],
91
+ "section": "Abstract",
92
+ "sec_num": null
93
+ }
94
+ ],
95
+ "body_text": [
96
+ {
97
+ "text": "Using the formalism of Markov decision processes (MDPs) and the algorithms of reinforcement learning (RL) has become a standard approach to many AI problems that involve an agent learning to optimize reward by interaction with its environment (Sutton and Barto, 1998) . We have adapted the methods of RL to the problem of automatically learning a good dialogue strategy in a fielded spoken dialogue system. Here is a summary of our proposed methodology for developing and evaluating spoken dialogue systems using R.L:",
98
+ "cite_spans": [
99
+ {
100
+ "start": 243,
101
+ "end": 267,
102
+ "text": "(Sutton and Barto, 1998)",
103
+ "ref_id": "BIBREF3"
104
+ }
105
+ ],
106
+ "ref_spans": [],
107
+ "eq_spans": [],
108
+ "section": "Introduction",
109
+ "sec_num": "1"
110
+ },
111
+ {
112
+ "text": "\u2022 Choose an appropriate reward measure for dialogues, and an appropriate representation for dialogue states.",
113
+ "cite_spans": [],
114
+ "ref_spans": [],
115
+ "eq_spans": [],
116
+ "section": "Introduction",
117
+ "sec_num": "1"
118
+ },
119
+ {
120
+ "text": "\u2022 Build an initial state-based training system that creates an exploratory data set. Despite being exploratory, this system should provide the desired basic functionality.",
121
+ "cite_spans": [],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "Introduction",
125
+ "sec_num": "1"
126
+ },
127
+ {
128
+ "text": "\u2022 Use these training dialogues to build an empirical MDP model on the state space.",
129
+ "cite_spans": [],
130
+ "ref_spans": [],
131
+ "eq_spans": [],
132
+ "section": "Introduction",
133
+ "sec_num": "1"
134
+ },
135
+ {
136
+ "text": "\u2022 Compute the optimal dialogue policy according to this MDF, using RL.",
137
+ "cite_spans": [],
138
+ "ref_spans": [],
139
+ "eq_spans": [],
140
+ "section": "Introduction",
141
+ "sec_num": "1"
142
+ },
143
+ {
144
+ "text": "\u2022 Reimplement the system using the learned dialogue policy.",
145
+ "cite_spans": [],
146
+ "ref_spans": [],
147
+ "eq_spans": [],
148
+ "section": "Introduction",
149
+ "sec_num": "1"
150
+ },
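The third and fourth steps above (build an empirical MDP from the exploratory dialogues, then compute the optimal policy) can be sketched as follows. This is not the authors' code; the data layout is an assumption in which each logged dialogue is a list of (state, action, reward, next_state) tuples with hashable states.

    from collections import defaultdict

    def estimate_mdp(dialogues):
        """Empirical transition probabilities P[(s,a)][s'] and mean rewards R[(s,a)]."""
        counts = defaultdict(lambda: defaultdict(int))
        reward_sum = defaultdict(float)
        totals = defaultdict(int)
        for dialogue in dialogues:
            for s, a, r, s2 in dialogue:
                counts[(s, a)][s2] += 1
                reward_sum[(s, a)] += r
                totals[(s, a)] += 1
        P = {sa: {s2: c / totals[sa] for s2, c in nxt.items()} for sa, nxt in counts.items()}
        R = {sa: reward_sum[sa] / totals[sa] for sa in totals}
        return P, R

    def solve(P, R, gamma=1.0, sweeps=200):
        """Value iteration on the estimated model; returns the greedy action per visited state."""
        V = defaultdict(float)  # terminal or unseen states keep value 0
        q = lambda sa: R[sa] + gamma * sum(p * V[s2] for s2, p in P[sa].items())
        states = {sa[0] for sa in P}
        for _ in range(sweeps):
            for s in states:
                V[s] = max(q(sa) for sa in P if sa[0] == s)
        return {s: max((sa for sa in P if sa[0] == s), key=q)[1] for s in states}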
151
+ {
152
+ "text": "In this demonstration session paper, we briefly describe our system, present some sample dialogues, and summarize our main contributions and limitations. Full details of our work (e.g. our reinforcement learning methodology, analysis establishing the veracity of the MDP we learn, a description of an experimental evaluation of NJFun, analysis of our learned dialogue strategy) can be found in two forthcoming technical papers .",
153
+ "cite_spans": [],
154
+ "ref_spans": [],
155
+ "eq_spans": [],
156
+ "section": "Introduction",
157
+ "sec_num": "1"
158
+ },
159
+ {
160
+ "text": "The NJFun System NJFun is a reM-time spoken dialogue system that provides users with information about things to do in New Jersey. 1 An example dialogue with NJFun is shown in Figure 1 . NJFun is built using an internal platform for spoken dialogue systems. NJFun uses a speech recognizer with stochastic language models trained from example user utterances, and a TTS system based on concatenative diphone synthesis. Its database is populated from the nj. online webpage to contain information about activities. NJFun indexes this database using three attributes: activity type, location, and time of day.",
161
+ "cite_spans": [],
162
+ "ref_spans": [
163
+ {
164
+ "start": 176,
165
+ "end": 184,
166
+ "text": "Figure 1",
167
+ "ref_id": null
168
+ }
169
+ ],
170
+ "eq_spans": [],
171
+ "section": "2",
172
+ "sec_num": null
173
+ },
174
+ {
175
+ "text": "Informally, the NJFun dialogue manager sequentially queries the user regarding the activity, location and time attributes, respectively. NJFun first asks the user for the current attribute (and possibly the other attributes, depending on the initiative). If the current attribute's value is not obtained, NJFun asks for the attribute (and possibly the later attributes) again. If NJFun still does not obtain a value, N J-Fun moves on to the next attribute(s). Whenever NJFun successfully obtains a value, it can confirm the value, or move on and attempt to obtain the next attribute(s)? When NJFun has finished asking about the attributes, it queries the database (using a wildcard for each unobtained attribute value).",
176
+ "cite_spans": [],
177
+ "ref_spans": [],
178
+ "eq_spans": [],
179
+ "section": "2",
180
+ "sec_num": null
181
+ },
182
+ {
183
+ "text": "We use reinforcement learning (RL) to optimize dialogue strategy, lq.L requires that all potential actions for each state be specified. Note that at some states it is easy for a human to make the correct action choice. We made obvious dialogue strategy choices in advance, and used learning only to optimize the difficult choices. In NJFun, we restricted the action choices to 1) the type of initiative to use 2Note that it is possible for users to specify multiple attributes, in any order, in a single utterance. However, NJFun will always process multiple attributes using its predefined sequential ordering. when asking or reasking for an attribute, and 2) whether to confirm an attribute value once obtained. The optimal actions may vary with dialogue state, and are subject to active debate in the literature.",
184
+ "cite_spans": [],
185
+ "ref_spans": [],
186
+ "eq_spans": [],
187
+ "section": "2",
188
+ "sec_num": null
189
+ },
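To make the restricted choice set concrete, a hypothetical sketch follows: each state with a genuine choice maps to its two candidate actions, the exploratory training system samples between them at random, and the reimplemented test system consults the learned policy instead. The two state rows shown are taken from the exploratory-strategy table quoted further down; the code itself is not NJFun's.

    import random

    # (greet, attr, conf, val, times, gram, history) -> the two candidate actions;
    # only two illustrative rows of the full table are listed here.
    ACTION_CHOICES = {
        (0, 1, 0, 0, 0, 0, 0): ("GreetS", "GreetU"),
        (1, 1, 0, 0, 1, 0, 0): ("ReAsk1S", "ReAsk1M"),
    }

    def choose_action(state, learned_policy=None):
        if learned_policy is not None and state in learned_policy:
            return learned_policy[state]            # test system: follow the learned strategy
        return random.choice(ACTION_CHOICES[state])  # training system: explore uniformly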
190
+ {
191
+ "text": "The examples in Figure 2 shows that NJFun can ask the user about the first 2 attributes 3 using three types of initiative, based on the combination of the wording of the system prompt (open versus directive), and the type of grammar NJFun uses during ASR (restrictive versus non-restrictive). If NJFun uses an open question with an unrestricted grammar, it is using user initiative (e.g., GreetU). If N J-Fun instead uses a directive prompt with a restricted grammar, the system is using system initiative (e.g.,",
192
+ "cite_spans": [],
193
+ "ref_spans": [
194
+ {
195
+ "start": 16,
196
+ "end": 24,
197
+ "text": "Figure 2",
198
+ "ref_id": "FIGREF1"
199
+ }
200
+ ],
201
+ "eq_spans": [],
202
+ "section": "2",
203
+ "sec_num": null
204
+ },
205
+ {
206
+ "text": "). If NJFun uses a directive question with a non-restrictive grammar, it is using mixed initiative, because it is giving the user an opportunity to take the initiative by supplying extra information (e.g., ReAsklM).",
207
+ "cite_spans": [],
208
+ "ref_spans": [],
209
+ "eq_spans": [],
210
+ "section": "GreetS",
211
+ "sec_num": null
212
+ },
213
+ {
214
+ "text": "NJFun can also vary the strategy used to confirm each attribute. If NJFun asks the user to explicitly verify an attribute, it is using explicit confirmation (e.g., ExpConf2 for the location, exemplified by $2 in Figure 1 ). If NJFun does not generate any confirmation prompt, it is using no confirmation (an action we call NoConf).",
215
+ "cite_spans": [],
216
+ "ref_spans": [
217
+ {
218
+ "start": 212,
219
+ "end": 220,
220
+ "text": "Figure 1",
221
+ "ref_id": null
222
+ }
223
+ ],
224
+ "eq_spans": [],
225
+ "section": "GreetS",
226
+ "sec_num": null
227
+ },
228
+ {
229
+ "text": "Solely for the purposes of controlling its operation (as opposed to the learning, which we discuss in a moment), NJFun internally maintains an operations vector of 14 variables. 2 variables track whether the system has greeted the user, and which attribute the system is currently attempting to obtain. For each of the 3 attributes, 4 variables track whether '~ \"Greet\" is equivalent to asking for the first attribute. N J-Fun always uses system initiative for the third attribute, because at that point the user can only provide the time of day. the system has obtained the attribute's value, the system's confidence in the value (if obtained), the number of times the system has asked the user about the attribute, and the type of ASR grammar most recently used to ask for the attribute.",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "GreetS",
234
+ "sec_num": null
235
+ },
236
+ {
237
+ "text": "The formal state space S maintained by NJFun for the purposes of learning is much simpler than the operations vector, due to data sparsity concerns. The dialogue state space $ contains only 7 variables, which are summarized in Figure 3 , and is easily computed from the operations vector. The \"greet\" variable tracks whether the system has greeted the user or not (no=0, yes=l). \"Attr\" specifies which attribute NJFun is currently attempting to obtain or verify (activity=l, location=2, time=3, done with attributes=4). \"Conf\" represents the confidence that NJFun has after obtaining a value for an attribute. The values 0, 1, and 2 represent low, medium and high ASR confidence. The values 3 and 4 are set when ASR hears \"yes\" or \"no\" after a confirmation question. \"Val\" tracks whether NJFun has obtained a value for the attribute (no=0, yes=l). \"Times\" tracks the number of times that NJFun has asked the user about the attribute. \"Gram\" tracks the type of grammar most recently used to obtain the attribute (0=non-restrictive, l=restrictive). Finally, \"history\" represents whether NJFun had trouble understanding the user in the earlier part of the conversation (bad=0, good=l). We omit the full definition, but as an example, when NJFun is working on the second attribute (location), the history variable is set to 0 if NJFun does not have an activity, has an activity but has no confidence in the value, or needed two queries to obtain the activity.",
238
+ "cite_spans": [],
239
+ "ref_spans": [
240
+ {
241
+ "start": 227,
242
+ "end": 235,
243
+ "text": "Figure 3",
244
+ "ref_id": "FIGREF2"
245
+ }
246
+ ],
247
+ "eq_spans": [],
248
+ "section": "GreetS",
249
+ "sec_num": null
250
+ },
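The following sketch is only meant to make the seven-feature state tangible; the operations-vector field names are assumptions rather than NJFun's actual data structures.

    def learning_state(ops, attr):
        """Collapse the 14-variable operations vector into the 7-feature RL state."""
        a = ops["attributes"][attr]               # bookkeeping for the attribute in focus
        return (
            1 if ops["greeted"] else 0,           # greet
            attr,                                 # attr: 1=activity, 2=location, 3=time, 4=done
            a["confidence"],                      # conf: 0-2 ASR confidence, 3/4 = yes/no reply
            1 if a["value"] is not None else 0,   # val
            min(a["times_asked"], 2),             # times
            a["grammar"],                         # gram: 0 = non-restrictive, 1 = restrictive
            1 if ops["history_good"] else 0,      # history
        )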
251
+ {
252
+ "text": "In order to apply RL with a limited amount of training data, we need to design a small state space I greet attr conf val times gram history [ 0,1 1,2,3,4 0,1,2,3,4 0,1 0,1,2 0, that makes enough critical distinctions to support learning. The use of S yields a state space of size 62. The state space that we utilize here, although minimal, allows us to make initiative decisions based on the success of earlier exchanges, and confirmation decisions based on ASR confidence scores and grammars.",
253
+ "cite_spans": [],
254
+ "ref_spans": [
255
+ {
256
+ "start": 99,
257
+ "end": 189,
258
+ "text": "I greet attr conf val times gram history [ 0,1 1,2,3,4 0,1,2,3,4 0,1 0,1,2 0,",
259
+ "ref_id": null
260
+ }
261
+ ],
262
+ "eq_spans": [],
263
+ "section": "GreetS",
264
+ "sec_num": null
265
+ },
266
+ {
267
+ "text": "In order to learn a good dialogue strategy via RL we have to explore the state action space. The state/action mapping representing NJFun's initial exploratory dialog@ strategy EIC (Exploratory for Initiative and Confirmation) is given in Figure 4 . Only the exploratory portion of the strategy is shown, namely all those states for which NJFun has an action choice. For each such state, we list the two choices of actions available. (The action choices in boldface are the ones eventually identified as optimal by the learning process.) The EIC strategy chooses randomly between these two actions when in the indicated state, in order to maximize exploration and minimize data sparseness when constructing our model. Since there are 42 states with 2 choices each, there is a search space of 242 potential dialogue strategies; the goal of the RL is to identify an apparently optimal strategy from this large search space. Note that due to the randomization of the EIC strategy, the prompts are designed to ensure the coherence of all possible action sequences. Figure 5 illustrates how the dialogue strategy in Figure 4 generates the dialogue in Figure 1 . Each row indicates the state that NJFun is in, the action executed in this state, the corresponding turn in Figure 1 , and the reward received. The initial state represents that NJFun will first attempt to obtain attribute 1. NJFun executes GreetU (although as shown in Figure 4 , Greets is also possible), generating the first utterance in Figure 1 . After the user's response, the next state represents that N J-Fun has now greeted the user and obtained the activity value with high confidence, by using a nonrestrictive grammar. NJFun chooses not to confirm the activity, which causes the state to change but no prompt to be generated. The third state represents that NJFun is now working on the second attribute (location), that it already has this value with high confidence (location was obtained with activity after the user's first utterance), and that the dialogue history is good. This time NJFun chooses to confirm the attribute with the second NJFun utterance, and the state changes again. The processing of time is similar to that of location, which leads NJFun to the final state, where it performs the action \"Tell\" (cor- t g 0 1 0 0 0 0 0 1 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 0 1 0 1 0 1 1 1 1 0 0 0 1 1 1 1 0 1 0 1 1 2 I 0 0 0 1 1 2 1 0 1 0 1 1 4 0 0 1 3 0 1 0 0 1 3 0 1 0 0 1 3 0 1 0 1 1 3 0 1 0 1 1 3 1 1 0 0 1 3 1 1 0 0 1 3 1 1 0 1 i 3 1 I 0 i 1 3 2 1 0 0 1 3 2 1 0 0 1 3 2 1 0 1 i 3 2 1 0 ",
268
+ "cite_spans": [],
269
+ "ref_spans": [
270
+ {
271
+ "start": 238,
272
+ "end": 246,
273
+ "text": "Figure 4",
274
+ "ref_id": null
275
+ },
276
+ {
277
+ "start": 1060,
278
+ "end": 1068,
279
+ "text": "Figure 5",
280
+ "ref_id": null
281
+ },
282
+ {
283
+ "start": 1110,
284
+ "end": 1118,
285
+ "text": "Figure 4",
286
+ "ref_id": null
287
+ },
288
+ {
289
+ "start": 1145,
290
+ "end": 1153,
291
+ "text": "Figure 1",
292
+ "ref_id": null
293
+ },
294
+ {
295
+ "start": 1264,
296
+ "end": 1272,
297
+ "text": "Figure 1",
298
+ "ref_id": null
299
+ },
300
+ {
301
+ "start": 1426,
302
+ "end": 1434,
303
+ "text": "Figure 4",
304
+ "ref_id": null
305
+ },
306
+ {
307
+ "start": 1497,
308
+ "end": 1505,
309
+ "text": "Figure 1",
310
+ "ref_id": null
311
+ },
312
+ {
313
+ "start": 2293,
314
+ "end": 2480,
315
+ "text": "t g 0 1 0 0 0 0 0 1 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 0 1 0 1 0 1 1 1 1 0 0 0 1 1 1 1 0 1 0 1 1 2 I 0 0 0 1 1 2 1 0 1 0 1 1 4 0 0",
316
+ "ref_id": null
317
+ },
318
+ {
319
+ "start": 2481,
320
+ "end": 2694,
321
+ "text": "1 3 0 1 0 0 1 3 0 1 0 0 1 3 0 1 0 1 1 3 0 1 0 1 1 3 1 1 0 0 1 3 1 1 0 0 1 3 1 1 0 1 i 3 1 I 0 i 1 3 2 1 0 0 1 3 2 1 0 0 1 3 2 1 0 1 i 3 2 1 0",
322
+ "ref_id": null
323
+ }
324
+ ],
325
+ "eq_spans": [],
326
+ "section": "GreetS",
327
+ "sec_num": null
328
+ },
329
+ {
330
+ "text": "State C V",
331
+ "cite_spans": [],
332
+ "ref_spans": [],
333
+ "eq_spans": [],
334
+ "section": "GreetS",
335
+ "sec_num": null
336
+ },
337
+ {
338
+ "text": "I Action Choices GreetS,GreetU ReAsk 1 S,ReAsk 1 M NoConf, ExpConfl NoConf, ExpConfl NoConf,ExpConfl NoConf, ExpConfl NoConf, ExpConfl NoConf, ExpConfl ReAsklS,ReAsklM 0 ReAsklS,ReAsklM 0 -Ask2S,Ask2U 1 Ask2S,Ask2U 0 ReAsk2S,ReAsk2M 1 ReAsk2S,ReAsk2 M 0 NoConf, ExpConf2 1 NoConf, ExpConf2 0 NoConf, ExpConf2 1 NoConf, ExpConf2 0 NoConf, ExpConf2 1 NoConf,ExpConf2 0 NoConf, ExpConf2 1 NoConf, ExpConf2 0 NoConf, ExpConf2 1 NoConf, ExpConf2 0 NoConf, ExpConf2 1 NoConf, ExpConf2 0 ReAsk2S,ReAsk2M 1 ReAsk2S,ReAsk2M 0 ReAsk2S,R.eAsk2M 1 ReAsk2S,ReAskSM 0 NoConf, ExpConf3 1 NoConf, ExpConf3 0 NoConf, ExpConf3 I NoConf, ExpConf3 0 NoConf, ExpConf3 1 NoConf, ExpConf3 0 NoConf, ExpConf3 1 NoConf, ExpConf3 0 NoConf,ExpCon:f3 1 NoConf,ExpConf3 0 NoConf, ExpConf3 I NoConf, ExpConf3",
339
+ "cite_spans": [],
340
+ "ref_spans": [],
341
+ "eq_spans": [],
342
+ "section": "GreetS",
343
+ "sec_num": null
344
+ },
345
+ {
346
+ "text": "Figure 4: Exploratory portion of EIC strategy. Turn Reward gaevtgh 0100000 GreetU S1 0 I 121000 NoConf 0 1 2 2 1 0 0 1 ExpConf2 $2 0 1 3 2 1 0 0 1 ExpConf3 $3 0 1 4 0 0 0 0 0 Tell S4 1 Figure 5 : Generating the dialogue in Figure 1. responding to querying the database, presenting the results to the user, and asking the user to provide a reward). Note that in NJFun, the reward is always No. $11:",
347
+ "cite_spans": [],
348
+ "ref_spans": [
349
+ {
350
+ "start": 47,
351
+ "end": 77,
352
+ "text": "Turn Reward gaevtgh 0100000",
353
+ "ref_id": null
354
+ },
355
+ {
356
+ "start": 188,
357
+ "end": 196,
358
+ "text": "Figure 5",
359
+ "ref_id": null
360
+ },
361
+ {
362
+ "start": 226,
363
+ "end": 235,
364
+ "text": "Figure 1.",
365
+ "ref_id": null
366
+ }
367
+ ],
368
+ "eq_spans": [],
369
+ "section": "GreetS",
370
+ "sec_num": null
371
+ },
372
+ {
373
+ "text": "Thank~ou for using the system. Please give me feedback by saying 'good', 'so-so', or 'bad'. UII:",
374
+ "cite_spans": [],
375
+ "ref_spans": [],
376
+ "eq_spans": [],
377
+ "section": "State Action",
378
+ "sec_num": null
379
+ },
380
+ {
381
+ "text": "Bad'. _. is the same in both dialogues (\"1 1 2 1 0 0 0\"), the activity is not confirmed in the first dialogue.",
382
+ "cite_spans": [],
383
+ "ref_spans": [],
384
+ "eq_spans": [],
385
+ "section": "State Action",
386
+ "sec_num": null
387
+ },
388
+ {
389
+ "text": "The main contribution of this work is that we have developed and empirically validated a practical methodology for using RL to build a real dialogue system that optimizes its behavior from dialogue data. Unlike traditional approaches to learning dialogue strategy from data, which are limited to searching a handful of policies, our RL approach is able to search many tens of thousands of dialogue strategies. In particular, the traditional approach is to pick a handful of strategies that experts intuitively feel are good, implement each policy as a separate system, collect data from representative human users for each system, and then use standard statistical tests on that data to pick the best system, e.g. (Danieli and Gerbino, 1995) . In contrast, our use of RL allowed us to explore 242 strategies that were left in our search space after we excluded strategies that were clearly suboptimal. An empirical validation of our approach is detailed in two forthcoming technical papers . We obtained 311 dialogues with the exploratory (i.e., training) version of NJFun, constructed an MDP from this training data, used RL to compute the optimal dialogue strategy in this MDP, reimplemented NJFun such that it used this learned dialogue strategy, and obtained 124 more dialogues. Our main result was that task completion improved from 52% to 64% from training to test data. Furthermore, analysis of our MDP showed that the learned strategy was not only better than EIC, but also better than other fixed choices proposed in the literature .",
390
+ "cite_spans": [
391
+ {
392
+ "start": 714,
393
+ "end": 741,
394
+ "text": "(Danieli and Gerbino, 1995)",
395
+ "ref_id": "BIBREF0"
396
+ }
397
+ ],
398
+ "ref_spans": [],
399
+ "eq_spans": [],
400
+ "section": "Contributions",
401
+ "sec_num": "4"
402
+ },
403
+ {
404
+ "text": "The main limitation of this effort to automate the design of a good dialogue strategy is that our current framework has nothing to say about how to choose the reward measure, or how to best represent dialogue state. In NJFun we carefully but manually designed the state space of the dialogue. In the future, we hope to develop a learning methodology to automate the choice of state space for dialogue systems. With respect to the reward function, our empirical evaluation investigated the impact of using a number of reward measures (e.g., user feedback such as U4 in Figure 1 , task completion rate, ASR accuracy), and found that some rewards worked better than others. We would like to better understand these differences among the reward measures, investigate the use of a learned reward function, and explore the use of non-terminal rewards.",
405
+ "cite_spans": [],
406
+ "ref_spans": [
407
+ {
408
+ "start": 568,
409
+ "end": 576,
410
+ "text": "Figure 1",
411
+ "ref_id": null
412
+ }
413
+ ],
414
+ "eq_spans": [],
415
+ "section": "Limitations",
416
+ "sec_num": "5"
417
+ },
418
+ {
419
+ "text": "except at the terminal state, as shown in the last column ofFigure 5.",
420
+ "cite_spans": [],
421
+ "ref_spans": [],
422
+ "eq_spans": [],
423
+ "section": "",
424
+ "sec_num": null
425
+ }
426
+ ],
427
+ "back_matter": [],
428
+ "bib_entries": {
429
+ "BIBREF0": {
430
+ "ref_id": "b0",
431
+ "title": "Metrics for evaluating dialogue strategies in a spoken language system",
432
+ "authors": [
433
+ {
434
+ "first": "M",
435
+ "middle": [],
436
+ "last": "Danieli",
437
+ "suffix": ""
438
+ },
439
+ {
440
+ "first": "E",
441
+ "middle": [],
442
+ "last": "Gerbino",
443
+ "suffix": ""
444
+ }
445
+ ],
446
+ "year": 1995,
447
+ "venue": "Proceedings of the 1995 AAA1 Spring Symposium on Empirical Methods in Discourse Interpretation and Generation",
448
+ "volume": "",
449
+ "issue": "",
450
+ "pages": "34--39",
451
+ "other_ids": {},
452
+ "num": null,
453
+ "urls": [],
454
+ "raw_text": "M. Danieli and E. Gerbino. 1995. Metrics for eval- uating dialogue strategies in a spoken language system. In Proceedings of the 1995 AAA1 Spring Symposium on Empirical Methods in Discourse Interpretation and Generation, pages 34-39.",
455
+ "links": null
456
+ },
457
+ "BIBREF1": {
458
+ "ref_id": "b1",
459
+ "title": "Automatic optimization of dialogue management",
460
+ "authors": [
461
+ {
462
+ "first": "D",
463
+ "middle": [],
464
+ "last": "Litman",
465
+ "suffix": ""
466
+ },
467
+ {
468
+ "first": "M",
469
+ "middle": [],
470
+ "last": "Kearns",
471
+ "suffix": ""
472
+ },
473
+ {
474
+ "first": "S",
475
+ "middle": [],
476
+ "last": "Singh",
477
+ "suffix": ""
478
+ },
479
+ {
480
+ "first": "M",
481
+ "middle": [],
482
+ "last": "Walker",
483
+ "suffix": ""
484
+ }
485
+ ],
486
+ "year": 2000,
487
+ "venue": "",
488
+ "volume": "",
489
+ "issue": "",
490
+ "pages": "",
491
+ "other_ids": {},
492
+ "num": null,
493
+ "urls": [],
494
+ "raw_text": "D. Litman, M. Kearns, S. Singh, and M. Walker. 2000. Automatic optimization of dialogue man- agement. Manuscript submitted for publication.",
495
+ "links": null
496
+ },
497
+ "BIBREF2": {
498
+ "ref_id": "b2",
499
+ "title": "Empirical evaluation of a reinforcement learning spoken dialogue system",
500
+ "authors": [
501
+ {
502
+ "first": "S",
503
+ "middle": [],
504
+ "last": "Singh",
505
+ "suffix": ""
506
+ },
507
+ {
508
+ "first": "M",
509
+ "middle": [],
510
+ "last": "Kearns",
511
+ "suffix": ""
512
+ },
513
+ {
514
+ "first": "D",
515
+ "middle": [],
516
+ "last": "Litman",
517
+ "suffix": ""
518
+ },
519
+ {
520
+ "first": "M",
521
+ "middle": [],
522
+ "last": "Walker",
523
+ "suffix": ""
524
+ }
525
+ ],
526
+ "year": 2000,
527
+ "venue": "Proceedings of AAAI",
528
+ "volume": "",
529
+ "issue": "",
530
+ "pages": "",
531
+ "other_ids": {},
532
+ "num": null,
533
+ "urls": [],
534
+ "raw_text": "S. Singh, M. Kearns, D. Litman, and M. Walker. 2000. Empirical evaluation of a reinforcement learning spoken dialogue system. In Proceedings of AAAI 2000.",
535
+ "links": null
536
+ },
537
+ "BIBREF3": {
538
+ "ref_id": "b3",
539
+ "title": "Reinforcement Learning: An Introduction",
540
+ "authors": [
541
+ {
542
+ "first": "R",
543
+ "middle": [
544
+ "S"
545
+ ],
546
+ "last": "Sutton",
547
+ "suffix": ""
548
+ },
549
+ {
550
+ "first": "A",
551
+ "middle": [
552
+ "G"
553
+ ],
554
+ "last": "Barto",
555
+ "suffix": ""
556
+ }
557
+ ],
558
+ "year": 1998,
559
+ "venue": "",
560
+ "volume": "",
561
+ "issue": "",
562
+ "pages": "",
563
+ "other_ids": {},
564
+ "num": null,
565
+ "urls": [],
566
+ "raw_text": "R. S. Sutton and A. G. Barto. 1998. Reinforcement Learning: An Introduction. MIT Press.",
567
+ "links": null
568
+ }
569
+ },
570
+ "ref_entries": {
571
+ "FIGREF1": {
572
+ "num": null,
573
+ "text": "Sample initiative strategy choices.",
574
+ "uris": null,
575
+ "type_str": "figure"
576
+ },
577
+ "FIGREF2": {
578
+ "num": null,
579
+ "text": "State features and values.",
580
+ "uris": null,
581
+ "type_str": "figure"
582
+ },
583
+ "FIGREF4": {
584
+ "num": null,
585
+ "text": "present two dialogues that were generated using different dialogue strategies in the EIC search space. For example, although NJFun begins both dialogues in the first state inFigure 4, NJFun executes GreetU in one dialogue and GreetS in the other. Similarly, although the next state of NJFun",
586
+ "uris": null,
587
+ "type_str": "figure"
588
+ },
589
+ "FIGREF5": {
590
+ "num": null,
591
+ "text": "An example training dialogue with NJFun.",
592
+ "uris": null,
593
+ "type_str": "figure"
594
+ },
595
+ "TABREF0": {
596
+ "content": "<table><tr><td/><td>SI:</td><td>Welcome to NJFun. How may i help you?</td><td>._</td></tr><tr><td/><td>UI:</td><td>I'd like to find um winetasting in Lambertville in the morning.</td></tr><tr><td/><td/><td colspan=\"2\">(ASR output: I'd like to find out wineries the in the Lambertville in the morning.)</td></tr><tr><td/><td>$2:</td><td>Did you say you are interested in Lambertville?</td></tr><tr><td/><td>U2:</td><td>Yes.</td></tr><tr><td/><td>$3:</td><td>Did you say you want to go in the morning?</td></tr><tr><td/><td>U3:</td><td>Yes.</td></tr><tr><td/><td>$4:</td><td>I found a winery near Lambertville that is open in the morning. It is [... ]</td></tr><tr><td/><td/><td>Please give me feedback by saying 'good', 'so-so', or 'bad'.</td></tr><tr><td/><td>U4:</td><td>Good.</td></tr><tr><td/><td colspan=\"3\">Figure 1: An example dialogue with NJFun (after optimization via RL).</td></tr><tr><td>Action</td><td>m</td><td/></tr><tr><td>GreetS</td><td colspan=\"3\">Welcome to NJFun. Please say an activity name or say 'list activities' for a list of activities I know about.</td></tr><tr><td>GreetU</td><td colspan=\"2\">Welcome to NJFun. How may I help you?</td></tr><tr><td>ReAsk 1 S</td><td colspan=\"3\">I know about amusement parks, aquariums, cruises, historic sites, museums, parks, theaters, wineries,</td></tr><tr><td/><td colspan=\"2\">and zoos.</td></tr><tr><td>ReAsk 1 M</td><td/><td/></tr><tr><td>Ask2S</td><td/><td/></tr><tr><td>Ask2U</td><td/><td/></tr><tr><td>ReAsk2S</td><td/><td/></tr><tr><td>ReAsk2~</td><td/><td/></tr></table>",
597
+ "num": null,
598
+ "type_str": "table",
599
+ "html": null,
600
+ "text": "Please say an activity name from this list. Please tell me the activity type.You can also tell me the location and time. Please say the name of the town or city that you are interested in. Please give me more information. Please tell me the name of the town or city that you are interested in. \"Please tell me the location that you are interested in. You can also tell me the time."
601
+ },
602
+ "TABREF1": {
603
+ "content": "<table><tr><td>SI:</td><td>Welcome to NJFun. Please say an activity name or say 'list activities' for a list of activities I know about.</td></tr><tr><td>UI:</td><td>I would like to visit a historic site in Stanhope this morning.</td></tr><tr><td/><td>(ASR output: I would like to the zoo historic sites in stanhope historic)</td></tr><tr><td>$2:</td><td>Did you say you are interested in going to a zoo?</td></tr><tr><td>U2:</td><td>No.</td></tr><tr><td colspan=\"2\">$3: I know about $4: Did you say you are interested in going to a historic site?</td></tr><tr><td>U4:</td><td>Yes.</td></tr><tr><td>$5:</td><td>Please give me more information.</td></tr><tr><td>U5:</td><td>(user says nothing)</td></tr><tr><td>S6:</td><td>Please tell me the location that you are interested in. You can also tell me the time.</td></tr><tr><td>U6:</td><td>Visit a historic. (ASR output: What historic)</td></tr><tr><td>$7:</td><td>What time of the day do you want to go?</td></tr><tr><td>U7:</td><td>Stanhope. (no ASR output)</td></tr><tr><td>$8:</td><td>Do you want to go in the morning, in the afternoon, or in the evening?</td></tr><tr><td>US:</td><td>Morning.</td></tr><tr><td>S9:</td><td>Did you say you want to go in the morning?</td></tr><tr><td>U9:</td><td>Yes.</td></tr><tr><td>Sl0:</td><td>I found 11 historic sites in New Jersey that are open in the morning. The first 3 [... ] Would you like to hear more?</td></tr><tr><td>UiO:</td><td/></tr></table>",
604
+ "num": null,
605
+ "type_str": "table",
606
+ "html": null,
607
+ "text": "amusement parks, aquariums, cruises, historic sites, museums, parks, theaters, wineries, and zoos. Please say an activity name from this list. U3:I would like to visit a historic site. (ASR output: I would like to visit historic sites)"
608
+ }
609
+ }
610
+ }
611
+ }
Full_text_JSON/prefixW/json/W00/W00-0305.json ADDED
@@ -0,0 +1,761 @@
1
+ {
2
+ "paper_id": "W00-0305",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:09.434154Z"
6
+ },
7
+ "title": "Excerpt 2: The second excerpt regards a flight ~om New York to Melbourne, Australia. The excerpt is the system response after the user has restricted his New York departure to La Guardia ai~ port, similarly to (Excerpt 1, Line 6)",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "new york to washington national 4 BOB: TEN A.M. 5 SYSTEM: i see a few flights from new york to washington national which depart about ten A M on january twenty seventh. do you prefer to depart from newark, J F K, or la guardia. 6 BOB: LA GUARDIA 7 SYSTEM: there are several flights from la guardia, you can fly on delta departing at nine thirty A M or ten thirty A M, or on U S air at ten A M. please specify an airline or time.",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0305",
15
+ "_pdf_hash": "",
16
+ "abstract": [
17
+ {
18
+ "text": "new york to washington national 4 BOB: TEN A.M. 5 SYSTEM: i see a few flights from new york to washington national which depart about ten A M on january twenty seventh. do you prefer to depart from newark, J F K, or la guardia. 6 BOB: LA GUARDIA 7 SYSTEM: there are several flights from la guardia, you can fly on delta departing at nine thirty A M or ten thirty A M, or on U S air at ten A M. please specify an airline or time.",
19
+ "cite_spans": [],
20
+ "ref_spans": [],
21
+ "eq_spans": [],
22
+ "section": "Abstract",
23
+ "sec_num": null
24
+ }
25
+ ],
26
+ "body_text": [
27
+ {
28
+ "text": "three segments of the trip, Mthough the user would only discover this rather unusual coincidence upon asking for detMls about the individual flights. The difficulty in this case arises because the mechanism for generation for a very short list of flights has not yet been subsumed as part of the aggregation mechanism (described below) which would have au-tomaticMly determined that the two flights should be grouped. Excerpt 3:",
29
+ "cite_spans": [],
30
+ "ref_spans": [],
31
+ "eq_spans": [],
32
+ "section": "",
33
+ "sec_num": null
34
+ },
35
+ {
36
+ "text": "i SYSTEM: I will book this leg. Do you want to continue with another leg? Please say yes or no.",
37
+ "cite_spans": [],
38
+ "ref_spans": [],
39
+ "eq_spans": [],
40
+ "section": "",
41
+ "sec_num": null
42
+ },
43
+ {
44
+ "text": "TO HAVE FLIGHT COMING BACK 11 SYSTEM: i can't find any nonstop flights from phoenix to newark on america west which depart about ten A M on june third, you may want to try changing the departure time to seven twenty five A M, eight fifty nine A M, twelve seventeen P M, or three oh five P M.",
45
+ "cite_spans": [],
46
+ "ref_spans": [],
47
+ "eq_spans": [],
48
+ "section": "JANE: YES I NEED TO CONTINUE BUT I NEED",
49
+ "sec_num": "2"
50
+ },
51
+ {
52
+ "text": "In this section we will focus on some tasks handled by our deep generation module, emphasizing the role of constraint functions as a unifying theme. Although the notion can be developed more broadly in various ways, we will consider a constraint function here to be a function on a single data record consisting of information about a particular flight from one location to another, possibly making connections. Examples of a constraint function are: tlmeDepVal, giving the departure time of the flight; t|meArrClass, giving the class of the arrival time (before six A.M., between six A.M. and noon, etc); and connVal giving the list of connection cities. A constraint on a data record is the condition that some given constraint function has a given value. In a typical turn a user may modify the list of Constraints imposed on the flights under discussion 2. How the system interprets the user input, searches for flights satisfying the constraints, and decides what to say about them are all affected by the shared conversational context between system and 2For brevity, we focus in this section on system response to user input whose content consists solely of constraints modifications. Processing of other kinds of input such as questions (e.g. \"when does the nine A.M. flight arrive?\") is handled similarly.",
53
+ "cite_spans": [],
54
+ "ref_spans": [],
55
+ "eq_spans": [],
56
+ "section": "Deep Generation",
57
+ "sec_num": "4"
58
+ },
59
+ {
60
+ "text": "user. Specifically, we have found the following most useful to keep track of:",
61
+ "cite_spans": [],
62
+ "ref_spans": [],
63
+ "eq_spans": [],
64
+ "section": "Deep Generation",
65
+ "sec_num": "4"
66
+ },
67
+ {
68
+ "text": "1. the constraints the user has imposed on the flights;",
69
+ "cite_spans": [],
70
+ "ref_spans": [],
71
+ "eq_spans": [],
72
+ "section": "Deep Generation",
73
+ "sec_num": "4"
74
+ },
75
+ {
76
+ "text": "2. what information about the user input constraints the system has repeated back to the user;",
77
+ "cite_spans": [],
78
+ "ref_spans": [],
79
+ "eq_spans": [],
80
+ "section": "Deep Generation",
81
+ "sec_num": "4"
82
+ },
83
+ {
84
+ "text": "3. the flights the system has conveyed information about to the user; and 4. the constraints on flights that the system has discovered and whether those constraints have been conveyed to the user or can be deduced by the user.",
85
+ "cite_spans": [],
86
+ "ref_spans": [],
87
+ "eq_spans": [],
88
+ "section": "Deep Generation",
89
+ "sec_num": "4"
90
+ },
91
+ {
92
+ "text": "In this section we focus on two particular cases that need to be handled by any dialog system in which the user and system negotiate to find a suitable record from a source of data: the underconstrained case and the over-constrained case.",
93
+ "cite_spans": [],
94
+ "ref_spans": [],
95
+ "eq_spans": [],
96
+ "section": "Deep Generation",
97
+ "sec_num": "4"
98
+ },
99
+ {
100
+ "text": "In this section we discuss how the system decides what to say in the under-constrained case when there are many flights satisfying the user request. Examples of the system response in this case can be found in (Excerpt 1, Turn 5), (Excerpt 1, Turn 7), and Excerpt 2. The following example occurred when a user requested a departure after 10:00 A.M., after having previously imposed the constraints of flying from Chicago to Miami on March third. The system responded as follows:",
101
+ "cite_spans": [],
102
+ "ref_spans": [],
103
+ "eq_spans": [],
104
+ "section": "Grouping of Information",
105
+ "sec_num": "4.1"
106
+ },
107
+ {
108
+ "text": "(I) there are several flights which depart after ten A M.",
109
+ "cite_spans": [],
110
+ "ref_spans": [],
111
+ "eq_spans": [],
112
+ "section": "Grouping of Information",
113
+ "sec_num": "4.1"
114
+ },
115
+ {
116
+ "text": "(2) all of them leave from chicago o'hare and arrive in the afternoon.",
117
+ "cite_spans": [],
118
+ "ref_spans": [],
119
+ "eq_spans": [],
120
+ "section": "Grouping of Information",
121
+ "sec_num": "4.1"
122
+ },
123
+ {
124
+ "text": "(3) do you prefer to fly on american or united.",
125
+ "cite_spans": [],
126
+ "ref_spans": [],
127
+ "eq_spans": [],
128
+ "section": "Grouping of Information",
129
+ "sec_num": "4.1"
130
+ },
131
+ {
132
+ "text": "Part (i) of the system response summarizes the most salient constraints of the user input using the summary script of section 5 s. Part (2) is a specification of the significant information common to all flights. In part (3), the system has decided which under-specified constraint is most likely relevant to the user, grouped the flights according to the values of the constraints, and prompted the user by specifying the possible values of the constraint.",
133
+ "cite_spans": [],
134
+ "ref_spans": [],
135
+ "eq_spans": [],
136
+ "section": "Grouping of Information",
137
+ "sec_num": "4.1"
138
+ },
139
+ {
140
+ "text": "The significant common information in part (2) and the most relevant grouping in part (3) are com-B Some readers may have noticed that, in (Excerpt 1, Turn 5), the system unnecessarily reviewed constraints that have recently been reviewed. This is because the generation mechanism used before enough constraints have been satisfied to query the data base has not yet been fully unified with the mechanism discussed in this paper. The principal domain dependent data needed by the algorithm consists of utility functions for each constraint telling how high a priority it is to go into detail about that constraint. The output is a tree structure which represents the hierarchy of constraint information that is deemed most useful to convey to the user. More specifically, the inputs to the aggregation algorithm consist of a flat list of data records (e.g. a table of flights) together with a list of aggregation specifications. An aggregation specification is a triple consisting of: (1) a constraint function by which data may be grouped, (2) a sort function which orders the groups according to their constraint value, and (3) a utility function to determine how useful this grouping is (which may depend both on conversational context as well as when in the algorithm the grouping is attempted). The utility functions also have the ability to return codes that control the search for the best tree. For example, a utility function can declare itself to be the highest priority, thus pruning the search. The output is a tree with non-terminal nodes labeled by lists of constraint functions, edges labeled by values taken by the constraint functions labeling the node above, and terminal vertices labeled by a list of the data records satisfying the constraints specified by the labelings of all its ancestor nodes and edges.",
141
+ "cite_spans": [],
142
+ "ref_spans": [],
143
+ "eq_spans": [],
144
+ "section": "Grouping of Information",
145
+ "sec_num": "4.1"
146
+ },
147
+ {
148
+ "text": "For the example discussed above, the output of the aggregation algorithm is depicted in Figure 1 . The top node and the edge below it indicate that all the flights leave from Chicago O'Hare in the afternoon (i.e. the constraint depArpVal takes on 4The term \"aggregation\" is sometimes used within the generation community referring to a process of combining groups of linguistically similar phrases. One might say the aggregation here is occurring on a semantic level, i.e. the internal representations of the flights are being grouped. the SABRE code \"0RD\" for Chicago O'Hare and the constraint timeArrClass takes on the value \"morning\"). We call this node a simple node because there is only one edge emanating from it. By contrast, the node below is a complex node since the constraint function at that node airVal can take on more than one value (either \"american\" or \"united\"). The box on the lower left contains those input flights which are on American Airlines and depart from Chicago O'Hare in the morning, and similarly for the box on the lower right.",
149
+ "cite_spans": [],
150
+ "ref_spans": [
151
+ {
152
+ "start": 88,
153
+ "end": 96,
154
+ "text": "Figure 1",
155
+ "ref_id": "FIGREF0"
156
+ }
157
+ ],
158
+ "eq_spans": [],
159
+ "section": "Grouping of Information",
160
+ "sec_num": "4.1"
161
+ },
162
+ {
163
+ "text": "For our application we have found it best to use the same kind of utility function for all constraints. When only this type of utility function is used the behavior of the aggregation algorithm is quite simple and always l:~od~uces a tree similar to the one in Figure 1 , namely~rm with two nodes: a simple one above a complex one. Corresponding to the notion of simple node and complex node, we call a constraint function (chosen from the aggregation specification) simple if it yields the same value when applied to all of the data records and complex otherwise. The simplified aggregation algorithm effectively proceeds as follows:",
164
+ "cite_spans": [],
165
+ "ref_spans": [
166
+ {
167
+ "start": 261,
168
+ "end": 269,
169
+ "text": "Figure 1",
170
+ "ref_id": "FIGREF0"
171
+ }
172
+ ],
173
+ "eq_spans": [],
174
+ "section": "Grouping of Information",
175
+ "sec_num": "4.1"
176
+ },
177
+ {
178
+ "text": "(1) For each simple constraint function (whose value is not known to the user based on the conversational history) apply a significance test. Place those constraints functions that pass the test (if there are any) in the top node of the tree.",
179
+ "cite_spans": [],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "Grouping of Information",
183
+ "sec_num": "4.1"
184
+ },
185
+ {
186
+ "text": "(2) Pick the complex constraint function of maximum positive utility and place that in the node below the top. If all utilities are negative, the node remains empty.",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "Grouping of Information",
191
+ "sec_num": "4.1"
192
+ },
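A rough sketch of the two-step grouping just described. The (constraint function, sort function, utility function) triple mirrors the aggregation specification defined above, but the code, and the reuse of the utility function as the significance test for simple constraints, are simplifying assumptions.

    def aggregate(flights, specs, context):
        """specs: list of (constraint_fn, sort_fn, utility_fn) aggregation specifications."""
        values = {i: {spec[0](f) for f in flights} for i, spec in enumerate(specs)}

        # (1) simple constraint functions take one value on all flights; keep the significant ones
        common = [specs[i] for i in values
                  if len(values[i]) == 1 and specs[i][2](flights, context) > 0]

        # (2) among complex constraint functions, group by the one of maximum positive utility
        complex_ids = [i for i in values if len(values[i]) > 1]
        groups, chosen = {}, None
        if complex_ids:
            best = max(complex_ids, key=lambda i: specs[i][2](flights, context))
            if specs[best][2](flights, context) > 0:
                chosen = specs[best]
                constraint_fn, sort_fn, _ = chosen
                for f in flights:
                    groups.setdefault(constraint_fn(f), []).append(f)
                groups = dict(sorted(groups.items(), key=lambda kv: sort_fn(kv[0])))
        return common, chosen, groups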
193
+ {
194
+ "text": "As an example, when depArpVal is a simple constraint it is deemed significant if it is not the only airport serving the departure location the user requested. In our example, since Chicago is served by both O'Hare and Midway airports, the fact that all flights land in O'Hare is deemed significant to tell the user. As our airline travel system develops we expect to have available more expert knowledge about the airline travel domain. For example, the significance test for depArpVal may be modified in the future if the system has a way of knowing that Chicago O'Hare is the airport the user would. naturally expect in many circumstances.",
195
+ "cite_spans": [],
196
+ "ref_spans": [],
197
+ "eq_spans": [],
198
+ "section": "Grouping of Information",
199
+ "sec_num": "4.1"
200
+ },
201
+ {
202
+ "text": "In this section, we consider the over-constrained case in which no suitable flights can be found that satisfy the user request. One example of the system response in such a case occurs in (Excerpt 3, line 11). Another example is the following:",
203
+ "cite_spans": [],
204
+ "ref_spans": [],
205
+ "eq_spans": [],
206
+ "section": "Relaxation",
207
+ "sec_num": "4.2"
208
+ },
209
+ {
210
+ "text": "(i) there don't seem to be any nonstop flights from san francisco to newark new jersey on united which serve breakfast and depart after nine A M on february tenth.",
211
+ "cite_spans": [],
212
+ "ref_spans": [],
213
+ "eq_spans": [],
214
+ "section": "Relaxation",
215
+ "sec_num": "4.2"
216
+ },
217
+ {
218
+ "text": "(2) you may want to try changing your choice of meal, the airline to Continental, or the departure time to seven oh five A M or eight twenty A M.",
219
+ "cite_spans": [],
220
+ "ref_spans": [],
221
+ "eq_spans": [],
222
+ "section": "Relaxation",
223
+ "sec_num": "4.2"
224
+ },
225
+ {
226
+ "text": "In part (I), the system first reviews detailed information about what it believes the current user request is. This is particularly useful to help alert the user to any previous conversational error. In part (2), the system suggests possible relaxations that may be of interest to the user. A relaxation here is just a change of a single constraint in the user request which would allow flights to be found. For example, the system response (2) above indicates that there ar~ flights on united which satisfy all of the other user constraints listed in (1) above.",
227
+ "cite_spans": [],
228
+ "ref_spans": [],
229
+ "eq_spans": [],
230
+ "section": "Relaxation",
231
+ "sec_num": "4.2"
232
+ },
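A single-constraint relaxation of this kind can be sketched as a simple loop over the user's constraints. The Python fragment below is only an illustration under assumed names (query_flights, dict-valued constraints), not the system's code.

```python
# Sketch of single-constraint relaxation for the over-constrained case:
# drop one constraint at a time, re-query, and report the constraints whose
# removal yields flights, together with the alternative values found.

def suggest_relaxations(constraints, query_flights):
    suggestions = []
    for name in constraints:
        relaxed = {k: v for k, v in constraints.items() if k != name}
        matches = query_flights(relaxed)          # assumed database query
        if matches:
            alternatives = sorted({f[name] for f in matches if name in f})
            suggestions.append((name, alternatives))
    return suggestions
```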
233
+ {
234
+ "text": "Surface Generation There are many approaches to generating text from an underlying semantic representation. Simple templates are adequate for many purposes, but result in a combinatorial explosion in the number of templates required to produce output for all possible circumstances. There are also several powerful generation packages available. One package in particular that we found it insightful to experiment with was FUF (Elhadad, 1989) , which is short for \"Functional Unification Framework\" (Elhadad and Robin, 1992) . FUF comes available with a reusable grammar of English (Elhadad and Robin, 1996) . Although we found the sophisticated linguistic framework of FUF/SURGE difficult to adapt to our needs, we have found it helpful to include analogues of some elements of that framework in the approach we now describe.",
235
+ "cite_spans": [
236
+ {
237
+ "start": 427,
238
+ "end": 442,
239
+ "text": "(Elhadad, 1989)",
240
+ "ref_id": "BIBREF2"
241
+ },
242
+ {
243
+ "start": 499,
244
+ "end": 524,
245
+ "text": "(Elhadad and Robin, 1992)",
246
+ "ref_id": "BIBREF1"
247
+ },
248
+ {
249
+ "start": 582,
250
+ "end": 607,
251
+ "text": "(Elhadad and Robin, 1996)",
252
+ "ref_id": "BIBREF1"
253
+ }
254
+ ],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "5",
258
+ "sec_num": null
259
+ },
260
+ {
261
+ "text": "After our initial experiments, we decided to \"evolve\" a surface generation module starting with the straight forward model of template filling and procedure calls provided by the programming language tel. To overcome the problem of combinatorial explosion in program size, our surface generation makes use of an exception catching mechanism which allows sub-phrases within a complicated phrase to be \"turned on\" if the semantic input required for them is present. This can be done recursively. This approach has a side benefit of being very robust because detailed error catching is built in. Even if the script writer makes an unintentional error in part of a script (and no alternatives for generating the information in the erroneous part are available) only that part will fail to be generated.",
262
+ "cite_spans": [],
263
+ "ref_spans": [],
264
+ "eq_spans": [],
265
+ "section": "5",
266
+ "sec_num": null
267
+ },
268
+ {
269
+ "text": "Our system makes available to the developer several useful domain independent constructs. In addition to these basic constructs, our surface generation Table 1 ).",
270
+ "cite_spans": [],
271
+ "ref_spans": [
272
+ {
273
+ "start": 152,
274
+ "end": 159,
275
+ "text": "Table 1",
276
+ "ref_id": null
277
+ }
278
+ ],
279
+ "eq_spans": [],
280
+ "section": "5",
281
+ "sec_num": null
282
+ },
283
+ {
284
+ "text": "has a morphology module (giving the correct form of a word based on number, tense, etc.) and a library of routines for generating simple phrases. To give the reader a flavor of our approach, we discuss the example of the script which generates phrases such as those in Table 1. 1. There are I several flights.",
285
+ "cite_spans": [],
286
+ "ref_spans": [
287
+ {
288
+ "start": 269,
289
+ "end": 277,
290
+ "text": "Table 1.",
291
+ "ref_id": null
292
+ }
293
+ ],
294
+ "eq_spans": [],
295
+ "section": "5",
296
+ "sec_num": null
297
+ },
298
+ {
299
+ "text": "2. I can't find any I roundtrip flights from New York to Chicago.",
300
+ "cite_spans": [],
301
+ "ref_spans": [],
302
+ "eq_spans": [],
303
+ "section": "5",
304
+ "sec_num": null
305
+ },
306
+ {
307
+ "text": "3. There don't seem to be any I nonstop flights which serve breakfast and make a connection in Dallas.",
308
+ "cite_spans": [],
309
+ "ref_spans": [],
310
+ "eq_spans": [],
311
+ "section": "5",
312
+ "sec_num": null
313
+ },
314
+ {
315
+ "text": "4. There is only one I flight on American which departs between six p m and nine p m on February second and arrives in the morning on February third.",
316
+ "cite_spans": [],
317
+ "ref_spans": [],
318
+ "eq_spans": [],
319
+ "section": "5",
320
+ "sec_num": null
321
+ },
322
+ {
323
+ "text": "5. I see quite a few ] flights which cost less than $1000 and arrive in the morning. Table 1 : Sample output from summarization script. (The vertical bar has been added to demarcate the separation between parts generated by separate subscripts.)",
324
+ "cite_spans": [],
325
+ "ref_spans": [
326
+ {
327
+ "start": 85,
328
+ "end": 92,
329
+ "text": "Table 1",
330
+ "ref_id": null
331
+ }
332
+ ],
333
+ "eq_spans": [],
334
+ "section": "5",
335
+ "sec_num": null
336
+ },
337
+ {
338
+ "text": "Phrases such as the ones above are generated by surface generation when it is asked by deep generation to summarize some of the constraints on what kind of flight the user is looking for and the approximate number of flights found. The script fragment in Figure 2 generates phrases like the ones after the vertical bar in the above examples. Variables such as locFr, dateDep, and air correspond to user specified constraints on departure location, departure date, airline, and so on. Only those variables will be set which deep generation has decided should be summarized. Since there are thirteen variables referred to in the short script below and the (even shorter) subscripts it refers to, they are capable of generating 213 different kinds of phrases expressing the desired content. It is perhaps a fortunate prop-erty of the airline travel domain we are restricting to that this approach allows fairly simple scripts to be used in circumstances where an inordinate number of templates would h~ve been required.",
339
+ "cite_spans": [],
340
+ "ref_spans": [
341
+ {
342
+ "start": 255,
343
+ "end": 263,
344
+ "text": "Figure 2",
345
+ "ref_id": "FIGREF1"
346
+ }
347
+ ],
348
+ "eq_spans": [],
349
+ "section": "5",
350
+ "sec_num": null
351
+ },
352
+ {
353
+ "text": "We offer a few words of explanation of the script in Figure 2 . First, the \"morphology\" procedure Verb provides the appropriate morphological form of a verb (depending on the current setting of number, tense, etc.). The procedure subst is used for expanding the subscripts referred to. The procedures DoAir, DoArp DoPrlceRange, and DoStops are from the \"phrase library\". They generate appropriate phrases associated with an airline, an airport, a price range, or whether or not a flight is nonstop. One may think of these as rules for converting the semantic information, previously determined by deep generation and stored in variables such as air and price, into a surface realization. For example, \"[DoAir Sair]\" returns \"American\" and \"[DoPrice SPrite]\" returns \"less than $1000\".",
354
+ "cite_spans": [],
355
+ "ref_spans": [
356
+ {
357
+ "start": 53,
358
+ "end": 61,
359
+ "text": "Figure 2",
360
+ "ref_id": "FIGREF1"
361
+ }
362
+ ],
363
+ "eq_spans": [],
364
+ "section": "5",
365
+ "sec_num": null
366
+ },
367
+ {
368
+ "text": "The construct opt-s (short for op__t)onal substitution) includes the text generated by expanding its argument if that expansion is successful, or else catches and ignores any errors if the expansion was not successful. The construct NonEmptyConjunetion is used to adjoin a list of phrases. (The separators between phrases are optional arguments.) If the input list is empty, however, an error is generated. In such a case (e.g. examples 1 and 2 above), the error is caught by the enclosing opt-s, so the entire \"which\" clause is omitted.",
369
+ "cite_spans": [],
370
+ "ref_spans": [],
371
+ "eq_spans": [],
372
+ "section": "5",
373
+ "sec_num": null
374
+ },
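The behaviour of these two constructs can be illustrated with a small Python analogue. The original scripts are written in Tcl; the names and signatures below are assumptions made for the sketch, not the system's actual code.

```python
# Python analogues of the opt-s and NonEmptyConjunction constructs.

def opt_s(expand):
    """Optional substitution: use the expansion if it succeeds,
    otherwise contribute nothing (errors are caught and ignored)."""
    try:
        return expand()
    except Exception:
        return ""

def non_empty_conjunction(phrases, sep=", ", last_sep=" and "):
    """Adjoin phrases; raise on an empty list so an enclosing opt_s
    drops the whole clause."""
    phrases = [p for p in phrases if p]
    if not phrases:
        raise ValueError("nothing to conjoin")
    if len(phrases) == 1:
        return phrases[0]
    return sep.join(phrases[:-1]) + last_sep + phrases[-1]

# The "which" clause is emitted only if at least one sub-phrase exists.
clause = opt_s(lambda: " which " + non_empty_conjunction(
    ["serve breakfast", "make a connection in Dallas"]))
```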
375
+ {
376
+ "text": "Another example of a construct is SayOnce. This is used when generating a list of phrases, so that a particular script fragment will only be expanded and included the first time it is encountered. For example, SayOnce has been used to omit the second occurrence of the word \"departing\" in (Excerpt 1, Turn 7). Similarly, in the following response to a user query about the arrival times of the flights under discussion, the second occurrence of the word \"flights\" has been omitted by a simple application 2@ of SayOnce: i see at least 3 flights which arrive between two P M and six P M, and 4 which arrive between six P M and ten P",
377
+ "cite_spans": [],
378
+ "ref_spans": [],
379
+ "eq_spans": [],
380
+ "section": "5",
381
+ "sec_num": null
382
+ },
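A SayOnce-style construct amounts to remembering which fragments have already been expanded. The Python sketch below (class name and usage assumed for illustration) reproduces the omission of the second occurrence of "flights".

```python
class SayOnce:
    """Expand a fragment the first time it is requested; omit it afterwards."""
    def __init__(self):
        self._said = set()

    def __call__(self, key, text):
        if key in self._said:
            return ""
        self._said.add(key)
        return text

say_once = SayOnce()
parts = [
    "i see at least 3 " + say_once("flights", "flights ")
    + "which arrive between two P M and six P M",
    "4 " + say_once("flights", "flights ")
    + "which arrive between six P M and ten P M",
]
print(", and ".join(parts))
```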
383
+ {
384
+ "text": "In developing our deep and surface generation modules we have followed a strategy of starting with a simple approach and adding basic building blocks as they are warranted, for example the generation constructs described in section 5 and the utility functions of sections 4.1. This strategy has helped us develop generation modules which are flexible, robust, and interact well with the other components of our system. Also, the tools presented here tend to reduce the growth in code size with complexity (as measured by the number of possible constraints). We are optimistic that these methods can be applied to other domains, although certainly additional features would have to be added. For instance, in Excerpt 2, we gave an example of a shortcoming of our system that arose when we summarized details about a very short list of flights. This problem could be fixed either by subsuming the case of a very short list of flights into the general aggregation mechanism or by adding an additional mechanism to handle this separate case better. Since the problem seemed insignificant enough in the airline travel domain we have not yet resolved it, but we expect that experience with other domains will dictate the best approach.",
385
+ "cite_spans": [],
386
+ "ref_spans": [],
387
+ "eq_spans": [],
388
+ "section": "Conclusion",
389
+ "sec_num": "6"
390
+ },
391
+ {
392
+ "text": "We consider it to be an advantage of this approach that it is not tied to a particular linguistic framework and affords rather straight forward development. This certainly seems appropriate for our application so far, where the summary script of Figure 2 represents the typical level of complexity of the scripts we have had to develop. It is possible that this could become a limiting factor as the complexity, scope, and variety of domains increases. However, we expect other limitations to become more pressing. For example, we plan to investigate additional building blocks which will be useful as we begin to delve into issues such as improving our help messages or adding emphasis to particular parts of the information we want to convey, either via prosody or more finely crafted text.",
393
+ "cite_spans": [],
394
+ "ref_spans": [
395
+ {
396
+ "start": 246,
397
+ "end": 254,
398
+ "text": "Figure 2",
399
+ "ref_id": "FIGREF1"
400
+ }
401
+ ],
402
+ "eq_spans": [],
403
+ "section": "Conclusion",
404
+ "sec_num": "6"
405
+ }
406
+ ],
407
+ "back_matter": [
408
+ {
409
+ "text": "The author would like to thank Mark Epstein, Adwait Ratnaparhki, Salim Roukos, Kishore Palyineni, and Todd Ward for helpful discussions. This work was supported in part by DARPA contract MDA972-97-C-0012.",
410
+ "cite_spans": [],
411
+ "ref_spans": [],
412
+ "eq_spans": [],
413
+ "section": "Acknowledgements",
414
+ "sec_num": null
415
+ }
416
+ ],
417
+ "bib_entries": {
418
+ "BIBREF0": {
419
+ "ref_id": "b0",
420
+ "title": "Spoken dialog systems: Some case studies from AT~T",
421
+ "authors": [
422
+ {
423
+ "first": "K",
424
+ "middle": [],
425
+ "last": "References",
426
+ "suffix": ""
427
+ },
428
+ {
429
+ "first": "E",
430
+ "middle": [],
431
+ "last": "Biatov",
432
+ "suffix": ""
433
+ },
434
+ {
435
+ "first": "G",
436
+ "middle": [
437
+ "Di"
438
+ ],
439
+ "last": "Bocchieri",
440
+ "suffix": ""
441
+ },
442
+ {
443
+ "first": "C",
444
+ "middle": [],
445
+ "last": "Frabbrizio",
446
+ "suffix": ""
447
+ },
448
+ {
449
+ "first": "E",
450
+ "middle": [],
451
+ "last": "Kahm",
452
+ "suffix": ""
453
+ },
454
+ {
455
+ "first": "S",
456
+ "middle": [],
457
+ "last": "Levin",
458
+ "suffix": ""
459
+ },
460
+ {
461
+ "first": "A",
462
+ "middle": [],
463
+ "last": "Narayanan",
464
+ "suffix": ""
465
+ },
466
+ {
467
+ "first": "P",
468
+ "middle": [],
469
+ "last": "Pokrovsky",
470
+ "suffix": ""
471
+ },
472
+ {
473
+ "first": "M",
474
+ "middle": [],
475
+ "last": "Ruscitti",
476
+ "suffix": ""
477
+ },
478
+ {
479
+ "first": "L",
480
+ "middle": [],
481
+ "last": "Rahim",
482
+ "suffix": ""
483
+ },
484
+ {
485
+ "first": "",
486
+ "middle": [],
487
+ "last": "Walker",
488
+ "suffix": ""
489
+ }
490
+ ],
491
+ "year": 2000,
492
+ "venue": "Presentation at DARPA Communicator Workshop",
493
+ "volume": "",
494
+ "issue": "",
495
+ "pages": "",
496
+ "other_ids": {},
497
+ "num": null,
498
+ "urls": [],
499
+ "raw_text": "References K. Biatov, E. Bocchieri, G. Di Frabbrizio, C. Kahm, E. Levin, S. Narayanan, A. Pokrovsky, P. Ruscitti, M. Rahim, and L. Walker. 2000. Spoken dialog systems: Some case studies from AT~T. In Pre- sentation at DARPA Communicator Workshop, Charleston, SC, Jan. 2000. See http://www.dsic- web.net :8501/pub/comra_2000jan/ATT- Narayanan.pdf for presentation and http://www.dsic-web.net/ito/mectings/ communicator_jan00/agenda.html for conference agenda.",
500
+ "links": null
501
+ },
502
+ "BIBREF1": {
503
+ "ref_id": "b1",
504
+ "title": "An overview of SUKGE: A re-usable comprehensive syntactic realization component",
505
+ "authors": [
506
+ {
507
+ "first": "Michael",
508
+ "middle": [],
509
+ "last": "Elhadad",
510
+ "suffix": ""
511
+ },
512
+ {
513
+ "first": "Jacques",
514
+ "middle": [],
515
+ "last": "Robin",
516
+ "suffix": ""
517
+ }
518
+ ],
519
+ "year": 1992,
520
+ "venue": "Proceedings of the 8th International Workshop on Natural Language Generation",
521
+ "volume": "587",
522
+ "issue": "",
523
+ "pages": "89--104",
524
+ "other_ids": {},
525
+ "num": null,
526
+ "urls": [],
527
+ "raw_text": "Michael Elhadad and Jacques Robin. 1992. Control- ling content realization with functional unification grammars. In Aspects of Automated Natural Lan- guage Generation, Lecture Notes in Artificial In- telligence, 587, pages 89-104. Springer, Berlin. Michael Elhadad and Jacques Robin. 1996. An overview of SUKGE: A re-usable comprehensive syntactic realization component. In Proceedings of the 8th International Workshop on Natural Lan- guage Generation, Beer Sheva, Israel.",
528
+ "links": null
529
+ },
530
+ "BIBREF2": {
531
+ "ref_id": "b2",
532
+ "title": "FUF: The universal unifier user manual",
533
+ "authors": [
534
+ {
535
+ "first": "Michael",
536
+ "middle": [
537
+ "Elhadad"
538
+ ],
539
+ "last": "",
540
+ "suffix": ""
541
+ }
542
+ ],
543
+ "year": 1989,
544
+ "venue": "",
545
+ "volume": "",
546
+ "issue": "",
547
+ "pages": "",
548
+ "other_ids": {},
549
+ "num": null,
550
+ "urls": [],
551
+ "raw_text": "Michael Elhadad. 1989. FUF: The universal uni- fier user manual. Technical report, Department of Computer Science, Columbia University. URL = http://www.cs.bgu.ac.il/surge/index.htm:",
552
+ "links": null
553
+ },
554
+ "BIBREF3": {
555
+ "ref_id": "b3",
556
+ "title": "Free-flow dialog management using forms",
557
+ "authors": [
558
+ {
559
+ "first": "K",
560
+ "middle": [
561
+ "A"
562
+ ],
563
+ "last": "Papineni",
564
+ "suffix": ""
565
+ },
566
+ {
567
+ "first": "S",
568
+ "middle": [],
569
+ "last": "Roukos",
570
+ "suffix": ""
571
+ },
572
+ {
573
+ "first": "R",
574
+ "middle": [
575
+ "T"
576
+ ],
577
+ "last": "Ward",
578
+ "suffix": ""
579
+ }
580
+ ],
581
+ "year": 1999,
582
+ "venue": "Proceedings of Eurospeech-99",
583
+ "volume": "",
584
+ "issue": "",
585
+ "pages": "1411--1414",
586
+ "other_ids": {},
587
+ "num": null,
588
+ "urls": [],
589
+ "raw_text": "K. A. Papineni, S. Roukos, and R. T. Ward. 1999. Free-flow dialog management using forms. In Pro- ceedings of Eurospeech-99, pages 1411-1414, Sept. 1999.",
590
+ "links": null
591
+ },
592
+ "BIBREF4": {
593
+ "ref_id": "b4",
594
+ "title": "Creating natural dialogs in the Carnegie Mellon Communicator system",
595
+ "authors": [
596
+ {
597
+ "first": "A",
598
+ "middle": [
599
+ "I"
600
+ ],
601
+ "last": "Rudnicky",
602
+ "suffix": ""
603
+ },
604
+ {
605
+ "first": "E",
606
+ "middle": [],
607
+ "last": "Thayer",
608
+ "suffix": ""
609
+ },
610
+ {
611
+ "first": "P",
612
+ "middle": [],
613
+ "last": "Constantinides",
614
+ "suffix": ""
615
+ },
616
+ {
617
+ "first": "C",
618
+ "middle": [],
619
+ "last": "Tchou",
620
+ "suffix": ""
621
+ },
622
+ {
623
+ "first": "R",
624
+ "middle": [],
625
+ "last": "Shern",
626
+ "suffix": ""
627
+ },
628
+ {
629
+ "first": "K",
630
+ "middle": [],
631
+ "last": "Lenzo",
632
+ "suffix": ""
633
+ },
634
+ {
635
+ "first": "W",
636
+ "middle": [],
637
+ "last": "Xu",
638
+ "suffix": ""
639
+ },
640
+ {
641
+ "first": "A",
642
+ "middle": [],
643
+ "last": "Oh",
644
+ "suffix": ""
645
+ }
646
+ ],
647
+ "year": 1999,
648
+ "venue": "Proceedings of Eurospeech-1999",
649
+ "volume": "",
650
+ "issue": "",
651
+ "pages": "931--934",
652
+ "other_ids": {},
653
+ "num": null,
654
+ "urls": [],
655
+ "raw_text": "A.I. Rudnicky, E. Thayer, P. Constantinides, C. Tchou, R. Shern, K. Lenzo, W. Xu, and A. Oh. 1999. Creating natural dialogs in the Carnegie Mellon Communicator system. In Proceedings of Eurospeech-1999, pages 931-934, Budapest, Hun- gary, Sept. 1999.",
656
+ "links": null
657
+ },
658
+ "BIBREF5": {
659
+ "ref_id": "b5",
660
+ "title": "Galaxy-II: A reference architecture for conversational system development",
661
+ "authors": [
662
+ {
663
+ "first": "S",
664
+ "middle": [],
665
+ "last": "Seneff",
666
+ "suffix": ""
667
+ },
668
+ {
669
+ "first": "E",
670
+ "middle": [],
671
+ "last": "Hurley",
672
+ "suffix": ""
673
+ },
674
+ {
675
+ "first": "R",
676
+ "middle": [],
677
+ "last": "Lau",
678
+ "suffix": ""
679
+ },
680
+ {
681
+ "first": "C",
682
+ "middle": [],
683
+ "last": "Pao",
684
+ "suffix": ""
685
+ },
686
+ {
687
+ "first": "P",
688
+ "middle": [],
689
+ "last": "Schmid",
690
+ "suffix": ""
691
+ },
692
+ {
693
+ "first": "V",
694
+ "middle": [],
695
+ "last": "Zue",
696
+ "suffix": ""
697
+ }
698
+ ],
699
+ "year": 1998,
700
+ "venue": "Pi'oceedings of ICSLP-1998",
701
+ "volume": "",
702
+ "issue": "",
703
+ "pages": "1153--1156",
704
+ "other_ids": {},
705
+ "num": null,
706
+ "urls": [],
707
+ "raw_text": "S. Seneff, E. Hurley, R. Lau, C. Pao, P. Schmid, and V. Zue. 1998. Galaxy-II: A reference architecture for conversational system development. In Pi'o- ceedings of ICSLP-1998, pages 1153-1156, Syd- ney, Australia, Nov. 30-Dec. 4, 1998.",
708
+ "links": null
709
+ },
710
+ "BIBREF6": {
711
+ "ref_id": "b6",
712
+ "title": "The CU Communicator system",
713
+ "authors": [
714
+ {
715
+ "first": "Wayne",
716
+ "middle": [],
717
+ "last": "Ward",
718
+ "suffix": ""
719
+ },
720
+ {
721
+ "first": "Bryan",
722
+ "middle": [],
723
+ "last": "Pellom",
724
+ "suffix": ""
725
+ }
726
+ ],
727
+ "year": 1999,
728
+ "venue": "IEEE Workshop on Automatic Speech Recognition and Understanding",
729
+ "volume": "",
730
+ "issue": "",
731
+ "pages": "",
732
+ "other_ids": {},
733
+ "num": null,
734
+ "urls": [],
735
+ "raw_text": "Wayne Ward and Bryan Pellom. 1999. The CU Communicator system. In 1999 IEEE Work- shop on Automatic Speech Recognition and Un- derstanding, Keystone Colorado, Dec. 1999.",
736
+ "links": null
737
+ }
738
+ },
739
+ "ref_entries": {
740
+ "FIGREF0": {
741
+ "type_str": "figure",
742
+ "num": null,
743
+ "uris": null,
744
+ "text": "Example of an Aggregation puted by what we call the aggregation algorithm 4."
745
+ },
746
+ "FIGREF1": {
747
+ "type_str": "figure",
748
+ "num": null,
749
+ "uris": null,
750
+ "text": "Fragment from summarization script (generating text after vertical bar in examples in"
751
+ },
752
+ "TABREF0": {
753
+ "html": null,
754
+ "num": null,
755
+ "type_str": "table",
756
+ "text": "SYSTEM: Okay, continuing with leg two. flying from phoenix to newark on what date? 4 JANE: 0KAY THAT WOULD BE FINE JUNE THIRD 5 SYSTEM: what time would you like to leave on june third? 6 JANE: OF THE TWO O'CLOCK IN THE AFTERNOON",
757
+ "content": "<table><tr><td>7 SYSTEM: america west flight 7 74</td></tr><tr><td>departs phoenix at one thirty six P</td></tr><tr><td>M on Saturday, june third and arrives</td></tr><tr><td>in las vegas at two thirty eight P M,</td></tr><tr><td>connecting to continental flight 12</td></tr><tr><td>82 departing at three fifteen P M and</td></tr><tr><td>arriving in newark at ten forty one P M.</td></tr><tr><td>Is this flight okay?</td></tr><tr><td>8 JANE: NO IS THERE A DIRECT FLIGHT</td></tr><tr><td>FROM PHOENIX TO NEWARK I ON JUNE THIRD</td></tr><tr><td>ANYTIME TO THIRD</td></tr><tr><td>9 SYSTEM: pardon me, what time would you</td></tr><tr><td>like to leave?</td></tr></table>"
758
+ }
759
+ }
760
+ }
761
+ }
Full_text_JSON/prefixW/json/W00/W00-0306.json ADDED
@@ -0,0 +1,560 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0306",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:35:22.242981Z"
6
+ },
7
+ "title": "Stochastic Language Generation for Spoken Dialogue Systems",
8
+ "authors": [
9
+ {
10
+ "first": "Alice",
11
+ "middle": [
12
+ "H"
13
+ ],
14
+ "last": "Oh",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "Carnegie Mellon University",
19
+ "location": {
20
+ "addrLine": "5000 Forbes Ave. Pittsburgh",
21
+ "postCode": "15213",
22
+ "region": "PA"
23
+ }
24
+ },
25
+ "email": "aliceo@cs.cmu.edu"
26
+ },
27
+ {
28
+ "first": "Alexander",
29
+ "middle": [
30
+ "I"
31
+ ],
32
+ "last": "Rudnicky",
33
+ "suffix": "",
34
+ "affiliation": {
35
+ "laboratory": "",
36
+ "institution": "Carnegie Mellon University",
37
+ "location": {
38
+ "addrLine": "5000 Forbes Ave. Pittsburgh",
39
+ "postCode": "15213",
40
+ "region": "PA"
41
+ }
42
+ },
43
+ "email": ""
44
+ }
45
+ ],
46
+ "year": "",
47
+ "venue": null,
48
+ "identifiers": {},
49
+ "abstract": "The two current approaches to language generation, Template-based and rule-based (linguistic) NLG, have limitations when applied to spoken dialogue systems, in part because they were developed for text generation. In this paper, we propose a new corpus-based approach to natural language generation, specifically designed for spoken dialogue systems.",
50
+ "pdf_parse": {
51
+ "paper_id": "W00-0306",
52
+ "_pdf_hash": "",
53
+ "abstract": [
54
+ {
55
+ "text": "The two current approaches to language generation, Template-based and rule-based (linguistic) NLG, have limitations when applied to spoken dialogue systems, in part because they were developed for text generation. In this paper, we propose a new corpus-based approach to natural language generation, specifically designed for spoken dialogue systems.",
56
+ "cite_spans": [],
57
+ "ref_spans": [],
58
+ "eq_spans": [],
59
+ "section": "Abstract",
60
+ "sec_num": null
61
+ }
62
+ ],
63
+ "body_text": [
64
+ {
65
+ "text": "Several general-purpose rule-based generation systems have been developed, some of which are available publicly (cf. Elhadad, 1992) . Unfortunately these systems, because of their generality, can be difficult to adapt to small, task-oriented applications. Bateman and Henschel (1999) have described a lower cost and more efficient generation system for a specific application using an automatically customized subgrammar. Busemann and Horacek (1998) describe a system that mixes templates and rulebased generation. This approach takes advantages of templates and rule-based generation as needed by specific sentences or utterances. Stent (1999) has proposed a similar approach for a spoken dialogue system. However, there is still the burden of writing and maintaining grammar rules, and processing time is probably too slow for sentences using grammar rules (only the average time for templates and rule-based sentences combined is reported in Busemann and Horacek, 1998) , for use in spoken dialogue systems.",
66
+ "cite_spans": [
67
+ {
68
+ "start": 112,
69
+ "end": 131,
70
+ "text": "(cf. Elhadad, 1992)",
71
+ "ref_id": null
72
+ },
73
+ {
74
+ "start": 256,
75
+ "end": 283,
76
+ "text": "Bateman and Henschel (1999)",
77
+ "ref_id": "BIBREF0"
78
+ },
79
+ {
80
+ "start": 422,
81
+ "end": 449,
82
+ "text": "Busemann and Horacek (1998)",
83
+ "ref_id": "BIBREF1"
84
+ },
85
+ {
86
+ "start": 632,
87
+ "end": 644,
88
+ "text": "Stent (1999)",
89
+ "ref_id": null
90
+ },
91
+ {
92
+ "start": 945,
93
+ "end": 972,
94
+ "text": "Busemann and Horacek, 1998)",
95
+ "ref_id": "BIBREF1"
96
+ }
97
+ ],
98
+ "ref_spans": [],
99
+ "eq_spans": [],
100
+ "section": "Introduction",
101
+ "sec_num": null
102
+ },
103
+ {
104
+ "text": "Because comparatively less effort is needed, many current dialogue systems use templatebased generation. But there is one obvious disadvantage: the quality of the output depends entirely on the set of templates. Even in a relatively simple domain, such as travel reservations, the number of templates necessary for reasonable quality can become quite large that maintenance becomes a serious problem. There is an unavoidfible trade-off between the amount of time and effort in creating and maintaining templates and the variety and quality of the output utterances.",
105
+ "cite_spans": [],
106
+ "ref_spans": [],
107
+ "eq_spans": [],
108
+ "section": "Introduction",
109
+ "sec_num": null
110
+ },
111
+ {
112
+ "text": "Given these shortcomings of the above approaches, we developed a corpus-based generation system, in which we model language spoken by domain experts performing the task of interest, and use that model to stochastically generate system utterances. We have applied this technique to sentence realization and content planning, and have incorporated the resulting generation component into a working natural dialogue system (see Figure 1 ). In this paper, we describe the technique and report the results of two evaluations.",
113
+ "cite_spans": [],
114
+ "ref_spans": [
115
+ {
116
+ "start": 425,
117
+ "end": 433,
118
+ "text": "Figure 1",
119
+ "ref_id": null
120
+ }
121
+ ],
122
+ "eq_spans": [],
123
+ "section": "Introduction",
124
+ "sec_num": null
125
+ },
126
+ {
127
+ "text": "We used two corpora in the travel reservations domain to build n-gram language models. One corpus (henceforth, the CMU corpus) consists of 39 dialogues between a travel agent and clients (Eskenazi, et al. 1999 ). The utterances in the two corpora were tagged with utterance classes and word classes (see Figure 2 and Figure 3 ). The CMU corpus was manually tagged, and back-off trigram models built (using Clarkson and Rosenfeld, 1997) . These language models were used to automatically tag the SRI corpus; the tags were manually checked.",
128
+ "cite_spans": [
129
+ {
130
+ "start": 187,
131
+ "end": 209,
132
+ "text": "(Eskenazi, et al. 1999",
133
+ "ref_id": null
134
+ },
135
+ {
136
+ "start": 399,
137
+ "end": 435,
138
+ "text": "(using Clarkson and Rosenfeld, 1997)",
139
+ "ref_id": null
140
+ }
141
+ ],
142
+ "ref_spans": [
143
+ {
144
+ "start": 304,
145
+ "end": 312,
146
+ "text": "Figure 2",
147
+ "ref_id": null
148
+ },
149
+ {
150
+ "start": 317,
151
+ "end": 325,
152
+ "text": "Figure 3",
153
+ "ref_id": "FIGREF0"
154
+ }
155
+ ],
156
+ "eq_spans": [],
157
+ "section": "Introduction",
158
+ "sec_num": null
159
+ },
160
+ {
161
+ "text": "In content planning we decide which attributes (represented as word classes, see Figure 3 ) should be included in an utterance. In a taskoriented dialogue, the number of attributes generally increases during the course of the dialogue. Therefore, as the dialogue progresses, we need to decide which ones to include at each system turn. If we include all of them every time (indirect echoing, see Hayes and Reddy, 1983) , the utterances become overly lengthy, but if we remove all unnecessary attributes, the user may get confused. With a fairly high recognition error rate, this becomes an even more important issue.",
162
+ "cite_spans": [
163
+ {
164
+ "start": 396,
165
+ "end": 418,
166
+ "text": "Hayes and Reddy, 1983)",
167
+ "ref_id": null
168
+ }
169
+ ],
170
+ "ref_spans": [
171
+ {
172
+ "start": 81,
173
+ "end": 89,
174
+ "text": "Figure 3",
175
+ "ref_id": "FIGREF0"
176
+ }
177
+ ],
178
+ "eq_spans": [],
179
+ "section": "Content Planning",
180
+ "sec_num": "1"
181
+ },
182
+ {
183
+ "text": "The problem, then, is to find a compromise between the two. We compared two ways to systematically generate system utterances with only selected attributes, such that the user hears repetition of some of the constraints he/she has specified, at appropriate points in the dialogue, without sacrificing naturalness and efficiency. The specific problems, then, are deciding what should be repeated, and when. We first describe a simple heuristic of old versus new information. Then we present a statistical approach, based on bigram models.",
184
+ "cite_spans": [],
185
+ "ref_spans": [],
186
+ "eq_spans": [],
187
+ "section": "Content Planning",
188
+ "sec_num": "1"
189
+ },
190
+ {
191
+ "text": "As a simple solution, we can use the previous dialogue history, by tagging the attribute-value pairs as old (previously said by the system) information or new (not said by the system yet) information. The generation module would select only new information to be included in the system utterances. Consequently, information \u2022 given by the user is repeated only once in the dialogue, usually in the utterance immediately following the user utterance in which the new information was given 1.",
192
+ "cite_spans": [],
193
+ "ref_spans": [],
194
+ "eq_spans": [],
195
+ "section": "First approach: old versus new",
196
+ "sec_num": "1.1"
197
+ },
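The old-versus-new heuristic can be stated in a few lines. The following Python sketch uses assumed data structures (an attribute-value frame and a set of already-confirmed attributes), not the Communicator system's internals.

```python
def select_new_attributes(frame, already_said):
    """Keep only attributes the system has not yet spoken; mark them as old."""
    selected = {k: v for k, v in frame.items() if k not in already_said}
    already_said.update(selected)
    return selected

history = set()
turn1 = select_new_attributes(
    {"depart_city": "New York", "arrive_city": "San Francisco"}, history)
turn2 = select_new_attributes(
    {"depart_city": "New York", "depart_date": "19991117"}, history)
# turn2 contains only depart_date; depart_city was already confirmed in turn1.
```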
198
+ {
199
+ "text": "Although this approach seems to work fairly well, echoing user's constraints only once may not be the right thing to do. Looking at humanhuman dialogues, we observe that this is not very natural for a conversation; humans often repeat mutually known information, and they also often do not repeat some information at all. Also, this model does not capture the close relationship between two consecutive utterances within a dialogue. The second approach tries to address these issues.",
200
+ "cite_spans": [],
201
+ "ref_spans": [],
202
+ "eq_spans": [],
203
+ "section": "First approach: old versus new",
204
+ "sec_num": "1.1"
205
+ },
206
+ {
207
+ "text": "Second approach: statistical model For this approach, we adopt the first of the two sub-maxims in (Oberlander, 1998) '\u2022'Do the human thing\". Oberlander (1998) talks about generation of referring expressions, but it is universally valid, at least within natural language generation, to say the best we can do is",
208
+ "cite_spans": [],
209
+ "ref_spans": [],
210
+ "eq_spans": [],
211
+ "section": "1.2",
212
+ "sec_num": null
213
+ },
214
+ {
215
+ "text": "When the system utterance uses a template that does not contain the slots for the new information given in the previous user utterance, then that new information will be confirmed in the next available system utterance in which the template contains those slots.",
216
+ "cite_spans": [],
217
+ "ref_spans": [],
218
+ "eq_spans": [],
219
+ "section": "1.2",
220
+ "sec_num": null
221
+ },
222
+ {
223
+ "text": "to mimic human behavior. Hence, we built a two-stage statistical model of human-human dialogues using the CMU corpus. The model first predicts the number of attributes in the system utterance given the utterance class, then predicts the attributes given the attributes in the previous user utterance.",
224
+ "cite_spans": [],
225
+ "ref_spans": [],
226
+ "eq_spans": [],
227
+ "section": "1.2",
228
+ "sec_num": null
229
+ },
230
+ {
231
+ "text": "The first model will predict the number of attributes in a system utterance given the utterance class. The model is the probability distribution P(nk) = P(nklck), where nk is the number of attributes and Ck is the utterance class for system utte~anee k.",
232
+ "cite_spans": [],
233
+ "ref_spans": [],
234
+ "eq_spans": [],
235
+ "section": "The number of attributes model",
236
+ "sec_num": "1.2.1"
237
+ },
238
+ {
239
+ "text": "This model will predict which attributes to use in a system utterance. Using a statistical model, what we need to do is find the set of attributes A* = {al, az ..... an } such that A * = arg max FI P(al, a2 ..... an)",
240
+ "cite_spans": [],
241
+ "ref_spans": [],
242
+ "eq_spans": [],
243
+ "section": "The bigram model of the attributes",
244
+ "sec_num": "1.2.2"
245
+ },
246
+ {
247
+ "text": "We assume that the distributions of the ai's are dependent on the attributes in the previous utterances. As a simple model, we look only at the utterance immediately preceding the current utterance and build a bigram model of the attributes. In other words, A* = arg max P(AIB), where B = {bl, b2 ..... bin}, the set of m attributes in the preceding user utterance.",
248
+ "cite_spans": [],
249
+ "ref_spans": [],
250
+ "eq_spans": [],
251
+ "section": "The bigram model of the attributes",
252
+ "sec_num": "1.2.2"
253
+ },
254
+ {
255
+ "text": "If we took the above model and tried to apply it directly, we would run into a serious data sparseness problem, so we make two independence assumptions. The first assumption is that the attributes in the user utterance contribute independently to the probabilities of the attributes in the system utterance following it. Applying this assumption to the model above, we get the following: The second independence assumption is that the attributes in the system utterance are independent of each other. This gives the final model that we used for selecting the attributes. ",
256
+ "cite_spans": [],
257
+ "ref_spans": [],
258
+ "eq_spans": [],
259
+ "section": "The bigram model of the attributes",
260
+ "sec_num": "1.2.2"
261
+ },
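Under these two assumptions, attribute selection reduces to scoring each candidate attribute by a sum over the previous utterance's attributes and keeping the n best, with n drawn from the P(nk | ck) model of section 1.2.1. The Python sketch below is a greedy approximation with assumed probability tables, not the paper's implementation.

```python
def select_attributes(user_attrs, p_prev, p_attr_given_prev, n):
    """Score each candidate attribute a by sum_k P(b_k) * P(a | b_k),
    where b_k ranges over the previous user utterance's attributes,
    and keep the n highest-scoring attributes."""
    scores = {}
    for b in user_attrs:
        for a, p in p_attr_given_prev.get(b, {}).items():
            scores[a] = scores.get(a, 0.0) + p_prev.get(b, 0.0) * p
    return sorted(scores, key=scores.get, reverse=True)[:n]
```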
262
+ {
263
+ "text": "Although this independence assumption is an oversimplification, this simple model is a good starting point for our initial implementation of this approach.",
264
+ "cite_spans": [],
265
+ "ref_spans": [],
266
+ "eq_spans": [],
267
+ "section": "29",
268
+ "sec_num": null
269
+ },
270
+ {
271
+ "text": "We follow Busemann and Horacek (1998) in designing our generation engine with \"different levels of granularity.\" The different levels contribute to the specific needs of the various utterance classes. For example, at the beginning of the dialogue, a system greeting can be simply generated by a \"canned\" expression. Other short, simple utterances can be generated efficiently by templates. In Busemann and Horacek (1998) , the remaining output is generated by grammar rules. We replace the gefieration grammar with a simple statistical language model to generate more complex utterances.",
272
+ "cite_spans": [
273
+ {
274
+ "start": 10,
275
+ "end": 37,
276
+ "text": "Busemann and Horacek (1998)",
277
+ "ref_id": "BIBREF1"
278
+ },
279
+ {
280
+ "start": 393,
281
+ "end": 420,
282
+ "text": "Busemann and Horacek (1998)",
283
+ "ref_id": "BIBREF1"
284
+ }
285
+ ],
286
+ "ref_spans": [],
287
+ "eq_spans": [],
288
+ "section": "Stochastic Surface Realization",
289
+ "sec_num": "2"
290
+ },
291
+ {
292
+ "text": "There are four aspects to our stochastic surface realizer: building language models, generating candidate utterances, scoring the utterances, and filling in the slots. We explain each of these below.",
293
+ "cite_spans": [],
294
+ "ref_spans": [],
295
+ "eq_spans": [],
296
+ "section": "Stochastic Surface Realization",
297
+ "sec_num": "2"
298
+ },
299
+ {
300
+ "text": "Using the tagged utterances as described in the introduction, we built an unsmoothed n-gram language model for each utterance class. Tokens that belong in word classes (e.g., \"U.S. Airways\" in class \"airline\") were replaced by the word classes before building the language models. We selected 5 as the n in n-gram to introduce some variability in the output utterances while preventing nonsense utterances.",
301
+ "cite_spans": [],
302
+ "ref_spans": [],
303
+ "eq_spans": [],
304
+ "section": "Building Language Models",
305
+ "sec_num": "2.1"
306
+ },
307
+ {
308
+ "text": "Note that language models are not used here in the same way as in speech recognition. In speech recognition, the language model probability acts as a 'prior' in determining the most probable sequence of words given the acoustics. In other words, W* = arg max P(WIA) = arg max P(AI W)Pr(W)",
309
+ "cite_spans": [],
310
+ "ref_spans": [],
311
+ "eq_spans": [],
312
+ "section": "Building Language Models",
313
+ "sec_num": "2.1"
314
+ },
315
+ {
316
+ "text": "where W is the string of words, wl, ..., wn, and A is the acoustic evidence (Jelinek 1998).",
317
+ "cite_spans": [],
318
+ "ref_spans": [],
319
+ "eq_spans": [],
320
+ "section": "Building Language Models",
321
+ "sec_num": "2.1"
322
+ },
323
+ {
324
+ "text": "Although we use the same statistical tool, we compute and use the language model probability directly to predict the next word. In other words, the most likely utterance is W* = arg max P(WIu), where u is the utterance class. We do not, however, look for the most likely hypothesis, but rather generate each word randomly according to the distribution, as illustrated in the next section.",
325
+ "cite_spans": [],
326
+ "ref_spans": [],
327
+ "eq_spans": [],
328
+ "section": "Building Language Models",
329
+ "sec_num": "2.1"
330
+ },
331
+ {
332
+ "text": "The input to NLG from the dialogue manager is a frame of attribute-value pairs. The first two attribute-value pairs specify the utterance class. The rest of the frame contains word classes and their values. Figure 4 is an example of an input frame to NLG.",
333
+ "cite_spans": [],
334
+ "ref_spans": [
335
+ {
336
+ "start": 207,
337
+ "end": 215,
338
+ "text": "Figure 4",
339
+ "ref_id": null
340
+ }
341
+ ],
342
+ "eq_spans": [],
343
+ "section": "Generating Utterances",
344
+ "sec_num": "2.2"
345
+ },
346
+ {
347
+ "text": "act-query content depart_time depart_city New York arrive_city San Francisco depart_date 19991117 } Figure 4 : an input frame to NLG",
348
+ "cite_spans": [],
349
+ "ref_spans": [
350
+ {
351
+ "start": 100,
352
+ "end": 108,
353
+ "text": "Figure 4",
354
+ "ref_id": null
355
+ }
356
+ ],
357
+ "eq_spans": [],
358
+ "section": "Generating Utterances",
359
+ "sec_num": "2.2"
360
+ },
361
+ {
362
+ "text": "The generation engine uses the appropriate language model for the utterance class and generates word sequences randomly according to the language model distributions. As in speech recognition, the probability of a word using the n-gram language model is P(wi) = P(wilwi.1, wi.2 .... Wi.(n.1) , U)",
363
+ "cite_spans": [],
364
+ "ref_spans": [],
365
+ "eq_spans": [],
366
+ "section": "Generating Utterances",
367
+ "sec_num": "2.2"
368
+ },
369
+ {
370
+ "text": "where u is the utterance class. Since we have built separate models for each of the utterance classes, we can ignore u, and say that P(wi) = P(wilw|.l, wi-2 .... Wi.(n.1)) using the language model for u.",
371
+ "cite_spans": [],
372
+ "ref_spans": [],
373
+ "eq_spans": [],
374
+ "section": "Generating Utterances",
375
+ "sec_num": "2.2"
376
+ },
377
+ {
378
+ "text": "Since we use unsmoothed 5,grams, we will not generate any unseen 5-grams (or smaller ngrams at the beginning and end of an utterance). This precludes generation of nonsense utterances, at least within the 5-word window. Using a smoothed n-gram would result in more randomness, but using the conventional back-off methods (Jelinek 1998) , the probability mass assigned to unseen 5-grams would be very small, and those rare occurrences of unseen ngrams may not make sense anyway. There is the problem, as in speech recognition using n-gram language models, that long-distance dependency cannot be captured. ",
379
+ "cite_spans": [
380
+ {
381
+ "start": 321,
382
+ "end": 335,
383
+ "text": "(Jelinek 1998)",
384
+ "ref_id": null
385
+ }
386
+ ],
387
+ "ref_spans": [],
388
+ "eq_spans": [],
389
+ "section": "Generating Utterances",
390
+ "sec_num": "2.2"
391
+ },
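Word-by-word sampling from a per-utterance-class n-gram model can be sketched as follows. The model format (a mapping from (n-1)-word histories to word distributions) is an assumption made for the example, not the system's actual representation.

```python
import random

def generate(model, n=5, max_len=50, bos="<s>", eos="</s>"):
    """Sample one utterance from an unsmoothed n-gram model by drawing
    each word at random according to P(w | history)."""
    words = [bos] * (n - 1)
    while len(words) < max_len + (n - 1):
        history = tuple(words[-(n - 1):])
        dist = model.get(history)
        if not dist:                 # unseen history: unsmoothed model stops
            break
        word = random.choices(list(dist), weights=list(dist.values()))[0]
        if word == eos:
            break
        words.append(word)
    return " ".join(words[n - 1:])
```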
392
+ {
393
+ "text": "For each randomly generated utterance, we compute a penalty score. The score is based on the heuristics we've empirically selected. Various penalty scores are assigned for an utterance that 1. is too short or too long (determined by utterance-class dependent thresholds), 2. contains repetitions of any of the slots, 3. contains slots for which there is no valid value in the frame, or 4. does not have some required slots (see section 2 for deciding which slots are required).",
394
+ "cite_spans": [],
395
+ "ref_spans": [],
396
+ "eq_spans": [],
397
+ "section": "Scoring Utterances",
398
+ "sec_num": "2.3"
399
+ },
400
+ {
401
+ "text": "The generation engine generates a candidate utterance, scores it, keeping only the best-scored utterance up to that point. It stops and returns the best utterance when it finds an utterance with a zero penalty scoreTor runs out of time.",
402
+ "cite_spans": [],
403
+ "ref_spans": [],
404
+ "eq_spans": [],
405
+ "section": "Scoring Utterances",
406
+ "sec_num": "2.3"
407
+ },
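The four heuristics and the generate-and-score loop can be put together as in the sketch below. The penalty weights, length thresholds, slot syntax, and time limit are assumptions made for illustration only.

```python
import re
import time

def penalty(utterance, frame, required_slots, min_len=4, max_len=25):
    slots = re.findall(r"\{(\w+)\}", utterance)
    n_words = len(utterance.split())
    score = 0
    if not (min_len <= n_words <= max_len):                  # 1. too short / too long
        score += 10
    score += 10 * (len(slots) - len(set(slots)))             # 2. repeated slots
    score += 10 * sum(1 for s in slots if s not in frame)    # 3. slot without a value
    score += 10 * sum(1 for s in required_slots if s not in slots)  # 4. missing required slot
    return score

def realize(sample_utterance, frame, required_slots, timeout=0.2):
    best, best_score = None, float("inf")
    deadline = time.time() + timeout
    while time.time() < deadline:
        candidate = sample_utterance()
        s = penalty(candidate, frame, required_slots)
        if s < best_score:
            best, best_score = candidate, s
        if best_score == 0:          # stop early on a zero-penalty utterance
            break
    return best
```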
408
+ {
409
+ "text": "The last step is filling slots with the appropriate values. For example, the utterance \"What time would you like to leave {depart_city}?\" becomes \"What time would you like to leave New York?\".",
410
+ "cite_spans": [],
411
+ "ref_spans": [],
412
+ "eq_spans": [],
413
+ "section": "Filling Slots",
414
+ "sec_num": "2.4"
415
+ },
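The slot-filling step itself is a straightforward substitution; a minimal sketch (helper name assumed):

```python
def fill_slots(template, frame):
    """Replace {slot} markers with values from the input frame."""
    for slot, value in frame.items():
        template = template.replace("{" + slot + "}", str(value))
    return template

fill_slots("What time would you like to leave {depart_city}?",
           {"depart_city": "New York"})
```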
416
+ {
417
+ "text": "It is generally difficult to empirically evaluate a generation system. In the context of spoken dialogue systems, evaluation of NLG becomes an even more difficult problem. One reason is simply that there has been very little effort in building generation engines for spoken dialogue systems. Another reason is that it is hard to separate NLG from the rest of the system. It is especially hard to separate evaluation of language generation and speech synthesis. As a simple solution, we have conducted a comparative evaluation by running two identical systems varying only the generation component. In this section we present results from two preliminary evaluations of our generation algorithms described in the previous sections.",
418
+ "cite_spans": [],
419
+ "ref_spans": [],
420
+ "eq_spans": [],
421
+ "section": "Evaluation",
422
+ "sec_num": "3"
423
+ },
424
+ {
425
+ "text": "For the content planning part of the generation -system, we conducted a comparative evaluation of the two different generation algorithms: old/new and bigrams. Twelve subjects had two dialogues each, one with the old/new generation system, and another with the bigrams generation system (in counterbalanced order); all other modules were held fixed. Afterwards, each subject answered seven questions on a usability survey. Immediately after, each subject was given transcribed logs of his/her dialogues and asked to rate each system utterance on a scale of 1 to 3 (1 = good; 2 = okay; 3 = bad).",
426
+ "cite_spans": [],
427
+ "ref_spans": [],
428
+ "eq_spans": [],
429
+ "section": "Content Planning: Experiment",
430
+ "sec_num": "3.1"
431
+ },
432
+ {
433
+ "text": "For the usability survey, the results seem to indicate subjects' preference for the old/new system, but the difference is not statistically significant (p -0.06). However, six out of the twelve subjects chose the bigram system to the question \"Durqng-the session, which system's responses were easier to understand?\" compared to three subjects choosing the old/new system.",
434
+ "cite_spans": [],
435
+ "ref_spans": [],
436
+ "eq_spans": [],
437
+ "section": "Content Planning: Results",
438
+ "sec_num": "3.2"
439
+ },
440
+ {
441
+ "text": "For surface realization, we conducted a batchmode evaluation. We picked six recent calls to our system and ran two generation algorithms (template-based generation and stochastic generation) on the input frames. We then presented to seven subjects the generated dialogues, consisting of decoder output of the user utterances and corresponding system responses, for each of the two generation algorithms. Subjects then selected the output utterance they would prefer, for each of the utterances that differ between the two systems. The results show a trend that subjects preferred stochastic generation over template-based generation, but a t-test shows no significant difference (p = 0.18). We are in the process of designing a larger evaluation.",
442
+ "cite_spans": [],
443
+ "ref_spans": [],
444
+ "eq_spans": [],
445
+ "section": "Surface Realization: Experiment",
446
+ "sec_num": "3.3"
447
+ },
448
+ {
449
+ "text": "We have presented a new approach to language generation for spoken dialogue systems. For content planning, we built a simple bigram model of attributes, and found that, in our first implementation, it performs as well as a heuristic of old vs. new information. For surface realization, we used an n-gram language model to stochastically generate each utterance and found that the stochastic system performs at least as well as the template-based system. Our stochastic generation system has several advantages. One of those, an important issue for spoken dialogue systems, is the response time. With stochastic surface realization, the average generation time for the longest utterance class (10 -20 words long) is about 200 milliseconds, which is much faster than any rule-based systems. Another advantage is that by using a corpus-based approach, we are directly mimicking the language of a real domain expert, rather than attempting to model it by rule. Corpus collection is usually the first step in building a dialogue system, so we are leveraging the effort rather than creating more work. This also means adapting this approach to new domains and even new languages will be relatively simple.",
450
+ "cite_spans": [],
451
+ "ref_spans": [],
452
+ "eq_spans": [],
453
+ "section": "Conclusion",
454
+ "sec_num": "4"
455
+ },
456
+ {
457
+ "text": "The approach we present does require some amount of knowledge engineering, though this appears to overlap with work needed for other parts of the dialogue system. First, defining the class of utterance and the attribute-value pairs requires care. Second, tagging the human-human corpus with the right classes and attributes requires effort. However, we believe the tagging effort is much less difficult than knowledge acquisition for most rule-based systems or even template-based systems. Finally, what may sound right for a human speaker may sound awkward for a computer, but we believe that mimicking a human, especially a domain expert, is the best we can do, at least for now.",
458
+ "cite_spans": [],
459
+ "ref_spans": [],
460
+ "eq_spans": [],
461
+ "section": "Conclusion",
462
+ "sec_num": "4"
463
+ }
464
+ ],
465
+ "back_matter": [
466
+ {
467
+ "text": "We are thankful for significant contribution by other members of the CMU Communicator Project, especially Eric Thayer, Wei Xu, and Rande Shern. We would like to thank the subjects who participated in our evaluations. We also extend our thanks to two anonymous reviewers.",
468
+ "cite_spans": [],
469
+ "ref_spans": [],
470
+ "eq_spans": [],
471
+ "section": "Acknowledgements",
472
+ "sec_num": null
473
+ }
474
+ ],
475
+ "bib_entries": {
476
+ "BIBREF0": {
477
+ "ref_id": "b0",
478
+ "title": "From full generation to 'near-templates' without losing generality",
479
+ "authors": [
480
+ {
481
+ "first": "J",
482
+ "middle": [],
483
+ "last": "Bateman",
484
+ "suffix": ""
485
+ },
486
+ {
487
+ "first": "R",
488
+ "middle": [],
489
+ "last": "Henschel",
490
+ "suffix": ""
491
+ }
492
+ ],
493
+ "year": 1999,
494
+ "venue": "Proceedings of the KI'99 workshop",
495
+ "volume": "",
496
+ "issue": "",
497
+ "pages": "",
498
+ "other_ids": {},
499
+ "num": null,
500
+ "urls": [],
501
+ "raw_text": "Bateman, J. and Henschel, R. (1999) From full generation to 'near-templates' without losing generality. In Proceedings of the KI'99 workshop, \"May I Speak Freely?\"",
502
+ "links": null
503
+ },
504
+ "BIBREF1": {
505
+ "ref_id": "b1",
506
+ "title": "A flexible shallow approach to text generation",
507
+ "authors": [
508
+ {
509
+ "first": "S",
510
+ "middle": [],
511
+ "last": "Busemann",
512
+ "suffix": ""
513
+ },
514
+ {
515
+ "first": "H",
516
+ "middle": [],
517
+ "last": "Horacek",
518
+ "suffix": ""
519
+ }
520
+ ],
521
+ "year": 1998,
522
+ "venue": "Proceedings of the International Natural Language Generation Workshop",
523
+ "volume": "",
524
+ "issue": "",
525
+ "pages": "",
526
+ "other_ids": {},
527
+ "num": null,
528
+ "urls": [],
529
+ "raw_text": "Busemann, S. and Horacek, H. (1998) A flexible shallow approach to text generation. In Proceedings of the International Natural Language Generation Workshop. Niagara-on-the-Lake, Canada..",
530
+ "links": null
531
+ }
532
+ },
533
+ "ref_entries": {
534
+ "FIGREF0": {
535
+ "uris": null,
536
+ "type_str": "figure",
537
+ "text": ": word classes Another corpus (henceforth, the SRI corpus) consists of 68 dialogues between a travel agent and users in the SRI community (Kowtko and Price 1989).",
538
+ "num": null
539
+ },
540
+ "FIGREF1": {
541
+ "uris": null,
542
+ "type_str": "figure",
543
+ "text": "arg max ~ P(bk)P(A I bk) k=l",
544
+ "num": null
545
+ },
546
+ "FIGREF2": {
547
+ "uris": null,
548
+ "type_str": "figure",
549
+ "text": "m tl A*. = arg max ~ P(bk ) ['I P(al I bk) k=l i=1",
550
+ "num": null
551
+ },
552
+ "FIGREF3": {
553
+ "uris": null,
554
+ "type_str": "figure",
555
+ "text": "",
556
+ "num": null
557
+ }
558
+ }
559
+ }
560
+ }
Full_text_JSON/prefixW/json/W00/W00-0307.json ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0307",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:48.500407Z"
6
+ },
7
+ "title": "",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0307",
15
+ "_pdf_hash": "",
16
+ "abstract": [],
17
+ "body_text": [
18
+ {
19
+ "text": "gency dispatcher, cooperating with the system to dynamically allocate resources to and make plans for solving problems as they arise in the world. The setting, Monroe County, NY, is considerably more complex than our previous domains (e.g. Pacifica, TRAINS) , and raises new issues in knowledge representation and reference. Emergencies include requests for medical assistance, car accidents, civil disorder, and larger problems such as flooding and snow storms. Resources at the user's disposal may include road crews, electric crews, ambulances, police units and helicopters. Some of the increase in mixed-initiative interaction comes from givi-n~ the_ system more knowledge of the tasks being solved. Some comes from the fact that the solution to one problem may conflict with the solution to another, either because of scheduling conflicts, scarce resources, or aspects of the physical world (e.g. an ambulance can't go down a road that has not been plowed). The range of tasks and complexity of the world allow for problem solving at different levels of granularity, making it possible for the system to take as much control over the task as the user permits.",
20
+ "cite_spans": [
21
+ {
22
+ "start": 234,
23
+ "end": 257,
24
+ "text": "(e.g. Pacifica, TRAINS)",
25
+ "ref_id": null
26
+ }
27
+ ],
28
+ "ref_spans": [],
29
+ "eq_spans": [],
30
+ "section": "",
31
+ "sec_num": null
32
+ },
33
+ {
34
+ "text": "While a number of robust dialogue systems have been built in recent years, they mostly have operated in domains that require little if any reasoning. Rather, the task is hard-coded into the system operation. One of the major goals of the TRIPS project has been to develop dialogue models and system architectures that support conversational interaction in domains where complex reasoning systems are required. One goal has been to build a fairly generic model in which different domains can then be specified fairly easily. On this front, we are seeing some success as we have now constructed versions of TRIPS in three different domains, and TRIPS\u00b0 911 will be the fourth. In developing the system for new domains, the bulk of the work by far has been in system enhancements rather than in developing the domain models.",
35
+ "cite_spans": [],
36
+ "ref_spans": [],
37
+ "eq_spans": [],
38
+ "section": "I m p o r t a n t C o n t r i b u t i o n s",
39
+ "sec_num": "4."
40
+ },
41
+ {
42
+ "text": "The TRIPS-911 domain has forced a rethinking of the relationship between dialoguemanagement, problem-solving, the system's own goal-pursuit and generation. The new architecture is designed to support research into mixed-initiative interactions, incremental generation of content (in which the user might intervene before the system completes all it has to say), rich reference resolution models, and the introduction of plan monitoring and plan repair into the suite of plan management operations supported. The domain also can support longer and richer dialogues than in previous domains. More complex domains mean even more complex dialogues. The complexity arises from many factors. First, more complex dialogues will involve topic progression, development and resumption, and more complex referential phenomena. On the problem solving front, there will be more complex corrections, elaborations and modifications--forcing us to develop richer discourse models. In addition, the complexity of the domain demonstrates a need for better grounding behavior and a need for incremental dialogue-based generation.",
43
+ "cite_spans": [],
44
+ "ref_spans": [],
45
+ "eq_spans": [],
46
+ "section": "I m p o r t a n t C o n t r i b u t i o n s",
47
+ "sec_num": "4."
48
+ },
49
+ {
50
+ "text": "We have by no means solved these problems. Rather we have built a rich testbed, designed and implemented a plausible architecture, and have constructed an initial system to demonstrate basic capabilities in each of the problem areas.",
51
+ "cite_spans": [],
52
+ "ref_spans": [],
53
+ "eq_spans": [],
54
+ "section": "I m p o r t a n t C o n t r i b u t i o n s",
55
+ "sec_num": "4."
56
+ },
57
+ {
58
+ "text": "TRIPS-911 is a first attempt at handling a domain of this complexity. As such there are many capabilities that people have in such situations that are beyond the system's current capabilities. Some of the most important are: \u2022 Scale -we can only handle small domains and the existing techniques would not extend directly to a realistic size 911 operation.",
59
+ "cite_spans": [],
60
+ "ref_spans": [],
61
+ "eq_spans": [],
62
+ "section": "Limitations",
63
+ "sec_num": "5."
64
+ },
65
+ {
66
+ "text": "To scale up we must face some difficult problems including reasoning about quantities and aggregates, planning in large-scale domains (i.e., the real domains are beyond the capabilities of current plan technology), and performing intention recognition as the number of options increases. In addition, for an effective dialogue system, all this must be done in real-time. \u2022 Meta-talk -when faced with complex problems, people often first generally discuss the problem and possible strategies for solving it, and later may explicitly direct attention to specific subproblems. The current TRIPS system does not support such discussion. \u2022 Time -in the 911 domain there are at least two temporal contexts that can be \"used\" by the conversants: there is the actual time (i.e., when they are talking), but there also is the time relative to a point of focus in a plan, or even simply talking about the past or the future. TRIPS-911 can currently interpret expressions with respect to the actual time. \u2022 Interleaved generation -when people are discussing complex issues, they often have to plan to communicate their content across several different utterances. There is no guarantee that the other conversant will not \"interrupt\" (e.g., to clarify, correct, suggest alternatives, etc.) before the entire content is conveyed. This requires a rethinking of current practice in generation to make it incremental and interactive. \u2022 True interruptions -people may interrupt the system while it is talking. It is unclear at this stage what the system should assume was conveyed. The strategies of assuming nothing was conveyed, or that all was conveyed have obvious faults. We are pursuing alternatives based on knowing when speech was interrupted, but using this information successfully remains a difficult problem.",
67
+ "cite_spans": [],
68
+ "ref_spans": [],
69
+ "eq_spans": [],
70
+ "section": "Limitations",
71
+ "sec_num": "5."
72
+ }
73
+ ],
74
+ "back_matter": [],
75
+ "bib_entries": {
76
+ "BIBREF0": {
77
+ "ref_id": "b0",
78
+ "title": "An Architecture for a Generic Dialogue Shell, to appear",
79
+ "authors": [
80
+ {
81
+ "first": "James",
82
+ "middle": [],
83
+ "last": "Allen",
84
+ "suffix": ""
85
+ }
86
+ ],
87
+ "year": 2000,
88
+ "venue": "J. Natural Language Engineering",
89
+ "volume": "",
90
+ "issue": "",
91
+ "pages": "",
92
+ "other_ids": {},
93
+ "num": null,
94
+ "urls": [],
95
+ "raw_text": "Allen, James et al, An Architecture for a Generic Dialogue Shell, to appear, J. Natural Language Engineering, 2000.",
96
+ "links": null
97
+ },
98
+ "BIBREF1": {
99
+ "ref_id": "b1",
100
+ "title": "TRIPS: An Integrated Intelligent Problem-Solving Assistant",
101
+ "authors": [
102
+ {
103
+ "first": "George",
104
+ "middle": [],
105
+ "last": "Ferguson",
106
+ "suffix": ""
107
+ },
108
+ {
109
+ "first": "J",
110
+ "middle": [],
111
+ "last": "Allen",
112
+ "suffix": ""
113
+ }
114
+ ],
115
+ "year": 1998,
116
+ "venue": "Proc. National Conference on AI (AAAI-98)",
117
+ "volume": "",
118
+ "issue": "",
119
+ "pages": "",
120
+ "other_ids": {},
121
+ "num": null,
122
+ "urls": [],
123
+ "raw_text": "Ferguson, George and J. Allen,-TRIPS: An Integrated Intelligent Problem-Solving Assistant, Proc. Na- tional Conference on AI (AAAI-98), Madison, WI, 1998.",
124
+ "links": null
125
+ }
126
+ },
127
+ "ref_entries": {
128
+ "FIGREF0": {
129
+ "text": "Monroe County map used in TRIPS-911",
130
+ "uris": null,
131
+ "type_str": "figure",
132
+ "num": null
133
+ }
134
+ }
135
+ }
136
+ }
Full_text_JSON/prefixW/json/W00/W00-0308.json ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0308",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:34.921180Z"
6
+ },
7
+ "title": "Epiphenomenal Grammar Acquisition with GSG",
8
+ "authors": [
9
+ {
10
+ "first": "Marsal",
11
+ "middle": [],
12
+ "last": "Gavald\u00e0",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ }
17
+ ],
18
+ "year": "",
19
+ "venue": null,
20
+ "identifiers": {},
21
+ "abstract": "As a step toward conversational systems that allow for a more natural human-computer interaction, we report on GSG, a system that, while providing a natural-language interface to a variety of applications, engages in clarification dialogues with the end user through which new semantic mappings are dynamically acquired. GSG exploits task- and language-dependent information but is fully task- and language-independent in its architecture and strategies.",
22
+ "pdf_parse": {
23
+ "paper_id": "W00-0308",
24
+ "_pdf_hash": "",
25
+ "abstract": [
26
+ {
27
+ "text": "As a step toward conversational systems that allow for a more natural human-computer interaction, we report on GSG, a system that, while providing a natural-language interface to a variety of applications, engages in clarification dialogues with the end user through which new semantic mappings are dynamically acquired. GSG exploits task- and language-dependent information but is fully task- and language-independent in its architecture and strategies.",
28
+ "cite_spans": [],
29
+ "ref_spans": [],
30
+ "eq_spans": [],
31
+ "section": "Abstract",
32
+ "sec_num": null
33
+ }
34
+ ],
35
+ "body_text": [
36
+ {
37
+ "text": "As conversational systems move from the realm of science fiction and research labs into people's everyday life, and as they evolve from the plain, system-directed interactions \u00e0 la \"press or say one\" of so-called interactive voice response systems based on isolated-word recognizers and fixed-menu navigation, to the more open, mixed-initiative dialogues carried out in spoken dialogue systems based on large-vocabulary continuous speech recognizers and flexible dialogue managers (see, e.g., (Allen et al., 1996; Denecke, 1997; Walker et al., 1998; Rudnicky et al., 1999; Zue et al., 2000) ), the overall experiential quality of the human-computer interaction becomes increasingly important. That is, beyond the obvious factors of speech recognition accuracy and speech synthesis naturalness, the most critical challenge is that of providing conversational interactions that feel natural to human users (cf. (Glass, 1999) ). This, we believe, mainly translates into building systems that possess some degree of linguistic, reasoning, and learning abilities.",
38
+ "cite_spans": [
39
+ {
40
+ "start": 489,
41
+ "end": 509,
42
+ "text": "(Allen et al., 1996;",
43
+ "ref_id": "BIBREF0"
44
+ },
45
+ {
46
+ "start": 510,
47
+ "end": 524,
48
+ "text": "Denecke, 1997;",
49
+ "ref_id": "BIBREF2"
50
+ },
51
+ {
52
+ "start": 525,
53
+ "end": 545,
54
+ "text": "Walker et al., 1998;",
55
+ "ref_id": "BIBREF7"
56
+ },
57
+ {
58
+ "start": 546,
59
+ "end": 568,
60
+ "text": "Rudnicky et al., 1999;",
61
+ "ref_id": "BIBREF6"
62
+ },
63
+ {
64
+ "start": 569,
65
+ "end": 586,
66
+ "text": "Zue et al., 2000)",
67
+ "ref_id": "BIBREF8"
68
+ },
69
+ {
70
+ "start": 905,
71
+ "end": 918,
72
+ "text": "(Glass, 1999)",
73
+ "ref_id": "BIBREF4"
74
+ }
75
+ ],
76
+ "ref_spans": [],
77
+ "eq_spans": [],
78
+ "section": "Introduction",
79
+ "sec_num": "1"
80
+ },
81
+ {
82
+ "text": "In this paper we report on GSG, a conversational system that partially addresses these issues by being able to dynamically extend its linguistic knowledge through simple, natural-language-only interactions with non-expert users: on a purely on-need basis, i.e., when the system does not understand what the user means, GSG makes educated guesses, poses confirmation and clarification questions, and learns new semantic mappings from the answers given by the users, as well as from other linguistic information that they may volunteer. GSG provides, therefore, an extremely robust interface, and, at the same time, significantly reduces grammar development time because the original grammar, while complete with respect to the semantic representation of the domain at hand, need only cover a small portion of the surface variability, since it will be automatically extended as an epiphenomenon of engaging in clarification dialogues with end users.",
83
+ "cite_spans": [],
84
+ "ref_spans": [],
85
+ "eq_spans": [],
86
+ "section": "Introduction",
87
+ "sec_num": "1"
88
+ },
89
+ {
90
+ "text": "As sketched in Figure 1 , GSG is a conversational system built around the SOUP parser (Gavald\u00e0, 2000).",
91
+ "cite_spans": [
92
+ {
93
+ "start": 88,
94
+ "end": 102,
95
+ "text": "(Gavald\u00e0, 2000",
96
+ "ref_id": "BIBREF3"
97
+ }
98
+ ],
99
+ "ref_spans": [
100
+ {
101
+ "start": 15,
102
+ "end": 23,
103
+ "text": "Figure 1",
104
+ "ref_id": "FIGREF0"
105
+ }
106
+ ],
107
+ "eq_spans": [],
108
+ "section": "Brief System Description",
109
+ "sec_num": "2"
110
+ },
111
+ {
112
+ "text": "GSG's principal (and possibly sole) knowledge source is a task-dependent, semantic context-free grammar (the Kernel Grammar). At run-time, the Grammar is initialized as the union of the Kernel Grammar and, possibly, the User Grammar (user-dependent rules learned in previous sessions). The Grammar gives rise to the Ontology and to a parsebank (collection of parse trees), which, together with a possible Kernel Parsebank, becomes the Parsebank, from which the statistical Prediction Models are trained. The Ontology is a directed acyclic graph automatically derived from the Grammar in which the nodes correspond to grammar nonterminals (NTs) and the arcs record the immediate dominance relation, i.e., the presence of, say, NTi in a right-hand side (RHS) alternative of NTj will result in an arc from NTi to NTj. Nodes are annotated as being \"Principal\" vs. \"Auxiliary\" (via naming convention), \"Top-level\" vs. \"Non-top level\" (i.e., whether they are starting symbols of the grammar), and with having \"Only NT daughters\" vs. \"Only T daughters\" vs. \"Mixed\"; arcs are annotated as being \"Is-a\" (estimated from being the only non-optional NT in a RHS alternative) vs. \"Expresses\" links, \"Always-required\" vs. \"Always-optional\" vs. \"Mixed,\" and \"Never-repeatable\" vs.",
113
+ "cite_spans": [],
114
+ "ref_spans": [],
115
+ "eq_spans": [],
116
+ "section": "Brief System Description",
117
+ "sec_num": "2"
118
+ },
119
+ {
120
+ "text": "In the work reported here, GSG's interactions are text-based (keyboard as input, text window as output), but GSG is being integrated with both a speech recognizer and a speech synthesizer. \"Always-repeatable\" vs. \"Mixed\". Also, a topological sort on the nodes is computed to derive a general-to-specific partial order of the NTs.",
121
+ "cite_spans": [],
122
+ "ref_spans": [],
123
+ "eq_spans": [],
124
+ "section": "Brief System Description",
125
+ "sec_num": "2"
126
+ },
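The preceding entries describe how GSG derives its Ontology (a DAG over grammar nonterminals, with an arc from each NT to the NTs whose RHS alternatives it appears in) and a general-to-specific ordering of NTs via a topological sort. The following is a minimal sketch of that idea, not GSG's actual code: the toy rules, the `build_ontology` name, and the '*'/'+' handling are illustrative assumptions.

```python
# Minimal sketch (assumed names and toy rules, not GSG's actual code) of the
# idea described above: derive an "ontology" DAG from a semantic CFG, where
# an arc NTi -> NTj is recorded whenever NTi appears in some RHS alternative
# of NTj, then order the NTs general-to-specific (grammar must be acyclic).
from collections import defaultdict
from graphlib import TopologicalSorter  # Python 3.9+

# Toy grammar: LHS nonterminal -> list of RHS alternatives (token lists).
# '*' marks an optional token and '+' a repeatable one.
GRAMMAR = {
    "[listMail]": [["LIST", "+MAIL_ARGUMENTS"]],
    "MAIL_ARGUMENTS": [["*SENDER", "*DATE_AFTER"]],
    "SENDER": [["from", "PERSON"]],
    "PERSON": [["bob"], ["mary"]],
    "DATE_AFTER": [["DATE_AFTER_PRE", "[dateAfter]"]],
    "DATE_AFTER_PRE": [["after"]],
    "[dateAfter]": [["DATE_RELATIVE"]],
    "DATE_RELATIVE": [["yesterday"], ["today"], ["tomorrow"]],
    "LIST": [["list"], ["show"]],
}

def strip_marks(token):
    return token.lstrip("*+")

def build_ontology(grammar):
    """Arcs daughter -> {mothers}: the immediate dominance relation."""
    arcs = defaultdict(set)
    for mother, alternatives in grammar.items():
        for rhs in alternatives:
            for token in rhs:
                name = strip_marks(token)
                if name in grammar:          # a nonterminal, not a terminal
                    arcs[name].add(mother)
    return arcs

arcs = build_ontology(GRAMMAR)
# Each NT's predecessors are its mothers, so more general NTs come first.
order = list(TopologicalSorter(arcs).static_order())
print(order)  # e.g. ['[listMail]', 'LIST', 'MAIL_ARGUMENTS', 'SENDER', ...]
```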
127
+ {
128
+ "text": "A full system description is beyond the scope of this paper, but, very briefly, the User Interface mediates all interactions with the end-user, the stack-based Dialogue Manager keeps track of current and past utterances and ensuing clarification dialogues, and, together with the History Interaction, ensures that no answered question is asked again. The GSG Engine manages the core of the system's \"intelligence,\" namely hypothesizing interpretations (together with the Parse Tree Builder) and on-line learning of semantic mappings.",
129
+ "cite_spans": [],
130
+ "ref_spans": [],
131
+ "eq_spans": [],
132
+ "section": "Brief System Description",
133
+ "sec_num": "2"
134
+ },
135
+ {
136
+ "text": "To illustrate the workings of GSG, let's analyze an example interaction in an e-mail client task. Figure 2 shows the example dialogue, Figure 3 presents a relevant fragment of the semantic context-free grammar used to analyze the input, and Table 1 lists the main prediction and learning strategies employed. (Footnote 2: this requires that the grammar be acyclic. Footnote 3: the grammar contains, approximately, 300 NTs, 500 Ts, and 800 RHS alternatives, out of which about 55% is dedicated to covering temporal and numeric expressions, and digits and letters.)",
137
+ "cite_spans": [],
138
+ "ref_spans": [
139
+ {
140
+ "start": 98,
141
+ "end": 106,
142
+ "text": "Figure 2",
143
+ "ref_id": null
144
+ },
145
+ {
146
+ "start": 135,
147
+ "end": 143,
148
+ "text": "Figure 3",
149
+ "ref_id": null
150
+ },
151
+ {
152
+ "start": 243,
153
+ "end": 256,
154
+ "text": "Table 1 above",
155
+ "ref_id": null
156
+ }
157
+ ],
158
+ "eq_spans": [],
159
+ "section": "Example Dialogue",
160
+ "sec_num": "3"
161
+ },
162
+ {
163
+ "text": "The user inputs (U1) and, as it turns out, neither retrieve, nor bob, nor christmas is in the grammar and no full parse of (U1) can be found. Rather than responding with an \"I don't understand --please rephrase\", GSG sets its predictive machinery in motion with the goal of constructing a full interpretation for (U1).4",
164
+ "cite_spans": [],
165
+ "ref_spans": [],
166
+ "eq_spans": [],
167
+ "section": "Example Dialogue",
168
+ "sec_num": "3"
169
+ },
170
+ {
171
+ "text": "The first step is to reparse (U1) in a mode in which all NTs are considered top-level, i.e., able to stand at the root of a parse tree. This produces a set of still unparsed words, such as retrieve, and parse subtrees, such as from bob being parsed under SENDER (via grammar rules (R6) to (R10)) and, ambiguously, under [sourceFolder] as well (via grammar rules (R11) and (R12)). All of it (i.e., the full content of the chart) is taken as evidence by the Prediction Models to postulate the overall meaning of the original utterance.",
172
+ "cite_spans": [
173
+ {
174
+ "start": 320,
175
+ "end": 334,
176
+ "text": "[sourceFolder]",
177
+ "ref_id": null
178
+ }
179
+ ],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "Example Dialogue",
183
+ "sec_num": "3"
184
+ },
185
+ {
186
+ "text": "4. The hypothesized interpretation assumes that a single parse tree will cover the entire problematic sentence.",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "Example Dialogue",
191
+ "sec_num": "3"
192
+ },
193
+ {
194
+ "text": "(U9) (U10) (U11) (U12) (U1)",
195
+ "cite_spans": [],
196
+ "ref_spans": [],
197
+ "eq_spans": [],
198
+ "section": "Example Dialogue",
199
+ "sec_num": "3"
200
+ },
201
+ {
202
+ "text": "(U1) U: retrieve all messages from bob that were sent after christmas (U2) S: I don't understand right away what you mean but let me guess... \"retrieve all messages from bob that were sent after christmas\" is a way to express: 1. send mail 2. reply mail 3. read mail 0. None of the above (U3) U: none (U4) S: \"retrieve all messages from bob that were sent after christmas\" is a way to express: 1. move mail 2. list mail 0. None of the above (U5) U: list (U6) S: By \"from bob\" do you mean: 1. source folder 2. sender (U7) U: sender (U8) S: \"christmas\" is a way to express: 1. yesterday (date relative yesterday) 2. tomorrow (date relative tomorrow) 3. today (date relative today) 0. None of the above (U9) U: by christmas i'm referring to december twenty fifth (U10) S: Thanks for teaching me the meaning of \"christmas\"! Thanks for teaching me the meaning of \"retrieve all messages from bob that were sent after christmas\"! I understand \"retrieve all messages from bob that were sent after december twenty fifth\" (U11) U: retrieve last email to mary before christmas (U12) S: I understand \"retrieve last email to mary before december twenty fifth\" Figure 2 : Example dialogue between a user (U) and the system (S) on an e-mail client task. In this case (see (U2) to (U5)) the suggestions of the Prediction Models are not particularly accurate (the correct choice is presented only in fifth place), but, considering that the head verb (retrieve) is not even in the grammar, such a response to (U1) is definitely better than giving up. The effect of (U5) is to select [listMail] as (U1)'s \"anchor mother\" (logical root of the overall interpretation). But to complete the parse tree a few details still need to be filled in. To that effect (U6) is generated to disambiguate from bob and (U8) to find the right mapping for christmas. The reasoning behind the rather puzzling choices offered by (U8) comes from applying the Parser Predictions strategy: given the context in which an unparsed sequence (in this case, a single word) christmas appears, i.e., the subtree DATE_AFTER_PRE covering after (via (R14)), the grammar is traversed to find likely continuations of the context (left context only in this case). Since DATE_AFTER_PRE can be immediately followed by [dateAfter] (see (R13)), that makes [dateAfter] a candidate to cover the unparsed sequence christmas. (Footnote 5: the options presented in (U2) and (U4) are generated at the same time; the only reason why they are split is to prevent overwhelming the end user, who may be hearing the choices spoken over the telephone. Also, note that in (U3) the user could have also said zero, or none of the above, and achieve the same result --or, alternatively, they could have volunteered information as in (U9). Footnote 7: obviously \"the meaning of Christmas\" (cf. cheerful (U10)) may be much more profound than a shorthand for December 25 --but, alas, conveying that is well beyond the simple grammar presented here.)",
203
+ "cite_spans": [
204
+ {
205
+ "start": 1473,
206
+ "end": 1483,
207
+ "text": "[listMail]",
208
+ "ref_id": null
209
+ },
210
+ {
211
+ "start": 2170,
212
+ "end": 2181,
213
+ "text": "[datehfter]",
214
+ "ref_id": null
215
+ }
216
+ ],
217
+ "ref_spans": [
218
+ {
219
+ "start": 1121,
220
+ "end": 1129,
221
+ "text": "Figure 2",
222
+ "ref_id": null
223
+ }
224
+ ],
225
+ "eq_spans": [],
226
+ "section": "Example Dialogue",
227
+ "sec_num": "3"
228
+ },
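The Parser Predictions strategy described in the entry above (given the NT covering the left context of an unparsed word, traverse the grammar for NTs that can immediately follow it) can be approximated by a simple scan over RHS alternatives. The sketch below is only an illustration of that reading; the function name and toy rules are assumptions, not the SOUP/GSG API.

```python
# Illustrative sketch (assumed names, not the actual SOUP/GSG code) of the
# "Parser Predictions" idea: given the NT that covers the left context of an
# unparsed word (here DATE_AFTER_PRE covering "after"), scan every RHS
# alternative for NTs that may immediately follow it.
def continuations(grammar, left_context_nt):
    candidates = set()
    for lhs, alternatives in grammar.items():
        for rhs in alternatives:
            names = [token.lstrip("*+") for token in rhs]
            for i in range(len(names) - 1):
                if names[i] == left_context_nt:
                    candidates.add(names[i + 1])
    return candidates

toy_grammar = {
    "DATE_AFTER": [["DATE_AFTER_PRE", "[dateAfter]"]],
    "DATE_AFTER_PRE": [["after"], ["later", "than"]],
}
print(continuations(toy_grammar, "DATE_AFTER_PRE"))  # {'[dateAfter]'}
```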
229
+ {
230
+ "text": "A \"verbness\" ratio is automatically computed for each candidate NT by running the POS tagger on automatically generated sentences from the NTs in question. Figure 3 : Grammar fragment for an e-mail client task. '*' indicates optionality of the adjacent token, '+' repeatability, and '|' separates RHS alternatives. Terminals are italicized. WILDCARD is a special NT that matches any out-of-vocabulary word or any in-vocabulary word present in a list for that purpose.",
231
+ "cite_spans": [],
232
+ "ref_spans": [
233
+ {
234
+ "start": 158,
235
+ "end": 166,
236
+ "text": "Figure 3",
237
+ "ref_id": null
238
+ }
239
+ ],
240
+ "eq_spans": [],
241
+ "section": "Example Dialogue",
242
+ "sec_num": "3"
243
+ },
244
+ {
245
+ "text": "The result was highly positive and led to the acquisition of the RHS alternative (M1). It is worth mentioning here that there are two kinds of mappings that GSG learns: RHS alternatives and subtree mappings. Learning new RHS alternatives is the preferred way because the knowledge can be incorporated into the Parsebank (and, in turn, into the Prediction Models). That is the effect of adding (M1) to the Grammar: since the Parsebank and the Prediction Models are updated on-line, the presence of the word retrieve in subsequent utterances becomes a strong indicator of LIST and, associatively, of [listMail]. However, when the source expression cannot be mapped into the desired target structure via grammar rules, as in (M2), the only solution is to remember the equivalence. This kind of learning, although definitely useful since the meaning of the source expression will be henceforth remembered, cannot be incorporated into the Prediction Models.",
246
+ "cite_spans": [
247
+ {
248
+ "start": 582,
249
+ "end": 620,
250
+ "text": "LIST and, associatively, of [listMa\u00b1l]",
251
+ "ref_id": null
252
+ }
253
+ ],
254
+ "ref_spans": [],
255
+ "eq_spans": [],
256
+ "section": "Example Dialogue",
257
+ "sec_num": "3"
258
+ },
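The entry above distinguishes the two kinds of mappings GSG acquires: new RHS alternatives (preferred, because they feed the Parsebank and Prediction Models) and remembered subtree mappings (a fallback when no grammar rule can produce the source expression). The snippet below is only a schematic illustration of that distinction; the class and method names, the trivial subsumption test, and the [dateAbsolute] label are assumptions, not GSG's implementation (the paper's real checks parse RHS paths, as its later footnotes explain).

```python
# Schematic illustration only (assumed names, not GSG's implementation) of
# the two kinds of acquired mappings described above.
class MappingStore:
    def __init__(self, grammar):
        self.grammar = grammar            # NT -> list of RHS alternatives
        self.subtree_mappings = {}        # surface string -> stored subtree

    def learn_rhs_alternative(self, lhs, rhs):
        """Preferred: extend the grammar itself, so the Parsebank and the
        Prediction Models can absorb the new rule (cf. (M1))."""
        alternatives = self.grammar.setdefault(lhs, [])
        if rhs in alternatives:           # trivially subsumed: discard it
            return False
        alternatives.append(rhs)
        return True

    def learn_subtree_mapping(self, surface, subtree):
        """Fallback: just remember the equivalence when the expression is
        not derivable via grammar rules (cf. (M2))."""
        self.subtree_mappings[surface] = subtree

store = MappingStore({"LIST": [["list"], ["show"]]})
store.learn_rhs_alternative("LIST", ["retrieve"])
# "[dateAbsolute]" is a hypothetical NT name used only for this example.
store.learn_subtree_mapping(
    "christmas", ("[dateAbsolute]", ("month", "december"), ("day", 25)))
```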
259
+ {
260
+ "text": "Right after (U9), (U1) is considered fully understood and the interpretation is automatically mapped into the feature structure (FS1) 9 in Figure 5 , which is then shipped to the Back-end Application Manager.",
261
+ "cite_spans": [],
262
+ "ref_spans": [
263
+ {
264
+ "start": 139,
265
+ "end": 147,
266
+ "text": "Figure 5",
267
+ "ref_id": null
268
+ }
269
+ ],
270
+ "eq_spans": [],
271
+ "section": "Example Dialogue",
272
+ "sec_num": "3"
273
+ },
274
+ {
275
+ "text": "Finally, when (U11) comes in, a correct analysis is produced thanks to the mappings just learned from (U1),10 and (FS2) in Figure 5 is generated. (The POS tagger is a modified version of Brill's tagger (Brill 1994).) 9. The mapping is simply a removal of auxiliary NTs from the parse tree, plus value extraction of dates, numbers and strings from certain subtrees, e.g., the subtree in (M2) becomes the substructure under datePoint in (FS1).",
276
+ "cite_spans": [
277
+ {
278
+ "start": 173,
279
+ "end": 187,
280
+ "text": "(Brill 1994).)",
281
+ "ref_id": "BIBREF1"
282
+ }
283
+ ],
284
+ "ref_spans": [],
285
+ "eq_spans": [],
286
+ "section": "Example Dialogue",
287
+ "sec_num": "3"
288
+ },
289
+ {
290
+ "text": "10. Note that rule [listMail] \u2192 LIST +MAIL_ARGUMENTS (extracted from the final interpretation of (U1)) would have been learned too, but its subsumption by existing rule (R1) was automatically detected.",
291
+ "cite_spans": [],
292
+ "ref_spans": [],
293
+ "eq_spans": [],
294
+ "section": "Example Dialogue",
295
+ "sec_num": "3"
296
+ },
297
+ {
298
+ "text": "The example above illustrates the philosophy of GSG,11 namely, to exploit task and linguistic knowledge to pose clarification questions in the face of incomplete analyses,12 build correct interpretations, and acquire new semantic mappings. Thus, a contribution of GSG is the demonstration that from a simple context-free grammar, with a very lightweight formalism, one can extract enough information (Ontology, Parsebank, Parser Predictions strategy) to conduct meaningful clarification dialogues. Note, moreover, that such dialogues occur entirely within GSG, with the Back-end Application Manager receiving only finalized feature structures.13 Another advantage is the ease with which natural-language interfaces can be constructed for new domains: since all the task and linguistic knowledge is extracted from the grammar,14 one need only develop a Kernel Grammar that models the domain at",
299
+ "cite_spans": [],
300
+ "ref_spans": [],
301
+ "eq_spans": [],
302
+ "section": "Discussion",
303
+ "sec_num": "4"
304
+ },
305
+ {
306
+ "text": "11. Based on the pioneering work of (Lehman, 1989). 12. Detected by a lack of interpretation, excessively fragmented interpretation, or by being told by the end user that the automatically generated paraphrase of their input is not what they meant.",
307
+ "cite_spans": [
308
+ {
309
+ "start": 33,
310
+ "end": 47,
311
+ "text": "(Lehman, 1989)",
312
+ "ref_id": "BIBREF5"
313
+ }
314
+ ],
315
+ "ref_spans": [],
316
+ "eq_spans": [],
317
+ "section": "Discussion",
318
+ "sec_num": "4"
319
+ },
320
+ {
321
+ "text": "13. Of course, prediction accuracy can improve if the Back-end Application Manager can be incorporated as a knowledge source to, for example, contribute to the ranking of hypotheses, but the point is that it is not necessary and that, as long as the capabilities of the back-end application are adequately modeled by the Grammar, the construction of the correct interpretation can be performed within GSG alone.",
322
+ "cite_spans": [],
323
+ "ref_spans": [],
324
+ "eq_spans": [],
325
+ "section": "Discussion",
326
+ "sec_num": "4"
327
+ },
328
+ {
329
+ "text": "14. Except for the POS Tagger and the Syntactic Grammar. hand via its NTs (Footnote 15: knowledge of, e.g., how the Ontology is computed helps, but it coincides with the most natural way of writing well-structured, context-free semantic grammars) but need not provide a high coverage of the utterances possible in the domain (data which may not be available anyway). Also, reuse of existing grammar modules for, e.g., dates and numbers, is straightforward. However, a fear of letting the end user (indirectly) modify a grammar is that the grammar may grow untamed and become filled with new rules that disrupt the Kernel Grammar. To prevent that, besides the careful construction of interpretations via the strategies described above, GSG employs two safety mechanisms: before a rule is added to the grammar, it is checked whether it introduces ambiguity to the grammar,16 and whether it disrupts existing",
330
+ "cite_spans": [],
331
+ "ref_spans": [],
332
+ "eq_spans": [],
333
+ "section": "Discussion",
334
+ "sec_num": "4"
335
+ },
336
+ {
337
+ "text": "16. Accomplished by using the SOUP parser in yet another mode: parsing of RHSs (expanded to RHS paths) instead of terminals. In this case, existence of a parse tree covering an entire RHS path indicates ambiguity. Note that if all RHS paths of the new rule can be parsed under the current RHS of the new rule's left-hand side, then the new rule is subsumed by the existing RHS and can therefore be discarded (cf. note 10).",
338
+ "cite_spans": [],
339
+ "ref_spans": [],
340
+ "eq_spans": [],
341
+ "section": "Discussion",
342
+ "sec_num": "4"
343
+ },
344
+ {
345
+ "text": "(correct) interpretations.17 In this way, some of the new rules may have to be discarded, but at least the health of the grammar is preserved.18 Another concern may be that the new mappings end up generating feature structures that are not understood by the Back-end Application Manager. To avoid that, GSG only allows a principal NT to be dominated by another principal NT if such a dominance relation is licensed by the Kernel Grammar. This guarantees that all resulting feature structures are structurally correct (although they may contain unexpected atomic values).",
346
+ "cite_spans": [],
347
+ "ref_spans": [],
348
+ "eq_spans": [],
349
+ "section": "Discussion",
350
+ "sec_num": "4"
351
+ },
352
+ {
353
+ "text": "A current limitation of GSG lies in the difficulty of segmenting long sequences of unparsed words: GSG uses POS tagging followed by noun-phrase bracketing (via parsing with a shallow Syntactic Grammar), which represents an improvement over the Single Segment Assumption (cf. (Lehman, 1989)), but is still far from perfect and can disrupt the ensuing clarification dialogue. Also, the number of questions that the system can pose as it builds an interpretation (Footnote 17: achieved by reparsing (a subset of) the Parsebank. Note that SOUP can typically parse in the order of 100 utterances per second (cf. (Gavald\u00e0, 2000)))",
354
+ "cite_spans": [
355
+ {
356
+ "start": 275,
357
+ "end": 289,
358
+ "text": "(Lehman, 1989)",
359
+ "ref_id": "BIBREF5"
360
+ },
361
+ {
362
+ "start": 585,
363
+ "end": 605,
364
+ "text": "(cf. (Gavaldb. 2000)",
365
+ "ref_id": null
366
+ }
367
+ ],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "Discussion",
371
+ "sec_num": "4"
372
+ },
373
+ {
374
+ "text": "18. Assuming minimally cooperative and consistent users.",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "Discussion",
379
+ "sec_num": "4"
380
+ },
381
+ {
382
+ "text": "may, on occasion, exceed the patience of the end user (but the command cancel is always understood).",
383
+ "cite_spans": [],
384
+ "ref_spans": [],
385
+ "eq_spans": [],
386
+ "section": "Discussion",
387
+ "sec_num": "4"
388
+ },
389
+ {
390
+ "text": "The hardest problem we have encountered so far is typical of natural-language interfaces but is exacerbated in GSG (as it treats every unparsable sentence as an opportunity to learn), and that is the difficulty of identifying in-domain end-user sentences that go beyond the capabilities of the end application, or, in other words, are not expressible in the grammar.",
391
+ "cite_spans": [],
392
+ "ref_spans": [],
393
+ "eq_spans": [],
394
+ "section": "Discussion",
395
+ "sec_num": "4"
396
+ },
397
+ {
398
+ "text": "Finally, as GSG becomes fully integrated with a speech recognizer, it remains to be seen how an optimal point in the tradeoff between the wide coverage but relatively low word recognition accuracy obtained with a loose dictation grammar, and the narrow coverage but high word accuracy achieved with a tight, task-dependent grammar, can be found, and how the degradation of the input is going to affect GSG's behavior.",
399
+ "cite_spans": [],
400
+ "ref_spans": [],
401
+ "eq_spans": [],
402
+ "section": "Discussion",
403
+ "sec_num": "4"
404
+ },
405
+ {
406
+ "text": "Overall, however, we believe that GSG, by virtue of its built-in robustness, minimal initial knowledge requirements, and learning abilities, begins to embody the kind of qualities that are necessary for conversational systems, if they are to provide, without exorbitant development effort, an interaction that feels truly natural to humans.",
407
+ "cite_spans": [],
408
+ "ref_spans": [],
409
+ "eq_spans": [],
410
+ "section": "Discussion",
411
+ "sec_num": "4"
412
+ }
413
+ ],
414
+ "back_matter": [],
415
+ "bib_entries": {
416
+ "BIBREF0": {
417
+ "ref_id": "b0",
418
+ "title": "Robust Understanding in a Dialogue System",
419
+ "authors": [
420
+ {
421
+ "first": "James",
422
+ "middle": [],
423
+ "last": "Allen",
424
+ "suffix": ""
425
+ }
426
+ ],
427
+ "year": 1996,
428
+ "venue": "Proceedings of ACL-1996",
429
+ "volume": "",
430
+ "issue": "",
431
+ "pages": "",
432
+ "other_ids": {},
433
+ "num": null,
434
+ "urls": [],
435
+ "raw_text": "Allen, James, et al. (1996). Robust Understanding in a Dialogue System. In Proceedings o] A CL- 1996.",
436
+ "links": null
437
+ },
438
+ "BIBREF1": {
439
+ "ref_id": "b1",
440
+ "title": "Some Advances in Part of Speech Tagging",
441
+ "authors": [
442
+ {
443
+ "first": "Eric",
444
+ "middle": [],
445
+ "last": "Brill",
446
+ "suffix": ""
447
+ }
448
+ ],
449
+ "year": 1994,
450
+ "venue": "Proceedings of AAAI-1994",
451
+ "volume": "",
452
+ "issue": "",
453
+ "pages": "",
454
+ "other_ids": {},
455
+ "num": null,
456
+ "urls": [],
457
+ "raw_text": "Brill, Eric. (1994). Some Advances in Part of Speech Tagging. In Proceedings o] AAAI-1994.",
458
+ "links": null
459
+ },
460
+ "BIBREF2": {
461
+ "ref_id": "b2",
462
+ "title": "An Information-based Approach for Guiding Multi-modal Human-Computer Interaction",
463
+ "authors": [
464
+ {
465
+ "first": "Matthias",
466
+ "middle": [],
467
+ "last": "Denecke",
468
+ "suffix": ""
469
+ }
470
+ ],
471
+ "year": 1997,
472
+ "venue": "Proceedings of IJCAI-1997",
473
+ "volume": "",
474
+ "issue": "",
475
+ "pages": "",
476
+ "other_ids": {},
477
+ "num": null,
478
+ "urls": [],
479
+ "raw_text": "Denecke, Matthias. (1997). An Information- based Approach for Guiding Multi-modal Human- Computer Interaction. In .Proceedings of IJCAI- 199Z",
480
+ "links": null
481
+ },
482
+ "BIBREF3": {
483
+ "ref_id": "b3",
484
+ "title": "SOUP: A Parser for Real-world Spontaneous Speech",
485
+ "authors": [
486
+ {
487
+ "first": "Marsal",
488
+ "middle": [],
489
+ "last": "Gavald~",
490
+ "suffix": ""
491
+ }
492
+ ],
493
+ "year": 2000,
494
+ "venue": "Proceedings of the Sixth International Workshop on Parsing Technologies",
495
+ "volume": "",
496
+ "issue": "",
497
+ "pages": "",
498
+ "other_ids": {},
499
+ "num": null,
500
+ "urls": [],
501
+ "raw_text": "Gavald~, Marsal. (2000). SouP: A Parser for Real- world Spontaneous Speech. In Proceedings o] the Sixth International Workshop on Parsing Tech- nologies (IWPT-2000).",
502
+ "links": null
503
+ },
504
+ "BIBREF4": {
505
+ "ref_id": "b4",
506
+ "title": "Challenges for Spoken Dialogue Systems",
507
+ "authors": [
508
+ {
509
+ "first": "James",
510
+ "middle": [],
511
+ "last": "Glass",
512
+ "suffix": ""
513
+ }
514
+ ],
515
+ "year": 1999,
516
+ "venue": "Proceedings of the 1999 IEEE ASRU Workshop",
517
+ "volume": "",
518
+ "issue": "",
519
+ "pages": "",
520
+ "other_ids": {},
521
+ "num": null,
522
+ "urls": [],
523
+ "raw_text": "Glass, James. (1999). Challenges for Spoken Dia- logue Systems. In Proceedings of the 1999 IEEE ASRU Workshop.",
524
+ "links": null
525
+ },
526
+ "BIBREF5": {
527
+ "ref_id": "b5",
528
+ "title": "Adaptive Parsing: Sellextending Natural Language Interfaces",
529
+ "authors": [
530
+ {
531
+ "first": "Jill",
532
+ "middle": [],
533
+ "last": "Lehman",
534
+ "suffix": ""
535
+ }
536
+ ],
537
+ "year": 1989,
538
+ "venue": "",
539
+ "volume": "",
540
+ "issue": "",
541
+ "pages": "",
542
+ "other_ids": {},
543
+ "num": null,
544
+ "urls": [],
545
+ "raw_text": "Lehman, Jill. (1989). Adaptive Parsing: Sell- extending Natural Language Interfaces. Ph.D. dis- sertation, School of Computer Science, Carnegie Mellon University.",
546
+ "links": null
547
+ },
548
+ "BIBREF6": {
549
+ "ref_id": "b6",
550
+ "title": "Creating Natural Dialogs in the Carnegie Mellon COMMUNICATOR System",
551
+ "authors": [
552
+ {
553
+ "first": "Alex",
554
+ "middle": [],
555
+ "last": "Rudnicky",
556
+ "suffix": ""
557
+ }
558
+ ],
559
+ "year": 1999,
560
+ "venue": "Proceedings of Eurospeech-1999",
561
+ "volume": "",
562
+ "issue": "",
563
+ "pages": "",
564
+ "other_ids": {},
565
+ "num": null,
566
+ "urls": [],
567
+ "raw_text": "Rudnicky, Alex, et al. (1999). Creating Natural Dialogs in the Carnegie Mellon COMMUNICATOR System. In Proceedings o] Eurospeech-1999.",
568
+ "links": null
569
+ },
570
+ "BIBREF7": {
571
+ "ref_id": "b7",
572
+ "title": "Learning Optimal Dialogue Strategies: A Case Study of a Spoken Dialogue Agent for Email",
573
+ "authors": [
574
+ {
575
+ "first": "Marilyn",
576
+ "middle": [],
577
+ "last": "Walker",
578
+ "suffix": ""
579
+ }
580
+ ],
581
+ "year": 1998,
582
+ "venue": "Proceedings of COLING/ACL-1998",
583
+ "volume": "",
584
+ "issue": "",
585
+ "pages": "",
586
+ "other_ids": {},
587
+ "num": null,
588
+ "urls": [],
589
+ "raw_text": "Walker, Marilyn, et al. (1998). Learning Optimal Dialogue Strategies: A Case Study of a Spo- ken Dialogue Agent for Email. In Proceedings of COLING/A CL-i998.",
590
+ "links": null
591
+ },
592
+ "BIBREF8": {
593
+ "ref_id": "b8",
594
+ "title": "JUPITER: A Telephone-Based Conversational Interface for Weather Information",
595
+ "authors": [
596
+ {
597
+ "first": "",
598
+ "middle": [],
599
+ "last": "Zue",
600
+ "suffix": ""
601
+ },
602
+ {
603
+ "first": "",
604
+ "middle": [],
605
+ "last": "Victor",
606
+ "suffix": ""
607
+ }
608
+ ],
609
+ "year": 2000,
610
+ "venue": "IEEE Transactions on Speech and Audio Processing",
611
+ "volume": "8",
612
+ "issue": "",
613
+ "pages": "",
614
+ "other_ids": {},
615
+ "num": null,
616
+ "urls": [],
617
+ "raw_text": "Zue, Victor, et al. (2000). JUPITER: A Telephone- Based Conversational Interface for Weather Infor- mation. In IEEE Transactions on Speech and Au- dio Processing, Vol. 8 , No. 1.",
618
+ "links": null
619
+ }
620
+ },
621
+ "ref_entries": {
622
+ "FIGREF0": {
623
+ "text": "GSG's system diagram. Ovals enclose knowledge sources, rectangles modules, and arrows indicate information flow. Dashed components are optional.",
624
+ "num": null,
625
+ "uris": null,
626
+ "type_str": "figure"
627
+ },
628
+ "FIGREF2": {
629
+ "text": "Mappings learned from the dialogue in Figure 2. Feature structures sent to the Back-end Application Manager after (U10) and (U12) in Figure 2.",
630
+ "num": null,
631
+ "uris": null,
632
+ "type_str": "figure"
633
+ },
634
+ "TABREF1": {
635
+ "text": "However, since, according to the Ontology, [dateAfter] does not allow terminals as immediate daughters, a search is performed to find NTs under [dateAfter] that permit it. In this case (via (R15) to (R19)) it suggests yesterday, tomorrow, etc.6 The user, though, realizing that the system does not directly understand christmas, volunteers (U9),7 from which the mapping (M2) in Figure 4 is learned. At this point one may wonder about the fate of the unparsed word retrieve, since no question was asked about it. The answer is that GSG need not ask about every single prediction, if the confidence value is high enough. In this case, as soon as [listMail] was established (in (U5)) as the anchor mother, a Verbal Head Search strategy was launched to see whether, among the unparsed words, a verb was found that could be placed in a mostly-verb NT8 directly under [listMail]. (Footnote 6: in fact it suggests [DATE_RELATIVE:yesterday], [DATE_RELATIVE:tomorrow], etc., but it presents an example automatically generated from such NTs.)",
636
+ "num": null,
637
+ "content": "<table/>",
638
+ "html": null,
639
+ "type_str": "table"
640
+ }
641
+ }
642
+ }
643
+ }
Full_text_JSON/prefixW/json/W00/W00-0309.json ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0309",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:47.839236Z"
6
+ },
7
+ "title": "Task-based dialog management using an agenda",
8
+ "authors": [
9
+ {
10
+ "first": "Wei",
11
+ "middle": [],
12
+ "last": "Xu",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "Carnegie Mellon University",
17
+ "location": {
18
+ "addrLine": "5000 Forbes Ave -Pittsburgh",
19
+ "postCode": "15213",
20
+ "region": "PA"
21
+ }
22
+ },
23
+ "email": ""
24
+ },
25
+ {
26
+ "first": "Alexander",
27
+ "middle": [
28
+ "I"
29
+ ],
30
+ "last": "Rudnicky",
31
+ "suffix": "",
32
+ "affiliation": {
33
+ "laboratory": "",
34
+ "institution": "Carnegie Mellon University",
35
+ "location": {
36
+ "addrLine": "5000 Forbes Ave -Pittsburgh",
37
+ "postCode": "15213",
38
+ "region": "PA"
39
+ }
40
+ },
41
+ "email": ""
42
+ }
43
+ ],
44
+ "year": "",
45
+ "venue": null,
46
+ "identifiers": {},
47
+ "abstract": "Dialog management addresses two specific problems: (1) providing a coherent overall structure to interaction that extends beyond the single turn, (2) correctly managing mixed-initiative interaction. We propose a dialog management architecture based on the following elements: handlers that manage interaction focussed on tightly coupled sets of information, a product that reflects mutually agreed-upon information and an agenda that orders the topics relevant to task completion.",
48
+ "pdf_parse": {
49
+ "paper_id": "W00-0309",
50
+ "_pdf_hash": "",
51
+ "abstract": [
52
+ {
53
+ "text": "Dialog management addresses two specific problems: (1) providing a coherent overall structure to interaction that extends beyond the single turn, (2) correctly managing mixed-initiative interaction. We propose a dialog management architecture based on the following elements: handlers that manage interaction focussed on tightly coupled sets of information, a product that reflects mutually agreed-upon information and an agenda that orders the topics relevant to task completion.",
54
+ "cite_spans": [],
55
+ "ref_spans": [],
56
+ "eq_spans": [],
57
+ "section": "Abstract",
58
+ "sec_num": null
59
+ }
60
+ ],
61
+ "body_text": [
62
+ {
63
+ "text": "Spoken language interaction can take many forms. Even fairly simple interaction can be very useful, for example in auto-attendant systems. For many other applications, however, more complex interactions seem necessary, either because users cannot always be expected to exactly specify what they want in a single utterance (e.g., obtaining schedule information) or because the task at hand requires some degree of exploration of complex alternatives (e.g., travel planning). Additionally, unpredictable complexity is introduced through error or misunderstanding and the system needs to detect and deal with these cases. We are interested in managing interaction in the context of a goal-oriented task that extends over multiple turns.",
64
+ "cite_spans": [],
65
+ "ref_spans": [],
66
+ "eq_spans": [],
67
+ "section": "Introduction",
68
+ "sec_num": "1"
69
+ },
70
+ {
71
+ "text": "Dialog management in the context of purposeful tasks must solve two problems: (1) Keep track of the overall interaction with a view to ensuring steady progress towards task completion. That is, the system must have some idea of how much of the task has been completed and more importantly some idea of what is yet to be done, so that it can participate in the setting of intermediate goals and generally shepherd the interaction towards a successful completion of the task at hand. (2) Robustly handle deviations from the nominal progression towards problem solution. Deviations are varied: the user may ask for something that is not satisfiable (i. e., proposes a set of mutually-incompatible constraints), the user may misspeak (or, more likely, the system may misunderstand) a request and perhaps cause an unintended (and maybe unnoticed) deviation from the task. The user might also underspecify a request while the system requires that a single solution be chosen. Finally the user's conception of the task might deviate from the system's (and its developers) conception, requiring the system to alter the order in which it expects to perform the task. Ideally, a robust dialog management architecture can accommodate all of these circumstances within a single framework.",
72
+ "cite_spans": [
73
+ {
74
+ "start": 482,
75
+ "end": 485,
76
+ "text": "(2)",
77
+ "ref_id": "BIBREF1"
78
+ }
79
+ ],
80
+ "ref_spans": [],
81
+ "eq_spans": [],
82
+ "section": "Introduction",
83
+ "sec_num": "1"
84
+ },
85
+ {
86
+ "text": "We have been exploring dialog management issues in the context of the Communicator [3] task. The Communicator handles a complex travel task, consisting of air travel, hotels and car reservations.",
87
+ "cite_spans": [
88
+ {
89
+ "start": 83,
90
+ "end": 86,
91
+ "text": "[3]",
92
+ "ref_id": "BIBREF2"
93
+ }
94
+ ],
95
+ "ref_spans": [],
96
+ "eq_spans": [],
97
+ "section": "Introduction",
98
+ "sec_num": "1"
99
+ },
100
+ {
101
+ "text": "Existing approaches to dialog management are difficult to adapt to the current problem, either because they impose a rigid structure on the interaction or because they are not capable of managing data structures beyond a certain level of complexity. Call-flow based systems (more generally, graph-based systems) handle the complexity of dialog management by explicitly enumerating all possible dialog states, as well as allowable transitions between states. This serves the purpose of partitioning the problem into a finite set of states, with which can be associated topic-specific elements (such as grammar, prompts, help and interactions with other system components, e.g., database interaction). Transition between states is predicated on the occurrence of specific events, either the user's spoken inputs or through (e.g.) a change in backend state. It is the nature of these systems that the graphs are often but not exclusively trees. Except for the simplest tasks, graph systems have several limitations: unless the graph is carefully designed, users will find themselves unable to switch to a topic that is coded in a different sub-tree without going through the common parent of the two. Often this is through the root node of the dialog. Similarly it is not always possible to navigate an existing tree, in order, e.g., to correct information supplied in an earlier node.",
102
+ "cite_spans": [],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "Modeling Dialog",
106
+ "sec_num": "2"
107
+ },
108
+ {
109
+ "text": "beforehand the exact type of trip an individual might take (though the building blocks of an itinerary are indeed known). The system benefits from being able to construct the itinerary dynamically; we denote these solution objects products. Users also expect to be able to manipulate and inspect the itinerary under construction. By contrast, frame systems do not afford the user the ability to manipulate the form, past supplying fillers for slots. The exception is the selection of an item from a solution set. We do not abandon the concept of a form altogether: an itinerary is actually a hierarchical composition of forms, where the forms in this case correspond to tightly-bound slots (e.g., those corresponding to the constraints on a particular flight leg) and which can be treated as part of the same topic.",
110
+ "cite_spans": [],
111
+ "ref_spans": [],
112
+ "eq_spans": [],
113
+ "section": "Modeling Dialog",
114
+ "sec_num": "2"
115
+ },
116
+ {
117
+ "text": "Frame-based systems provide an alternate, more flexible approach. Here the problem is cast as form filling: the form specifies all relevant information (slots) for an action. Dialog management consists of monitoring the form for completion, setting elements as these are specified by the user and using the presence of empty slots as a trigger for questions to the user. Form-filling does away with the need to specify a particular order in which slots need to be filled and allows for a more natural, unconstrained, form of input. While ideally suited for tasks that can be expressed in terms of filling a single form, form-filling can be combined with graph representations (typically ergodic) to support a set of (possibly) related activities, each of which can be cast into a form-filling format.",
118
+ "cite_spans": [],
119
+ "ref_spans": [],
120
+ "eq_spans": [],
121
+ "section": "Modeling Dialog",
122
+ "sec_num": "2"
123
+ },
124
+ {
125
+ "text": "Both graph and frame systems share the property that the task usually has a fixed goal which is achieved by having the user specify information (fill slots) on successive turns. Using a filled out form the system performs some action, such as information retrieval. While this capability encompasses a large number of useful applications it does not necessarily extend to more complex tasks, for example ones where the goal is to create a complex data object (e.g. [1] ). We have been building a system that allows users to construct travel itineraries. This domain poses several problems: there is no \"form\" as such to fill out, since we do not know",
126
+ "cite_spans": [
127
+ {
128
+ "start": 465,
129
+ "end": 468,
130
+ "text": "[1]",
131
+ "ref_id": "BIBREF0"
132
+ }
133
+ ],
134
+ "ref_spans": [],
135
+ "eq_spans": [],
136
+ "section": "Modeling Dialog",
137
+ "sec_num": "2"
138
+ },
139
+ {
140
+ "text": "Intuitively (as well as evident from our empirical studies of human travel agents and clients) travel planning develops over time as a succession of episodes, each focused on a specific topic (such as a given flight leg, a hotel in a particular city, etc.). Users treat the task as a succession of topics, each of which ought to be discussed in full and closed, before moving on to the next topic. Topics can certainly be revisited, but doing so corresponds to an explicit conversational move on the part of the participants.",
141
+ "cite_spans": [],
142
+ "ref_spans": [],
143
+ "eq_spans": [],
144
+ "section": "Task Structure and Scripts",
145
+ "sec_num": "3"
146
+ },
147
+ {
148
+ "text": "Our first dialog manager took advantage of this task structure ( [3] ). By analogy to what we observed in the human-human data we refer to it as a script-based dialog manager. Script in this context simply refers to an explicit sequencing of task-related topics. Each topic is expressed as a form-filling task, with conventional free-order input allowed for form slots and a slot-state driven mixed-initiative interaction (i.e., ask the user about any empty slot). The topic-specific form is actually composed of two parts: constraint slots (typically corresponding to elements of a query) and a solution slot (containing the result of an executed query).",
149
+ "cite_spans": [
150
+ {
151
+ "start": 65,
152
+ "end": 68,
153
+ "text": "[3]",
154
+ "ref_id": "BIBREF2"
155
+ }
156
+ ],
157
+ "ref_spans": [],
158
+ "eq_spans": [],
159
+ "section": "Task Structure and Scripts",
160
+ "sec_num": "3"
161
+ },
162
+ {
163
+ "text": "The control strategy is also actually more complex: slots are pre-ordered based on their (domain-derived) ability to constrain the solution; this ordering provides a default sequence in which the system selects elements to ask the user about. Control is predicated on the state of a slot (whether constraint or solution). The state can either be \"empty\", in which case the system should ask the user for a value, filled with a single value, in which case it is \"complete\", or filled with multiple values. The last case is cause to engage the user in a clarification sub-dialog whose goal is to reduce multiple values to a single value, either by selecting an item in the solution set or by restating a constraint. Figure 1 shows the structure of the Flight Leg topic in the script-based system.",
164
+ "cite_spans": [],
165
+ "ref_spans": [
166
+ {
167
+ "start": 714,
168
+ "end": 722,
169
+ "text": "Figure 1",
170
+ "ref_id": "FIGREF0"
171
+ }
172
+ ],
173
+ "eq_spans": [],
174
+ "section": "Task Structure and Scripts",
175
+ "sec_num": "3"
176
+ },
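The slot-state-driven control just described (slots pre-ordered by constraining power; an empty slot triggers a question, a multi-valued slot triggers a clarification sub-dialog) reduces to a small per-slot state check. Below is a minimal sketch of that control loop under the description above; the function name and the slot names are illustrative assumptions, not the Communicator's actual code.

```python
# Minimal sketch of the slot-state-driven control described above (function
# and slot names are illustrative, not the Communicator's actual code).
def next_action(slots):
    """slots: (name, values) pairs, pre-ordered by how strongly each slot
    constrains the solution; returns the next dialog move."""
    for name, values in slots:
        if len(values) == 0:
            return ("ask", name)       # empty slot: ask the user for a value
        if len(values) > 1:
            return ("clarify", name)   # several values: clarification sub-dialog
    return ("done", None)              # every slot holds exactly one value

flight_leg = [
    ("depart_city", ["Pittsburgh"]),
    ("arrive_city", ["Boston"]),
    ("depart_date", []),               # not yet specified by the user
    ("flight", []),                    # solution slot, filled by a DB query
]
print(next_action(flight_leg))         # ('ask', 'depart_date')
```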
177
+ {
178
+ "text": "While capable of efficiently handling routine travel arrangements, the script-based approach has a number of perceived limitations: the script is very closely identified with the product data structure. Specifically, we used a fixed product structure that served as a form to fill out. While the entire form does not need to be filled out to create a valid itinerary, it nevertheless set limits on what the user can construct. Instead we wanted a form structure that could be dynamically constructed over the course of a session, with contributions from both the user and the system. The script-based approach also seemed to make navigation over the product difficult. While we implemented a simple undo and correction mechanism that allowed the user to revisit preceding product elements, users had difficulty using it correctly. While some of the difficulty could be traced to inadequate support of orientation, the source was more likely the inability of the system to treat the product structure independent of the script.",
179
+ "cite_spans": [],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "An Agenda-based Architecture",
183
+ "sec_num": "4"
184
+ },
185
+ {
186
+ "text": "We sought to address these problems by introducing two new data structures: an agenda to replace a fixed script and a dynamic product that can evolve over the course of a session. In the agenda-based system, the product is represented as a tree, which reflects the natural hierarchy, and order, of the information needed to complete the task. A dynamic product is simply one that can be modified over the course of a session, for example by adding legs to a trip as these are requested by the user rather than working from a fixed form. Operationally, this means providing a set of operators over tree structures and making these available to the user and to the system. In our case, we defined a library of sub-trees (say air travel legs or local arrangements) and a way to attach these to the product structure, triggered either by the setting of particular values in the existing tree or through explicit requests on the part of the user (\"and then I'd like to fly to Chicago\").",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "An Agenda-based Architecture",
191
+ "sec_num": "4"
192
+ },
193
+ {
194
+ "text": "Each node in the product tree corresponds to a handler, which encapsulates computation relevant to a single information item. All handlers have the same form: they specify a set of receptors corresponding to input nets, a transform to be applied to obtain a value and a specification of what the system might say to the user in relation to the information governed by the handler. Handlers correspond to the schema and compound schema of the script-based system (see Figure 1 ).",
195
+ "cite_spans": [],
196
+ "ref_spans": [
197
+ {
198
+ "start": 467,
199
+ "end": 475,
200
+ "text": "Figure 1",
201
+ "ref_id": "FIGREF0"
202
+ }
203
+ ],
204
+ "eq_spans": [],
205
+ "section": "An Agenda-based Architecture",
206
+ "sec_num": "4"
207
+ },
208
+ {
209
+ "text": "The agenda is an ordered list of topics, represented by handlers that govern some single item or some collection of information. The agenda specifies the overall \"plan\" for carrying out a task. The system's priorities for action are captured by the agenda, an ordered list of handlers generated through traversal of the product structure. The handler on the top of the agenda has the highest priority and represents the focused topic. A handler can capture relevant input from the user and can generate prompts to the user. A single handler deals only with a mini-dialog centering on a particular piece of information (e.g. departure date). The agenda is a generalization of a stack. It indicates both the current focus of interaction (i.e., the top-most handler) as well as all undealt-with business, and captures the order in which such business should be dealt with. (The system's high-level goal is to ensure that all values in the current product tree have valid settings.) As all items in the agenda are potentially activatable through what the user speaks, the user has corresponding control over the topic in focus. The agenda also contains generic handlers that sort to the bottom of the agenda. These can be used to consume any inputs that are not caught by product-derived handlers (for example, requests for help). Figure 2 shows what a simple handler looks like. The order of the agenda is generated from the left-to-right, depth-first traversal of the product tree. When a user input comes in, the system calls each handler in agenda order and each handler will try to interpret the user input. We call the process of letting each handler consume user input an input pass. When a handler captures a single piece of information, the information is marked as consumed. This guarantees that a single information item can be consumed by only one handler. After an input pass, if the user's input does not directly cause a particular handler to generate a question, then the system will go through an output pass during which each handler will have a chance to generate a prompt about itself (e.g. the handler for departure date can ask the user for a departure date). The framework determines the next step from the return code of a handler: it can choose to continue the current pass, exit the input pass and switch to the output pass, exit the current pass and wait for input from the user, etc. During a pass, a handler can also declare itself as the focus through its return code. In this case, it will be promoted to the top of the agenda. In order to preserve the context of a specific topic, we use a method called sub-tree promotion. In this method, a handler is first promoted to the left-most node among its siblings. The system also handles the dependencies among the nodes of the product tree. (Figure caption: the change of the agenda along the session.) A typical dependent relationship is between a parent node and a child node. Usually, the value of a parent node is dependent on its children. Each node maintains a list of its dependent nodes and it will notify its dependents about any changes to its value. The dependent node can then declare itself invalid and therefore a candidate topic for conversation.",
210
+ "cite_spans": [],
211
+ "ref_spans": [
212
+ {
213
+ "start": 1328,
214
+ "end": 1336,
215
+ "text": "Figure 2",
216
+ "ref_id": "FIGREF1"
217
+ }
218
+ ],
219
+ "eq_spans": [],
220
+ "section": "An Agenda-based Architecture",
221
+ "sec_num": "4"
222
+ },
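The agenda/handler control loop described in the preceding passage can be sketched as follows. This is an illustrative reconstruction only: the class names, return codes, and prompt strings are hypothetical and do not come from the Communicator source.

# Illustrative sketch of the agenda/handler control loop (input pass / output pass).
# All names here are hypothetical stand-ins, not the actual Communicator code.

CONTINUE, FOCUS = "continue", "focus"

class Handler:
    """Governs a single information item (e.g. departure date)."""
    def __init__(self, name):
        self.name = name
        self.value = None
        self.dependents = []            # nodes whose values depend on this one

    def consume(self, user_input):
        """Try to capture relevant input; each item is consumed exactly once."""
        if self.name in user_input and user_input[self.name] is not None:
            self.value = user_input.pop(self.name)
            for dep in self.dependents:
                dep.value = None        # notify dependents: their values are now invalid
            return FOCUS
        return CONTINUE

    def generate_prompt(self):
        """Ask about this item if it still lacks a valid value."""
        return f"What {self.name} would you like?" if self.value is None else None

class Agenda:
    """Ordered list of handlers; the top handler is the current focus."""
    def __init__(self, handlers):
        self.handlers = list(handlers)  # left-to-right, depth-first product order

    def promote(self, handler):
        self.handlers.remove(handler)
        self.handlers.insert(0, handler)

    def input_pass(self, user_input):
        for h in list(self.handlers):
            if h.consume(user_input) == FOCUS:
                self.promote(h)         # simplified stand-in for sub-tree promotion

    def output_pass(self):
        for h in self.handlers:
            prompt = h.generate_prompt()
            if prompt:
                return prompt           # first unfilled item generates the question
        return "Is there anything else?"

# Example turn: the user supplies a destination, so the system asks about the date.
agenda = Agenda([Handler("destination"), Handler("departure date")])
agenda.input_pass({"destination": "Boston"})
print(agenda.output_pass())             # -> "What departure date would you like?"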
223
+ {
224
+ "text": "The dialog in figure 3, generated using the system, shows a number of features: the ability to absorb an implicit change of topic on the part of the user (A1-A3), adding to an existing itinerary (A8-A10) and handling an explicit topic shift (U11). Figure 2 and Figure 3 show how the product tree and agenda evolve over the course of the dialog",
225
+ "cite_spans": [],
226
+ "ref_spans": [
227
+ {
228
+ "start": 248,
229
+ "end": 256,
230
+ "text": "Figure 2",
231
+ "ref_id": "FIGREF1"
232
+ },
233
+ {
234
+ "start": 261,
235
+ "end": 269,
236
+ "text": "Figure 3",
237
+ "ref_id": "FIGREF2"
238
+ }
239
+ ],
240
+ "eq_spans": [],
241
+ "section": "AI",
242
+ "sec_num": null
243
+ },
244
+ {
245
+ "text": "The Communicator is telephone-based and is implemented as a modular distributed system, running across NT and Linux platforms. Currently the task is captured in an approximately 2500-word language based on corpora derived from human-human, Wizard of Oz and human-computer interaction in this domain. Domain information is obtained from various sources on the Web. The system has information about 500 destinations worldwide, though with a majority of these are in the United States. To date, we have collected approximately 6000 calls, from over 360 individuals.",
246
+ "cite_spans": [],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "System Implementation",
250
+ "sec_num": "5"
251
+ },
252
+ {
253
+ "text": "The agenda-based approach addresses the problem of dialog management in complex problem-solving tasks. It does so by treating the task at hand as one of cooperatively constructing a complex data structure, a product, and uses this structure to guide the task. The product consists of a tree of handlers, each handler encapsulates processing relevant to a particular schema. Handlers correspond to simple or compound schema, the latter acting essentially as multi-slOi=fofms. A handler encapsulates knowledge n~cessary for interacting about a specific information slot, including specification of user and system language and of interactions with domain agents. Handlers that deal with compound schema coordinate tightly bound schema and correspond to specific identifiable topics of conversation. We define tightly bound as those schema that users expect to discuss interchangeably, without explicit shifts in conversational focus.",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "Summary and Conclusions",
258
+ "sec_num": "6"
259
+ },
260
+ {
261
+ "text": "We believe that individual handlers can be authored independently of others at the same level of hierarchy; in turn we believe this will simplify the problem of developing dialog systems by managing the complexity of the process.",
262
+ "cite_spans": [],
263
+ "ref_spans": [],
264
+ "eq_spans": [],
265
+ "section": "Summary and Conclusions",
266
+ "sec_num": "6"
267
+ },
268
+ {
269
+ "text": "The agenda contains all topics relevant to the current task. The order of handlers on the agenda determines how user input will be will be attached to product nodes. Both the system and the user however have the ability to reorder items on the agenda, the system to foreground items that need to be discussed, the user to reflect their current priorities within the task. factored out as independent pro~esses.",
270
+ "cite_spans": [],
271
+ "ref_spans": [],
272
+ "eq_spans": [],
273
+ "section": "Summary and Conclusions",
274
+ "sec_num": "6"
275
+ },
276
+ {
277
+ "text": "We believe that the agenda mechanism can be adapted easily to less-complex domains that might currently be implemented as a standard form-based system (for example a movie schedule service). We do not know as yet how well the technique will succeed for domains of complexity comparable to travel planning but with different task structure.",
278
+ "cite_spans": [],
279
+ "ref_spans": [],
280
+ "eq_spans": [],
281
+ "section": "Summary and Conclusions",
282
+ "sec_num": "6"
283
+ },
284
+ {
285
+ "text": "The mechanisms described in this paper do not cover all necessary aspects of dialog management but do provide an overall control architecture.",
286
+ "cite_spans": [],
287
+ "ref_spans": [],
288
+ "eq_spans": [],
289
+ "section": "Summary and Conclusions",
290
+ "sec_num": "6"
291
+ },
292
+ {
293
+ "text": "For example, clarification processes, which involve possibly extended interaction with respect to the state of a value slot, fit into the confines of a single handler and are implemented as such. Ideally they could be",
294
+ "cite_spans": [],
295
+ "ref_spans": [],
296
+ "eq_spans": [],
297
+ "section": "Summary and Conclusions",
298
+ "sec_num": "6"
299
+ }
300
+ ],
301
+ "back_matter": [],
302
+ "bib_entries": {
303
+ "BIBREF0": {
304
+ "ref_id": "b0",
305
+ "title": "The TRAINS Project: A case study in building a conversational planning agent",
306
+ "authors": [
307
+ {
308
+ "first": "James",
309
+ "middle": [
310
+ "F"
311
+ ],
312
+ "last": "Allen",
313
+ "suffix": ""
314
+ },
315
+ {
316
+ "first": "Lenhart",
317
+ "middle": [
318
+ "K"
319
+ ],
320
+ "last": "Schubert",
321
+ "suffix": ""
322
+ },
323
+ {
324
+ "first": "George",
325
+ "middle": [],
326
+ "last": "Ferguson",
327
+ "suffix": ""
328
+ },
329
+ {
330
+ "first": "Peter",
331
+ "middle": [],
332
+ "last": "Heeman",
333
+ "suffix": ""
334
+ },
335
+ {
336
+ "first": "Chung",
337
+ "middle": [
338
+ "Hee"
339
+ ],
340
+ "last": "Hwang",
341
+ "suffix": ""
342
+ },
343
+ {
344
+ "first": "Tsuneaki",
345
+ "middle": [],
346
+ "last": "Kato",
347
+ "suffix": ""
348
+ },
349
+ {
350
+ "first": "Marc",
351
+ "middle": [],
352
+ "last": "Light",
353
+ "suffix": ""
354
+ },
355
+ {
356
+ "first": "Nathaniel",
357
+ "middle": [
358
+ "G"
359
+ ],
360
+ "last": "Martin",
361
+ "suffix": ""
362
+ },
363
+ {
364
+ "first": "Bradford",
365
+ "middle": [
366
+ "W"
367
+ ],
368
+ "last": "Miller",
369
+ "suffix": ""
370
+ },
371
+ {
372
+ "first": "Massimo",
373
+ "middle": [],
374
+ "last": "Poesio",
375
+ "suffix": ""
376
+ },
377
+ {
378
+ "first": "David",
379
+ "middle": [
380
+ "R"
381
+ ],
382
+ "last": "Traum",
383
+ "suffix": ""
384
+ }
385
+ ],
386
+ "year": null,
387
+ "venue": "Journal of Experimental and Theoretical AI",
388
+ "volume": "",
389
+ "issue": "7",
390
+ "pages": "7--48",
391
+ "other_ids": {},
392
+ "num": null,
393
+ "urls": [],
394
+ "raw_text": "James F. Allen, Lenhart K. Schubert, George Ferguson, Peter Heeman, Chung Hee Hwang, Tsuneaki Kato, Marc Light, Nathaniel G. Martin, Bradford W. Miller, Massimo Poesio, and David R. Traum, \"The TRAINS Project: A case study in building a conversational planning agent\" Journal of Experimental and Theoretical AI, 7(I 995), 7-48.",
395
+ "links": null
396
+ },
397
+ "BIBREF1": {
398
+ "ref_id": "b1",
399
+ "title": "New features for confidence annotation",
400
+ "authors": [
401
+ {
402
+ "first": "D",
403
+ "middle": [],
404
+ "last": "Bansal",
405
+ "suffix": ""
406
+ },
407
+ {
408
+ "first": "M",
409
+ "middle": [],
410
+ "last": "Ravishankar",
411
+ "suffix": ""
412
+ }
413
+ ],
414
+ "year": 1998,
415
+ "venue": "Proceedings of the 5th International Conference on Spoken Language Processing (ICSLP)",
416
+ "volume": "",
417
+ "issue": "",
418
+ "pages": "",
419
+ "other_ids": {},
420
+ "num": null,
421
+ "urls": [],
422
+ "raw_text": "Bansal, D. and Ravishankar, M. \"New features for confidence annotation\" In Proceedings of the 5th International Conference on Spoken Language Processing (ICSLP), December 1998, Sydney, Australia",
423
+ "links": null
424
+ },
425
+ "BIBREF2": {
426
+ "ref_id": "b2",
427
+ "title": "Creating natural dialogs in the Carnegie Mellon Communicator system",
428
+ "authors": [
429
+ {
430
+ "first": "A",
431
+ "middle": [],
432
+ "last": "Rudnicky",
433
+ "suffix": ""
434
+ },
435
+ {
436
+ "first": "E",
437
+ "middle": [],
438
+ "last": "Thayer",
439
+ "suffix": ""
440
+ },
441
+ {
442
+ "first": "P",
443
+ "middle": [],
444
+ "last": "Constantinides",
445
+ "suffix": ""
446
+ },
447
+ {
448
+ "first": "C",
449
+ "middle": [],
450
+ "last": "Tchou",
451
+ "suffix": ""
452
+ },
453
+ {
454
+ "first": "R",
455
+ "middle": [],
456
+ "last": "Shern",
457
+ "suffix": ""
458
+ },
459
+ {
460
+ "first": "K",
461
+ "middle": [],
462
+ "last": "Lenzo",
463
+ "suffix": ""
464
+ },
465
+ {
466
+ "first": "W",
467
+ "middle": [],
468
+ "last": "Xu",
469
+ "suffix": ""
470
+ },
471
+ {
472
+ "first": "A",
473
+ "middle": [],
474
+ "last": "Oh",
475
+ "suffix": ""
476
+ }
477
+ ],
478
+ "year": 1999,
479
+ "venue": "Proceedings of Eurospeech",
480
+ "volume": "",
481
+ "issue": "",
482
+ "pages": "",
483
+ "other_ids": {},
484
+ "num": null,
485
+ "urls": [],
486
+ "raw_text": "Rudnicky, A., Thayer, E., Constantinides, P., Tchou, C., Shern, R., Lenzo, K., Xu W., Oh, A. \"Creating natural dialogs in the Carnegie Mellon Communicator system\" Proceedings of Eurospeech, 1999, Paper r014.",
487
+ "links": null
488
+ },
489
+ "BIBREF3": {
490
+ "ref_id": "b3",
491
+ "title": "Recent improvements in the CMU spoken language understanding system",
492
+ "authors": [
493
+ {
494
+ "first": "W",
495
+ "middle": [],
496
+ "last": "Ward",
497
+ "suffix": ""
498
+ },
499
+ {
500
+ "first": "S",
501
+ "middle": [],
502
+ "last": "Issar",
503
+ "suffix": ""
504
+ }
505
+ ],
506
+ "year": 1994,
507
+ "venue": "Proceedings of the ARPA Human Language Technology Workshop",
508
+ "volume": "",
509
+ "issue": "",
510
+ "pages": "213--216",
511
+ "other_ids": {},
512
+ "num": null,
513
+ "urls": [],
514
+ "raw_text": "Ward, W. and Issar, S. \"Recent improvements in the CMU spoken language understanding system\" In Proceedings of the ARPA Human Language Technology Workshop, March 1994, 213-216.",
515
+ "links": null
516
+ }
517
+ },
518
+ "ref_entries": {
519
+ "FIGREF0": {
520
+ "uris": null,
521
+ "text": "Task-based dialog control in a script-based system, as determined by the structure of a compound schema, with contributions from three simple schema.",
522
+ "num": null,
523
+ "type_str": "figure"
524
+ },
525
+ "FIGREF1": {
526
+ "uris": null,
527
+ "text": "An example of handler",
528
+ "num": null,
529
+ "type_str": "figure"
530
+ },
531
+ "FIGREF2": {
532
+ "uris": null,
533
+ "text": "An example dialog",
534
+ "num": null,
535
+ "type_str": "figure"
536
+ }
537
+ }
538
+ }
539
+ }
Full_text_JSON/prefixW/json/W00/W00-0310.json ADDED
@@ -0,0 +1,1106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0310",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:20.217475Z"
6
+ },
7
+ "title": "Using Dialogue Representations for Concept-to-Speech Generation",
8
+ "authors": [
9
+ {
10
+ "first": "Christine",
11
+ "middle": [
12
+ "H"
13
+ ],
14
+ "last": "Nakatani",
15
+ "suffix": "",
16
+ "affiliation": {},
17
+ "email": ""
18
+ },
19
+ {
20
+ "first": "Jennifer",
21
+ "middle": [],
22
+ "last": "Chu-Carroll",
23
+ "suffix": "",
24
+ "affiliation": {},
25
+ "email": ""
26
+ }
27
+ ],
28
+ "year": "",
29
+ "venue": null,
30
+ "identifiers": {},
31
+ "abstract": "We present an implemented concept-to-speech (CTS) syst@n'~J tl~at offers original proposals for certain couplings-oi r dialogue computation with prosodic computation. Specifically, the semantic interpretation, task modeling and dialogue strategy modules in a working spoken dialogue system are used to generate prosodic features to better convey the meaning of system replies. The new CTS system embodies and extends theoretical work on intonational meaning in a more general, robust and rigorous way than earlier approaches, by reflecting compositional aspects of both dialogue and intonation interepretation in an original computational framework for prosodic generation.",
32
+ "pdf_parse": {
33
+ "paper_id": "W00-0310",
34
+ "_pdf_hash": "",
35
+ "abstract": [
36
+ {
37
+ "text": "We present an implemented concept-to-speech (CTS) syst@n'~J tl~at offers original proposals for certain couplings-oi r dialogue computation with prosodic computation. Specifically, the semantic interpretation, task modeling and dialogue strategy modules in a working spoken dialogue system are used to generate prosodic features to better convey the meaning of system replies. The new CTS system embodies and extends theoretical work on intonational meaning in a more general, robust and rigorous way than earlier approaches, by reflecting compositional aspects of both dialogue and intonation interepretation in an original computational framework for prosodic generation.",
38
+ "cite_spans": [],
39
+ "ref_spans": [],
40
+ "eq_spans": [],
41
+ "section": "Abstract",
42
+ "sec_num": null
43
+ }
44
+ ],
45
+ "body_text": [
46
+ {
47
+ "text": "1 Introduction Conversational systems that use speech as the input and output modality are often realized by architectures that decouple speech processing components from language processing components. In this paper, we show how speech generation can be more closely coupled with the dialogue manager of a working mixed-initiative spoken dialogue system. In particular, we use representations from the semantic interpretation, task model and dialogue strategy modules to better communicate the meaning of system replies through prosodically appropriate synthetic speech.",
48
+ "cite_spans": [],
49
+ "ref_spans": [],
50
+ "eq_spans": [],
51
+ "section": "",
52
+ "sec_num": null
53
+ },
54
+ {
55
+ "text": "While dialogue prosody has been a topic of much study, our implemented concept-to-speech (CTS) system offers original proposals for specific couplings of dialogue computation with prosodic computation. Further, it embodies and extends theoretical work on intonational meaning in a more general, robust and rigorous way than earlier CTS systems, in an architecture that reflects compositional aspects of dialogue and intonation interpretation.",
56
+ "cite_spans": [],
57
+ "ref_spans": [],
58
+ "eq_spans": [],
59
+ "section": "",
60
+ "sec_num": null
61
+ },
62
+ {
63
+ "text": "In this work, we implement and extend the compositional theory of intonational meaning proposed by Pierrehumbert and Hirschberg (1986; , who sought to identify correspondences between the Bell Laboratories, Lucent Technologies 600 Mountain Avenue Murray Hill, NJ 07974 USA {chn I j encc}\u00a9research, bell-labs, com Grosz and Sidner (1986) computational model of discourse interpretation and Pierrehumbert's prosodic grammar for American English (1980) .",
64
+ "cite_spans": [
65
+ {
66
+ "start": 99,
67
+ "end": 134,
68
+ "text": "Pierrehumbert and Hirschberg (1986;",
69
+ "ref_id": "BIBREF6"
70
+ },
71
+ {
72
+ "start": 313,
73
+ "end": 336,
74
+ "text": "Grosz and Sidner (1986)",
75
+ "ref_id": "BIBREF4"
76
+ },
77
+ {
78
+ "start": 435,
79
+ "end": 449,
80
+ "text": "English (1980)",
81
+ "ref_id": null
82
+ }
83
+ ],
84
+ "ref_spans": [],
85
+ "eq_spans": [],
86
+ "section": "Theoretical Foundations",
87
+ "sec_num": "2"
88
+ },
89
+ {
90
+ "text": "In the present work, certain aspects of the original theories are modified and adapted to the architecture of the dialogue system in which the CTS component is embedded. Below, we present the important fundamental definitions and principles of intonation underlying our CTS system.",
91
+ "cite_spans": [],
92
+ "ref_spans": [],
93
+ "eq_spans": [],
94
+ "section": "Theoretical Foundations",
95
+ "sec_num": "2"
96
+ },
97
+ {
98
+ "text": "In our CTS system, the prosodic elements that are computed are based on the intonational system of Pierrehumbert (1980) , who defined a formal language for describing American English intonation using the following regular grammar:",
99
+ "cite_spans": [
100
+ {
101
+ "start": 99,
102
+ "end": 119,
103
+ "text": "Pierrehumbert (1980)",
104
+ "ref_id": "BIBREF12"
105
+ }
106
+ ],
107
+ "ref_spans": [],
108
+ "eq_spans": [],
109
+ "section": "Intonational System",
110
+ "sec_num": "2.1"
111
+ },
112
+ {
113
+ "text": "Inton Phrase ---~ (Interm Phrase) + Bndry Tone Interm Phrase ~ (Pitch Acc)+ Phrase Ace Major phrases, or inlonational phrases, are made up of one or more minor phrases, or inlermediale phrases. Melodic movements in intermediate and intonational phrases are in turn expressed by three kinds of tonal elements. These include six pilch accents: a low pitch excursion (L*), a high pitch excursion (H*), or a combination of both low and high excursions (L*+H, L+H*, H*+L, It+L*); two phrase accents: a high (H-) or low (L-) tonal target that guides the interpolation of the melodic contour from final pitch accent to intermediate phrase ending; and two boundary lones: a high (H%) or low (L%) tonal target that guides interpolation from phrase accent to intonational phrase ending.",
114
+ "cite_spans": [],
115
+ "ref_spans": [],
116
+ "eq_spans": [],
117
+ "section": "Intonational System",
118
+ "sec_num": "2.1"
119
+ },
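A minimal sketch of how the regular grammar above can be checked over a sequence of tone labels; the list-of-strings encoding and the function name are assumptions made for illustration, not part of the Bell Labs implementation.

# Sketch: validate a tone-label sequence against
# Inton Phrase --> (Interm Phrase)+ Bndry Tone;  Interm Phrase --> (Pitch Acc)+ Phrase Acc.
import re

PITCH_ACCENTS = {"L*", "H*", "L*+H", "L+H*", "H*+L", "H+L*"}
PHRASE_ACCENTS = {"H-", "L-"}
BOUNDARY_TONES = {"H%", "L%"}

def is_well_formed(labels):
    """Return True if `labels` forms a legal intonational phrase under the grammar."""
    category = {}
    for a in PITCH_ACCENTS:
        category[a] = "A"
    for p in PHRASE_ACCENTS:
        category[p] = "P"
    for b in BOUNDARY_TONES:
        category[b] = "B"
    try:
        coded = "".join(category[label] for label in labels)
    except KeyError:
        return False                      # unknown tonal element
    # one or more intermediate phrases, each (Pitch Acc)+ Phrase Acc, then one boundary tone
    return re.fullmatch(r"(A+P)+B", coded) is not None

print(is_well_formed(["L+H*", "L*+H", "L-", "H%"]))   # True
print(is_well_formed(["H*", "H%"]))                   # False: phrase accent missing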
120
+ {
121
+ "text": "Theoretical work on intonational meaning has attempted to relate the grammatical elements of Pierrehumbert's system --pitch accent, phrase accent and boundary tone, to interpretive processes at different levels of discourse and dialogue structure. Hirschberg and Pierrehumbert (1986) conjectured that the absence or presence of accentuation conveys discourse focus status, while the tonal properties of the accent itself (i.e. pitch accent type) convey semantic focus information.",
122
+ "cite_spans": [
123
+ {
124
+ "start": 248,
125
+ "end": 283,
126
+ "text": "Hirschberg and Pierrehumbert (1986)",
127
+ "ref_id": "BIBREF6"
128
+ }
129
+ ],
130
+ "ref_spans": [],
131
+ "eq_spans": [],
132
+ "section": "Intonational Meaning",
133
+ "sec_num": "2.2"
134
+ },
135
+ {
136
+ "text": "User: MIMIC:",
137
+ "cite_spans": [],
138
+ "ref_spans": [],
139
+ "eq_spans": [],
140
+ "section": "MIMIC:",
141
+ "sec_num": null
142
+ },
143
+ {
144
+ "text": "User: MIMIC:",
145
+ "cite_spans": [],
146
+ "ref_spans": [],
147
+ "eq_spans": [],
148
+ "section": "MIMIC:",
149
+ "sec_num": null
150
+ },
151
+ {
152
+ "text": "hello this is mimic the movie information system how can I help you where in hoboken is october sky playing october sky is playing at hoboken cinema in hoboken can I help you with anything else when is it playing there october sky is playing at hoboken cinema in hoboken at 3:45pm, 5:50pm, 7:lOpm, and lOpm can i help you with anything else Figure 1 : A MIMIC dialogue.",
153
+ "cite_spans": [],
154
+ "ref_spans": [
155
+ {
156
+ "start": 341,
157
+ "end": 349,
158
+ "text": "Figure 1",
159
+ "ref_id": null
160
+ }
161
+ ],
162
+ "eq_spans": [],
163
+ "section": "MIMIC:",
164
+ "sec_num": null
165
+ },
166
+ {
167
+ "text": "In later work, pitch accent type was said to express whether the accented information was intended by the speaker to be \"predicated\" or not by the hearer (Pierrehumbert and Hirschberg, 1990) . Nompredicated-~forernation was said to bear lowstar accentuation (L*-, L*+H, H+L*), while predicated information would be marked by high-star accents (H*, L+H*, H*+L). The theory further stated that L*+H conveys uncertainty or lack of speaker commitment to the expressed propositional content, while L+H* marks correction or contrast. The complex accent, H*+L, was said to convey that an inference path was required to support the predication; usage of H+L* similarly was said to imply an inference path, but did not suggest a predication of a mutual belief. Finally, phrase accents and boundary tones were said to reflect aspects of discourse structure.",
168
+ "cite_spans": [
169
+ {
170
+ "start": 154,
171
+ "end": 190,
172
+ "text": "(Pierrehumbert and Hirschberg, 1990)",
173
+ "ref_id": "BIBREF11"
174
+ }
175
+ ],
176
+ "ref_spans": [],
177
+ "eq_spans": [],
178
+ "section": "MIMIC:",
179
+ "sec_num": null
180
+ },
181
+ {
182
+ "text": "Our task is to improve the communicative competence of a spoken dialogue agent, by making recourse to our knowledge of intonational meaning, dialogue processing and relations between the two. Of course, a worthwhile CTS system must also outperform out-of-the-box text-to-speech (TTS) systems that may determine prosodic mark-up in linguistically sophisticated ways. As in (Nakatani, 1998) , we take the prosodic output of an advanced research system that implements the Pierrehumbert theory of intonation, namely the Bell Labs TTS system, as our baseline experimental system to be enhanced by CTS algorithms. We embed the CTS system in MIMIC, a working spoken dialogue system representing state-of-the-art dialogue management practices, to develop CTS algorithms that can be eventually realistically evaluated using task-based performance metrics.",
183
+ "cite_spans": [
184
+ {
185
+ "start": 372,
186
+ "end": 388,
187
+ "text": "(Nakatani, 1998)",
188
+ "ref_id": "BIBREF10"
189
+ }
190
+ ],
191
+ "ref_spans": [],
192
+ "eq_spans": [],
193
+ "section": "Systems Foundations",
194
+ "sec_num": "3"
195
+ },
196
+ {
197
+ "text": "Movie Information Consultant (MIMIC)",
198
+ "cite_spans": [],
199
+ "ref_spans": [],
200
+ "eq_spans": [],
201
+ "section": "Dialogue System: Mixed-Initiative",
202
+ "sec_num": "3.1"
203
+ },
204
+ {
205
+ "text": "The dialogue system whose baseline speech generation capabilities we enhance is the Mixed-Initiative Movie Information Consultant (MIMIC) (Chu-Carroll, 2000) . MIMIC\" provides movie list-ing information involving knowledge about towns, theaters, movies and showtimes, as demonstrated in Figure 1 . MIMIC currently utilizes templatedriven text generation, and passes on text strings to a stand-alone TTS system. In the version of MIMIC enhanced with concept-to-speech capabilities, MIMIC-CTS, contextual knowledge is used to modify the prosodic features of the slot and filler material in the templates; we are currently integrating the algorithms in MIMIC-CTS with a grammardriven generation system. Further details of MIMIC are presented in the relevant sections below, but see (Chu-Carroll, 2000) for a complete overview.",
206
+ "cite_spans": [
207
+ {
208
+ "start": 138,
209
+ "end": 157,
210
+ "text": "(Chu-Carroll, 2000)",
211
+ "ref_id": "BIBREF2"
212
+ },
213
+ {
214
+ "start": 779,
215
+ "end": 798,
216
+ "text": "(Chu-Carroll, 2000)",
217
+ "ref_id": "BIBREF2"
218
+ }
219
+ ],
220
+ "ref_spans": [
221
+ {
222
+ "start": 287,
223
+ "end": 295,
224
+ "text": "Figure 1",
225
+ "ref_id": null
226
+ }
227
+ ],
228
+ "eq_spans": [],
229
+ "section": "Dialogue System: Mixed-Initiative",
230
+ "sec_num": "3.1"
231
+ },
232
+ {
233
+ "text": "For default prosodic processing and speech synthesis realization, we use a research version of the Bell Labs TTS System, circa 1992 (Sproat, 1997), that generates intonational contours based on Pierrehumbert's intonation theory (1980), as described in (Pierrehumbert, 1981) . Of relevance is the fact that various pitch accent types, phrase accent and boundary tones in Pierrehumbert's theory are directly implemented in this system, so that by generating a Pierrehumbert-style prosodic transcription, the work of the CTS system is done. More precisely, MIMIC-CTS computes prosodic annotations that override the default prosodic processing that is performed by the Bell Labs TTS system. To our knowledge, the intonation component of the Bell Labs TTS system utilizes more linguistic knowledge to compute prosodic annotations than any other unrestricted TTS system, so it is reasonable to assume that improvements upon it are meaningful in practice as well as in theory.",
234
+ "cite_spans": [
235
+ {
236
+ "start": 252,
237
+ "end": 273,
238
+ "text": "(Pierrehumbert, 1981)",
239
+ "ref_id": "BIBREF13"
240
+ }
241
+ ],
242
+ "ref_spans": [],
243
+ "eq_spans": [],
244
+ "section": "TTS: The Bell Labs System",
245
+ "sec_num": "3.2"
246
+ },
247
+ {
248
+ "text": "Component (MIMIC-CTS)",
249
+ "cite_spans": [],
250
+ "ref_spans": [],
251
+ "eq_spans": [],
252
+ "section": "MIMIC's Concept-to-Speech",
253
+ "sec_num": "4"
254
+ },
255
+ {
256
+ "text": "In MIMIC-CTS, the MIMIC dialogue system is enhanced with a CTS component to better communicate the meaning of system replies through contextually conditioned prosodic features. MIMIC-CTS makes use of three distinct levels of dialogue representations to convey meaning through intonation. MIMIC's semantic representations allow MIMIC-CTS to decide which information to prosodically highlight. MIMIC's task model in turn determines how to prosodically highlight selected information, based on the pragmatic properties of the system reply. MIMIC's dialogue strategy selection process informs various choices in prosodic contour and accenting that convey logico-semantic aspects of meaning, such as contradiction.",
257
+ "cite_spans": [],
258
+ "ref_spans": [],
259
+ "eq_spans": [],
260
+ "section": "MIMIC's Concept-to-Speech",
261
+ "sec_num": "4"
262
+ },
263
+ {
264
+ "text": "Semantic Representations MIMIC employs a statistically-driven semantic interpretation engine to \"spot\" values for key attributes that make up a valid MIMIC query in a robust fashion) To simplify matters, for each utterance, MIMIC computes an attribute-value matrix (AVM)-~epresentation, identifying important pieces of information for accomplishing a given set of tasks. The AVM created from the following utterance, \"When is October Sky playing at Hoboken Cinema in Hoboken?\", for example, is given in Attribute names and attribute values are critical to the task at hand. In MIMIC-CTS, attribute names and values that occur in templates are typed, so that MIMIC-CTS can highlight these items in the following way:",
265
+ "cite_spans": [],
266
+ "ref_spans": [],
267
+ "eq_spans": [],
268
+ "section": "Highlighting Information using",
269
+ "sec_num": "4.1"
270
+ },
271
+ {
272
+ "text": "1. All lexical items realizing attribute values are accented.",
273
+ "cite_spans": [],
274
+ "ref_spans": [],
275
+ "eq_spans": [],
276
+ "section": "Highlighting Information using",
277
+ "sec_num": "4.1"
278
+ },
279
+ {
280
+ "text": "2. Attribute values are synthesized at a slower speaking rate.",
281
+ "cite_spans": [],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "Highlighting Information using",
285
+ "sec_num": "4.1"
286
+ },
287
+ {
288
+ "text": "3. Attribute values are set off by phrase boundaries.",
289
+ "cite_spans": [],
290
+ "ref_spans": [],
291
+ "eq_spans": [],
292
+ "section": "Highlighting Information using",
293
+ "sec_num": "4.1"
294
+ },
295
+ {
296
+ "text": "4. Attribute names are always accented.",
297
+ "cite_spans": [],
298
+ "ref_spans": [],
299
+ "eq_spans": [],
300
+ "section": "Highlighting Information using",
301
+ "sec_num": "4.1"
302
+ },
303
+ {
304
+ "text": "These modifications are entirely rule-based, given a list of attribute names and typed attribute values.",
305
+ "cite_spans": [],
306
+ "ref_spans": [],
307
+ "eq_spans": [],
308
+ "section": "Highlighting Information using",
309
+ "sec_num": "4.1"
310
+ },
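A minimal sketch of rules 1-4 above applied to a typed template; the bracketed markup tags and the function name are placeholders for illustration, not the actual TTS annotations emitted by MIMIC-CTS.

# Sketch of the four rule-based modifications for highlighting attribute names/values.
def highlight(segments):
    """segments: list of (text, role); role is 'attr_name', 'attr_value', or 'other'."""
    out = []
    for text, role in segments:
        if role == "attr_value":
            # rules 1-3: accent every word of the value, slow the speaking rate,
            # and set the value off with phrase boundaries
            accented = " ".join("[ACC]" + w for w in text.split())
            out.append(f"[BND] [RATE slow] {accented} [RATE normal] [BND]")
        elif role == "attr_name":
            out.append("[ACC]" + text)    # rule 4: attribute names are always accented
        else:
            out.append(text)
    return " ".join(out)

print(highlight([("october sky", "attr_value"), ("is playing at", "other"),
                 ("hoboken cinema", "attr_value"), ("in", "other"),
                 ("hoboken", "attr_value")]))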
311
+ {
312
+ "text": "1 Specifically, MIMIC uses an n-dimensional call router front-end (Chu-Carroll, 2000) , which is a generalization of the vector-based call-routing paradigm of semantic interpretation (Chu-CarroU and Carpenter, 1999) ; that is, instead of detecting one concept per utterance, MIMIC's semantic interpretation engine detects multiple (n) concepts or classes conveyed by a single utterance, by using n call touters in parallel.",
313
+ "cite_spans": [
314
+ {
315
+ "start": 66,
316
+ "end": 85,
317
+ "text": "(Chu-Carroll, 2000)",
318
+ "ref_id": "BIBREF2"
319
+ },
320
+ {
321
+ "start": 183,
322
+ "end": 215,
323
+ "text": "(Chu-CarroU and Carpenter, 1999)",
324
+ "ref_id": null
325
+ }
326
+ ],
327
+ "ref_spans": [],
328
+ "eq_spans": [],
329
+ "section": "Highlighting Information using",
330
+ "sec_num": "4.1"
331
+ },
332
+ {
333
+ "text": "Even such minimal use of dialogue information can make a difference. For example, changing the default accent for the following utterance highlights the kind of information that the system is seeking, instead of highlighting the semantically vacuous main verb, like: 2 defined by a task model. MIMIC processes the AVM for each utterance and then evaluates whether it should perform a database query based on the task specifications given in Figure 3 . The task mode] defines which attribute values must be filled in (Y), must not 56 filled in (N), or may optionally be filled in (-), to \"license\" a database query action. If no task is \"specified\" by the current AVM state,",
334
+ "cite_spans": [],
335
+ "ref_spans": [
336
+ {
337
+ "start": 441,
338
+ "end": 449,
339
+ "text": "Figure 3",
340
+ "ref_id": "FIGREF1"
341
+ }
342
+ ],
343
+ "eq_spans": [],
344
+ "section": "Highlighting Information using",
345
+ "sec_num": "4.1"
346
+ },
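The licensing check described above can be sketched as follows; the task table is a reduced, hypothetical stand-in for Figure 3, and the function name is invented for illustration.

# Sketch: a task is "specified" by the current AVM when every Y attribute is filled,
# every N attribute is empty, and optional (-) attributes are ignored.
TASKS = {
    "when":  {"movie": "Y", "theater": "Y", "town": "-"},
    "where": {"movie": "Y", "theater": "N", "town": "-"},
    "what":  {"movie": "N", "theater": "-", "town": "Y"},
}

def licensed_tasks(avm):
    """Return the tasks whose specifications are satisfied by the AVM."""
    matches = []
    for task, spec in TASKS.items():
        ok = True
        for attr, req in spec.items():
            filled = avm.get(attr) is not None
            if (req == "Y" and not filled) or (req == "N" and filled):
                ok = False
                break
        if ok:
            matches.append(task)
    return matches

avm = {"movie": "october sky", "theater": None, "town": "hoboken"}
print(licensed_tasks(avm))   # ['where']: a 'when' query would still need a theater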
347
+ {
348
+ "text": "Movie Theater Town MIMIC employs various strategies to progress toward a complete and valid task specification. For example, in response to the follgwing user utterance, MIMIC initiates an information-seeking subdialogue to instantiate the theater attribute value to accomplish a when task:",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "Task",
353
+ "sec_num": null
354
+ },
355
+ {
356
+ "text": "User: when is october sky playing in hoboken",
357
+ "cite_spans": [],
358
+ "ref_spans": [],
359
+ "eq_spans": [],
360
+ "section": "Task",
361
+ "sec_num": null
362
+ },
363
+ {
364
+ "text": "To better convey the structure of the task model, which is learned by the user through interaction with the system, we define four information statuses based on properties of the task model, which align on a scale of given and new in the following order: Figure 4 : Above, dialogue excerpt of MIMIC performing a where task. Below, the modified version of the bold-faced reply string, generated by MIMIC-CTS.",
365
+ "cite_spans": [],
366
+ "ref_spans": [
367
+ {
368
+ "start": 255,
369
+ "end": 263,
370
+ "text": "Figure 4",
371
+ "ref_id": null
372
+ }
373
+ ],
374
+ "eq_spans": [],
375
+ "section": "MIMIC-CTS: what THEATER would you like",
376
+ "sec_num": null
377
+ },
378
+ {
379
+ "text": "user, but is derived by MIMIC's limited inference engine that seeks to instantiate as many attribute values as possible. For instance, a theater name may be inferred given a town name, if there is only one theater in the given town. OLD information is inherited from the discourse history, based on updating rules relying on confidence scores for attribute values. HEARER-NEW information (c.f. (Prince, 1988) ) is that which is requested by the user, and constitutes the only new information on the scale. But note that KEY information, while given, is still clearly in discourse focus, along with HEARER-NEW information.",
380
+ "cite_spans": [
381
+ {
382
+ "start": 394,
383
+ "end": 408,
384
+ "text": "(Prince, 1988)",
385
+ "ref_id": "BIBREF14"
386
+ }
387
+ ],
388
+ "ref_spans": [],
389
+ "eq_spans": [],
390
+ "section": "MIMIC-CTS: what THEATER would you like",
391
+ "sec_num": null
392
+ },
393
+ {
394
+ "text": "The next step is to map the information statuses, ordered from given to new, to a scale of pitch accent, or accent melodies, ordered from given to new as follows: ",
395
+ "cite_spans": [],
396
+ "ref_spans": [],
397
+ "eq_spans": [],
398
+ "section": "MIMIC-CTS: what THEATER would you like",
399
+ "sec_num": null
400
+ },
401
+ {
402
+ "text": "L* L*\u00f7H L+H* H* &ivan] [new]",
403
+ "cite_spans": [],
404
+ "ref_spans": [],
405
+ "eq_spans": [],
406
+ "section": "MIMIC-CTS: what THEATER would you like",
407
+ "sec_num": null
408
+ },
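A sketch of the status-to-accent mapping just described; KEY -> L+H* and HEARER-NEW -> H* follow from the text, while the exact placement of OLD and INFERRED on the given-to-new scale is an assumption made for illustration.

# Sketch: mapping the four information statuses onto the given-to-new accent scale.
ACCENT_FOR_STATUS = {
    "OLD":        "L*",     # inherited from the discourse history (assumed placement)
    "INFERRED":   "L*+H",   # derived by the inference engine (assumed placement)
    "KEY":        "L+H*",   # given, but still in discourse focus
    "HEARER-NEW": "H*",     # the information the user actually requested
}

def accent(phrase, status):
    return f"{phrase} [{ACCENT_FOR_STATUS[status]}]"

# e.g. in a 'where' answer, the theater is what the user asked for:
print(accent("hoboken cinema", "HEARER-NEW"))   # hoboken cinema [H*]
print(accent("october sky", "KEY"))             # october sky [L+H*]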
409
+ {
410
+ "text": "As in earlier CTS systems, special logico-semantic relations, such as contrast or correction, are effectively conveyed in MIMIC-CTS by prosodic cues. In MIMIC-CTS, however, these situations are not stipulated in an ad hoc manner, but can be determined to a large degree by MIMIC's dialogue strategy selection process that identifies appropriate dialogue acts to realize a dialogue goal. a For example, the dialogue act ANSWER may be selected to achieve the dialogue goal of providing an answer to a successful user query, while the dialogue act NOTIFYFAILURE may be performed to achieve the dialogue goal of providing an answer in situations where no movie listing in the database matches the user query. The template associated with the dialogue act, NOTIFYFAILURE, when compared with that for ANSWER, contains an additional negative auxiliary associated with the key attribute responsible for the query failure, in an utterance conveying a contradiction in beliefs between the user and system (namely, the presupposition on the part of the user that the query can be satisfied).",
411
+ "cite_spans": [],
412
+ "ref_spans": [],
413
+ "eq_spans": [],
414
+ "section": "Assigning \"Dialogue Prosody\" using Dialogue Strategies",
415
+ "sec_num": "4.3"
416
+ },
417
+ {
418
+ "text": "Theoretical work on intonational interpretation leads us to prosodically mark the negative auxiliary, as well as the associated focus position (Rooth, 1985) . We choose to mark the negative auxiliary not with the L+H* pitch accent to convey correction, while marking the material in the associated focus position with the L*+H pitch accent to convey (the 3Importantly, MIMIC's adaptive dialogue strategy selection algorithm takes into account the outcome of an initiative tracking module that we do not discuss here (see (Chu-Carroll, 2000) ).",
419
+ "cite_spans": [
420
+ {
421
+ "start": 143,
422
+ "end": 156,
423
+ "text": "(Rooth, 1985)",
424
+ "ref_id": "BIBREF15"
425
+ },
426
+ {
427
+ "start": 521,
428
+ "end": 540,
429
+ "text": "(Chu-Carroll, 2000)",
430
+ "ref_id": "BIBREF2"
431
+ }
432
+ ],
433
+ "ref_spans": [],
434
+ "eq_spans": [],
435
+ "section": "Assigning \"Dialogue Prosody\" using Dialogue Strategies",
436
+ "sec_num": "4.3"
437
+ },
438
+ {
439
+ "text": "where is the corruptor playing in cranford the corruptor is not playing in cranford the corruptor is playing at lincoln cinemas in arlington THE CORRUPTOR is NOT playing in CRANFORD L+H* L+H* LWH* !H* L*+H L-H% Figure 5 : Above, dialogue excerpt of MIMIC performing a NOTIFYFAILURE dialogue act. Below, the modified version of the bold-faced reply string, generated by MIMIC-CTS. Note the diacritic \"!\" denotes a downstepped accent (see (Pierrehumbert, 1980) ).",
440
+ "cite_spans": [
441
+ {
442
+ "start": 437,
443
+ "end": 458,
444
+ "text": "(Pierrehumbert, 1980)",
445
+ "ref_id": "BIBREF12"
446
+ }
447
+ ],
448
+ "ref_spans": [
449
+ {
450
+ "start": 211,
451
+ "end": 219,
452
+ "text": "Figure 5",
453
+ "ref_id": null
454
+ }
455
+ ],
456
+ "eq_spans": [],
457
+ "section": "User: MIMIC:",
458
+ "sec_num": null
459
+ },
460
+ {
461
+ "text": "system's) lack of commitment to the (user's) presupposition at hand. Finally, the NOTIFYFAILURE dialogue act is conveyed by assigning the so-called rise-fall-risd-cbntfadiction contour, L*+tt L-H%, to the utterance-at large (c.f. (Hirschberg and Ward, 1991) ). An example generated by MIMIC-CTS appears in Figure 5 . Note that pitch accent types for the remaining attribute values are assigned using the task model, as described in section 4.2. Thus in Figure 5 , the movie title is treated as KEY information, marked by the L+H* pitch accent. MIMIC-CTS contains additional prosodic rules for logical connectives, and clarification and confirmation suhdialogues.",
462
+ "cite_spans": [
463
+ {
464
+ "start": 230,
465
+ "end": 257,
466
+ "text": "(Hirschberg and Ward, 1991)",
467
+ "ref_id": "BIBREF7"
468
+ }
469
+ ],
470
+ "ref_spans": [
471
+ {
472
+ "start": 306,
473
+ "end": 314,
474
+ "text": "Figure 5",
475
+ "ref_id": null
476
+ },
477
+ {
478
+ "start": 453,
479
+ "end": 461,
480
+ "text": "Figure 5",
481
+ "ref_id": null
482
+ }
483
+ ],
484
+ "eq_spans": [],
485
+ "section": "User: MIMIC:",
486
+ "sec_num": null
487
+ },
488
+ {
489
+ "text": "Although a number of earlier CTS systems have captured linguistic phenomena that we address in our work, the computation of prosody from dialogue representations is often not as rigorous, detailed or complete as in MIMIC-CTS. Further, while several systems use given/new information status to decide whether to accent or deaccent a lexical item, no system has directly implemented general rules for pitch accent type assignment. Together, MIMIC-CTS's computation of accentuation, pitch accent type and dialogue prosody constitutes the most general and complete implementation of a compositional theory of intonational meaning in a CTS system to date.",
490
+ "cite_spans": [],
491
+ "ref_spans": [],
492
+ "eq_spans": [],
493
+ "section": "Related Work",
494
+ "sec_num": "5"
495
+ },
496
+ {
497
+ "text": "Nevertheless, elements of a handful of previous CTS systems support the approaches taken in MIMIC-CTS toward conveying semantic, task and dialogue level meaning. For example, the Direction Assistant system (Davis and Hirschberg, 1988 ) mapped a hand-crafted route grammar to a discourse structure for generated directions. The discourse structure determined accentuation, with deaccenting of discourse-old entities realized (by lexically identical morphs) in the current or previous discourse segment. Other material was assigned accentuation based on lexical category information, with the exception that certain contrastive cases of accenting, such as left versus right, were stipulated for the domain.",
498
+ "cite_spans": [
499
+ {
500
+ "start": 206,
501
+ "end": 233,
502
+ "text": "(Davis and Hirschberg, 1988",
503
+ "ref_id": "BIBREF3"
504
+ }
505
+ ],
506
+ "ref_spans": [],
507
+ "eq_spans": [],
508
+ "section": "Related Work",
509
+ "sec_num": "5"
510
+ },
511
+ {
512
+ "text": "Accent assignment in the SUNDIAL travel infor-mation system (House and Yond, 1990 ) also relied on discourse and task models. Mutually known entities, said to be in negative focus, were deaccented; entities in the current task space, in referring focus, received (possibly contrastive) accenting; and entities of the same type as a previously mentioned object, were classified-as in either referring or emphatic focus, depending on the dialogue act~ in the cases of corrective situations or repeated system-intitiated queries, the contrasting or corrective items were emphatically accented. The BRIDGE project on speech generation (Zacharski etal., 1992) identified four main factors affecting accentability: linear 0rder, lexical category, semantic weight and givenness. In relatedwork (Monaghan, 1994) , word accentability was quantitatively scored by hand-crafted rules based on information status, semantic focus and Word class. The givenness hierarchy of Gundel and'colleagues (1989) , which associates lexical forms of expression with information statuses, was divided into four intervals, with scores assigned to each. A binary semantic focus score was based on whether the word occurred in the topic or comment of a sentence. Finally, lexical categories determined word class scores. These scores were combined, and metrical phonological rules then referred to final acce'ntability scores to assign a final accenting pattern.",
513
+ "cite_spans": [
514
+ {
515
+ "start": 60,
516
+ "end": 81,
517
+ "text": "(House and Yond, 1990",
518
+ "ref_id": null
519
+ },
520
+ {
521
+ "start": 631,
522
+ "end": 654,
523
+ "text": "(Zacharski etal., 1992)",
524
+ "ref_id": null
525
+ },
526
+ {
527
+ "start": 787,
528
+ "end": 803,
529
+ "text": "(Monaghan, 1994)",
530
+ "ref_id": null
531
+ },
532
+ {
533
+ "start": 960,
534
+ "end": 988,
535
+ "text": "Gundel and'colleagues (1989)",
536
+ "ref_id": null
537
+ }
538
+ ],
539
+ "ref_spans": [],
540
+ "eq_spans": [],
541
+ "section": "Related Work",
542
+ "sec_num": "5"
543
+ },
544
+ {
545
+ "text": "To summarize, all of the above CTS systems employ either hand-crafted or heuristic techniques for representing semantic and discourse focus information. Further, only SUNDIAL makes use of dialogue acts.",
546
+ "cite_spans": [],
547
+ "ref_spans": [],
548
+ "eq_spans": [],
549
+ "section": "Related Work",
550
+ "sec_num": "5"
551
+ },
552
+ {
553
+ "text": "We are presently carrying out evaluations of MIMIC-CTS. An initial corpus-based analysis compares the prosodic annotations assigned to three actual MIMIC dialogues, which were previously collected during an overall system evaluation (Chu-Carroll and Nickerson, 2000) . The corpus of dialogues is made up of 37 system/user turns, including 40 system-generated sentences. Three versions of the MIMIC dialogues are being analysed, with prosodic features arising from three differ-ent sources: MIMIC-CTS, MIMIC operating with default Bell Labs TTS, and a professional voice talent who read the dialogue scripts in context. This corpus-based assessment --comparing the prosody of CTS-generated, TTS-generated, and human speech, will enable more domain-dependent tuning of the MIMIC-CTS algorithms, as well as the refinement of general prosodic patterns for linguistic structures, such as lists and conjunctive phrases. Ultimately; the value of MIMIC-CTS must be measured based on its contribution to overall task peformance by real MIMIC users. Such a study is under design, following (Chu-Carroll and Nickerson, 2000) .",
554
+ "cite_spans": [
555
+ {
556
+ "start": 233,
557
+ "end": 266,
558
+ "text": "(Chu-Carroll and Nickerson, 2000)",
559
+ "ref_id": "BIBREF1"
560
+ },
561
+ {
562
+ "start": 1080,
563
+ "end": 1113,
564
+ "text": "(Chu-Carroll and Nickerson, 2000)",
565
+ "ref_id": "BIBREF1"
566
+ }
567
+ ],
568
+ "ref_spans": [],
569
+ "eq_spans": [],
570
+ "section": "Conclusion and Future Work",
571
+ "sec_num": "6"
572
+ },
573
+ {
574
+ "text": "In conclusion, we have shown how prosodic computation can be conditioned on various dialogue representations, for robust and domain-independent CTS synthesis. -While some rules for prosody assignment depend on the task model, others must be tied closely to the particular choices of content in the replies, at the level of dialogue goals and dialogue acts. At this level as well, however, linguistic principles of intonation interpretation can be applied to determine the mappings. In sum, the lesson learned is that a unitary notion of \"concept\" from which we generate a unitary prosodic structure, does not apply to state-of-the-art spoken dialogue generation. Instead, the representation of dialogue meaning in experimental architectures, such as MIMIC's, is compositional to some degree, and we take advantage of this fact to implement a compositional theory of intonational meaning in a new concept-to-speech system, MIMIC-CTS.",
575
+ "cite_spans": [],
576
+ "ref_spans": [],
577
+ "eq_spans": [],
578
+ "section": "Conclusion and Future Work",
579
+ "sec_num": "6"
580
+ },
581
+ {
582
+ "text": "2In the examples, small capitalization denotes a word is accented.",
583
+ "cite_spans": [],
584
+ "ref_spans": [],
585
+ "eq_spans": [],
586
+ "section": "",
587
+ "sec_num": null
588
+ }
589
+ ],
590
+ "back_matter": [],
591
+ "bib_entries": {
592
+ "BIBREF0": {
593
+ "ref_id": "b0",
594
+ "title": "Vector:based natural language call routing",
595
+ "authors": [
596
+ {
597
+ "first": "Jennifer",
598
+ "middle": [],
599
+ "last": "Chu",
600
+ "suffix": ""
601
+ },
602
+ {
603
+ "first": "-",
604
+ "middle": [],
605
+ "last": "Carroll",
606
+ "suffix": ""
607
+ },
608
+ {
609
+ "first": "Bob",
610
+ "middle": [],
611
+ "last": "Carpenter",
612
+ "suffix": ""
613
+ }
614
+ ],
615
+ "year": 1999,
616
+ "venue": "Computational Linguistics",
617
+ "volume": "25",
618
+ "issue": "3",
619
+ "pages": "361--388",
620
+ "other_ids": {},
621
+ "num": null,
622
+ "urls": [],
623
+ "raw_text": "Jennifer Chu-Carroll and Bob Carpenter. 1999. Vector:based natural language call routing. Com- putational Linguistics, 25(3):361-388.",
624
+ "links": null
625
+ },
626
+ "BIBREF1": {
627
+ "ref_id": "b1",
628
+ "title": "Evaluating automatic dialogue strategy adaptation for a spoken dialogue system",
629
+ "authors": [
630
+ {
631
+ "first": "Jennifer",
632
+ "middle": [],
633
+ "last": "Chu",
634
+ "suffix": ""
635
+ },
636
+ {
637
+ "first": "-",
638
+ "middle": [],
639
+ "last": "Carroll",
640
+ "suffix": ""
641
+ },
642
+ {
643
+ "first": "Jill",
644
+ "middle": [
645
+ "S"
646
+ ],
647
+ "last": "Nickerson",
648
+ "suffix": ""
649
+ }
650
+ ],
651
+ "year": 2000,
652
+ "venue": "Proceedings of the 1st Conference of the North American Chapter of the Association for Computational Linguistics",
653
+ "volume": "",
654
+ "issue": "",
655
+ "pages": "",
656
+ "other_ids": {},
657
+ "num": null,
658
+ "urls": [],
659
+ "raw_text": "Jennifer Chu-Carroll and Jill S. Nickerson. 2000. Evaluating automatic dialogue strategy adapta- tion for a spoken dialogue system. In Proceed- ings of the 1st Conference of the North Ameri- can Chapter of the Association for Computational Linguistics, Seattle.",
660
+ "links": null
661
+ },
662
+ "BIBREF2": {
663
+ "ref_id": "b2",
664
+ "title": "Mimic: an adaptive mixed initiative spoken dialogue system for information queries",
665
+ "authors": [
666
+ {
667
+ "first": "Jennifer",
668
+ "middle": [],
669
+ "last": "Chu-Carroll",
670
+ "suffix": ""
671
+ }
672
+ ],
673
+ "year": 2000,
674
+ "venue": "Proceedings of the 6th Conference on Applied Natural Language Processing",
675
+ "volume": "",
676
+ "issue": "",
677
+ "pages": "",
678
+ "other_ids": {},
679
+ "num": null,
680
+ "urls": [],
681
+ "raw_text": "Jennifer Chu-Carroll. 2000. Mimic: an adaptive mixed initiative spoken dialogue system for infor- mation queries. In Proceedings of the 6th Con- ference on Applied Natural Language Processing, Seattle.",
682
+ "links": null
683
+ },
684
+ "BIBREF3": {
685
+ "ref_id": "b3",
686
+ "title": "Assigning intonational features in synthesized spoken directions",
687
+ "authors": [
688
+ {
689
+ "first": "J",
690
+ "middle": [
691
+ "R"
692
+ ],
693
+ "last": "Davis",
694
+ "suffix": ""
695
+ },
696
+ {
697
+ "first": "",
698
+ "middle": [],
699
+ "last": "Hirschberg",
700
+ "suffix": ""
701
+ }
702
+ ],
703
+ "year": 1988,
704
+ "venue": "Proceedings of the 26th Annual Meeting of the Association for Computational Linguistics",
705
+ "volume": "",
706
+ "issue": "",
707
+ "pages": "187--193",
708
+ "other_ids": {},
709
+ "num": null,
710
+ "urls": [],
711
+ "raw_text": "J. R. Davis and :l. Hirschberg. 1988. Assigning into- national features in synthesized spoken directions. In Proceedings of the 26th Annual Meeting of the Association for Computational Linguistics, pages 187-193, Buffalo.",
712
+ "links": null
713
+ },
714
+ "BIBREF4": {
715
+ "ref_id": "b4",
716
+ "title": "Attention, intentions, and the structure of discourse",
717
+ "authors": [
718
+ {
719
+ "first": "Barbara",
720
+ "middle": [],
721
+ "last": "Grosz",
722
+ "suffix": ""
723
+ },
724
+ {
725
+ "first": "Candace",
726
+ "middle": [],
727
+ "last": "Sidner",
728
+ "suffix": ""
729
+ }
730
+ ],
731
+ "year": 1986,
732
+ "venue": "Computational Linguistics",
733
+ "volume": "12",
734
+ "issue": "3",
735
+ "pages": "175--204",
736
+ "other_ids": {},
737
+ "num": null,
738
+ "urls": [],
739
+ "raw_text": "Barbara Grosz and Candace Sidner. 1986. Atten- tion, intentions, and the structure of discourse. Computational Linguistics, 12(3):175-204.",
740
+ "links": null
741
+ },
742
+ "BIBREF5": {
743
+ "ref_id": "b5",
744
+ "title": "Givenness, implicature and demonstrative expressions in English discourse",
745
+ "authors": [
746
+ {
747
+ "first": "J",
748
+ "middle": [],
749
+ "last": "Gundel",
750
+ "suffix": ""
751
+ },
752
+ {
753
+ "first": "N",
754
+ "middle": [],
755
+ "last": "Hedberg",
756
+ "suffix": ""
757
+ },
758
+ {
759
+ "first": "R",
760
+ "middle": [],
761
+ "last": "Zacharski",
762
+ "suffix": ""
763
+ }
764
+ ],
765
+ "year": 1989,
766
+ "venue": "Proceedings of CLS-25, Parasession on Language in Context",
767
+ "volume": "",
768
+ "issue": "",
769
+ "pages": "89--103",
770
+ "other_ids": {},
771
+ "num": null,
772
+ "urls": [],
773
+ "raw_text": "J. Gundel, N. Hedberg, and R. Zacharski. 1989. Givenness, implicature and demonstrative expres- sions in English discourse. In Proceedings of CLS- 25, Parasession on Language in Context, pages 89-103. Chicago Linguistics Society.",
774
+ "links": null
775
+ },
776
+ "BIBREF6": {
777
+ "ref_id": "b6",
778
+ "title": "The intonational structuring of discourse",
779
+ "authors": [
780
+ {
781
+ "first": "Julia",
782
+ "middle": [],
783
+ "last": "Hirschberg",
784
+ "suffix": ""
785
+ },
786
+ {
787
+ "first": "Janet",
788
+ "middle": [],
789
+ "last": "Pierrehumbert",
790
+ "suffix": ""
791
+ }
792
+ ],
793
+ "year": 1986,
794
+ "venue": "Proceedings of the 2~lh Annual Meeting of the Association for Computational -Linguistics",
795
+ "volume": "",
796
+ "issue": "",
797
+ "pages": "",
798
+ "other_ids": {},
799
+ "num": null,
800
+ "urls": [],
801
+ "raw_text": "Julia Hirschberg and Janet Pierrehumbert. 1986. The intonational structuring of discourse. In Pro- ceedings of the 2~lh Annual Meeting of the Asso- ciation for Computational -Linguistics, New York.",
802
+ "links": null
803
+ },
804
+ "BIBREF7": {
805
+ "ref_id": "b7",
806
+ "title": "The influence of pitch range, duration, amplitude, and spectral features on the interpretation of l*+h I h%",
807
+ "authors": [
808
+ {
809
+ "first": "J",
810
+ "middle": [],
811
+ "last": "Hirschberg",
812
+ "suffix": ""
813
+ },
814
+ {
815
+ "first": "G",
816
+ "middle": [],
817
+ "last": "Ward",
818
+ "suffix": ""
819
+ }
820
+ ],
821
+ "year": 1991,
822
+ "venue": "Journal of Phonetics",
823
+ "volume": "",
824
+ "issue": "",
825
+ "pages": "",
826
+ "other_ids": {},
827
+ "num": null,
828
+ "urls": [],
829
+ "raw_text": "J. Hirschberg and G. Ward. 1991. The influence of pitch range, duration, amplitude, and spectral fea- tures on the interpretation of l*+h I h%. Journal of Phonetics.",
830
+ "links": null
831
+ },
832
+ "BIBREF8": {
833
+ "ref_id": "b8",
834
+ "title": "Contextually appropriate intonation in speech synthesis",
835
+ "authors": [
836
+ {
837
+ "first": "Jill",
838
+ "middle": [],
839
+ "last": "House",
840
+ "suffix": ""
841
+ },
842
+ {
843
+ "first": "Nick",
844
+ "middle": [],
845
+ "last": "Youd",
846
+ "suffix": ""
847
+ }
848
+ ],
849
+ "year": 1990,
850
+ "venue": "Proceedings of the European Speech Communication Association Workshop on Speech Synthesis",
851
+ "volume": "",
852
+ "issue": "",
853
+ "pages": "185--188",
854
+ "other_ids": {},
855
+ "num": null,
856
+ "urls": [],
857
+ "raw_text": "Jill House and Nick Youd. 1990. Contextually ap- propriate intonation in speech synthesis. In Pro- ceedings of the European Speech Communication Association Workshop on Speech Synthesis, pages 185-188, Autrans.",
858
+ "links": null
859
+ },
860
+ "BIBREF9": {
861
+ "ref_id": "b9",
862
+ "title": "1,994. Intonation accent placement in a concept-to-dialogue system",
863
+ "authors": [
864
+ {
865
+ "first": "A",
866
+ "middle": [
867
+ "I C"
868
+ ],
869
+ "last": "Monaghan",
870
+ "suffix": ""
871
+ }
872
+ ],
873
+ "year": null,
874
+ "venue": "Proceedings of the ESCA/IEEE Workshop on Speech Synthesis",
875
+ "volume": "",
876
+ "issue": "",
877
+ "pages": "171--174",
878
+ "other_ids": {},
879
+ "num": null,
880
+ "urls": [],
881
+ "raw_text": "A. I. C. Monaghan. 1,994. Intonation accent place- ment in a concept-to-dialogue system. In Proceed- ings of the ESCA/IEEE Workshop on Speech Syn- thesis, pages 171-174, New Paltz, NY.",
882
+ "links": null
883
+ },
884
+ "BIBREF10": {
885
+ "ref_id": "b10",
886
+ "title": "Constituent-based accent prediction",
887
+ "authors": [
888
+ {
889
+ "first": "H",
890
+ "middle": [],
891
+ "last": "Nakatani",
892
+ "suffix": ""
893
+ }
894
+ ],
895
+ "year": 1998,
896
+ "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics",
897
+ "volume": "",
898
+ "issue": "",
899
+ "pages": "",
900
+ "other_ids": {},
901
+ "num": null,
902
+ "urls": [],
903
+ "raw_text": "H. Nakatani. 1998. Constituent-based accent prediction. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics, Montreal.",
904
+ "links": null
905
+ },
906
+ "BIBREF11": {
907
+ "ref_id": "b11",
908
+ "title": "The meaning of intonational contours in the interpretation of discourse",
909
+ "authors": [
910
+ {
911
+ "first": "J",
912
+ "middle": [],
913
+ "last": "Pierrehumbert",
914
+ "suffix": ""
915
+ },
916
+ {
917
+ "first": "J",
918
+ "middle": [],
919
+ "last": "Hirschberg",
920
+ "suffix": ""
921
+ }
922
+ ],
923
+ "year": 1990,
924
+ "venue": "Intentions in Communication",
925
+ "volume": "",
926
+ "issue": "",
927
+ "pages": "",
928
+ "other_ids": {},
929
+ "num": null,
930
+ "urls": [],
931
+ "raw_text": "J. Pierrehumbert and J. Hirschberg. 1990. The meaning of intonational contours in the interpre- tation of discourse. In Intentions in Communica- tion. MIT Press, Cambridge, MA.",
932
+ "links": null
933
+ },
934
+ "BIBREF12": {
935
+ "ref_id": "b12",
936
+ "title": "The Phonology and Phonetics of English Intonation",
937
+ "authors": [
938
+ {
939
+ "first": "Janet",
940
+ "middle": [],
941
+ "last": "Pierrehumbert",
942
+ "suffix": ""
943
+ }
944
+ ],
945
+ "year": 1980,
946
+ "venue": "",
947
+ "volume": "",
948
+ "issue": "",
949
+ "pages": "",
950
+ "other_ids": {},
951
+ "num": null,
952
+ "urls": [],
953
+ "raw_text": "Janet Pierrehumbert. 1980. The Phonology and Phonetics of English Intonation. Ph.D. thesis, Massachusetts Institute of Technology, Septem- ber. Distributed by the Indiana University Lin- guistics Club.",
954
+ "links": null
955
+ },
956
+ "BIBREF13": {
957
+ "ref_id": "b13",
958
+ "title": "Synthesising intonation",
959
+ "authors": [
960
+ {
961
+ "first": "J",
962
+ "middle": [],
963
+ "last": "Pierrehumbert",
964
+ "suffix": ""
965
+ }
966
+ ],
967
+ "year": 1981,
968
+ "venue": "Journal of the Acoustical Society of America",
969
+ "volume": "70",
970
+ "issue": "4",
971
+ "pages": "985--995",
972
+ "other_ids": {},
973
+ "num": null,
974
+ "urls": [],
975
+ "raw_text": "J. Pierrehumbert. 1981. Synthesising intonation. Journal of the Acoustical Society of America, 70(4):985-995.",
976
+ "links": null
977
+ },
978
+ "BIBREF14": {
979
+ "ref_id": "b14",
980
+ "title": "The ZPG letter: subjects, definiteness, and information status",
981
+ "authors": [
982
+ {
983
+ "first": "Ellen",
984
+ "middle": [
985
+ "Prince"
986
+ ],
987
+ "last": "",
988
+ "suffix": ""
989
+ }
990
+ ],
991
+ "year": 1988,
992
+ "venue": "Discourse Description: Diverse Analyses of a Fund Raising Text",
993
+ "volume": "",
994
+ "issue": "",
995
+ "pages": "",
996
+ "other_ids": {},
997
+ "num": null,
998
+ "urls": [],
999
+ "raw_text": "Ellen Prince. 1988. The ZPG letter: subjects, defi- niteness, and information status. In S. Thompson and W. Mann, editors, Discourse Description: Di- verse Analyses of a Fund Raising Text. Elsevier Science Publishers, Amsterdam.",
1000
+ "links": null
1001
+ },
1002
+ "BIBREF15": {
1003
+ "ref_id": "b15",
1004
+ "title": "Association with Focus",
1005
+ "authors": [
1006
+ {
1007
+ "first": "Mats",
1008
+ "middle": [],
1009
+ "last": "Rooth",
1010
+ "suffix": ""
1011
+ }
1012
+ ],
1013
+ "year": 1985,
1014
+ "venue": "",
1015
+ "volume": "",
1016
+ "issue": "",
1017
+ "pages": "",
1018
+ "other_ids": {},
1019
+ "num": null,
1020
+ "urls": [],
1021
+ "raw_text": "Mats Rooth. 1985. Association with Focus. Ph.D. thesis, University of Massachusetts, Amherst MA.",
1022
+ "links": null
1023
+ },
1024
+ "BIBREF16": {
1025
+ "ref_id": "b16",
1026
+ "title": "Multilingual Textto-Speech Synthesis: The Bell Labs Approach",
1027
+ "authors": [],
1028
+ "year": 1997,
1029
+ "venue": "",
1030
+ "volume": "",
1031
+ "issue": "",
1032
+ "pages": "",
1033
+ "other_ids": {},
1034
+ "num": null,
1035
+ "urls": [],
1036
+ "raw_text": "Richard Sproat, editor. 1997. Multilingual Text- to-Speech Synthesis: The Bell Labs Approach. Kluwer Academic, Boston.",
1037
+ "links": null
1038
+ },
1039
+ "BIBREF17": {
1040
+ "ref_id": "b17",
1041
+ "title": "BaIDGE: Basic research on intonation for dialogue generation",
1042
+ "authors": [
1043
+ {
1044
+ "first": "Ron",
1045
+ "middle": [],
1046
+ "last": "Zacharski",
1047
+ "suffix": ""
1048
+ },
1049
+ {
1050
+ "first": "A",
1051
+ "middle": [
1052
+ "I C"
1053
+ ],
1054
+ "last": "Monaghan",
1055
+ "suffix": ""
1056
+ },
1057
+ {
1058
+ "first": "D",
1059
+ "middle": [
1060
+ "R"
1061
+ ],
1062
+ "last": "Ladd",
1063
+ "suffix": ""
1064
+ },
1065
+ {
1066
+ "first": "Judy",
1067
+ "middle": [],
1068
+ "last": "Delin",
1069
+ "suffix": ""
1070
+ }
1071
+ ],
1072
+ "year": 1992,
1073
+ "venue": "",
1074
+ "volume": "",
1075
+ "issue": "",
1076
+ "pages": "",
1077
+ "other_ids": {},
1078
+ "num": null,
1079
+ "urls": [],
1080
+ "raw_text": "Ron Zacharski, A. I. C. Monaghan, D. R. Ladd, and Judy Delin. 1992. BaIDGE: Basic research on intonation for dialogue generation. Technical report, University of Edinburgh.",
1081
+ "links": null
1082
+ }
1083
+ },
1084
+ "ref_entries": {
1085
+ "FIGREF0": {
1086
+ "num": null,
1087
+ "type_str": "figure",
1088
+ "uris": null,
1089
+ "text": "Attribute Value Matrix (AVM), computed by MIMIC's semantic interpreter."
1090
+ },
1091
+ "FIGREF1": {
1092
+ "num": null,
1093
+ "type_str": "figure",
1094
+ "uris": null,
1095
+ "text": "Task Specifications for MIMIC."
1096
+ },
1097
+ "TABREF2": {
1098
+ "html": null,
1099
+ "content": "<table><tr><td>summarizes this original mapping of infor-</td></tr><tr><td>mation statuses to pitch accent melodies, and Fig-</td></tr><tr><td>ure 4 illustrates the use of this mapping in an ex-</td></tr><tr><td>ample. It obeys the general principle of Pierrehum-</td></tr><tr><td>bert and Hirschberg's work, that low tonality sig-</td></tr><tr><td>nifies discourse givenness and high tonality signifies</td></tr><tr><td>discourse newness, but extends this principle beyond</td></tr><tr><td>its vague definition in terms of predication of mutual</td></tr><tr><td>beliefs. Instead, the principle is operationalized here</td></tr><tr><td>in a practically motivated manner that is consistent</td></tr><tr><td>with and perhaps illuminating of the theory.</td></tr></table>",
1100
+ "num": null,
1101
+ "type_str": "table",
1102
+ "text": ""
1103
+ }
1104
+ }
1105
+ }
1106
+ }
Full_text_JSON/prefixW/json/W00/W00-0311.json ADDED
@@ -0,0 +1,1357 @@
1
+ {
2
+ "paper_id": "W00-0311",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:49.485111Z"
6
+ },
7
+ "title": "A Compact Architecture for Dialogue Management Based on Scripts and Meta-Outputs",
8
+ "authors": [
9
+ {
10
+ "first": "Manny",
11
+ "middle": [],
12
+ "last": "Rayner",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ },
17
+ {
18
+ "first": "Beth",
19
+ "middle": [
20
+ "Ann"
21
+ ],
22
+ "last": "Hockey~",
23
+ "suffix": "",
24
+ "affiliation": {},
25
+ "email": "bahockey@riacs.edu"
26
+ },
27
+ {
28
+ "first": "Frankie",
29
+ "middle": [],
30
+ "last": "James",
31
+ "suffix": "",
32
+ "affiliation": {},
33
+ "email": "fjames@riacs.edu"
34
+ }
35
+ ],
36
+ "year": "",
37
+ "venue": null,
38
+ "identifiers": {},
39
+ "abstract": "We describe an architecture for spoken dialogue interfaces to semi-autonomous systems that transforms speech~signals through successive representations of linguistic, dialogue, and domain knowledge. Each step produces an output, and a meta-output describing the transformation, with an executable program in a simple scripting language as the final result. The output/meta-output distinction permits perspicuous treatment of diverse tasks such as resolving pronouns, correcting user misconceptions, and optimizing scripts.",
40
+ "pdf_parse": {
41
+ "paper_id": "W00-0311",
42
+ "_pdf_hash": "",
43
+ "abstract": [
44
+ {
45
+ "text": "We describe an architecture for spoken dialogue interfaces to semi-autonomous systems that transforms speech~signals through successive representations of linguistic, dialogue, and domain knowledge. Each step produces an output, and a meta-output describing the transformation, with an executable program in a simple scripting language as the final result. The output/meta-output distinction permits perspicuous treatment of diverse tasks such as resolving pronouns, correcting user misconceptions, and optimizing scripts.",
46
+ "cite_spans": [],
47
+ "ref_spans": [],
48
+ "eq_spans": [],
49
+ "section": "Abstract",
50
+ "sec_num": null
51
+ }
52
+ ],
53
+ "body_text": [
54
+ {
55
+ "text": "The basic task we consider in this paper is that of using spoken language to give commands to a semiautonomous robot or other similar system. As evidence of the importance of this ta~k in the NLP community note that the early, influential system SHRDLU (Winograd, 1973) was intended to address just this type of problem. More recent work on spoken language interfaces to semi-autonomous robots include SRI's Flakey robot (Konolige et al., 1993) and NCARAI's InterBOT project (Perzanowski et al., 1998; Perzanowski et al., 1999) . A number of other systems have addressed part of the task. Com-mandTalk (Moore et al., 1997) , Circuit Fix-It Shop (Smith, 1997) and Tl:tAINS-96 (Traum and Allen, 1994; Traum and Andersen, 1999) are spoken language systems but they interface to simulation or help facilities rather than semi-autonomous agents. Jack's MOOse Lodge (Badler et al., 1999 ) takes text rather than speech as natural language input and the avatars being controlled are not semi-autonomous. Other researchers have considered particular aspects of the problem such as accounting for various aspects of actions (Webber, 1995; Pym et al., 1995) . In most of this and other related work the treatment is some variant of the following. If there is a speech interface, the input speech signal is converted into text. Text either from the recognizer or directly input by IThis paper also appears in the proceedings of the Sixth International Conference on Applied Natural Language Pro-cessing~ Seattle, WA, April 2000. the user is then converted into some kind of logical formula, which abstractly represents the user's intended command; this formula is.then fed into a command interpreter, which execdtes the command.",
56
+ "cite_spans": [
57
+ {
58
+ "start": 253,
59
+ "end": 269,
60
+ "text": "(Winograd, 1973)",
61
+ "ref_id": "BIBREF16"
62
+ },
63
+ {
64
+ "start": 421,
65
+ "end": 444,
66
+ "text": "(Konolige et al., 1993)",
67
+ "ref_id": "BIBREF2"
68
+ },
69
+ {
70
+ "start": 475,
71
+ "end": 501,
72
+ "text": "(Perzanowski et al., 1998;",
73
+ "ref_id": "BIBREF6"
74
+ },
75
+ {
76
+ "start": 502,
77
+ "end": 527,
78
+ "text": "Perzanowski et al., 1999)",
79
+ "ref_id": "BIBREF7"
80
+ },
81
+ {
82
+ "start": 602,
83
+ "end": 622,
84
+ "text": "(Moore et al., 1997)",
85
+ "ref_id": "BIBREF4"
86
+ },
87
+ {
88
+ "start": 645,
89
+ "end": 658,
90
+ "text": "(Smith, 1997)",
91
+ "ref_id": "BIBREF10"
92
+ },
93
+ {
94
+ "start": 675,
95
+ "end": 698,
96
+ "text": "(Traum and Allen, 1994;",
97
+ "ref_id": "BIBREF12"
98
+ },
99
+ {
100
+ "start": 699,
101
+ "end": 724,
102
+ "text": "Traum and Andersen, 1999)",
103
+ "ref_id": "BIBREF13"
104
+ },
105
+ {
106
+ "start": 860,
107
+ "end": 880,
108
+ "text": "(Badler et al., 1999",
109
+ "ref_id": "BIBREF0"
110
+ },
111
+ {
112
+ "start": 1115,
113
+ "end": 1129,
114
+ "text": "(Webber, 1995;",
115
+ "ref_id": "BIBREF15"
116
+ },
117
+ {
118
+ "start": 1130,
119
+ "end": 1147,
120
+ "text": "Pym et al., 1995)",
121
+ "ref_id": "BIBREF9"
122
+ }
123
+ ],
124
+ "ref_spans": [],
125
+ "eq_spans": [],
126
+ "section": "Introduction 1",
127
+ "sec_num": "1"
128
+ },
129
+ {
130
+ "text": "We do not think the standard treatment outlined above is in essence incorrect, but we do believe that, as it stands, it is in-need of some modification. This paper will in particular make three points. First, we suggest that the output representation should not be regarded as a logical expression, but: rather as a program in some kind of scripting language. Second, we argue that it is not merely the case that the process of converting the input signal to the final representation can sometimes go wrong; rather, this is the normal course of events, and the inferpretatiofi process should be organized with that assumption in mind. Third, we claim, perhaps surprisingly, that the first and second points are related. These claims are elaborated in Section 2.",
131
+ "cite_spans": [],
132
+ "ref_spans": [],
133
+ "eq_spans": [],
134
+ "section": "Introduction 1",
135
+ "sec_num": "1"
136
+ },
137
+ {
138
+ "text": "The remainder of the paper describes an architecture which addresses the issues outlined above, and which has been used to implement a prototype speech interface to a simulated semi-autonomous robot intended for deployment on the International Space Station. Sections 3 and 4 present an overview of the implemented interface, focussingon representational issues relevant to dialogue management. Illustrative examples of interactions with the system are provided in Section 5. Section 6 concludes.",
139
+ "cite_spans": [],
140
+ "ref_spans": [],
141
+ "eq_spans": [],
142
+ "section": "Introduction 1",
143
+ "sec_num": "1"
144
+ },
145
+ {
146
+ "text": "2 Theoretical Ideas,",
147
+ "cite_spans": [],
148
+ "ref_spans": [],
149
+ "eq_spans": [],
150
+ "section": "Introduction 1",
151
+ "sec_num": "1"
152
+ },
153
+ {
154
+ "text": "Let's first look in a little more detail at the question of what the output representation of a spoken language interface to a semi-autonomous robot/agent should be. In practice, there seem to be two main choices: atheoretical representations, or some kind of logic.",
155
+ "cite_spans": [],
156
+ "ref_spans": [],
157
+ "eq_spans": [],
158
+ "section": "Scripts vs Logical Forms",
159
+ "sec_num": "2.1"
160
+ },
161
+ {
162
+ "text": "Logic is indeed an excellent way to think about representing static relationships like database queries, but it is much less clear that it is a good way to represent commands. In real life, when people wish to give a command to a computer, they usually do so via its operating system; a complex com-mand is an expression in a scripting language like CSHELL, Perl, or VBScript. These languages are related to logical formalisms, but cannot be mapped onto them in a simple way. Here are some of the obvious differences:",
163
+ "cite_spans": [],
164
+ "ref_spans": [],
165
+ "eq_spans": [],
166
+ "section": "Scripts vs Logical Forms",
167
+ "sec_num": "2.1"
168
+ },
169
+ {
170
+ "text": "\u2022 A scripting language is essentially imperative, rather than relational.",
171
+ "cite_spans": [],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "Scripts vs Logical Forms",
175
+ "sec_num": "2.1"
176
+ },
177
+ {
178
+ "text": "\u2022 The notion of temporal sequence is fundamental to the language. \"Do P and then Q\" is not the same as \"Make the goals P and Q true\"; it is explicitly stated that P is to be done first. Simi~ larly, \"For each X in the list (A B C), do P(X)\"",
179
+ "cite_spans": [],
180
+ "ref_spans": [],
181
+ "eq_spans": [],
182
+ "section": "Scripts vs Logical Forms",
183
+ "sec_num": "2.1"
184
+ },
185
+ {
186
+ "text": "is not the same as \"For all X, make P(X) true\"; once again, the scripting language defines an order, but no~ the logical language 2.",
187
+ "cite_spans": [],
188
+ "ref_spans": [],
189
+ "eq_spans": [],
190
+ "section": "Scripts vs Logical Forms",
191
+ "sec_num": "2.1"
192
+ },
193
+ {
194
+ "text": "\u2022 Scripting languages assume that commands do not always succeed. For example, UNIX-based scripting languages like CSHELL provide each script with the three predefined streams stdin, stdout and stderr. Input is read from stdin and written to stdout; error messages, warnings and other comments are sent to stderr.",
195
+ "cite_spans": [],
196
+ "ref_spans": [],
197
+ "eq_spans": [],
198
+ "section": "Scripts vs Logical Forms",
199
+ "sec_num": "2.1"
200
+ },
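The three properties listed in the bullets above can be made concrete with a small sketch. The following Python fragment is illustrative only (the command names, the state dictionary, and the run helper are not from the paper): commands run in the stated temporal order, and each command can report problems on a separate "error stream" rather than aborting the whole script, in the spirit of stdout/stderr.

# Minimal sketch of the scripting-language properties described above:
# explicit temporal order, plus a separate error/warning stream.

def go_to(place, state):
    state["location"] = place
    return f"moved to {place}", None                  # (output, error)

def measure_pressure(state):
    return f"pressure at {state['location']}: 14.1 psi", None

def open_door(door, state):
    if state["doors"].get(door) == "open":
        return None, f"warning: {door} is already open"
    state["doors"][door] = "open"
    return f"opened {door}", None

def run(script, state):
    stdout, stderr = [], []
    for step in script:                               # steps execute in list order
        out, err = step(state)
        if out: stdout.append(out)
        if err: stderr.append(err)
    return stdout, stderr

state = {"location": "lower deck", "doors": {"crew hatch": "open"}}
script = [lambda s: go_to("flight deck", s),
          measure_pressure,
          lambda s: open_door("crew hatch", s)]
out, err = run(script, state)
print("\n".join(out))
print("\n".join(err))   # the warning does not stop the earlier steps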
201
+ {
202
+ "text": "Wedo not think that these properties of scripting language are accidental. They have evolved as the result of strong selectional pressure from real users with real-world tasks that need to be carried out, and represent a competitive way to meet said users' needs. We consequently think it is worth taking seriously the idea that a target representation produced by a spoken language interface should share many of these properties.",
203
+ "cite_spans": [],
204
+ "ref_spans": [],
205
+ "eq_spans": [],
206
+ "section": "Scripts vs Logical Forms",
207
+ "sec_num": "2.1"
208
+ },
209
+ {
210
+ "text": "Meta-outputs We now move on to the question of modelling the interpretation process, that is to say the process that converts the input (speech) signal to the output (executable) representation. As already indicated, we think it is important to realize that interpretation is a process which, like any other process, may succeed more or less well in achieving its intended goals. Users may express themselves unclearly or incompletely, or the system may more or less seriously fail to understand exactly what they mean. A good interpretation architecture will keep these considerations in mind.",
211
+ "cite_spans": [],
212
+ "ref_spans": [],
213
+ "eq_spans": [],
214
+ "section": "Fallible Interpretation: Outputs and",
215
+ "sec_num": "2.2"
216
+ },
217
+ {
218
+ "text": "Taking our lead from the description of scripting languages sketched above, we adapt the notion of the \"error stream\" to the interpretation process. In the course of interpreting an utterance, the system 2In cases like these, the theorem prover or logic programming interpreter used to evaluate the logical formula typically assigns a conventional order to the conjuncts; note however that this is part of the procedural semantics of the theorem prover/interpreter, and does not follow from the declarative semantics of the logical formalism. translates it into successively \"deeper\" levels of representation. Each translation step has not only an input (the representation consumed) and an output (the representation produced), but also something we will refer to as a \"meta-output': this provides information about how the translation was performed.",
219
+ "cite_spans": [],
220
+ "ref_spans": [],
221
+ "eq_spans": [],
222
+ "section": "Fallible Interpretation: Outputs and",
223
+ "sec_num": "2.2"
224
+ },
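As a rough illustration of the output/meta-output idea (a sketch under assumed names, not the system's actual code), each interpretation step can be written as a function that returns both its candidate outputs and a list of meta-outputs describing how those outputs were obtained; the meta-output list plays the role that stderr plays for a shell script.

# Sketch: every translation step returns (candidate_outputs, meta_outputs).

def resolve_definite(phrase, salient):
    """Toy resolution step for a definite description like 'the door'."""
    candidates = [x for x in salient if x.endswith(phrase)]
    if not candidates:
        return [], ["presupposition_failure(no_referent(%s))" % phrase]
    if len(candidates) > 1:
        # keep all readings, but record that the description was ambiguous
        return candidates, ["presupposition_failure(underspecified_definite(%s))" % phrase]
    return candidates, ["resolved_by_salience(%s)" % candidates[0]]

outputs, meta = resolve_definite("door", ["crew hatch door", "storage door"])
print(outputs)   # two candidate referents
print(meta)      # ['presupposition_failure(underspecified_definite(door))']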
225
+ {
226
+ "text": "At a high level of abstraction, our architecture will be as follows. Interpretation proceeds as a series of non-deterministic translation steps, each producing a set of possible outputs and associated metaoutputs. The final translation step produces an executable script. The interface attempts to simulate execution of each possible script produced, in order to determine what would happen if that script were selected; simulated execution can itself produce further meta-outputs. Finally, the system uses the meta-output information to decide what to do with the various possible interpretations it has produced. Possible actions incl~tde selection and execution of an output script, paraphrasing meta-output information back to the user, or some combination of the two.",
227
+ "cite_spans": [],
228
+ "ref_spans": [],
229
+ "eq_spans": [],
230
+ "section": "Fallible Interpretation: Outputs and",
231
+ "sec_num": "2.2"
232
+ },
233
+ {
234
+ "text": "In the following section, we present a more detailed description showing how the output/metaoutput'distinction works in a practical system.",
235
+ "cite_spans": [],
236
+ "ref_spans": [],
237
+ "eq_spans": [],
238
+ "section": "Fallible Interpretation: Outputs and",
239
+ "sec_num": "2.2"
240
+ },
241
+ {
242
+ "text": "The ideas sketched out above have been realized as a prototype spoken language dialogue interface to a simulated version of the Personal Satellite Assistant (PSi; (PSA, 2000) ). This section gives an overview of the implementation; in the following section, we focus on the specific aspects of dialogue management which are facilitated by the output/meta-output architecture.",
243
+ "cite_spans": [
244
+ {
245
+ "start": 163,
246
+ "end": 174,
247
+ "text": "(PSA, 2000)",
248
+ "ref_id": null
249
+ }
250
+ ],
251
+ "ref_spans": [],
252
+ "eq_spans": [],
253
+ "section": "A Prototype Implementation",
254
+ "sec_num": "3"
255
+ },
256
+ {
257
+ "text": "The real PSA is a miniature robot currently being developed at NASA Ames Research Center, which is intended for deployment on the Space Shuttle and/or International Space Station. It will be capable of free navigation in an indoor micro-gravity environment, and will provide mobile sensory capacity as a backup to a network of fixed sensors. The PSA will primarily be controlled by voice commands through a hand-held or head-mounted microphone, with speech and language processing being handled by an offboard processor. Since the speech processing units are not in fact physically connected to the PSA we envisage that they could also be used to control or monitor other environmental functions. In particular, our simulation allows voice access to the current and past values of the fixed sensor readings.",
258
+ "cite_spans": [],
259
+ "ref_spans": [],
260
+ "eq_spans": [],
261
+ "section": "Levels of Representation",
262
+ "sec_num": "3.1"
263
+ },
264
+ {
265
+ "text": "The initial PSA speech interface demo consists of a simple simulation of the Shuttle. State parameters include the PSA's current position, some environmental variables such as local temperature, pressure and carbon dioxide levels, and the status of the Shuttle's doors (open/closed). A visual display gives direct feedback on some of these parameters.",
266
+ "cite_spans": [],
267
+ "ref_spans": [],
268
+ "eq_spans": [],
269
+ "section": "Levels of Representation",
270
+ "sec_num": "3.1"
271
+ },
272
+ {
273
+ "text": "The speech and language processing architecture is based on that of the SRI CommandTalk system (Moore et al., 1997; Stent et al., 1999) . The system comprises a suite of about 20 agents, connected together using the SRI Open Agent Architecture (OAA; (Martin et al., 1998) ). Speech recognition is performed using a version of the Nuance recognizer (Nuance, 2000) . Initial language processing is carried out Using the SRI Gemini system (Dowding et al., 1993) , using a domain-independent unification. grammar and a domain-specific lexicon. The language processing grammar is compiled into a recognition grammar using the methods of (Moore et al., 1997) ; the n~ resnlt is that only grammatically wellformed utterances Gan be recognized. Output from the initial language-processing step is represented in a version of Quasi Logical Form (van Eijck and Moore, 1992) , and passed in that form to the dialogue manager. We refer to these as linguistic level representations.",
274
+ "cite_spans": [
275
+ {
276
+ "start": 95,
277
+ "end": 115,
278
+ "text": "(Moore et al., 1997;",
279
+ "ref_id": "BIBREF4"
280
+ },
281
+ {
282
+ "start": 116,
283
+ "end": 135,
284
+ "text": "Stent et al., 1999)",
285
+ "ref_id": "BIBREF11"
286
+ },
287
+ {
288
+ "start": 250,
289
+ "end": 271,
290
+ "text": "(Martin et al., 1998)",
291
+ "ref_id": "BIBREF3"
292
+ },
293
+ {
294
+ "start": 348,
295
+ "end": 362,
296
+ "text": "(Nuance, 2000)",
297
+ "ref_id": "BIBREF5"
298
+ },
299
+ {
300
+ "start": 436,
301
+ "end": 458,
302
+ "text": "(Dowding et al., 1993)",
303
+ "ref_id": "BIBREF1"
304
+ },
305
+ {
306
+ "start": 632,
307
+ "end": 652,
308
+ "text": "(Moore et al., 1997)",
309
+ "ref_id": "BIBREF4"
310
+ },
311
+ {
312
+ "start": 851,
313
+ "end": 863,
314
+ "text": "Moore, 1992)",
315
+ "ref_id": "BIBREF14"
316
+ }
317
+ ],
318
+ "ref_spans": [],
319
+ "eq_spans": [],
320
+ "section": "Levels of Representation",
321
+ "sec_num": "3.1"
322
+ },
323
+ {
324
+ "text": "The aspects of the system which are of primary interest here concern the dialogue manager (DM) and related modules. Once a linguistic level representation has been produced, the following processing steps occur:",
325
+ "cite_spans": [],
326
+ "ref_spans": [],
327
+ "eq_spans": [],
328
+ "section": "Levels of Representation",
329
+ "sec_num": "3.1"
330
+ },
331
+ {
332
+ "text": "\u2022 The linguistic level representation is converted into a discourse level representation. This primarily involves regularizing differences in surface form: so, for example, \"measure the pressure\" and \"what is the pressure?\" have different representations at the linguistic level, but the same representation at the discourse level.",
333
+ "cite_spans": [],
334
+ "ref_spans": [],
335
+ "eq_spans": [],
336
+ "section": "Levels of Representation",
337
+ "sec_num": "3.1"
338
+ },
339
+ {
340
+ "text": "\u2022 If necessary, the system attempts to resolve instances of ellipsis and anaphoric reference. For example, if the previous command was \"measure temperature at flight deck\", then the new command \"lower deck\" will be resolved to an expression meaning \"measure temperature at lower deck\". Similarly, if the previous command was \"move to the crew hatch\", then the command \"open it\" will be resolved to \"open the crew hatch\". We call the output of this step a resolved discourse level representation.",
341
+ "cite_spans": [],
342
+ "ref_spans": [],
343
+ "eq_spans": [],
344
+ "section": "Levels of Representation",
345
+ "sec_num": "3.1"
346
+ },
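A minimal sketch of this resolution step (field names and the dict representation are illustrative, not the system's actual discourse-level forms): missing slots of the new command are filled from the most recent compatible command in the dialogue context, as in "measure temperature at flight deck" followed by the fragment "lower deck".

# Sketch: resolve ellipsis by copying unspecified slots from the previous
# discourse-level command.

def resolve_ellipsis(new_cmd, context):
    meta = []
    if "action" not in new_cmd and context:
        previous = context[-1]
        resolved = dict(previous, **new_cmd)   # new values override old ones
        meta.append("resolved_from(%s)" % previous)
        return resolved, meta
    return new_cmd, meta

context = [{"action": "measure", "sensor": "temperature", "place": "flight deck"}]
fragment = {"place": "lower deck"}             # spoken as just "lower deck"
resolved, meta = resolve_ellipsis(fragment, context)
print(resolved)  # {'action': 'measure', 'sensor': 'temperature', 'place': 'lower deck'}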
347
+ {
348
+ "text": "\u2022 The resolved discourse level representation is converted into an executable script in a language essentially equivalent to a subset of CSHELL. This involves two sub-steps. First, quantified variables are given scope: for example, \"go to the flight deck and lower deck and measure pressure\" becomes something approximately equivalent to the script The point to note here is that :the foreach has scope over both the go_to and the measure actions; an alternate (incorrect) sCoping would be foreach x (flight_deck lower_deck)",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "Levels of Representation",
353
+ "sec_num": "3.1"
354
+ },
355
+ {
356
+ "text": "go_to Sx end measure pressure",
357
+ "cite_spans": [],
358
+ "ref_spans": [],
359
+ "eq_spans": [],
360
+ "section": "Levels of Representation",
361
+ "sec_num": "3.1"
362
+ },
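The extracted text above loses the layout of the two scripts, so here is a hedged reconstruction of the intended contrast in Python (the CSHELL-style surface syntax is only approximated; what matters is the scoping point): in the correct reading the loop has scope over both actions, while in the incorrect reading the measurement falls outside the loop.

# Correct scoping: 'measure pressure' is inside the foreach, so a reading
# is taken at each location.
def correct(locations):
    readings = []
    for x in locations:
        print("go_to", x)
        readings.append(("pressure", x))
    return readings

# Incorrect scoping: the loop only moves the robot; pressure is measured
# once, wherever the robot happens to end up.
def incorrect(locations):
    for x in locations:
        print("go_to", x)
    return [("pressure", locations[-1])]

print(correct(["flight_deck", "lower_deck"]))    # two readings
print(incorrect(["flight_deck", "lower_deck"]))  # a single reading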
363
+ {
364
+ "text": "The second sub-step is to attempt to optimize the plan. In the current example, this can be done by reordering the list (flight_deck lowerAeck). For instance, if the PSA is already at the lower deck, reversing the list will mean that the robot only makes one trip, instead of two.",
365
+ "cite_spans": [],
366
+ "ref_spans": [],
367
+ "eq_spans": [],
368
+ "section": "Levels of Representation",
369
+ "sec_num": "3.1"
370
+ },
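A sketch of this optimization step (the travel-cost table and numbers are invented for illustration): try the possible orderings of the visit list and keep the one whose simulated travel cost from the robot's current position is lowest.

from itertools import permutations

# Toy travel costs between locations; purely illustrative values.
DIST = {("lower_deck", "flight_deck"): 2, ("flight_deck", "lower_deck"): 2,
        ("lower_deck", "lower_deck"): 0, ("flight_deck", "flight_deck"): 0}

def travel_cost(start, route):
    cost, here = 0, start
    for place in route:
        cost += DIST[(here, place)]
        here = place
    return cost

def optimize(start, places):
    return min(permutations(places), key=lambda r: travel_cost(start, r))

# If the PSA is already at the lower deck, visiting it first saves a trip.
print(optimize("lower_deck", ["flight_deck", "lower_deck"]))
# -> ('lower_deck', 'flight_deck')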
371
+ {
380
+ "text": "The final step in the 'interlJretation process is plan evaluation: the syStem tries to work out what will happen if it actually executes the plan. (The relationship between plan evaluation and plan execution is described in more detail in Section 4.1). Among other things, this gives the dialogue manager the possibility of comparing different interpretations of the original command, and picking the one which is most efficient.",
381
+ "cite_spans": [],
382
+ "ref_spans": [],
383
+ "eq_spans": [],
384
+ "section": "Levels of Representation",
385
+ "sec_num": "3.1"
386
+ },
387
+ {
388
+ "text": "The above sketch shows how context-dependent interpretation is arranged as a series of nondeterministic translation stepS; in each case, we have described the input and the output for the step in question. We now go back to the concerns of Section 2. First, note that each translation step is in general fallible. We give severalexamples:",
389
+ "cite_spans": [],
390
+ "ref_spans": [],
391
+ "eq_spans": [],
392
+ "section": "How Meta-outputs Participate in the Translation",
393
+ "sec_num": "3.2"
394
+ },
395
+ {
396
+ "text": "\u2022 One of the most obvious cases arises when the user simply issues an invalid command, such as requesting the PSA to open a door D which is already open. Here, one of the meta~outputs issued by the plan evaluation step will be the term presupposition_failure(already_open(D));",
397
+ "cite_spans": [],
398
+ "ref_spans": [],
399
+ "eq_spans": [],
400
+ "section": "How Meta-outputs Participate in the Translation",
401
+ "sec_num": "3.2"
402
+ },
403
+ {
404
+ "text": "the DM can decide to paraphrase this back to the user as a surface string of the form \"D is already open\". Note that plan evaluation does not involve actually executing the final script, which can be important. For instance, if the command is \"go to the crew hatch and open it\" and the crew hatch is already open, the interface has the option of informing the user that there is a problem without-first carrying out the \"go to\" action.",
405
+ "cite_spans": [],
406
+ "ref_spans": [],
407
+ "eq_spans": [],
408
+ "section": "How Meta-outputs Participate in the Translation",
409
+ "sec_num": "3.2"
410
+ },
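To make this concrete, a small sketch (assumed names; not the interface's code) of evaluating an "open" action against a world state: instead of executing anything, evaluation works on a copy of the state and records a presupposition-failure meta-output that the DM can later paraphrase, before the robot has moved.

# Sketch: evaluating (not executing) a script against a copy of the world
# state, collecting presupposition failures as meta-outputs.

def evaluate(script, world):
    state = dict(world)          # simulate on a copy; nothing really happens
    meta = []
    for action, arg in script:
        if action == "go_to":
            state["location"] = arg
        elif action == "open":
            if state.get(arg) == "open":
                meta.append(f"presupposition_failure(already_open({arg}))")
            else:
                state[arg] = "open"
    return meta

world = {"location": "lower deck", "crew_hatch": "open"}
script = [("go_to", "crew hatch"), ("open", "crew_hatch")]
print(evaluate(script, world))
# ['presupposition_failure(already_open(crew_hatch))'] -- detected before moving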
411
+ {
412
+ "text": "\u2022 The resolution step can give rise to similar kinds of meta-output. For example, a command may include a referring expression that has no denotation, or an ambiguous denotation; for example, the user might say \"both decks\", presumably being unaware that there are in fact three of them. This time, the meta-output produced is presupposition_failure ( incorrect_size_of_set (2,3) )",
413
+ "cite_spans": [],
414
+ "ref_spans": [
415
+ {
416
+ "start": 350,
417
+ "end": 380,
418
+ "text": "( incorrect_size_of_set (2,3)",
419
+ "ref_id": null
420
+ }
421
+ ],
422
+ "eq_spans": [],
423
+ "section": "How Meta-outputs Participate in the Translation",
424
+ "sec_num": "3.2"
425
+ },
426
+ {
427
+ "text": "representing the user'sincorrect belief abou[ the number of decks. The DM then has the possibility of informing the user of this misconception by realizipg the meta-output term as the surface stung \"in. fact there are three of them\". Ambiguous denotation occurs when a description is under-specified. For instance, the user might say \"the deck\" in a situation where there is no clearly salient deck, either in the discourse situation :or in the simulated world: here, the meta-output will be presupposition_failure ( under specif ied_def inite (deck))",
428
+ "cite_spans": [],
429
+ "ref_spans": [],
430
+ "eq_spans": [],
431
+ "section": "How Meta-outputs Participate in the Translation",
432
+ "sec_num": "3.2"
433
+ },
434
+ {
435
+ "text": "which can be realized as the clarification question \"which deck do you mean?\"",
436
+ "cite_spans": [],
437
+ "ref_spans": [],
438
+ "eq_spans": [],
439
+ "section": "How Meta-outputs Participate in the Translation",
440
+ "sec_num": "3.2"
441
+ },
442
+ {
443
+ "text": "\u2022 A slightly more complex case involves plan costs. During plan evaluation, the system simulates execution of the output script while keeping track of execution cost. (Currently, the cost is just an estimate of the time required to execute the script). Execution costs are treated as meta-outputs of the form cost(C) and passed back through the interpreter so that the plan optimization step can make use of them.",
444
+ "cite_spans": [],
445
+ "ref_spans": [],
446
+ "eq_spans": [],
447
+ "section": "How Meta-outputs Participate in the Translation",
448
+ "sec_num": "3.2"
449
+ },
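A sketch of the cost bookkeeping (the per-action timing figures are invented for illustration): simulated execution accumulates an estimated duration and reports it as a cost(C) meta-output, which the optimizer and the dialogue manager can then inspect alongside the other meta-outputs.

# Sketch: estimate execution time during simulation and emit cost(C).
EST_SECONDS = {"go_to": 30, "measure": 5, "open": 10}   # illustrative values

def evaluate_with_cost(script):
    total = sum(EST_SECONDS[action] for action, _ in script)
    return [f"cost({total})"]

script = [("go_to", "flight deck"), ("measure", "pressure"),
          ("go_to", "lower deck"), ("measure", "pressure")]
print(evaluate_with_cost(script))   # ['cost(70)']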
450
+ {
451
+ "text": "Finally,. we consider what happens when the system receives incorrect input from the speech recognizer. Although the recognizer's language model is constrained so that it can only produce grammatical utterances, it can still misrecognize one grammatical string as another one. Many of these cases fall into one of a small number of syntactic patterns, which function as fairly reliable indicators of bad recognition. A typical example is conjunction involving a pronoun: if the system hears \"it and flight deck\", this is most likely a misrecognition of something like \"go to flight deck\". During the processing phase which translates linguistic level representations into discourse level representations, the system attempts to match each misrecognition pattern against the input linguistic form, and if successful produces a meta-output of the form presupposition_failure ( dubious_if (<Type>))",
452
+ "cite_spans": [],
453
+ "ref_spans": [],
454
+ "eq_spans": [],
455
+ "section": "How Meta-outputs Participate in the Translation",
456
+ "sec_num": "3.2"
457
+ },
458
+ {
459
+ "text": "These mete-outputs are passed down to the DM, which in the absence of sufficiently compelling contrary evidence will normally issue a response of the form \"I'm sorry, I think I misheard you\".",
460
+ "cite_spans": [],
461
+ "ref_spans": [],
462
+ "eq_spans": [],
463
+ "section": "How Meta-outputs Participate in the Translation",
464
+ "sec_num": "3.2"
465
+ },
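A sketch of such a pattern check (the pattern and the meta-output spelling follow the example above; the matching code itself is illustrative): a conjunction whose first conjunct is a bare pronoun is flagged as a dubious recognition result, and the flag travels downstream as an ordinary meta-output.

import re

# Sketch: flag linguistic forms that look like misrecognitions, e.g.
# a pronoun conjoined with a location ("it and flight deck").
DUBIOUS_PATTERNS = [
    ("pronoun_conjunction", re.compile(r"\b(it|them)\s+and\s+\w+", re.I)),
]

def check_recognition(utterance):
    meta = []
    for name, pattern in DUBIOUS_PATTERNS:
        if pattern.search(utterance):
            meta.append(f"presupposition_failure(dubious_if({name}))")
    return meta

print(check_recognition("it and flight deck"))
# ['presupposition_failure(dubious_if(pronoun_conjunction))']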
466
+ {
467
+ "text": "Based on Scripts and Meta-Outputs None of the individual functionalities outlined above are particularly novel in themselves. What we find new and interesting-is the fact that they can all be expressed in a uniform way in terms of the script output/meta-output architecture. This section presents three examples illustrating how the architecture can be used to simplify the overall organization of the system.",
468
+ "cite_spans": [],
469
+ "ref_spans": [],
470
+ "eq_spans": [],
471
+ "section": "A Compact Architecture for Dialogue Management",
472
+ "sec_num": "4"
473
+ },
474
+ {
475
+ "text": "execution and dialogue management. Recall that the DM simulates evaluation of the plan before running it, in order to obtain relevant metainformation. At plan execution time, plan actions result in changes to the world; at plan evaluation time, they result in simulated changes to the world and/or produce meta-outputs.",
476
+ "cite_spans": [],
477
+ "ref_spans": [],
478
+ "eq_spans": [],
479
+ "section": "Integration of plan evaluation, plan",
480
+ "sec_num": "4.1"
481
+ },
482
+ {
483
+ "text": "Conceptualizing plans as scripts rather than logical formulas permits an elegant treatment of the execution/evaluation dichotomy. There is one script interpreter, which functions both as a script executive and a script evaluator, and one set of rules which defines the procedural semantics of script actions. Rules are parameterized by execution type which is either \"execute\" or \"evaluate\". In \"evaluate\" mode, primitive actions modify a state vector which is threaded through the interpreter; in \"execute\" mode, they result in commands being sent to (real or simulated) effector agents. Conversely, \"meta-information\" actions, such as presupposition failures, result in output being sent to the metaoutput stream in \"evaluate\" mode, and in a null action in \"execute\" mode. The upshot is that a simple semantics can be assigned to rules like the following one, which defines the action 4.2 Using meta-outputs to choose between interpretations",
484
+ "cite_spans": [],
485
+ "ref_spans": [],
486
+ "eq_spans": [],
487
+ "section": "Integration of plan evaluation, plan",
488
+ "sec_num": "4.1"
489
+ },
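The dichotomy can be sketched as follows (a minimal illustration, not the system's rule language; the action and field names are assumptions): one interpreter walks the script, and the execution type decides whether primitive actions update a threaded state vector or are sent to an effector, and whether meta-information actions go to the meta-output stream or become no-ops.

# Sketch: one interpreter, parameterized by execution type.
def interpret(script, state, mode, effector=print):
    meta = []
    for action, arg in script:
        if action == "note":                          # a "meta-information" action
            if mode == "evaluate":
                meta.append(arg)                      # sent to the meta-output stream
            # in "execute" mode it is a null action
        elif action == "go_to":
            if mode == "evaluate":
                state = dict(state, location=arg)     # update the threaded state vector
            else:
                effector(("go_to", arg))              # command a (real or simulated) effector
    return state, meta

script = [("go_to", "flight deck"), ("note", "cost(30)")]
print(interpret(script, {"location": "lower deck"}, "evaluate"))
# ({'location': 'flight deck'}, ['cost(30)'])
interpret(script, {"location": "lower deck"}, "execute")   # prints ('go_to', 'flight deck')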
490
+ {
491
+ "text": "As described in the preceding section, the resolution step is in general non-deterministic and gives rise to meta-outputs which describe the type of resolution carried out. For example, consider a command involving a definite description, like \"open the door\". Depending on the preceding context, resolution will produce a number of possible interpretations; \"the door\" may be resolved to one or more contextually available doors, or the expression may be left unresolved. In each case, the type of resolution used appears as a meta-output, and is available to the dialogue manager when it decides which interpretation is most felicitous. By default, the DM's strategy is to attempt to~pply antecedents for referring expressions, preferring gh~ most recently occurring sortally appropriate candidate. In some cases, however, it is desirable to allow the default strategy to be overridden: for instance, it may result in a script which produces a presupposition failure during plan evaluation. Treating resolution choices and plan evaluation problems as similar types of objects makes it easy to implement this kind of idea.",
492
+ "cite_spans": [],
493
+ "ref_spans": [],
494
+ "eq_spans": [],
495
+ "section": "Integration of plan evaluation, plan",
496
+ "sec_num": "4.1"
497
+ },
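A sketch of this selection policy (the scoring scheme is invented for illustration, and the meta-output spellings are assumptions): candidate interpretations arrive as (script, meta-outputs) pairs, recency-based resolution is preferred by default, but a candidate whose simulated evaluation produced a presupposition failure is demoted.

# Sketch: pick the most felicitous (script, meta_outputs) candidate.
def score(candidate):
    script, meta = candidate
    penalty = sum(10 for m in meta if m.startswith("presupposition_failure"))
    bonus = sum(1 for m in meta if m.startswith("resolved_by_recency"))
    return bonus - penalty

candidates = [
    (["open(crew_hatch)"], ["resolved_by_recency(crew_hatch)",
                            "presupposition_failure(already_open(crew_hatch))"]),
    (["open(storage_door)"], []),
]
best = max(candidates, key=score)
print(best[0])   # ['open(storage_door)'] -- the default resolution is overridden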
498
+ {
499
+ "text": "Perhaps the key advantage of our architecture is that collecting together several types of information as a bag of meta-outputs simplifies the top-level structure of the dialogue manager. In our application, the critical choice of dialogue move comes after the dialogue manager has selected the most plausible interpretation. It now has to make two choices. First, it must decide whether or not to paraphrase any of the meta-outputs back to the user; for example, if resolution was unable to fill some argument position or find an antecedent for a pronoun, it may be appropriate to paraphrase the corresponding metaoutput as a question, e.g. \"where do you mean?\", or \"what do you mean by 'it' ?\". Having all the metaoutputs available together means that the DM is able to plan a coherent response: so if there are several meta-outputs which could potentially be worth paraphrasing, it typically realizes only the most important one. Second, if interpretation was able to produce a well-formed plan, the DM currently has the three options of executing it, paraphrasing it back to the user as a confirmation question, or doing nothing. Once again, this decision often requires global information about what has happened during the interpretation process. For example, knowing that plan evaluation showed that the plan would take significant time to execute makes it more plausible that the user would prefer to receive a confirmation.",
500
+ "cite_spans": [],
501
+ "ref_spans": [],
502
+ "eq_spans": [],
503
+ "section": "Using meta-outputs to choose between dialogue management moves",
504
+ "sec_num": "4.3"
505
+ },
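The top-level choice can be sketched like this (the threshold and the meta-output spellings are illustrative assumptions): the DM first looks for meta-outputs that force a clarification or correction, then uses the evaluated cost to decide between silent execution and a confirmation question.

# Sketch: choose a dialogue move from the chosen interpretation's meta-outputs.
def choose_move(script, meta, confirm_above=60):
    failures = [m for m in meta if m.startswith("presupposition_failure")]
    if failures:
        return ("paraphrase", failures[0])        # e.g. "which door do you mean?"
    costs = [int(m[5:-1]) for m in meta if m.startswith("cost(")]
    if costs and costs[0] > confirm_above:
        return ("confirm", script)                # long plan: ask before acting
    return ("execute", script)                    # cheap plan: just do it

print(choose_move(["measure(pressure)"], ["cost(5)"]))      # ('execute', ...)
print(choose_move(["go_to(flight_deck)"], ["cost(120)"]))   # ('confirm', ...)
print(choose_move(["open(door)"],
                  ["presupposition_failure(underspecified_definite(door))"]))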
506
+ {
507
+ "text": "This section presents a Commented sample dialogue with the implemented PSA interface demo chosen to illustrate the points discussed inthe previous sections. We focus in particular On ,the :role that metaoutputs play in interpretation. Text preceded by USER represents spoken utterances from the user. Text preceded by PSA represents synthesized spoken responses from the PSA. Text inside square brackets describes the actions carried out bythe PSA.",
508
+ "cite_spans": [],
509
+ "ref_spans": [],
510
+ "eq_spans": [],
511
+ "section": "5 Examples",
512
+ "sec_num": "58"
513
+ },
514
+ {
515
+ "text": "1. The PSA can move to various locations and measure environmental factors. In the first exzhange, reference resolution is used to interpret the phrase \"all three decks\" as referring to the lower deck, mid deck and flight deck. Plan optimization finds the most efficient way to order the locations, an d paraphrases the resulting plan back to the user. 3. Resolution of ellipsis can involve filling in unspecified semantic parameters to a syntactically complete command. It is not meaningful to talk about a sensor reading without reference to the place at which that reading was carried out; in the following example, resolution assumes that the location in question is the one most recently mentioned in the dialogue. Note also that since the PSA does not have to move in order to carry out the command, and",
516
+ "cite_spans": [],
517
+ "ref_spans": [],
518
+ "eq_spans": [],
519
+ "section": "5 Examples",
520
+ "sec_num": "58"
521
+ },
522
+ {
523
+ "text": ". . . . .",
524
+ "cite_spans": [],
525
+ "ref_spans": [],
526
+ "eq_spans": [],
527
+ "section": "5 Examples",
528
+ "sec_num": "58"
529
+ },
530
+ {
531
+ "text": "hence can execute it immediately, the DM reasonably decides to skip the confirmation question. This decision is made based on the meta-Output encoding the estimated cost of executing the :script.",
532
+ "cite_spans": [],
533
+ "ref_spans": [],
534
+ "eq_spans": [],
535
+ "section": "5 Examples",
536
+ "sec_num": "58"
537
+ },
538
+ {
539
+ "text": "'i USER: What was the carbon dioxide level at fifteen oh five according to the fixed sensors?",
540
+ "cite_spans": [],
541
+ "ref_spans": [],
542
+ "eq_spans": [],
543
+ "section": "5 Examples",
544
+ "sec_num": "58"
545
+ },
546
+ {
547
+ "text": "PSA: According to the fixed sensors, at fifteen oh five the carbon dioxide level at the pilot's seat was one percent. -.",
548
+ "cite_spans": [],
549
+ "ref_spans": [],
550
+ "eq_spans": [],
551
+ "section": "5 Examples",
552
+ "sec_num": "58"
553
+ },
554
+ {
555
+ "text": "The system encounters a user misconception, which results in a \"presupposition failure\" meta-output being produced during reference resolutiom It \u00a3onsequently decides to alert the user to this, rather than attempt to carry out the command.",
556
+ "cite_spans": [],
557
+ "ref_spans": [],
558
+ "eq_spans": [],
559
+ "section": "5 Examples",
560
+ "sec_num": "58"
561
+ },
562
+ {
563
+ "text": "USER: Close both doors.",
564
+ "cite_spans": [],
565
+ "ref_spans": [],
566
+ "eq_spans": [],
567
+ "section": "5 Examples",
568
+ "sec_num": "58"
569
+ },
570
+ {
571
+ "text": "PSA: There are in fact three of them.",
572
+ "cite_spans": [],
573
+ "ref_spans": [],
574
+ "eq_spans": [],
575
+ "section": "5 Examples",
576
+ "sec_num": "58"
577
+ },
578
+ {
579
+ "text": "The user utterance includes a pronoun, \"it\". The system resolves \"it\" to. \"the crew hatch\", and issues an appropriate paraphrase in order to confirm that this is what the user meant.",
580
+ "cite_spans": [],
581
+ "ref_spans": [],
582
+ "eq_spans": [],
583
+ "section": "5 Examples",
584
+ "sec_num": "58"
585
+ },
586
+ {
587
+ "text": "USER: Go to crew hatch and close it.",
588
+ "cite_spans": [],
589
+ "ref_spans": [],
590
+ "eq_spans": [],
591
+ "section": "5 Examples",
592
+ "sec_num": "58"
593
+ },
594
+ {
595
+ "text": "PSA: I will go to crew hatch and I will close crew hatch, okay?",
596
+ "cite_spans": [],
597
+ "ref_spans": [],
598
+ "eq_spans": [],
599
+ "section": "5 Examples",
600
+ "sec_num": "58"
601
+ },
602
+ {
603
+ "text": "The user asks the system to close the door, but it is unclear which door they are referring to. An \"ambiguous reference\" meta-output is produced by reference resolution, resulting in the DM issuing a clarification question.",
604
+ "cite_spans": [],
605
+ "ref_spans": [],
606
+ "eq_spans": [],
607
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
608
+ "sec_num": null
609
+ },
610
+ {
611
+ "text": "USER: Close the door.",
612
+ "cite_spans": [],
613
+ "ref_spans": [],
614
+ "eq_spans": [],
615
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
616
+ "sec_num": null
617
+ },
618
+ {
619
+ "text": "PSA: Which door do you mean?",
620
+ "cite_spans": [],
621
+ "ref_spans": [],
622
+ "eq_spans": [],
623
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
624
+ "sec_num": null
625
+ },
626
+ {
627
+ "text": "The user response is successfully interpreted by reference resolution as an instance of ellipsis. Trying to close the crew hatch a second time however causes a presupposition failure metaoutput to be generated during plan evaluation.",
628
+ "cite_spans": [],
629
+ "ref_spans": [],
630
+ "eq_spans": [],
631
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
632
+ "sec_num": null
633
+ },
634
+ {
635
+ "text": "The system informs the user of this and takes no action.",
636
+ "cite_spans": [],
637
+ "ref_spans": [],
638
+ "eq_spans": [],
639
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
640
+ "sec_num": null
641
+ },
642
+ {
643
+ "text": "USER: The crew hatch.",
644
+ "cite_spans": [],
645
+ "ref_spans": [],
646
+ "eq_spans": [],
647
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
648
+ "sec_num": null
649
+ },
650
+ {
651
+ "text": "PSA: The crew hatch is already closed.",
652
+ "cite_spans": [],
653
+ "ref_spans": [],
654
+ "eq_spans": [],
655
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
656
+ "sec_num": null
657
+ },
658
+ {
659
+ "text": "The user presents three locations in a suboptimal order. Plan evaluation finds a more efficient ordering.",
660
+ "cite_spans": [],
661
+ "ref_spans": [],
662
+ "eq_spans": [],
663
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
664
+ "sec_num": null
665
+ },
666
+ {
667
+ "text": "USER: Move to storage lockers, commander's seat and flight deck and measure temperature.",
668
+ "cite_spans": [],
669
+ "ref_spans": [],
670
+ "eq_spans": [],
671
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
672
+ "sec_num": null
673
+ },
674
+ {
675
+ "text": "PSA: I will move to flight deck, commander's seat and then storage lockers and I will measure temperature, okay? USER: Sure.",
676
+ "cite_spans": [],
677
+ "ref_spans": [],
678
+ "eq_spans": [],
679
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
680
+ "sec_num": null
681
+ },
682
+ {
683
+ "text": "[PSA moves to flight deck, commander's seat and storage lockers in that order and reports temperature at each location] 9. The user asks the system to repeat the previous command. Since the PSA is now starting from a different position, its most efficient plan is no longer the same, and it informs the user of this.",
684
+ "cite_spans": [],
685
+ "ref_spans": [],
686
+ "eq_spans": [],
687
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
688
+ "sec_num": null
689
+ },
690
+ {
691
+ "text": "USER: Do that again.",
692
+ "cite_spans": [],
693
+ "ref_spans": [],
694
+ "eq_spans": [],
695
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
696
+ "sec_num": null
697
+ },
698
+ {
699
+ "text": "'PSA:",
700
+ "cite_spans": [],
701
+ "ref_spans": [],
702
+ "eq_spans": [],
703
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
704
+ "sec_num": null
705
+ },
706
+ {
707
+ "text": "I will move to storage lockers, commander's seat and then flight deck and I will measure temperature, okay? USER: Yes. [PSA moves to storage lockers] PSA: The temperature at the storage lockers is 19.9 degrees Celsius. [PSA starts moving to commander's seat] 10. The user interrupts the PSA in the middle of the plan, and asks it to go back to its last stopping place. Since \"stop\" may involve a potentially hazardous condition, the interface obeys immediately, without attempting to confirm first. We have described a generic architecture for spoken language dialogue interfaces to semi-autonomous agents, based on the standard notion of translating to successively deeper levels of representation. The novel aspects of our scheme center around two ideas: first, that the final output representations are best conceptualized not as logical expressions but rather as programs in a scripting language; second, that steps in the translation process should produce not only a simple output, but also meta-information describing how the output was produced. We have presented examples suggesting how several apparently diverse types of dialogue behavior can be captured simply within our framework, and outlined a prototype implementation of the scheme.",
708
+ "cite_spans": [
709
+ {
710
+ "start": 119,
711
+ "end": 149,
712
+ "text": "[PSA moves to storage lockers]",
713
+ "ref_id": null
714
+ },
715
+ {
716
+ "start": 219,
717
+ "end": 258,
718
+ "text": "[PSA starts moving to commander's seat]",
719
+ "ref_id": null
720
+ }
721
+ ],
722
+ "ref_spans": [],
723
+ "eq_spans": [],
724
+ "section": "USER: Yeah. [PSA moves to crew hatch and closes it]",
725
+ "sec_num": null
726
+ }
727
+ ],
728
+ "back_matter": [],
729
+ "bib_entries": {
730
+ "BIBREF0": {
731
+ "ref_id": "b0",
732
+ "title": "Real time virtual humans",
733
+ "authors": [
734
+ {
735
+ "first": "N",
736
+ "middle": [],
737
+ "last": "Badler",
738
+ "suffix": ""
739
+ },
740
+ {
741
+ "first": "R",
742
+ "middle": [],
743
+ "last": "Bindiganavale",
744
+ "suffix": ""
745
+ },
746
+ {
747
+ "first": "J",
748
+ "middle": [],
749
+ "last": "Bourne",
750
+ "suffix": ""
751
+ },
752
+ {
753
+ "first": "J",
754
+ "middle": [],
755
+ "last": "Allbeck",
756
+ "suffix": ""
757
+ },
758
+ {
759
+ "first": "J",
760
+ "middle": [],
761
+ "last": "Shi",
762
+ "suffix": ""
763
+ },
764
+ {
765
+ "first": "M",
766
+ "middle": [],
767
+ "last": "Palmer",
768
+ "suffix": ""
769
+ }
770
+ ],
771
+ "year": 1999,
772
+ "venue": "International Conference on Digital Media Futures",
773
+ "volume": "",
774
+ "issue": "",
775
+ "pages": "",
776
+ "other_ids": {},
777
+ "num": null,
778
+ "urls": [],
779
+ "raw_text": "N. Badler, R. Bindiganavale, J. Bourne, J. Allbeck, J. Shi, and M. Palmer. 1999. Real time virtual humans. In International Conference on Digital Media Futures.",
780
+ "links": null
781
+ },
782
+ "BIBREF1": {
783
+ "ref_id": "b1",
784
+ "title": "Gemini: A natural language system for spoken language understanding",
785
+ "authors": [
786
+ {
787
+ "first": "J",
788
+ "middle": [],
789
+ "last": "Dowding",
790
+ "suffix": ""
791
+ },
792
+ {
793
+ "first": "M",
794
+ "middle": [],
795
+ "last": "Gawron",
796
+ "suffix": ""
797
+ },
798
+ {
799
+ "first": "D",
800
+ "middle": [],
801
+ "last": "Appelt",
802
+ "suffix": ""
803
+ },
804
+ {
805
+ "first": "L",
806
+ "middle": [],
807
+ "last": "Cherny",
808
+ "suffix": ""
809
+ },
810
+ {
811
+ "first": "R",
812
+ "middle": [],
813
+ "last": "Moore",
814
+ "suffix": ""
815
+ },
816
+ {
817
+ "first": "D",
818
+ "middle": [],
819
+ "last": "Moran",
820
+ "suffix": ""
821
+ }
822
+ ],
823
+ "year": 1993,
824
+ "venue": "Proceedings of the Thirty-First Annual Meeting of the Association for Computational Linguistics",
825
+ "volume": "",
826
+ "issue": "",
827
+ "pages": "",
828
+ "other_ids": {},
829
+ "num": null,
830
+ "urls": [],
831
+ "raw_text": "J. Dowding, M. Gawron, D. Appelt, L. Cherny, R. Moore, and D. Moran. 1993. Gemini: A nat- ural language system for spoken language un- derstanding. In Proceedings of the Thirty-First Annual Meeting of the Association for Computa- tional Linguistics.",
832
+ "links": null
833
+ },
834
+ "BIBREF2": {
835
+ "ref_id": "b2",
836
+ "title": "Flakey in action: The 1992 AAAI robot competition",
837
+ "authors": [
838
+ {
839
+ "first": "K",
840
+ "middle": [],
841
+ "last": "Konolige",
842
+ "suffix": ""
843
+ },
844
+ {
845
+ "first": "K",
846
+ "middle": [],
847
+ "last": "Myers",
848
+ "suffix": ""
849
+ },
850
+ {
851
+ "first": "E",
852
+ "middle": [],
853
+ "last": "Ruspini",
854
+ "suffix": ""
855
+ },
856
+ {
857
+ "first": "A",
858
+ "middle": [],
859
+ "last": "Saffiotti",
860
+ "suffix": ""
861
+ },
862
+ {
863
+ "first": ";",
864
+ "middle": [],
865
+ "last": "Sri",
866
+ "suffix": ""
867
+ },
868
+ {
869
+ "first": "A",
870
+ "middle": [
871
+ "I"
872
+ ],
873
+ "last": "Center",
874
+ "suffix": ""
875
+ }
876
+ ],
877
+ "year": 1993,
878
+ "venue": "SRI International, 333 Ravenswood Ave",
879
+ "volume": "",
880
+ "issue": "",
881
+ "pages": "",
882
+ "other_ids": {},
883
+ "num": null,
884
+ "urls": [],
885
+ "raw_text": "K. Konolige, K. Myers, E. Ruspini, and A. Saf- fiotti. 1993. Flakey in action: The 1992 AAAI robot competition. Technical Report SRI Techni- cal Note 528, SRI, AI Center, SRI International, 333 Ravenswood Ave., Menlo Park, CA 94025.",
886
+ "links": null
887
+ },
888
+ "BIBREF3": {
889
+ "ref_id": "b3",
890
+ "title": "Building distributed software systems with the open agent architecture",
891
+ "authors": [
892
+ {
893
+ "first": "D",
894
+ "middle": [],
895
+ "last": "Martin",
896
+ "suffix": ""
897
+ },
898
+ {
899
+ "first": "A",
900
+ "middle": [],
901
+ "last": "Cheyer",
902
+ "suffix": ""
903
+ },
904
+ {
905
+ "first": "D",
906
+ "middle": [],
907
+ "last": "Moran",
908
+ "suffix": ""
909
+ }
910
+ ],
911
+ "year": 1998,
912
+ "venue": "Proceedings of the Third International Conference on the Practical Application of Intelligent Agents and Multi-Agent Tech~ nology",
913
+ "volume": "",
914
+ "issue": "",
915
+ "pages": "",
916
+ "other_ids": {},
917
+ "num": null,
918
+ "urls": [],
919
+ "raw_text": "D. Martin, A. Cheyer, and D. Moran. 1998. Build- ing distributed software systems with the open agent architecture. In Proceedings of the Third International Conference on the Practical Appli- cation of Intelligent Agents and Multi-Agent Tech~ nology.",
920
+ "links": null
921
+ },
922
+ "BIBREF4": {
923
+ "ref_id": "b4",
924
+ "title": "CommandTalk: A spoken:language interface for battlefield simulations",
925
+ "authors": [
926
+ {
927
+ "first": "R",
928
+ "middle": [],
929
+ "last": "Moore",
930
+ "suffix": ""
931
+ },
932
+ {
933
+ "first": "J",
934
+ "middle": [],
935
+ "last": "Dowding",
936
+ "suffix": ""
937
+ },
938
+ {
939
+ "first": "H",
940
+ "middle": [],
941
+ "last": "Bratt",
942
+ "suffix": ""
943
+ },
944
+ {
945
+ "first": "J",
946
+ "middle": [],
947
+ "last": "Gawron",
948
+ "suffix": ""
949
+ },
950
+ {
951
+ "first": "Y",
952
+ "middle": [],
953
+ "last": "Gorfu",
954
+ "suffix": ""
955
+ },
956
+ {
957
+ "first": "A",
958
+ "middle": [],
959
+ "last": "Cheyer",
960
+ "suffix": ""
961
+ }
962
+ ],
963
+ "year": 1997,
964
+ "venue": "ProCeedings of the Fifth Conference on Applied NaturaiLanguage Processing",
965
+ "volume": "",
966
+ "issue": "",
967
+ "pages": "1--7",
968
+ "other_ids": {},
969
+ "num": null,
970
+ "urls": [],
971
+ "raw_text": "R. Moore, J. Dowding, H. Bratt, J. Gawron, Y. Gorfu, and A. Cheyer. 1997. CommandTalk: A spoken:language interface for battlefield simu- lations. In ProCeedings of the Fifth Conference on Applied NaturaiLanguage Processing, pages 1-7.",
972
+ "links": null
973
+ },
974
+ "BIBREF5": {
975
+ "ref_id": "b5",
976
+ "title": "Nuance Communications",
977
+ "authors": [
978
+ {
979
+ "first": "",
980
+ "middle": [],
981
+ "last": "Nuance",
982
+ "suffix": ""
983
+ }
984
+ ],
985
+ "year": 2000,
986
+ "venue": "",
987
+ "volume": "",
988
+ "issue": "",
989
+ "pages": "",
990
+ "other_ids": {},
991
+ "num": null,
992
+ "urls": [],
993
+ "raw_text": "Nuance, 2000. Nuance Communications, Inc. http://www.nuance.com. As of 9 March 2000.",
994
+ "links": null
995
+ },
996
+ "BIBREF6": {
997
+ "ref_id": "b6",
998
+ "title": "Integrating natural language and gesture in a robotics domain",
999
+ "authors": [
1000
+ {
1001
+ "first": "D",
1002
+ "middle": [],
1003
+ "last": "Perzanowski",
1004
+ "suffix": ""
1005
+ },
1006
+ {
1007
+ "first": "A",
1008
+ "middle": [],
1009
+ "last": "Schultz",
1010
+ "suffix": ""
1011
+ },
1012
+ {
1013
+ "first": "W",
1014
+ "middle": [],
1015
+ "last": "Adams",
1016
+ "suffix": ""
1017
+ }
1018
+ ],
1019
+ "year": 1998,
1020
+ "venue": "IEEE International Symposium on Intelligent Control: ISIC/CIRA//ISAS Joint Conference",
1021
+ "volume": "",
1022
+ "issue": "",
1023
+ "pages": "247--252",
1024
+ "other_ids": {},
1025
+ "num": null,
1026
+ "urls": [],
1027
+ "raw_text": "D. Perzanowski, A. Schultz, and W. Adams. 1998. Integrating natural language and gesture in a robotics domain. In IEEE International Sympo- sium on Intelligent Control: ISIC/CIRA//ISAS Joint Conference, pages 247-252, Gaithersburg, MD: National Institute of Standards and Tech- nology.",
1028
+ "links": null
1029
+ },
1030
+ "BIBREF7": {
1031
+ "ref_id": "b7",
1032
+ "title": "Goal tracking in a natural language interface: Towards achieving adjustable autonomy",
1033
+ "authors": [
1034
+ {
1035
+ "first": "D",
1036
+ "middle": [],
1037
+ "last": "Perzanowski",
1038
+ "suffix": ""
1039
+ },
1040
+ {
1041
+ "first": "A",
1042
+ "middle": [],
1043
+ "last": "Schultz",
1044
+ "suffix": ""
1045
+ },
1046
+ {
1047
+ "first": "W",
1048
+ "middle": [],
1049
+ "last": "Adams",
1050
+ "suffix": ""
1051
+ },
1052
+ {
1053
+ "first": "E",
1054
+ "middle": [],
1055
+ "last": "Marsh",
1056
+ "suffix": ""
1057
+ }
1058
+ ],
1059
+ "year": 1999,
1060
+ "venue": "ISIS/CIRA99 Conference",
1061
+ "volume": "",
1062
+ "issue": "",
1063
+ "pages": "",
1064
+ "other_ids": {},
1065
+ "num": null,
1066
+ "urls": [],
1067
+ "raw_text": "D. Perzanowski, A. Schultz, W. Adams, and E. Marsh. 1999. Goal tracking in a natural lan- guage interface: Towards achieving adjustable au- tonomy. In ISIS/CIRA99 Conference, Monterey, CA. IEEE.",
1068
+ "links": null
1069
+ },
1070
+ "BIBREF8": {
1071
+ "ref_id": "b8",
1072
+ "title": "Personal Satellite Assistant (PSA) Project",
1073
+ "authors": [],
1074
+ "year": 2000,
1075
+ "venue": "PSA",
1076
+ "volume": "",
1077
+ "issue": "",
1078
+ "pages": "",
1079
+ "other_ids": {},
1080
+ "num": null,
1081
+ "urls": [],
1082
+ "raw_text": "PSA, 2000. Personal Satellite Assistant (PSA) Project. http://ic.arc.nasa.gov/ic/psa/. As of 9 March 2000.",
1083
+ "links": null
1084
+ },
1085
+ "BIBREF9": {
1086
+ "ref_id": "b9",
1087
+ "title": "Actions as processes: a position on planning",
1088
+ "authors": [
1089
+ {
1090
+ "first": "D",
1091
+ "middle": [],
1092
+ "last": "Pym",
1093
+ "suffix": ""
1094
+ },
1095
+ {
1096
+ "first": "L",
1097
+ "middle": [],
1098
+ "last": "Pryor",
1099
+ "suffix": ""
1100
+ },
1101
+ {
1102
+ "first": "D",
1103
+ "middle": [],
1104
+ "last": "Murphy",
1105
+ "suffix": ""
1106
+ }
1107
+ ],
1108
+ "year": 1995,
1109
+ "venue": "Working Notes, AAAI Symposium on Extending Theories of Action",
1110
+ "volume": "",
1111
+ "issue": "",
1112
+ "pages": "169--173",
1113
+ "other_ids": {},
1114
+ "num": null,
1115
+ "urls": [],
1116
+ "raw_text": "D. Pym, L. Pryor, and D. Murphy. 1995. Actions as processes: a position on planning. In Working Notes, AAAI Symposium on Extending Theories of Action, pages 169-173.",
1117
+ "links": null
1118
+ },
1119
+ "BIBREF10": {
1120
+ "ref_id": "b10",
1121
+ "title": "An evaluation of strategies for selective utterance verification for spoken natural language dialog",
1122
+ "authors": [
1123
+ {
1124
+ "first": "R",
1125
+ "middle": [
1126
+ "W"
1127
+ ],
1128
+ "last": "Smith",
1129
+ "suffix": ""
1130
+ }
1131
+ ],
1132
+ "year": 1997,
1133
+ "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
1134
+ "volume": "",
1135
+ "issue": "",
1136
+ "pages": "41--48",
1137
+ "other_ids": {},
1138
+ "num": null,
1139
+ "urls": [],
1140
+ "raw_text": "R. W. Smith. 1997. An evaluation of strategies for selective utterance verification for spoken natural language dialog. In Proceedings of the Fifth Con- ference on Applied Natural Language Processing, pages 41-48.",
1141
+ "links": null
1142
+ },
1143
+ "BIBREF11": {
1144
+ "ref_id": "b11",
1145
+ "title": "The CommandTalk spoken dialogue system",
1146
+ "authors": [
1147
+ {
1148
+ "first": "A",
1149
+ "middle": [],
1150
+ "last": "Stent",
1151
+ "suffix": ""
1152
+ },
1153
+ {
1154
+ "first": "J",
1155
+ "middle": [],
1156
+ "last": "Dowding",
1157
+ "suffix": ""
1158
+ },
1159
+ {
1160
+ "first": "J",
1161
+ "middle": [],
1162
+ "last": "Gawron",
1163
+ "suffix": ""
1164
+ },
1165
+ {
1166
+ "first": "E",
1167
+ "middle": [],
1168
+ "last": "Bratt",
1169
+ "suffix": ""
1170
+ },
1171
+ {
1172
+ "first": "R",
1173
+ "middle": [],
1174
+ "last": "Moore",
1175
+ "suffix": ""
1176
+ }
1177
+ ],
1178
+ "year": 1999,
1179
+ "venue": "Proceedings of the Thirty-Seventh Annual Meeting of the Association for Computational Linguistics",
1180
+ "volume": "",
1181
+ "issue": "",
1182
+ "pages": "183--190",
1183
+ "other_ids": {},
1184
+ "num": null,
1185
+ "urls": [],
1186
+ "raw_text": "A. Stent, J. Dowding, J. Gawron, E. Bratt, and R. Moore. 1999. The CommandTalk spoken di- alogue system. In Proceedings of the Thirty- Seventh Annual Meeting of the Association for Computational Linguistics, pages 183-190.",
1187
+ "links": null
1188
+ },
1189
+ "BIBREF12": {
1190
+ "ref_id": "b12",
1191
+ "title": "Discourse obligations in dialogue processing",
1192
+ "authors": [
1193
+ {
1194
+ "first": "D",
1195
+ "middle": [
1196
+ "R"
1197
+ ],
1198
+ "last": "Traum",
1199
+ "suffix": ""
1200
+ },
1201
+ {
1202
+ "first": "J",
1203
+ "middle": [],
1204
+ "last": "Allen",
1205
+ "suffix": ""
1206
+ }
1207
+ ],
1208
+ "year": 1994,
1209
+ "venue": "Proceedings of the Thirty-Second Annual Meeting of the Association for Computational Linguistics",
1210
+ "volume": "",
1211
+ "issue": "",
1212
+ "pages": "1--8",
1213
+ "other_ids": {},
1214
+ "num": null,
1215
+ "urls": [],
1216
+ "raw_text": "D. R. Traum and J. Allen. 1994. Discourse obliga- tions in dialogue processing. In Proceedings of the Thirty-Second Annual Meeting of the Association for Computational Linguistics, pages 1-8.",
1217
+ "links": null
1218
+ },
1219
+ "BIBREF13": {
1220
+ "ref_id": "b13",
1221
+ "title": "Representations of dialogue state for domain and task independent meta-dialogue",
1222
+ "authors": [
1223
+ {
1224
+ "first": "D",
1225
+ "middle": [
1226
+ "R"
1227
+ ],
1228
+ "last": "Traum",
1229
+ "suffix": ""
1230
+ },
1231
+ {
1232
+ "first": "C",
1233
+ "middle": [
1234
+ "F"
1235
+ ],
1236
+ "last": "Andersen",
1237
+ "suffix": ""
1238
+ }
1239
+ ],
1240
+ "year": 1999,
1241
+ "venue": "Proceedings of the I J-CAI'99 Workshop on KnowTedge and Reasoning in Practical Dialogue Systems",
1242
+ "volume": "",
1243
+ "issue": "",
1244
+ "pages": "113--120",
1245
+ "other_ids": {},
1246
+ "num": null,
1247
+ "urls": [],
1248
+ "raw_text": "D. R. Traum and C. F. Andersen. 1999. Represen- tations of dialogue state for domain and task inde- pendent meta-dialogue. In Proceedings of the I J- CAI'99 Workshop on KnowTedge and Reasoning in Practical Dialogue Systems, pages 113-120.",
1249
+ "links": null
1250
+ },
1251
+ "BIBREF14": {
1252
+ "ref_id": "b14",
1253
+ "title": "Semantic rules for English",
1254
+ "authors": [
1255
+ {
1256
+ "first": "J",
1257
+ "middle": [],
1258
+ "last": "Van Eijck",
1259
+ "suffix": ""
1260
+ },
1261
+ {
1262
+ "first": "R",
1263
+ "middle": [],
1264
+ "last": "Moore",
1265
+ "suffix": ""
1266
+ }
1267
+ ],
1268
+ "year": 1992,
1269
+ "venue": "",
1270
+ "volume": "",
1271
+ "issue": "",
1272
+ "pages": "",
1273
+ "other_ids": {},
1274
+ "num": null,
1275
+ "urls": [],
1276
+ "raw_text": "J. van Eijck and R. Moore. 1992. Semantic rules for English. In H. Alshawi, editor, The Core Lan- guage Engine. MIT Press.",
1277
+ "links": null
1278
+ },
1279
+ "BIBREF15": {
1280
+ "ref_id": "b15",
1281
+ "title": "Instructing animated agents: Viewing language in behavioral terms",
1282
+ "authors": [
1283
+ {
1284
+ "first": "B",
1285
+ "middle": [],
1286
+ "last": "Webber",
1287
+ "suffix": ""
1288
+ }
1289
+ ],
1290
+ "year": 1995,
1291
+ "venue": "Proceedings of the International Conference on Cooperative Multi-modal Communication",
1292
+ "volume": "",
1293
+ "issue": "",
1294
+ "pages": "",
1295
+ "other_ids": {},
1296
+ "num": null,
1297
+ "urls": [],
1298
+ "raw_text": "B. Webber. 1995. Instructing animated agents: Viewing language in behavioral terms. In Proceed- ings of the International Conference on Coopera- tive Multi-modal Communication.",
1299
+ "links": null
1300
+ },
1301
+ "BIBREF16": {
1302
+ "ref_id": "b16",
1303
+ "title": "A procedural model of language understanding",
1304
+ "authors": [
1305
+ {
1306
+ "first": "T",
1307
+ "middle": [
1308
+ "A"
1309
+ ],
1310
+ "last": "Winograd",
1311
+ "suffix": ""
1312
+ }
1313
+ ],
1314
+ "year": 1973,
1315
+ "venue": "",
1316
+ "volume": "",
1317
+ "issue": "",
1318
+ "pages": "",
1319
+ "other_ids": {},
1320
+ "num": null,
1321
+ "urls": [],
1322
+ "raw_text": "T. A. Winograd. 1973. A procedural model of lan- guage understanding. In R. C. Shank and K. M.",
1323
+ "links": null
1324
+ },
1325
+ "BIBREF17": {
1326
+ "ref_id": "b17",
1327
+ "title": "Computer Models of Thought and Language",
1328
+ "authors": [
1329
+ {
1330
+ "first": "Colby",
1331
+ "middle": [],
1332
+ "last": "",
1333
+ "suffix": ""
1334
+ }
1335
+ ],
1336
+ "year": null,
1337
+ "venue": "",
1338
+ "volume": "",
1339
+ "issue": "",
1340
+ "pages": "",
1341
+ "other_ids": {},
1342
+ "num": null,
1343
+ "urls": [],
1344
+ "raw_text": "Colby, editors, Computer Models of Thought and Language. Freeman, San Francisco, CA. i",
1345
+ "links": null
1346
+ }
1347
+ },
1348
+ "ref_entries": {
1349
+ "FIGREF1": {
1350
+ "text": "of attempting to open a door which may already be open: procedure ( open_door (D) , if_then_else (status (D, open_closed, open), presupposition_failure (already_open(D)), change_status (D, open_closed, open)))",
1351
+ "uris": null,
1352
+ "type_str": "figure",
1353
+ "num": null
1354
+ }
1355
+ }
1356
+ }
1357
+ }
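Each file added in this commit follows the same S2ORC-style layout that is visible in the hunks: a top-level paper_id, header, title, authors and abstract, plus a pdf_parse block holding the parsed abstract, the body_text paragraphs and the bib_entries. Below is a minimal sketch, in Python, of how such a file could be loaded and skimmed; the path points at one of the files in this commit, and everything else (variable names, the 60-character preview) is illustrative only, not part of the dataset.

    import json

    # Illustrative only: load one of the S2ORC-style JSON files added in this commit
    # and print its title plus a short preview of every body_text paragraph.
    path = "Full_text_JSON/prefixW/json/W00/W00-0312.json"
    with open(path, encoding="utf-8") as fh:
        paper = json.load(fh)

    print(paper["paper_id"], "-", paper["title"])
    # pdf_parse.body_text is a list of paragraph records with "text", "section", "sec_num".
    for para in paper["pdf_parse"]["body_text"]:
        print(f'[{para.get("sec_num")}] {para.get("section")}: {para["text"][:60]}')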
Full_text_JSON/prefixW/json/W00/W00-0312.json ADDED
@@ -0,0 +1,683 @@
1
+ {
2
+ "paper_id": "W00-0312",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:35:05.476040Z"
6
+ },
7
+ "title": "Building a Robust Dialogue System with Limited Data *",
8
+ "authors": [
9
+ {
10
+ "first": "Sharon",
11
+ "middle": [
12
+ "J"
13
+ ],
14
+ "last": "Goldwater",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "SRI International",
19
+ "location": {
20
+ "addrLine": "333 Ravenswood Avenue",
21
+ "postCode": "94025",
22
+ "settlement": "Menlo Park",
23
+ "region": "CA"
24
+ }
25
+ },
26
+ "email": "goldwater@ai.sri.cora"
27
+ },
28
+ {
29
+ "first": "Elizabeth",
30
+ "middle": [
31
+ "Owen"
32
+ ],
33
+ "last": "Bratt",
34
+ "suffix": "",
35
+ "affiliation": {
36
+ "laboratory": "",
37
+ "institution": "SRI International",
38
+ "location": {
39
+ "addrLine": "333 Ravenswood Avenue",
40
+ "postCode": "94025",
41
+ "settlement": "Menlo Park",
42
+ "region": "CA"
43
+ }
44
+ },
45
+ "email": ""
46
+ },
47
+ {
48
+ "first": "Jean",
49
+ "middle": [
50
+ "Mark"
51
+ ],
52
+ "last": "Gawron",
53
+ "suffix": "",
54
+ "affiliation": {
55
+ "laboratory": "",
56
+ "institution": "SRI International",
57
+ "location": {
58
+ "addrLine": "333 Ravenswood Avenue",
59
+ "postCode": "94025",
60
+ "settlement": "Menlo Park",
61
+ "region": "CA"
62
+ }
63
+ },
64
+ "email": "gawron@ai.sri.cora"
65
+ },
66
+ {
67
+ "first": "John",
68
+ "middle": [],
69
+ "last": "Dowdingt",
70
+ "suffix": "",
71
+ "affiliation": {
72
+ "laboratory": "",
73
+ "institution": "SRI International",
74
+ "location": {
75
+ "addrLine": "333 Ravenswood Avenue",
76
+ "postCode": "94025",
77
+ "settlement": "Menlo Park",
78
+ "region": "CA"
79
+ }
80
+ },
81
+ "email": "dowding@ai.sri.cora"
82
+ }
83
+ ],
84
+ "year": "",
85
+ "venue": null,
86
+ "identifiers": {},
87
+ "abstract": "We describe robustness techniques used in the Com-mandTalk system at: the recognition level, the parsing level, and th~ dia16gue level, and how these were influenced by the lack of domain data. We used interviews with subject matter experts (SME's) to develop a single grammar for recognition, understanding, and generation, thus eliminating the need for a robust parser. We broadened the coverage of the recognition grammar by allowing word insertions and deletions, and we implemented clarification and correction subdialogues to increase robustness at the dialogue level. We discuss the applicability of these techniques to other domains.",
88
+ "pdf_parse": {
89
+ "paper_id": "W00-0312",
90
+ "_pdf_hash": "",
91
+ "abstract": [
92
+ {
93
+ "text": "We describe robustness techniques used in the Com-mandTalk system at: the recognition level, the parsing level, and th~ dia16gue level, and how these were influenced by the lack of domain data. We used interviews with subject matter experts (SME's) to develop a single grammar for recognition, understanding, and generation, thus eliminating the need for a robust parser. We broadened the coverage of the recognition grammar by allowing word insertions and deletions, and we implemented clarification and correction subdialogues to increase robustness at the dialogue level. We discuss the applicability of these techniques to other domains.",
94
+ "cite_spans": [],
95
+ "ref_spans": [],
96
+ "eq_spans": [],
97
+ "section": "Abstract",
98
+ "sec_num": null
99
+ }
100
+ ],
101
+ "body_text": [
102
+ {
103
+ "text": "Three types of robustness must be considered when designing a dialogue system. First, there is robustness at the recognition level. When plentiful data is available, a robust n-gram language model can be produced, but when data is limited, producing a robust language model for recognition can be problematic. Second, there is robustness at the level of the parser. Robust parsing is often achieved by combining a full parser with a partial parser and fragment-combining rules, but even then some utterances may be correctly recognized, only to be parsed incorrectly or not at all. Finally, there is robustness at the dialogue level. Utterances may be uninterpretable within the context of the dialogue due to errors on the part of either the system or the user, and the dialogue manager should be able to handle such problems gracefully. Our CommandTalk dialogue system was designed for a highly specialized domain with little available data, so finding ways to build a robust system with * This research was supported by the Defense Advanced Research Projects Agency under Contract N66001-94-C-6046 with the Space and Naval Warfare Systems Center. The views and conclusions contained in this document are those of the authors and should not be interpreted as necessarily representing the official policies, either express or implied, of the Defense Advanced Research Projects Agency of the U.S. Government. ? Currently affiliated with GO.corn limited data was a major concern. In this paper, we discuss our methods and their applicability to other domains. Section 2 gives a brief overview of the CommandTalk system. In Section 3, we discuss the approach we took to building recognition, understanding, and generaffon models for CommandTalk, and how it relates to the first two types of robustness mentioned. Section 4 discusses additional robustness techniques at the recognizer level, and Section 5 describes dialogue-level robustness techniques. Section 6 discusses the applicability of our methods to other domains.",
104
+ "cite_spans": [],
105
+ "ref_spans": [],
106
+ "eq_spans": [],
107
+ "section": "Introduction",
108
+ "sec_num": "1"
109
+ },
110
+ {
111
+ "text": "CommandTalk is a spoken-language interface to the ModSAF (Modular Semi-Automated Forces) battlefield simulator, developed with the goal of allowing military commanders to interact with simulated forces in a manner as similar as possible to the way they would command actual forces. CommandTalk allows the use of ordinary English commands and mouse gestures to",
112
+ "cite_spans": [],
113
+ "ref_spans": [],
114
+ "eq_spans": [],
115
+ "section": "CommandTalk",
116
+ "sec_num": "2"
117
+ },
118
+ {
119
+ "text": "\u2022 Create forces and control measures (points and lines)",
120
+ "cite_spans": [],
121
+ "ref_spans": [],
122
+ "eq_spans": [],
123
+ "section": "CommandTalk",
124
+ "sec_num": "2"
125
+ },
126
+ {
127
+ "text": "\u2022 Assign missions to forces",
128
+ "cite_spans": [],
129
+ "ref_spans": [],
130
+ "eq_spans": [],
131
+ "section": "CommandTalk",
132
+ "sec_num": "2"
133
+ },
134
+ {
135
+ "text": "\u2022 Modify missions during execution \u2022 Control ModSAF system functions, such as the map display",
136
+ "cite_spans": [],
137
+ "ref_spans": [],
138
+ "eq_spans": [],
139
+ "section": "CommandTalk",
140
+ "sec_num": "2"
141
+ },
142
+ {
143
+ "text": "\u2022 Get information about the state of the simulation CommandTalk consists of a number of independent, cooperating agents interacting through SRI's Open Agent Architecture (OAA) (Martin et al., 1998) . OAA uses a facilitator agent that plans and coordinates interactions among agents during distributed computation. An introduction to the basic CommandTalk agents can be found in Moore et al. (1997) . CommandTalk's dialogue component is described in detail in Stent et al. (1999) , and its use of linguistic and situational context is described in .",
144
+ "cite_spans": [
145
+ {
146
+ "start": 176,
147
+ "end": 197,
148
+ "text": "(Martin et al., 1998)",
149
+ "ref_id": "BIBREF2"
150
+ },
151
+ {
152
+ "start": 378,
153
+ "end": 397,
154
+ "text": "Moore et al. (1997)",
155
+ "ref_id": "BIBREF3"
156
+ },
157
+ {
158
+ "start": 459,
159
+ "end": 478,
160
+ "text": "Stent et al. (1999)",
161
+ "ref_id": "BIBREF5"
162
+ }
163
+ ],
164
+ "ref_spans": [],
165
+ "eq_spans": [],
166
+ "section": "CommandTalk",
167
+ "sec_num": "2"
168
+ },
169
+ {
170
+ "text": "The One-Grammar Approach",
171
+ "cite_spans": [],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "3",
175
+ "sec_num": null
176
+ },
177
+ {
178
+ "text": "In a domain with limited data, the inability to collect a sufficient corpus for training a statistical language model can be a significant problem. For CommandTalk, we did not create a statistical language model. Instead, with information gathered from interviews of subject matter experts (SME's), we developed a handwritten grammar using Gemini (Dowding et al., 1993) , a unification-based grammar formalism. We used this unification grammar for both natural language understanding and generation, and, using a grammar compiler we developed, compiled it into a context-free form suitable for the speech recognizer as well. The effe~s_ of this single-grammar approach on the robustness of the CommandTalk system were twofold. On the negative side, we presumably ended up with a recognition language model with less coverage than a statistical model would have had. Our attempts to deal with this are discussed in the next section. On the positive side, we eliminated the usual discrepancy in coverage between the recognizer and the natural language parser. This was advantageous, since no fragment-combining or other parsing robustness techniques were needed.",
179
+ "cite_spans": [
180
+ {
181
+ "start": 347,
182
+ "end": 369,
183
+ "text": "(Dowding et al., 1993)",
184
+ "ref_id": "BIBREF0"
185
+ }
186
+ ],
187
+ "ref_spans": [],
188
+ "eq_spans": [],
189
+ "section": "3",
190
+ "sec_num": null
191
+ },
192
+ {
193
+ "text": "Our approach had other advantages as well. Any changes we made to the understanding grammar were automatically reflected in the recognition and generation grammars, making additions and modifications efficient. Also, anecdotal evidence suggests that the language used by the system often influences the language used by speakers, so maintaining consistency between the input and output of the system is desirable.",
194
+ "cite_spans": [],
195
+ "ref_spans": [],
196
+ "eq_spans": [],
197
+ "section": "3",
198
+ "sec_num": null
199
+ },
200
+ {
201
+ "text": "It is difficult to write a grammar that is constrained enough to be useful without excluding some reasonable user utterances. To alleviate this problem, we modified the speech recognition grammar and natural language parser to allow certain \"closeto-grammar\" utterances. Utterances with inserted words, such as Center on Checkpoint 1 now or zoom way out (where Center on Checkpoint 1 and zoom out are grammatical) were permitted by allowing the recognizer to skip unknown words. We also allowed utterances with deleted words, as long as those words did not contribute to the semantics of the utterance as determined by the Gemini semantic rules constraining logical forms. For example, a user could say, Set speed, 40 kph rather than Set speed to 40 kph. The idea behind these modifications was to allow utterances with a slightly broader range of wordings than those in the grammar, but with essentially the same meanings:",
202
+ "cite_spans": [],
203
+ "ref_spans": [],
204
+ "eq_spans": [],
205
+ "section": "Utterance-Level Robustness",
206
+ "sec_num": "4"
207
+ },
208
+ {
209
+ "text": "We began by testing the effects of these modifications on in-grammar utterances, to ensure that Time, CPURT SRR AWER SER Non-Robust Robust 0.664 : 1.05 2.56%",
210
+ "cite_spans": [],
211
+ "ref_spans": [],
212
+ "eq_spans": [],
213
+ "section": "Utterance-Level Robustness",
214
+ "sec_num": "4"
215
+ },
216
+ {
217
+ "text": "1.70% 1.68% 2.94% 10.00% ~ 12.07% Table 1 : In-Grammar Recognition Results they did not significantly decr egse recognition performance. We used a small test corpus of approximately 800 utterances read by SRI employees. We collected four measures of performance:",
218
+ "cite_spans": [],
219
+ "ref_spans": [
220
+ {
221
+ "start": 34,
222
+ "end": 41,
223
+ "text": "Table 1",
224
+ "ref_id": null
225
+ }
226
+ ],
227
+ "eq_spans": [],
228
+ "section": "Utterance-Level Robustness",
229
+ "sec_num": "4"
230
+ },
231
+ {
232
+ "text": "\u2022 Recognition time, measured, in multiples of CPU real time (CPURT). A recognition time of lxCPURT means that on,our CPU (a Sun Ultra2), recognition took exactly as~ long as the duration of the utterance. :",
233
+ "cite_spans": [],
234
+ "ref_spans": [],
235
+ "eq_spans": [],
236
+ "section": "Utterance-Level Robustness",
237
+ "sec_num": "4"
238
+ },
239
+ {
240
+ "text": "\u2022 Sentence reject rate (SRR).' The percentage of sentences that the recognizer rejects.",
241
+ "cite_spans": [],
242
+ "ref_spans": [],
243
+ "eq_spans": [],
244
+ "section": "Utterance-Level Robustness",
245
+ "sec_num": "4"
246
+ },
247
+ {
248
+ "text": "\u2022 Adjusted word error rate (A:WER). The percentage of words in non:rejected sentences that are misrecognized.",
249
+ "cite_spans": [],
250
+ "ref_spans": [],
251
+ "eq_spans": [],
252
+ "section": "Utterance-Level Robustness",
253
+ "sec_num": "4"
254
+ },
255
+ {
256
+ "text": "\u2022 Sentence error rate (SER). The percentage of sentences in which some sort of error occurred, either a complete rejection or misrecognized word.",
257
+ "cite_spans": [],
258
+ "ref_spans": [],
259
+ "eq_spans": [],
260
+ "section": "Utterance-Level Robustness",
261
+ "sec_num": "4"
262
+ },
263
+ {
264
+ "text": "Several parameters affected the results, most notably the numerical penalties assigned for inserting or deleting words, and the pruning threshold of the recognizer. Raising the pruning threshold caused both reject and error rates to go down, but slowed recognition. Lowering the penalties caused rejection rates to go down, but word and Sentence error rates to go up, since some sentences which had been rejected were now recognized partially correctly, and some sentences which had been recognized correctly now included some errors. Lowering the penalties also led to slower recognition. Table 1 shows recognition results for the nonrobust and robust versions 0f the recognition grammar on in-grammar utterances: Th e pruning threshold is the same for both versions and the insertion and deletion penalties are set to intermediate values. Recognition times for the robust grammar are about 60% slower than those of the control grammar, but still at acceptable levels. Reject and error rates are fairly close for the two grammars. Overall, adding robustness to the recognition grammar did not severely penalize in-grammar recognition performance.",
265
+ "cite_spans": [],
266
+ "ref_spans": [
267
+ {
268
+ "start": 590,
269
+ "end": 597,
270
+ "text": "Table 1",
271
+ "ref_id": null
272
+ }
273
+ ],
274
+ "eq_spans": [],
275
+ "section": "Utterance-Level Robustness",
276
+ "sec_num": "4"
277
+ },
278
+ {
279
+ "text": "We had very little out-of-grammar data for Com-mandTalk, and finding subjects in this highly specialized domain would have been difficult and expensive. To test our robustness techniques on out-of-grammar utterances, we decided to port them to another domain with easily accessible users and data; namely, the ATIS air travel domain. We wrote a small grammar covering part of the ATIS data and ,compiled it into a recognition grammar using the same techniques as in CommandTalk. Unfortunately, we were unable to carry out any experiments, because the recognition grammar we derived yielded recognition times that were so slow as to be impractical. We discuss these results further in Section 6.",
280
+ "cite_spans": [],
281
+ "ref_spans": [],
282
+ "eq_spans": [],
283
+ "section": "Utterance-Level Robustness",
284
+ "sec_num": "4"
285
+ },
286
+ {
287
+ "text": "To be considered robust at the dialogue level, a system must be able to deal with situations where an utterance is recognized and parsed, but cannot be interpreted withi~4he current system state or dialogue context. In addition~it must be easy for the user to correct faulty interpretations on the part of the system. Contextual interpretation problems may occur for a variety of reasons, including misrecognitions, incorrect reference resolution, and confusion or incompleteness on the part of the user.",
288
+ "cite_spans": [],
289
+ "ref_spans": [],
290
+ "eq_spans": [],
291
+ "section": "Diaiogue-Level Robustness",
292
+ "sec_num": "5"
293
+ },
294
+ {
295
+ "text": "The CommandTalk dialogue manager maintains a Stack to ~keep 'track of the current discourse context and uses small finite-state machines to represent different~ types of subdialogues. Below we illustrate some types of subdialogues and other techniques which provide robustness at the dialogue level. Note that for each utterance, we write what the system recognizes, not what the user actually says. Allowing the user to correct full or partial utterances can remedy interpretation problems caused by misrecognitions, incorrect reference resolution, or user error.",
296
+ "cite_spans": [],
297
+ "ref_spans": [],
298
+ "eq_spans": [],
299
+ "section": "Diaiogue-Level Robustness",
300
+ "sec_num": "5"
301
+ },
302
+ {
303
+ "text": "In Example 1, the system responds to the user's first utterance by producing a rising tone, illustrated by the \u00ae symbol, to indicate successful interpretation and execution of the command, in this case creation of a CEV, a type of vehicle. (Unsuccessful interpretation is indicated by a falling tone, illustrated by the \u00ae symbol.) In utterances 3 through 6, a misrecognition causes the system to perform the wrong behavior. The user initiates a correction subdialogue, and the system goes on to correctly reinterpret the full utterance.",
304
+ "cite_spans": [],
305
+ "ref_spans": [],
306
+ "eq_spans": [],
307
+ "section": "Correction Subdlalogues",
308
+ "sec_num": "5.1"
309
+ },
310
+ {
311
+ "text": "Ex. 2: U 7 CEV, advance to Checkpoint 1 S 8 \u00ae 100All will proceed to Checkpoint 1",
312
+ "cite_spans": [],
313
+ "ref_spans": [],
314
+ "eq_spans": [],
315
+ "section": "Implicit Confirmation",
316
+ "sec_num": "5.2"
317
+ },
318
+ {
319
+ "text": "Use of implicit confirmation in combination with correction subdialogues makes it easy to correct faulty interpretations as soon as possible by alerting the user to possible sources of error. In utterances 7 and 8, the system must resolve the user's reference, \"CEV\", to a particular unit. It therefore echoes the user's command using the CEV's unique call sign. This makes explicit the system's interpretation of the user's utterance, giving the user a chance to correct the system if necessary.",
320
+ "cite_spans": [],
321
+ "ref_spans": [],
322
+ "eq_spans": [],
323
+ "section": "Implicit Confirmation",
324
+ "sec_num": "5.2"
325
+ },
326
+ {
327
+ "text": "Note that utterance 4 also contains an implicit confirmation, since the system has resolved the user's gesture to a set of coordinates.",
328
+ "cite_spans": [],
329
+ "ref_spans": [],
330
+ "eq_spans": [],
331
+ "section": "Implicit Confirmation",
332
+ "sec_num": "5.2"
333
+ },
334
+ {
335
+ "text": "Ex Clarification subdialogues are generally initiated by the system as a result of errors or incomplete commands on the part of the user. Example 3 illustrates three different types of problems that can be corrected by system questions. First, the user's reference to \"CEV\" in utterance 11 is ambiguous, so the system asks a question to determine which CEV the user is referring to. Next, the system asks the user to supply a missing piece of information that is required to carry out the command. Finally, when the user makes an error by referring to a point that doesn't exist, the system prompts for a correction.",
336
+ "cite_spans": [],
337
+ "ref_spans": [],
338
+ "eq_spans": [],
339
+ "section": "Clarification Subdialogues",
340
+ "sec_num": "5.3"
341
+ },
342
+ {
343
+ "text": "CommandTalk is an example of a successful and robust dialogue system in a domain with limited ac-cess to both data and subjects. The pre-dialogue version of CommandTalk was used in the STOW (Synthetic Theater of War) '97 ACTD (Advanced Concept Technology Demonstration) exercise, an intensive 48-hour continuous military simulation by all four U.S. military services, and received high praise. The dialogue portion of the system has increased CommandTalk's usefulness and robustness. Nevertheless, several questions remain, not the least of which is whether the robustness techniques used for CommandTalk can be successfully transferred to other domains.",
344
+ "cite_spans": [],
345
+ "ref_spans": [],
346
+ "eq_spans": [],
347
+ "section": "Discussion and Conclusions",
348
+ "sec_num": "6"
349
+ },
350
+ {
351
+ "text": "We have no doubt that our methods for adding robustness at the dialogue level can and should be implemented in other domains, but this is not as clear for our parsing a-nd recognition robustness methods.",
352
+ "cite_spans": [],
353
+ "ref_spans": [],
354
+ "eq_spans": [],
355
+ "section": "Discussion and Conclusions",
356
+ "sec_num": "6"
357
+ },
358
+ {
359
+ "text": "The one-grammar approach is key to our eliminating the necessity for robust parsing, renders a large corpus for generating a recognition model unnecessary, and has other advantages as well. Yet our experience in the ATIS domain suggests that further research into this approach is needed. Our ATIS grammar is based on a grammar of general English and has a very different structure from that of CommandTalk's semantic grammar, but we were unable to isolate the factor or factors responsible for its poor recognition performance. Recent research (Rayner et al., 2000) suggests that it may be possible to compile a useful recognition model from a general English unification grammar if the grammar is constructed carefully and a few compromises are made. We also believe that using an appropriate grammar approximation algorithm to reduce the complexity of the recognition model may prove fruitful. This would reintroduce some discrepancy between the recognition and understanding language models, but maintain the other advantages of the one-grammar approach.",
360
+ "cite_spans": [
361
+ {
362
+ "start": 545,
363
+ "end": 566,
364
+ "text": "(Rayner et al., 2000)",
365
+ "ref_id": "BIBREF4"
366
+ }
367
+ ],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "Discussion and Conclusions",
371
+ "sec_num": "6"
372
+ },
373
+ {
374
+ "text": "In either case, the effectiveness of our recognition robustness techniques remains an open question. We know they have no significant negative impact on ingrammar recognition, but whether they are helpful in recognizing and~ more importantly, interpreting out-of-grammar utterances is unknown. We have been unable to evaluate them so far in the Com-mandTalk or any other domain, although we hope to do so in the future.",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "Discussion and Conclusions",
379
+ "sec_num": "6"
380
+ },
381
+ {
382
+ "text": "Another possible solution to the problem of producing a workable robust recognition grammar would return to a statistical approach rather than using word insertions and deletions. Stolcke and Segal (1994) describe a method for combining a context-free grammar with an n-gram model generated from a small corpus of a few hundred utterances to create a more accurate n-gram model. This method would provide a robust recognition model based on the context-free grammar compiled from our unification grammar. We would'still have to write only one grammar for the system, it would still influence the recognition model, and we could still be sure that the system would never say anything it couldn't recognize. This approach Would require using robust parsing methods, but might be the best solution for other domains if compiling a practical recognition grammar proves too difficult.",
383
+ "cite_spans": [],
384
+ "ref_spans": [],
385
+ "eq_spans": [],
386
+ "section": "Discussion and Conclusions",
387
+ "sec_num": "6"
388
+ },
389
+ {
390
+ "text": "Despite the success of the CommandTalk system, it is clear that more investigation is called for to determine how best to develop dialogue systems in domains with limited data. Researchers must determine which types of unification grammars can be compiled into practical recognition grammars using existing technology, whether grammar approximations or other techniques can produce good results for a broader range of grammars, whether allowing word insertions and deletions is an effective robustness technique, orwhether we should use other methods altogether.",
391
+ "cite_spans": [],
392
+ "ref_spans": [],
393
+ "eq_spans": [],
394
+ "section": "Discussion and Conclusions",
395
+ "sec_num": "6"
396
+ }
397
+ ],
398
+ "back_matter": [],
399
+ "bib_entries": {
400
+ "BIBREF0": {
401
+ "ref_id": "b0",
402
+ "title": "Gemini: A Natural Language System for Spoken Language Understanding",
403
+ "authors": [
404
+ {
405
+ "first": "J",
406
+ "middle": [],
407
+ "last": "Dowding",
408
+ "suffix": ""
409
+ },
410
+ {
411
+ "first": "J",
412
+ "middle": [],
413
+ "last": "Gawron",
414
+ "suffix": ""
415
+ },
416
+ {
417
+ "first": "D",
418
+ "middle": [],
419
+ "last": "Appelt",
420
+ "suffix": ""
421
+ },
422
+ {
423
+ "first": "L",
424
+ "middle": [],
425
+ "last": "Cherny",
426
+ "suffix": ""
427
+ },
428
+ {
429
+ "first": "R",
430
+ "middle": [],
431
+ "last": "Moore",
432
+ "suffix": ""
433
+ },
434
+ {
435
+ "first": "D",
436
+ "middle": [],
437
+ "last": "Moran",
438
+ "suffix": ""
439
+ }
440
+ ],
441
+ "year": 1993,
442
+ "venue": "Proceedings of the Thirty-First Annual Meeting of the ACL, Columbus, OH. Association for Computational Linguistics",
443
+ "volume": "",
444
+ "issue": "",
445
+ "pages": "",
446
+ "other_ids": {},
447
+ "num": null,
448
+ "urls": [],
449
+ "raw_text": "J. Dowding, J. Gawron, D. Appelt, L. Cherny, R. Moore, and D. Moran. 1993. Gemini: A Natu- ral Language System for Spoken Language Under- standing. In Proceedings of the Thirty-First An- nual Meeting of the ACL, Columbus, OH. Associ- ation for Computational Linguistics.",
450
+ "links": null
451
+ },
452
+ "BIBREF1": {
453
+ "ref_id": "b1",
454
+ "title": "Interpreting Language in Context in Com-mandTalk",
455
+ "authors": [
456
+ {
457
+ "first": "J",
458
+ "middle": [],
459
+ "last": "Dowding",
460
+ "suffix": ""
461
+ },
462
+ {
463
+ "first": "E",
464
+ "middle": [
465
+ "Owen"
466
+ ],
467
+ "last": "Bratt",
468
+ "suffix": ""
469
+ },
470
+ {
471
+ "first": "S",
472
+ "middle": [],
473
+ "last": "Goldwater",
474
+ "suffix": ""
475
+ }
476
+ ],
477
+ "year": 1999,
478
+ "venue": "Communicative Agents: The Use of Natural Language in Embodied Systems",
479
+ "volume": "",
480
+ "issue": "",
481
+ "pages": "63--67",
482
+ "other_ids": {},
483
+ "num": null,
484
+ "urls": [],
485
+ "raw_text": "J. Dowding, E. Owen Bratt, and S. Goldwater. 1999. Interpreting Language in Context in Com- mandTalk. In Communicative Agents: The Use of Natural Language in Embodied Systems, pages 63-67.",
486
+ "links": null
487
+ },
488
+ "BIBREF2": {
489
+ "ref_id": "b2",
490
+ "title": "Building Distributed Software Systems with the Open Agent Architecture",
491
+ "authors": [
492
+ {
493
+ "first": "D",
494
+ "middle": [],
495
+ "last": "Martin",
496
+ "suffix": ""
497
+ },
498
+ {
499
+ "first": "A",
500
+ "middle": [],
501
+ "last": "Cheyer",
502
+ "suffix": ""
503
+ },
504
+ {
505
+ "first": "D",
506
+ "middle": [],
507
+ "last": "Moran",
508
+ "suffix": ""
509
+ }
510
+ ],
511
+ "year": 1998,
512
+ "venue": "Proceedings of the Third International Conference on the Practical Application of Intelligent Agents and Multi-Agent Technology",
513
+ "volume": "",
514
+ "issue": "",
515
+ "pages": "",
516
+ "other_ids": {},
517
+ "num": null,
518
+ "urls": [],
519
+ "raw_text": "D. Martin, A. Cheyer, and D. Moran. 1998. Build- ing Distributed Software Systems with the Open Agent Architecture. In Proceedings of the Third International Conference on the Practical Appli- cation of Intelligent Agents and Multi-Agent Tech- nology, Blackpool, Lancashire, UK. The Practical Application Company Ltd.",
520
+ "links": null
521
+ },
522
+ "BIBREF3": {
523
+ "ref_id": "b3",
524
+ "title": "CommandTalk: A Spoken-Language Interface for Battlefield Simulations",
525
+ "authors": [
526
+ {
527
+ "first": "R",
528
+ "middle": [],
529
+ "last": "Moore",
530
+ "suffix": ""
531
+ },
532
+ {
533
+ "first": "J",
534
+ "middle": [],
535
+ "last": "Dowding",
536
+ "suffix": ""
537
+ },
538
+ {
539
+ "first": "H",
540
+ "middle": [],
541
+ "last": "Bratt",
542
+ "suffix": ""
543
+ },
544
+ {
545
+ "first": "J",
546
+ "middle": [],
547
+ "last": "Gawron",
548
+ "suffix": ""
549
+ },
550
+ {
551
+ "first": "Y",
552
+ "middle": [],
553
+ "last": "Gorfu",
554
+ "suffix": ""
555
+ },
556
+ {
557
+ "first": "A",
558
+ "middle": [],
559
+ "last": "Cheyer",
560
+ "suffix": ""
561
+ }
562
+ ],
563
+ "year": 1997,
564
+ "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
565
+ "volume": "",
566
+ "issue": "",
567
+ "pages": "1--7",
568
+ "other_ids": {},
569
+ "num": null,
570
+ "urls": [],
571
+ "raw_text": "R. Moore, J. Dowding, H. Bratt, J. Gawron, Y. Gorfu, and A. Cheyer. 1997. CommandTalk: A Spoken-Language Interface for Battlefield Sim- ulations. In Proceedings of the Fifth Conference on Applied Natural Language Processing, pages 1-7, Washington, DC. Association for Computa- tional Linguistics.",
572
+ "links": null
573
+ },
574
+ "BIBREF4": {
575
+ "ref_id": "b4",
576
+ "title": "Compiling Language Models from a Linquistically Motivated Unification Grammar",
577
+ "authors": [
578
+ {
579
+ "first": "M",
580
+ "middle": [],
581
+ "last": "Rayner",
582
+ "suffix": ""
583
+ },
584
+ {
585
+ "first": "B",
586
+ "middle": [
587
+ "A"
588
+ ],
589
+ "last": "Hockey",
590
+ "suffix": ""
591
+ },
592
+ {
593
+ "first": "F",
594
+ "middle": [],
595
+ "last": "James",
596
+ "suffix": ""
597
+ },
598
+ {
599
+ "first": "E",
600
+ "middle": [
601
+ "Owen"
602
+ ],
603
+ "last": "Bratt",
604
+ "suffix": ""
605
+ },
606
+ {
607
+ "first": "S",
608
+ "middle": [],
609
+ "last": "Goldwater",
610
+ "suffix": ""
611
+ },
612
+ {
613
+ "first": "J",
614
+ "middle": [
615
+ "M"
616
+ ],
617
+ "last": "Gawron",
618
+ "suffix": ""
619
+ }
620
+ ],
621
+ "year": 2000,
622
+ "venue": "",
623
+ "volume": "",
624
+ "issue": "",
625
+ "pages": "",
626
+ "other_ids": {},
627
+ "num": null,
628
+ "urls": [],
629
+ "raw_text": "M. Rayner, B. A. Hockey, F. James, E. Owen Bratt, S. Goldwater, and J. M. Gawron. 2000. Compil- ing Language Models from a Linquistically Moti- vated Unification Grammar. Submitted to COL- ING '00.",
630
+ "links": null
631
+ },
632
+ "BIBREF5": {
633
+ "ref_id": "b5",
634
+ "title": "The CommandTalk Spoken Dialogu.e System",
635
+ "authors": [
636
+ {
637
+ "first": "A",
638
+ "middle": [],
639
+ "last": "Stent",
640
+ "suffix": ""
641
+ },
642
+ {
643
+ "first": "J",
644
+ "middle": [],
645
+ "last": "Dowding",
646
+ "suffix": ""
647
+ },
648
+ {
649
+ "first": "J",
650
+ "middle": [],
651
+ "last": "Gawron",
652
+ "suffix": ""
653
+ },
654
+ {
655
+ "first": "E",
656
+ "middle": [
657
+ "Owen"
658
+ ],
659
+ "last": "Bratt",
660
+ "suffix": ""
661
+ },
662
+ {
663
+ "first": "R",
664
+ "middle": [],
665
+ "last": "Moore",
666
+ "suffix": ""
667
+ }
668
+ ],
669
+ "year": 1999,
670
+ "venue": "Proceedings of the 37th Annual Meeting of the A CL. Association of Computational Linguistics",
671
+ "volume": "",
672
+ "issue": "",
673
+ "pages": "",
674
+ "other_ids": {},
675
+ "num": null,
676
+ "urls": [],
677
+ "raw_text": "A. Stent, J. Dowding, J. Gawron, E. Owen Bratt, and R. Moore. 1999. The CommandTalk Spoken Dialogu.e System. In Proceedings of the 37th An- nual Meeting of the A CL. Association of Compu- tational Linguistics.",
678
+ "links": null
679
+ }
680
+ },
681
+ "ref_entries": {}
682
+ }
683
+ }
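The bib_entries block seen above maps keys such as BIBREF0 through BIBREF17 to records carrying title, year, venue and raw_text fields. A small companion sketch, again only illustrative (the helper name cited_references is ours, not part of the dataset), shows how those records could be pulled back out as plain citation strings.

    import json

    def cited_references(path):
        # Illustrative helper (not part of the dataset): return "KEY: title (year)"
        # strings for every record in pdf_parse.bib_entries of an S2ORC-style file.
        with open(path, encoding="utf-8") as fh:
            paper = json.load(fh)
        entries = paper["pdf_parse"]["bib_entries"]
        return [f'{key}: {entry.get("title")} ({entry.get("year")})'
                for key, entry in entries.items()]

    for line in cited_references("Full_text_JSON/prefixW/json/W00/W00-0312.json"):
        print(line)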
Full_text_JSON/prefixW/json/W00/W00-0400.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "paper_id": "W00-0400",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:50.939232Z"
6
+ },
7
+ "title": "",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0400",
15
+ "_pdf_hash": "",
16
+ "abstract": [],
17
+ "body_text": [
18
+ {
19
+ "text": "i I PREFACE",
20
+ "cite_spans": [],
21
+ "ref_spans": [],
22
+ "eq_spans": [],
23
+ "section": "",
24
+ "sec_num": null
25
+ },
26
+ {
27
+ "text": "Automatic summarization aims at producing a concise, condensed representation of the key information content in an information source for a particular user and task. Interest in automatic summarization continues to grow, motivated by the explosion of on-line information sources and advances in natural language processing and information retrieval. In fact, some form of automatic summarization may be indispensable given the massive information universes that lie ahead in the 21 st century.",
28
+ "cite_spans": [],
29
+ "ref_spans": [],
30
+ "eq_spans": [],
31
+ "section": "",
32
+ "sec_num": null
33
+ },
34
+ {
35
+ "text": "The problem of automatic summarization poses a variety of tough challenges in both NL understanding and generation. A spate of recent papers and tutorials on this subject at conferences such as ACL/EACL, AAAI, ECAI, IJCAI, and SIGIR point to a growing interest in research in this field. Several commercial summarization products have,also' appeared. There have been several workshops in the past on this subject: in Dagstuhl (1994 ), Madrid (1997 ), and Stanford (1998 . It is our great pleasure to bring the fourth such event to you.",
36
+ "cite_spans": [
37
+ {
38
+ "start": 417,
39
+ "end": 431,
40
+ "text": "Dagstuhl (1994",
41
+ "ref_id": null
42
+ },
43
+ {
44
+ "start": 432,
45
+ "end": 447,
46
+ "text": "), Madrid (1997",
47
+ "ref_id": null
48
+ },
49
+ {
50
+ "start": 448,
51
+ "end": 469,
52
+ "text": "), and Stanford (1998",
53
+ "ref_id": null
54
+ }
55
+ ],
56
+ "ref_spans": [],
57
+ "eq_spans": [],
58
+ "section": "",
59
+ "sec_num": null
60
+ },
61
+ {
62
+ "text": "While the field continues to progress, there are also many problems that need to be addressed before the promises of automatic text summarization can be fully realized. The papers included in this volume present a snapshot of recent progress in solving some of the problems: concept identification, identification of discourse markers, multi-document summarization, content visualization, evaluation, and use. Out of 29 submissions to the workshop, 10 are included in these proceedings.",
63
+ "cite_spans": [],
64
+ "ref_spans": [],
65
+ "eq_spans": [],
66
+ "section": "",
67
+ "sec_num": null
68
+ },
69
+ {
70
+ "text": "We would like to thank all authors for their hard work. This workshop would not be possible without them. We would also like to thank the program committee and all the reviewers for their valuable feedback. In addition, we are grateful to the ACL for sponsoring the workshop and to the MITRE Corporation for supporting the publication of the proceedings. Centroid-based summarization of multiple documents:sentence extraction, utility-based evaluation, and user studies",
71
+ "cite_spans": [],
72
+ "ref_spans": [],
73
+ "eq_spans": [],
74
+ "section": "",
75
+ "sec_num": null
76
+ }
77
+ ],
78
+ "back_matter": [],
79
+ "bib_entries": {},
80
+ "ref_entries": {
81
+ "TABREF2": {
82
+ "text": "Concept Identification and Presentation in the Context of Technical Text SummarizationHoracio Saggion and Guy Lapalme ............................................................................................................Mining Discourse Markers for Chinese Textual SummarizationSamuel W. K. Chan, Tom B, Y. Lai, W. J.Gao, and Benjamin K. T'sou ................................................ 11",
83
+ "content": "<table/>",
84
+ "num": null,
85
+ "type_str": "table",
86
+ "html": null
87
+ }
88
+ }
89
+ }
90
+ }
Full_text_JSON/prefixW/json/W00/W00-0401.json ADDED
@@ -0,0 +1,1228 @@
1
+ {
2
+ "paper_id": "W00-0401",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:31.160743Z"
6
+ },
7
+ "title": "Concept Identification and Presentation in the Context of Technical Text Summarization",
8
+ "authors": [
9
+ {
10
+ "first": "Horacio",
11
+ "middle": [],
12
+ "last": "Saggion",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "Succ Centre-Ville Montreal",
17
+ "location": {
18
+ "postCode": "H3C 3J7",
19
+ "region": "Quebec",
20
+ "country": "Canada"
21
+ }
22
+ },
23
+ "email": "saggion@ca"
24
+ },
25
+ {
26
+ "first": "Guy",
27
+ "middle": [],
28
+ "last": "Lapalme",
29
+ "suffix": "",
30
+ "affiliation": {
31
+ "laboratory": "",
32
+ "institution": "Succ Centre-Ville Montreal",
33
+ "location": {
34
+ "postCode": "H3C 3J7",
35
+ "region": "Quebec",
36
+ "country": "Canada"
37
+ }
38
+ },
39
+ "email": "lapalme@iro@ca"
40
+ }
41
+ ],
42
+ "year": "",
43
+ "venue": null,
44
+ "identifiers": {},
45
+ "abstract": "We describe a method of text summarization that produces indicative-informative abstracts / for technical papers. The abstracts are generated by a process of conceptual identification, topic extraction and re-generation. We have carried out an evaluation to assess indicativeness and text acceptability relying on human judgment. The results so far indicate good performance in both tasks when compared with other summarization technologies.",
46
+ "pdf_parse": {
47
+ "paper_id": "W00-0401",
48
+ "_pdf_hash": "",
49
+ "abstract": [
50
+ {
51
+ "text": "We describe a method of text summarization that produces indicative-informative abstracts / for technical papers. The abstracts are generated by a process of conceptual identification, topic extraction and re-generation. We have carried out an evaluation to assess indicativeness and text acceptability relying on human judgment. The results so far indicate good performance in both tasks when compared with other summarization technologies.",
52
+ "cite_spans": [],
53
+ "ref_spans": [],
54
+ "eq_spans": [],
55
+ "section": "Abstract",
56
+ "sec_num": null
57
+ }
58
+ ],
59
+ "body_text": [
60
+ {
61
+ "text": "We have specified a method of text summarization which produces indicative-informative abstracts for technical documents. The method was designed to identify the \"topics\" of a document and present them in an indicative abstract. Eventually, they can be elaborated in .specific ways.",
62
+ "cite_spans": [],
63
+ "ref_spans": [],
64
+ "eq_spans": [],
65
+ "section": "Introduction",
66
+ "sec_num": "1"
67
+ },
68
+ {
69
+ "text": "In Figure 1 , we present an indicative abstract for the document \"Facilitating designer-.",
70
+ "cite_spans": [],
71
+ "ref_spans": [
72
+ {
73
+ "start": 3,
74
+ "end": 11,
75
+ "text": "Figure 1",
76
+ "ref_id": null
77
+ }
78
+ ],
79
+ "eq_spans": [],
80
+ "section": "Introduction",
81
+ "sec_num": "1"
82
+ },
83
+ {
84
+ "text": "Web\" (Internet Research:",
85
+ "cite_spans": [],
86
+ "ref_spans": [],
87
+ "eq_spans": [],
88
+ "section": "\u2022 customer communication in the World Wide",
89
+ "sec_num": null
90
+ },
91
+ {
92
+ "text": "Electronic Networking Applications and Policy, Vol 8, Issue 5,1998) produced with our implementation of this method. The abstract includes a list of topics which are terms appearing in the automatic abstract (e.g. WebShaman) or obtained from the source document by the process of term expansion (e.g. WWW technique obtained from technique). It also * The first author is supported by Agence Canadienne de D~veloppement International (ACDI) and FundaciSn Antorchas (A-13671/1-47) , Argentin a. He was previously supported by Ministerio de EducaciSn de la Naci6n de la Repfiblica Argentina (ResoluciSn 1041/96) and Departamento de ComputaciSn, Facultad de Ciencias Exactas y Naturales, UBA, Argentina.",
93
+ "cite_spans": [
94
+ {
95
+ "start": 464,
96
+ "end": 478,
97
+ "text": "(A-13671/1-47)",
98
+ "ref_id": null
99
+ }
100
+ ],
101
+ "ref_spans": [],
102
+ "eq_spans": [],
103
+ "section": "\u2022 customer communication in the World Wide",
104
+ "sec_num": null
105
+ },
106
+ {
107
+ "text": "includes term elaborations which can be used to answer specific questions about the topics such as what is topic? how topic is used?",
108
+ "cite_spans": [],
109
+ "ref_spans": [],
110
+ "eq_spans": [],
111
+ "section": "\u2022 customer communication in the World Wide",
112
+ "sec_num": null
113
+ },
114
+ {
115
+ "text": "who developed topic? and what are the advantages of topic?",
116
+ "cite_spans": [],
117
+ "ref_spans": [],
118
+ "eq_spans": [],
119
+ "section": "\u2022 customer communication in the World Wide",
120
+ "sec_num": null
121
+ },
122
+ {
123
+ "text": ".",
124
+ "cite_spans": [],
125
+ "ref_spans": [],
126
+ "eq_spans": [],
127
+ "section": "\u2022 customer communication in the World Wide",
128
+ "sec_num": null
129
+ },
130
+ {
131
+ "text": "In this paper, we will describe how we dealt with the problem of content selection and presentation and how we have evaluated our method of text summarization.",
132
+ "cite_spans": [],
133
+ "ref_spans": [],
134
+ "eq_spans": [],
135
+ "section": "\u2022 customer communication in the World Wide",
136
+ "sec_num": null
137
+ },
138
+ {
139
+ "text": "The process of producing a summary from a source text consists of the following steps: (i) the interpretation of the text; (ii) the extraction of the relevant information which ideally includes the \"topics\" of the source; (iii) the condensation of the extracted information and construction of a summary representation; and (iv) the presentation of the summary representation to the reader in natural language.",
140
+ "cite_spans": [],
141
+ "ref_spans": [],
142
+ "eq_spans": [],
143
+ "section": "Text Summarization",
144
+ "sec_num": "2"
145
+ },
146
+ {
147
+ "text": "While some techniques exist for producing summaries for domain independent texts (Luhn, 1958; Marcu, 1997) it seems that domain specific texts require domain specific techniques (DeJong, 1982; Paice and Jones, 1993) . In our case, we are dealing with technical articles which are the result of the complex process of scientific inquiry that starts with the. identification of a knowledge problem and eventually culminates with the discovery of an answer to it. Even if authors of technical articles write about several concepts in their articles, not all of them are topics. In order to address the issue of topic identification, content selection and presentation, we have studied alignments (manually produced) of sentences from professional abstracts with sentences from Abstract Introducing the Topics",
148
+ "cite_spans": [
149
+ {
150
+ "start": 81,
151
+ "end": 93,
152
+ "text": "(Luhn, 1958;",
153
+ "ref_id": "BIBREF6"
154
+ },
155
+ {
156
+ "start": 94,
157
+ "end": 106,
158
+ "text": "Marcu, 1997)",
159
+ "ref_id": "BIBREF8"
160
+ },
161
+ {
162
+ "start": 178,
163
+ "end": 192,
164
+ "text": "(DeJong, 1982;",
165
+ "ref_id": "BIBREF1"
166
+ },
167
+ {
168
+ "start": 193,
169
+ "end": 215,
170
+ "text": "Paice and Jones, 1993)",
171
+ "ref_id": "BIBREF11"
172
+ }
173
+ ],
174
+ "ref_spans": [],
175
+ "eq_spans": [],
176
+ "section": "Text Summarization",
177
+ "sec_num": "2"
178
+ },
179
+ {
180
+ "text": "Virtual prototyping is a technique which has been suggested for use in, for example, telecommunication product development as a high-end technology to achieve a quick digital model that could be used in the same way as a real prototype. Presents the design rationale of WebShaman, starting from the concept design perspective by introducing a set of requirements to support communication via a concept model between industrial designer and a customer. In the article, the authors suggest that virtual prototyping in collaborative use between designers is a potential technique to facilitate design and alleviate the problems created by geographical distance and complexities in the work between different parties. The technique, was implemented in the VRP project, allows component level manipulation of a virtual prototype in a WWW (World Wide Web) browser. The user services, the software architecture, and the techniques of WebShaman were developed iteratively during the fieldwork in order to illustrate the ideas and the feasibility of the system. The server is not much different from the other servers constructed to support synchronous collaboration.",
181
+ "cite_spans": [],
182
+ "ref_spans": [],
183
+ "eq_spans": [],
184
+ "section": "! !",
185
+ "sec_num": null
186
+ },
187
+ {
188
+ "text": "Identified Topics: 3D model -VIRPI project -WWW -WW-vV technique -WebShaman -CAD system -conceptual.model-customer -object-oriented model-product -product concept -product design -requirement -simulation model -smart virtual prototype -software component -system-technique -technology -use -virtual componentvirtual prototype -virtual prototype system-virtual prototyping",
189
+ "cite_spans": [],
190
+ "ref_spans": [],
191
+ "eq_spans": [],
192
+ "section": "! !",
193
+ "sec_num": null
194
+ },
195
+ {
196
+ "text": "An example of a conceptual model, a pen-shaped'wireless user interface for a mobile telephone.",
197
+ "cite_spans": [],
198
+ "ref_spans": [],
199
+ "eq_spans": [],
200
+ "section": "Information about the Topics",
201
+ "sec_num": null
202
+ },
203
+ {
204
+ "text": "A virtual prototype is a computer-based simulation of a prototype or a subsystem with a degree of functional realism, comparable to that of a physical prototype. A computer system implementing the high-end aspects of virtual prototyping has been developed in the VRP project (VRP, 1998) at VTT Electronics, in Oulu, Finland. The two-and-a-half-year VIRPI project consists of three parts. Nowadays, CAD (computer-aided design) systems are used as an aid in industrial, mechanical and electronics design for the specification and development of a product. A virtual prototype system can be used for concept testing in the early phase of product development.",
205
+ "cite_spans": [
206
+ {
207
+ "start": 275,
208
+ "end": 286,
209
+ "text": "(VRP, 1998)",
210
+ "ref_id": null
211
+ }
212
+ ],
213
+ "ref_spans": [],
214
+ "eq_spans": [],
215
+ "section": "Information about the Topics",
216
+ "sec_num": null
217
+ },
218
+ {
227
+ "text": "source documents. One of the alignments is presented in Table 1 . The first column contains the information of the professional abstract. The second and third columns contain the information from the source document that matches the sentences of the professional abstract, and its location in the source document. We have produced 100 of these tables containing a total of 309 sentences of professional abstracts aligned with 568 sentences of source documents. These alignments allowed us to identify on one hand, concepts, relations and types of information usually conveyed in abstracts; and on the other hand, valid transformations in the source in order to produce a compact and coherent text. abbreviation, merge and split. In our corpus, 89% of the sentences from the professional abstracts included at least one transformation.",
228
+ "cite_spans": [],
229
+ "ref_spans": [
230
+ {
231
+ "start": 56,
232
+ "end": 63,
233
+ "text": "Table 1",
234
+ "ref_id": "TABREF1"
235
+ }
236
+ ],
237
+ "eq_spans": [],
238
+ "section": "Figure h Indicative Abstract, Topics and Topic Elaboration",
239
+ "sec_num": null
240
+ },
241
+ {
242
+ "text": "Results of the corpus study are detailed in (Saggion and Lapalme, 1998) and (Saggion and Lapalme, 2000) .",
243
+ "cite_spans": [
244
+ {
245
+ "start": 44,
246
+ "end": 71,
247
+ "text": "(Saggion and Lapalme, 1998)",
248
+ "ref_id": "BIBREF14"
249
+ },
250
+ {
251
+ "start": 76,
252
+ "end": 103,
253
+ "text": "(Saggion and Lapalme, 2000)",
254
+ "ref_id": "BIBREF15"
255
+ }
256
+ ],
257
+ "ref_spans": [],
258
+ "eq_spans": [],
259
+ "section": "Figure h Indicative Abstract, Topics and Topic Elaboration",
260
+ "sec_num": null
261
+ },
262
+ {
263
+ "text": "We have identified a total of 52 different types of information (coming from the corpus and from technical articles) for technical text summarization that we use to identify some of the main themes. These types include: the explicit topic of the document, the situation, the identification of the problem, the 'identification of the solution, the research goal, the explicit topic of a section, the \u2022 authors' development, the inferences, the description of a topical entity, the definition \u2022 of a topical entity, the relevance of a topical enthy, the advantages, etc. Information types are classified as indicative or informative depending on the type of abstract they contribute to (i.e. the topic of a document is indicative while the description of a topical entity is informative). Types of information are identified in sentences of the source document using co-occurrence of concepts and relations and specific linguistic patterns. Technical articles from different domains refer to specific concepts and relations (diseases and treatments in Medicine, atoms and chemical reactions in Chemistry, and theorems and proofs in Mathematics). We have focused on concepts and relations that are common across domains such as problem, solution, research need, experiment, relevance, researchers~ etc.",
264
+ "cite_spans": [],
265
+ "ref_spans": [],
266
+ "eq_spans": [],
267
+ "section": "Figure h Indicative Abstract, Topics and Topic Elaboration",
268
+ "sec_num": null
269
+ },
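As an illustration only, the indicative/informative classification of information types and the marker test described above could be recorded along the following lines; the type names kept here are a small excerpt of the 52 types, and the set representation is an assumption rather than the authors' data structure.

    # Hypothetical excerpt of the type inventory and its indicative/informative split
    INFORMATION_TYPES = {
        "topic of the document": "indicative",
        "identification of the problem": "indicative",
        "research goal": "indicative",
        "description of a topical entity": "informative",
        "definition of a topical entity": "informative",
    }

    def may_contribute(sentence_concepts, type_markers):
        # A sentence is indexed under a type when the concepts/relations that
        # mark the type all co-occur in it (linguistic patterns are checked later).
        return type_markers <= sentence_concepts

    # Example: a sentence tagged with both the "problem" and "solve" concepts
    print(may_contribute({"problem", "solve", "researchers"}, {"problem", "solve"}))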
270
+ {
271
+ "text": "3 Text Interpretation Our approach to text summarization is based on a superficial analysis of the source document and on the implementation of some text re-generation techniques such as merging of topical information, re-expression of concepts and acronym expansion. The article (plain text in English without mark-up) is segmented in main units (title, author information, author abstract, keywords, main sections and references) using typographic information and some keywords. Each unit is passed through a bipos statistical tagger. In each unit, the system identifies titles, sentences and paragraphs, and then, sentences are interpreted using finite state transducers identifying and packing linguistic constructions and domain specific constructions. Following that, a conceptual dictionary that relates lexical items to domain concepts and relations is used to associate semantic tags to the different structural elements in the sentence. Subsequently, terms (canonical form of noun groups), their associated semantic (head of the noun group) and theirs positions are extracted from each sentence and stored in an AVL tree (te~ tree) along with their frequency. A conceptual index is created which specifies to which particular type of information each sentence could contribute. Finally, terms and words are extracted from titles and stored in a list (the topical structure) and acronyms and their expansions are recorded.",
272
+ "cite_spans": [],
273
+ "ref_spans": [],
274
+ "eq_spans": [],
275
+ "section": "Figure h Indicative Abstract, Topics and Topic Elaboration",
276
+ "sec_num": null
277
+ },
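A rough sketch of the term table built during interpretation (canonical noun groups with their semantic head, positions and frequency); a plain dictionary stands in for the AVL term tree, and the input format shown is an assumption.

    from collections import defaultdict

    def build_term_table(sentences):
        # sentences: iterable of (sentence_id, [(canonical_noun_group, head), ...])
        terms = defaultdict(lambda: {"head": None, "positions": [], "freq": 0})
        for sent_id, noun_groups in sentences:
            for canonical, head in noun_groups:
                entry = terms[canonical]
                entry["head"] = head                 # semantic of the term
                entry["positions"].append(sent_id)   # where it occurs
                entry["freq"] += 1
        return terms                                 # stands in for the AVL "term tree"

    table = build_term_table([(1, [("virtual prototype", "prototype")]),
                              (3, [("virtual prototype", "prototype"),
                                   ("conceptual model", "model")])])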
278
+ {
279
+ "text": "In order to represent types of information we use templates. In Table 2 , we present the Topic of the Document, Topic of the Section and Signaling Information templates. Also presented are some indicative and informative patterns. Indicative patterns contain variables, syntactic constructions, domain concepts and relations. Informative patterns also include one specific position for the topic under consideration. Each element of the pattern matches one or more elements of the sentence (conceptual, syntactic and lexical elements match one element while variables match zero or more).",
280
+ "cite_spans": [],
281
+ "ref_spans": [
282
+ {
283
+ "start": 64,
284
+ "end": 71,
285
+ "text": "Table 2",
286
+ "ref_id": "TABREF3"
287
+ }
288
+ ],
289
+ "eq_spans": [],
290
+ "section": "Content Selection",
291
+ "sec_num": "3.1"
292
+ },
293
+ {
294
+ "text": "The system considers sentences ~hat were identified as carrying indicative information (their position is found in the conceptual index). Given a sentence\u2022 S and a type of information T the system verifies if the sentence matches some of the patterns associated with type T. For each matched pattern, the system extracts information from the sentence and instantiates a template of type T.",
295
+ "cite_spans": [],
296
+ "ref_spans": [],
297
+ "eq_spans": [],
298
+ "section": "Indicativeness",
299
+ "sec_num": "3.1.1"
300
+ },
301
+ {
302
+ "text": "For example, the Content slot of the problem identification template is instantiated with all the sentence \u2022 :(avoiding references, structural elements and parenthetical expressions) while the What slot 'of the topic of the document template is instantiated with a parsed sentence fragment \u2022 to the left or to the right of the make known relation depending on the attribute voice of the verb (active vs. passive). All the instantiated templates constitute the Indicative Data Base (IDB).",
303
+ "cite_spans": [],
304
+ "ref_spans": [],
305
+ "eq_spans": [],
306
+ "section": "Indicativeness",
307
+ "sec_num": "3.1.1"
308
+ },
309
+ {
310
+ "text": "The system matches the topical structure with the topic candidate slots from the IDB. The system selects one template for each term in that structure: the one with the greatest weight (heuristics are applied if there are more than one). The selected templates constitute the indicative content and the terms appearing in the topic candidate slots and their expansions constitute the potential topics of the document. Expansions are obtained looking for terms in the term tree sharing the semantic of some terms in the indicative content.",
311
+ "cite_spans": [],
312
+ "ref_spans": [],
313
+ "eq_spans": [],
314
+ "section": "Indicativeness",
315
+ "sec_num": "3.1.1"
316
+ },
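A hypothetical sketch of this selection step; the template fields (topic_candidates, weight) and the tie handling are assumptions made for illustration.

    def select_indicative_content(topical_structure, indicative_db):
        # topical_structure: terms and words taken from titles;
        # indicative_db: instantiated templates from the IDB.
        selected = []
        for term in topical_structure:
            candidates = [t for t in indicative_db if term in t["topic_candidates"]]
            if candidates:
                best = max(candidates, key=lambda t: t["weight"])  # greatest weight wins
                if best not in selected:
                    selected.append(best)
            # ties would be broken here by the heuristics mentioned above
        return selected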
317
+ {
318
+ "text": "The indicative content is sorted using positional information and the following conceptual order: situation, need for research, problem, solution, entity introduction, topical information, goal of conceptual entity, focus of conceptual entity, methodological aspects, inferences and structural information. Templates of the same type are grouped together if they appeared in sequence in the list. The types considered in this process are: the topic, section topic and structural information. The sorted templates constitute the text plan.",
319
+ "cite_spans": [],
320
+ "ref_spans": [],
321
+ "eq_spans": [],
322
+ "section": "Indicativeness",
323
+ "sec_num": "3.1.1"
324
+ },
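One way to picture the construction of the text plan; the field names, and the assumption that every template carries a category from the list above, are illustrative only.

    CONCEPTUAL_ORDER = ["situation", "need for research", "problem", "solution",
                        "entity introduction", "topical information",
                        "goal of conceptual entity", "focus of conceptual entity",
                        "methodological aspects", "inferences", "structural information"]
    GROUPABLE = {"topic", "section topic", "structural information"}

    def build_text_plan(templates):
        # Each template is assumed to carry a "category" from CONCEPTUAL_ORDER,
        # a document "position", and a template "type".
        ordered = sorted(templates, key=lambda t: (CONCEPTUAL_ORDER.index(t["category"]),
                                                   t["position"]))
        plan = []
        for t in ordered:
            if plan and t["type"] in GROUPABLE and plan[-1][-1]["type"] == t["type"]:
                plan[-1].append(t)   # adjacent templates of the same type are grouped
            else:
                plan.append([t])
        return plan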
325
+ {
326
+ "text": "For each potential: topic and sentence where it appears (that information is found on the term tree) the system verifies if the sentence contains an informative marker (conceptual index) and satisfies an informative pattern. If so, the potential topic is considered a topic of the document and a link will be created between the topic and the sentence which will be part of the informative abstract.",
327
+ "cite_spans": [],
328
+ "ref_spans": [],
329
+ "eq_spans": [],
330
+ "section": "Informativeness",
331
+ "sec_num": "3.1.2"
332
+ },
333
+ {
334
+ "text": "Our approach to text generation is based on the regularities observed in the corpus of professional abstracts and so, it does not implement a general theory of text generation by computers. Each element in the text plan is used to produce a sentence. The structure of the sentence depends on the type of template. The information about the situation, the problem, the need for research, etc. is reported as in the original document with few modifications (concept re-expression). Instead other types require additional re-generation: for the topic of the document template the generation procedure is as follows: (i) the verb form for the predicate in the Predicate slot is generated in the present tense (topical information is always reported in present tense), 3rd person of singular in active voice at the beginning of the sentence; (ii) the parsed sentence fragment from the N'hat slot is generated in the middle of the sentence (so the appropriate case for the first element integer identifier instance of make known instance of {research paper, study, work, research} instance of{research paper, author, study, work, research, none} parsed sentence fragment section and sentence id list of terms from the What filler number has, to be generated); and (iii) a full stop is generated. This schema of generation avoids the formulation of expressions like \"X will be presented\", \"X have been presented\" or \"We have presented here X\" which are usually found on source documents but which are awkward in the context of the abstract text-type. Note that each type of information prescribes its own schema of generation.",
335
+ "cite_spans": [],
336
+ "ref_spans": [],
337
+ "eq_spans": [],
338
+ "section": "Content Presentation",
339
+ "sec_num": "4"
340
+ },
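A minimal sketch of the generation schema for the topic-of-the-document template; the small verb table and the template fields are assumptions (the entry for make known mirrors the "Presents ..." phrasing of the example abstract in Figure 1).

    # Tiny verb table for the present-tense, third-person, active-voice form
    PRESENT_3SG = {"make known": "presents", "define": "defines", "show graphically": "shows"}

    def generate_topic_sentence(template):
        verb = PRESENT_3SG.get(template["predicate"], template["predicate"] + "s")
        fragment = template["what"].strip().rstrip(".")   # parsed fragment, re-punctuated
        return verb.capitalize() + " " + fragment + "."   # verb first, fragment, full stop

    print(generate_topic_sentence({"predicate": "make known",
                                   "what": "the design rationale of WebShaman"}))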
341
+ {
342
+ "text": "Some elements in the parsed sentence fragment require re-expression while others are presented in \"the words of the author.\" If the system detects an acronym without expansion in the string it would expand it and record that situation in order to avoid repetitions. Note that as the templates contain parsed sentence fragments, the correct punctuation has to be re-generated. For merged templates the generator implements the following patterns of production: if n adjacent templates are to be presented using the same predicate, only one verb will be generated whose argument is the conjunction of the arguments from the n templates. If the sequence of templates have no common predicate, the information will be presented as a conjunction of propositions. These patterns of sentence production are exemplified in Table 3 .",
343
+ "cite_spans": [],
344
+ "ref_spans": [
345
+ {
346
+ "start": 815,
347
+ "end": 822,
348
+ "text": "Table 3",
349
+ "ref_id": null
350
+ }
351
+ ],
352
+ "eq_spans": [],
353
+ "section": "Content Presentation",
354
+ "sec_num": "4"
355
+ },
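Continuing the sketch above, the two production patterns for merged templates could look roughly like this; the template fields remain assumptions.

    def generate_merged(templates):
        predicates = {t["predicate"] for t in templates}
        if len(predicates) == 1:
            # same predicate: one verb whose argument is the conjunction of all arguments
            merged = dict(templates[0], what=" and ".join(t["what"] for t in templates))
            return generate_topic_sentence(merged)
        # different predicates: the information is presented as a conjunction of propositions
        clauses = [generate_topic_sentence(t).rstrip(".") for t in templates]
        return "; and ".join(clauses) + "."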
356
+ {
357
+ "text": "The elaboration of the topics is presented upon reader's demand. The information is presented in the order of the original text. The informative abstract is the information obtained by this process as it is shown in Figure 1 .",
358
+ "cite_spans": [],
359
+ "ref_spans": [
360
+ {
361
+ "start": 216,
362
+ "end": 224,
363
+ "text": "Figure 1",
364
+ "ref_id": null
365
+ }
366
+ ],
367
+ "eq_spans": [],
368
+ "section": "Content Presentation",
369
+ "sec_num": "4"
370
+ },
371
+ {
372
+ "text": "Our approach is based on the empirical examination of abstracts published by second services. In our first study, we examined 100 abstracts and source documents in order to deduce a conceptual and linguistic model for the task of summarization of technical articles. Then, we expanded the corpus with 100 more items in order to validate the model. We believe that the concepts, relations and types .of information identified account for interesting ,phenomena appearing in the corpus and constitute a sound basis for text summarization. 'Nevertheless, we have identified only a few \u2022 linguistic expressions used in order to express -particular elements of the conceptual model (241 domain verbs, 163 domain nouns, 129 adj.ectives , 174 indicative patterns, 87 informative patterns). This is because we are mainly concerned with the development of a general method of automatic abstracting and the task of constructing such linguistic resources is time consuming as recent work have shown (Minel et al., 2000) .",
373
+ "cite_spans": [
374
+ {
375
+ "start": 988,
376
+ "end": 1008,
377
+ "text": "(Minel et al., 2000)",
378
+ "ref_id": "BIBREF10"
379
+ }
380
+ ],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "Limitations of the Approach",
384
+ "sec_num": "5"
385
+ },
386
+ {
387
+ "text": "The implementation of our method relies\u2022 on State-of-the-art techniques in natural language processing including noun and verb group identification and conceptual tagging. The interpreter relies on the output produced by a shallow text segmenter and on a statistical POStagger. Our prototype only analyses sentences for the specific purpose of text summarization and implements some patterns of generation observed in the corpus. Additional analysis could be done on the obtained representation to produce better results.",
388
+ "cite_spans": [],
389
+ "ref_spans": [],
390
+ "eq_spans": [],
391
+ "section": "Limitations of the Approach",
392
+ "sec_num": "5"
393
+ },
394
+ {
395
+ "text": "6 Related Work (Paice and Jones, 1993) have already addressed the issue of content identification and expression in technical summarization using templates, but while they produced indicative abstracts for a specific domain, we are producing domain independent indicative-informative abstracts. Being designed for one specific domain, their abstracts are fixed in structure while our abstracts are dynamically constructed. Radev and McKeown (1998) also used instantiated templates, but in order to produce summaries of multiple documents in one specific domain. They focus on the generation of the text while we are addressing the overall process of automatic abstracting. Our concern regarding the presentation of the information is now being addressed by other researchers as well (Jing and McKeown, 1999) .",
396
+ "cite_spans": [
397
+ {
398
+ "start": 15,
399
+ "end": 38,
400
+ "text": "(Paice and Jones, 1993)",
401
+ "ref_id": "BIBREF11"
402
+ },
403
+ {
404
+ "start": 423,
405
+ "end": 447,
406
+ "text": "Radev and McKeown (1998)",
407
+ "ref_id": "BIBREF12"
408
+ },
409
+ {
410
+ "start": 783,
411
+ "end": 807,
412
+ "text": "(Jing and McKeown, 1999)",
413
+ "ref_id": "BIBREF5"
414
+ }
415
+ ],
416
+ "ref_spans": [],
417
+ "eq_spans": [],
418
+ "section": "Limitations of the Approach",
419
+ "sec_num": "5"
420
+ },
421
+ {
422
+ "text": "Text Summarization Abstracts are texts used in tasks such as assessing the content of the document and deciding if the source is worth reading. If text summarization systems are designed to fulfill those requirements, the generated texts have to be evaluated according to their intended function and its quality. The quality and success of human produced abstracts have already been addressed in the literature (Grant, 1992; Gibson, 1993) using linguistic criteria such as cohesion and coherence, thematic structure, sentence structure and lexical density. But in automatic text summarization, this is an emergent research topic. (Minel et al., 1997) have proposed two methods of evaluation addressing the content of the abstract and its quality. For content evaluation, they asked human judges to classify summaries in broad categories and also verify if the key ideas of source documents are appropriately expressed in the Summaries. For text quality, they asked human judges to identify problems such as dangling anaphora and broken textual segments and also to make subjective judgments about readability. In the context of the TIPSTER program, (Firmin and PAWS (the programmable automated welding system) was designed to provide an automated means of planning, controlling, and performing critical welding operations for improving productivity and quality. Describes HuDL (local autonomy) in greater detail; discusses system integration and the 1MA (the intelligent machine architecture); and also gives an example implementation. Figure 1 Virtual prototyping models and techniques illustrates the principle of virtual prototyping and the different techniques and models required. After a brief introduction, we present the mechanical and electronic design of the robot harvester including all subsystems, namely, fruit localisation module, harvesting arm and gripper-cutter as well as the integration of subsystems. Throughout this work, we present the specific mechanical design of the picking arm addressing the reduction of undesirable dynamic effects during high velocity operation. The final prototype consists of two jointed harvesting arms mounted on a human guided vehicle as shown schematically in Figure 1 Configuration of the robotic . fruit' harvester Agribot.",
423
+ "cite_spans": [
424
+ {
425
+ "start": 411,
426
+ "end": 424,
427
+ "text": "(Grant, 1992;",
428
+ "ref_id": "BIBREF4"
429
+ },
430
+ {
431
+ "start": 425,
432
+ "end": 438,
433
+ "text": "Gibson, 1993)",
434
+ "ref_id": "BIBREF3"
435
+ },
436
+ {
437
+ "start": 630,
438
+ "end": 650,
439
+ "text": "(Minel et al., 1997)",
440
+ "ref_id": "BIBREF9"
441
+ },
442
+ {
443
+ "start": 1149,
444
+ "end": 1160,
445
+ "text": "(Firmin and",
446
+ "ref_id": null
447
+ }
448
+ ],
449
+ "ref_spans": [
450
+ {
451
+ "start": 1536,
452
+ "end": 1544,
453
+ "text": "Figure 1",
454
+ "ref_id": null
455
+ },
456
+ {
457
+ "start": 2213,
458
+ "end": 2221,
459
+ "text": "Figure 1",
460
+ "ref_id": null
461
+ }
462
+ ],
463
+ "eq_spans": [],
464
+ "section": "Evaluating Content and Quality in",
465
+ "sec_num": "7"
466
+ },
467
+ {
468
+ "text": "Schematic representation of the operations involved in the detaching step can be seen in Figure 5 Schematic view of the detaching tool and operation. PAWS was designed to provide an automated means of planning, controlling, and performing critical welding operations for improving productivity and quality.",
469
+ "cite_spans": [],
470
+ "ref_spans": [
471
+ {
472
+ "start": 89,
473
+ "end": 97,
474
+ "text": "Figure 5",
475
+ "ref_id": null
476
+ }
477
+ ],
478
+ "eq_spans": [],
479
+ "section": "Evaluating Content and Quality in",
480
+ "sec_num": "7"
481
+ },
482
+ {
483
+ "text": "Section 2 describes HuDL in greater detail and section 3 discusses system integration and the IMA. An example implementation is given in section 4 and section 5 contains the conclusions. Table 3 : Re-Generated Sentences 1999) and (Mani et al., 1998) also used a cat-.egorization task using TREC topics. For text quality, they addressed subjective aspects such \u2022 as the length of the summary, its intelligibility and its usefulness. We have carried out an eval-\u2022 uation of our summarization method in order to assess the function of the abstract and its text quality.",
484
+ "cite_spans": [
485
+ {
486
+ "start": 230,
487
+ "end": 249,
488
+ "text": "(Mani et al., 1998)",
489
+ "ref_id": "BIBREF7"
490
+ }
491
+ ],
492
+ "ref_spans": [
493
+ {
494
+ "start": 187,
495
+ "end": 194,
496
+ "text": "Table 3",
497
+ "ref_id": null
498
+ }
499
+ ],
500
+ "eq_spans": [],
501
+ "section": "Evaluating Content and Quality in",
502
+ "sec_num": "7"
503
+ },
504
+ {
505
+ "text": "We compared abstrac\u00a3s produced by our method with abstracts produced by Microsoft'97 Summarizer and with others published with source documents (usually author abstracts). We have chosen Microsoft'97 Summarizer because, even if it only produces extracts, it was the only summarizer available in order to carry out this evaluation and because it has already been used in other evaluations (Marcu, 1997; Barzilay and Elhadad, 1997) .",
506
+ "cite_spans": [
507
+ {
508
+ "start": 388,
509
+ "end": 401,
510
+ "text": "(Marcu, 1997;",
511
+ "ref_id": "BIBREF8"
512
+ },
513
+ {
514
+ "start": 402,
515
+ "end": 429,
516
+ "text": "Barzilay and Elhadad, 1997)",
517
+ "ref_id": "BIBREF0"
518
+ }
519
+ ],
520
+ "ref_spans": [],
521
+ "eq_spans": [],
522
+ "section": "Experiment",
523
+ "sec_num": "7.1"
524
+ },
525
+ {
526
+ "text": "In order to evaluate content, we presented judges with randomly selected abstracts and five lists of keywords (content indicators). The judges had to decide to which list of keywords the abstract belongs given that different lists share some keywords and that they belong to the same technical domain.",
527
+ "cite_spans": [],
528
+ "ref_spans": [],
529
+ "eq_spans": [],
530
+ "section": "Experiment",
531
+ "sec_num": "7.1"
532
+ },
533
+ {
534
+ "text": "Those. lists were obtained from the journals where the source documents were published. The idea behind this evaluation is to see if the abstract convey the very essential content of the source document. In Order to evaluate the quality of the text, we asked the judges to provide an acceptability score between 0-5 for the abstract (0 for unacceptable and 5 for acceptable) based on the following criteria taken from (Rowley, 1982) (they were only suggestions to the evaluators and were not enforced): good spelling and grammar; clear indication of the topic of the source document; impersonal style; one paragraph; conciseness; readable and understandable; acronyms are presented along with their expansions; and other criteria that the judge considered important as an experienced reader of abstracts of technical documents.",
535
+ "cite_spans": [
536
+ {
537
+ "start": 418,
538
+ "end": 432,
539
+ "text": "(Rowley, 1982)",
540
+ "ref_id": "BIBREF13"
541
+ }
542
+ ],
543
+ "ref_spans": [],
544
+ "eq_spans": [],
545
+ "section": "Experiment",
546
+ "sec_num": "7.1"
547
+ },
548
+ {
549
+ "text": "We told the judges that we would consider the abstracts with scores above 2.5 as acceptable. Some criteria are more important than other, for example judges do not care about impersonal style but care about readability.",
550
+ "cite_spans": [],
551
+ "ref_spans": [],
552
+ "eq_spans": [],
553
+ "section": "Experiment",
554
+ "sec_num": "7.1"
555
+ },
556
+ {
557
+ "text": "Materials Source Documents: we used twelve source documents from the journal Industrial Robots found on the Emerald Electronic Library (all technical articles). The articles were downloaded in plain text format. These documents are quite long texts with an average of 23K characters (minimum of llK characters and a maximum of 41K characters). They contain an average of 3472 words (minimum of 1756 words and a maximum of 6196 words excluding punctuation), and an average of 154 sentences (with a minimum of 85 and a maximum of 288).",
558
+ "cite_spans": [],
559
+ "ref_spans": [],
560
+ "eq_spans": [],
561
+ "section": "7.1.1",
562
+ "sec_num": null
563
+ },
564
+ {
565
+ "text": "Abstracts: we produced twelve abstracts us-:ing our method and computed the compression ,ratio in number of words, then we produced twelve abstracts by Microsoft'97 Summarizer 1 using a compression rate at least as high as our (i.e. if our method produced an abstract with a compression rate of 3.3% of the source, we produced the Microsoft abstract with a compression rate of 4% of the source). We extracted the twelve abstracts and the twelve lists of keywords published with the source documents.",
566
+ "cite_spans": [],
567
+ "ref_spans": [],
568
+ "eq_spans": [],
569
+ "section": "7.1.1",
570
+ "sec_num": null
571
+ },
572
+ {
573
+ "text": "We thus obtained 36 different abstracts and twelve lists of keywords.",
574
+ "cite_spans": [],
575
+ "ref_spans": [],
576
+ "eq_spans": [],
577
+ "section": "7.1.1",
578
+ "sec_num": null
579
+ },
580
+ {
581
+ "text": "Forms: we produced 6 different forms each containing six different abstracts randomly 2 chosen out of twelve different documents (for a total of 36 abstracts). Each abstract was printed in a 1We had to format the source document in order for the Microsoft Summarizer to be able to recognize the structure of the document (titles, sections, paragraphs and sentences).",
582
+ "cite_spans": [],
583
+ "ref_spans": [],
584
+ "eq_spans": [],
585
+ "section": "7.1.1",
586
+ "sec_num": null
587
+ },
588
+ {
589
+ "text": "2Random numbers for this evaluation were produced using software provided by SICSTus Prolog. different page. It included 5 lists of keywords, a field to be completed with the quality score associated to the abstract and a field to be filled with comments about the abstract. One of the lists of keywords was the one published with the source document, the other four were randomly selected from the set of 11 remaining keyword lists, they were printed in the form in random order. One page was also available to be completed with comments about the task, in particular with the time it took to the judges to complete the evaluation. We produced three copies of each form for a total of 18 forms.",
590
+ "cite_spans": [],
591
+ "ref_spans": [],
592
+ "eq_spans": [],
593
+ "section": "7.1.1",
594
+ "sec_num": null
595
+ },
596
+ {
597
+ "text": "We had a total of 18 human judges or evaluators. Our evaluators were 18 students of the M.Sc. program in Information Science at McGill Graduate School of Library & Information Studies. All of the subjects had good reading and comprehension skills in English. This group was chosen because they have knowledge about what constitutes a good abstract and they are educated to become professionals in Information Science.",
598
+ "cite_spans": [],
599
+ "ref_spans": [],
600
+ "eq_spans": [],
601
+ "section": "Subjects",
602
+ "sec_num": "7.1.2"
603
+ },
604
+ {
605
+ "text": "The evaluation was performed in one hour session at McGill University. Each human judge received a form (so he/she evaluated six different abstracts) and an instruction booklet. No other material was required for the evaluation (i.e. dictionary). We asked the judges to read carefully the abstract. They had to decide which was the list of keywords that matched the abstract (they could chose more than one or none at all) and then, they had to associate a numeric score to the abstract representing its quality based on the given criteria. This procedure produced three different evaluations of content and text quality for each of the 36 abstracts. The overall evaluation was completed in a maximum of 40 minutes.",
606
+ "cite_spans": [],
607
+ "ref_spans": [],
608
+ "eq_spans": [],
609
+ "section": "Evaluation Procedure",
610
+ "sec_num": "7.1.3"
611
+ },
612
+ {
613
+ "text": "For each abstract, we computed the average quality using the scores given by the judges. We considered that the abstract indicated the essential content of the source document if two or more judges were able to chose the correct list of keywords for the abstract. The results for individual articles and the average information Table 4 . For a given source document and type of abstract, the value in column 'Indic?' contains the value 'yes' if the majority of the evaluator have chosen the source document list of keywords for the abstract and 'no' on the contrary. The value in column 'Quality' is the average acceptability for the abstract.",
614
+ "cite_spans": [],
615
+ "ref_spans": [
616
+ {
617
+ "start": 328,
618
+ "end": 335,
619
+ "text": "Table 4",
620
+ "ref_id": "TABREF6"
621
+ }
622
+ ],
623
+ "eq_spans": [],
624
+ "section": "Results",
625
+ "sec_num": "7.2"
626
+ },
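For clarity, the two per-abstract measures reported in Table 4 could be computed as follows; the data layout (three judgements per abstract, each a keyword-list decision plus a quality score) follows the procedure described earlier and is otherwise an assumption.

    def aggregate(judgements):
        # judgements: one (chose_correct_keyword_list, quality_score) pair per judge
        indicative = sum(1 for ok, _ in judgements if ok) >= 2      # two or more judges
        quality = sum(q for _, q in judgements) / len(judgements)   # average acceptability
        return "yes" if indicative else "no", round(quality, 1)

    print(aggregate([(True, 3.0), (True, 4.0), (False, 2.5)]))      # -> ('yes', 3.2)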
627
+ {
628
+ "text": "Content: In 80% of the cases, the abstracts published with the source documents were correctly classified by the evaluators. Instead, the automatic abstracts were correctly classified in 70% of the cases. It is worth noting \"that the automatic systems did not use the \u2022 journal abstracts nor the lists of keywords or the.information about the journal.",
629
+ "cite_spans": [],
630
+ "ref_spans": [],
631
+ "eq_spans": [],
632
+ "section": "Results",
633
+ "sec_num": "7.2"
634
+ },
635
+ {
636
+ "text": "Quality: The figures about text acceptability indicate that the abstracts produced by Microsoft'97 Summarizer are below the acceptabil!ty level of 2.5, the abstracts produced by our method are above the acceptability level of 2.5 and that the human abstracts are highly acceptable.",
637
+ "cite_spans": [],
638
+ "ref_spans": [],
639
+ "eq_spans": [],
640
+ "section": "Results",
641
+ "sec_num": "7.2"
642
+ },
643
+ {
644
+ "text": "In a run of this experiment using 30 abstracts from a different set of 10 articles and 15 judges from ]~cole de Biblioth6conomie et des Sciences de l'Information (EBSI) at Universit6 de Montr@al we have obtained similar results (last row in Table 4 ).",
645
+ "cite_spans": [],
646
+ "ref_spans": [
647
+ {
648
+ "start": 241,
649
+ "end": 248,
650
+ "text": "Table 4",
651
+ "ref_id": "TABREF6"
652
+ }
653
+ ],
654
+ "eq_spans": [],
655
+ "section": "Results",
656
+ "sec_num": "7.2"
657
+ },
658
+ {
659
+ "text": "In this paper, we have presented a method of text summarization which produces indicativeinformative abstracts. We have described the techniques we are using to implement our method and some experiments showing the viability of the approach.",
660
+ "cite_spans": [],
661
+ "ref_spans": [],
662
+ "eq_spans": [],
663
+ "section": "Conclusions",
664
+ "sec_num": "8"
665
+ },
666
+ {
667
+ "text": "Our method was specified for summarization of one specific type of text: the scientific and technical document. Nevertheless, it is domain independent because the concepts, relations and types of information we use are common across different domains. The question of the coverage of the model will be addressed in our future work. Our method was designed without any particular reader in mind and with the assumption that a text does have a \"main\" topic. If readers were known, the abstract could be tailored towards their specific profiles. User profiles could be used in order to produce the informative abstracts elaborating those specific aspects the reader is \"usually\" interested in. This aspect will be elaborated in future work.",
668
+ "cite_spans": [],
669
+ "ref_spans": [],
670
+ "eq_spans": [],
671
+ "section": "Conclusions",
672
+ "sec_num": "8"
673
+ },
674
+ {
675
+ "text": "The experiments reported here addressed the evaluation of the indicative abstracts using a categorization task. Using the automatic abstracts reader have chosen the correct category for the articles in 70% of the cases compared with 80% of the cases when using the author abstracts. Readers found the abstracts produced by our method of better quality than a sentence-extraction based system. I I I I I I I I I! I, I I I I i, ",
676
+ "cite_spans": [],
677
+ "ref_spans": [
678
+ {
679
+ "start": 393,
680
+ "end": 440,
681
+ "text": "I I I I I I I I I! I, I I I I i,",
682
+ "ref_id": null
683
+ }
684
+ ],
685
+ "eq_spans": [],
686
+ "section": "Conclusions",
687
+ "sec_num": "8"
688
+ }
689
+ ],
690
+ "back_matter": [
691
+ {
692
+ "text": "We would like to thank three anonymous reviewers for their comments which helped us improve the final version of this paper. We are grateful to Professor Mich~le Hudon from Uni-versit~ de Montreal for fruitful discussion and to Professor John E. Leide from McGill University and to Mme Gracia Pagola from Universit~ de Montreal for their help in recruiting informants for the experiments.",
693
+ "cite_spans": [],
694
+ "ref_spans": [],
695
+ "eq_spans": [],
696
+ "section": "Acknowledgments",
697
+ "sec_num": null
698
+ }
699
+ ],
700
+ "bib_entries": {
701
+ "BIBREF0": {
702
+ "ref_id": "b0",
703
+ "title": "Using Lexical Chains for Text Summarization",
704
+ "authors": [
705
+ {
706
+ "first": "R",
707
+ "middle": [],
708
+ "last": "Barzilay",
709
+ "suffix": ""
710
+ },
711
+ {
712
+ "first": "M",
713
+ "middle": [],
714
+ "last": "Elhadad",
715
+ "suffix": ""
716
+ }
717
+ ],
718
+ "year": 1997,
719
+ "venue": "Proceedings of the A CL/EA CL '97 Workshop on Intelligent Scalable Text Summarization",
720
+ "volume": "",
721
+ "issue": "",
722
+ "pages": "10--17",
723
+ "other_ids": {},
724
+ "num": null,
725
+ "urls": [],
726
+ "raw_text": "R. Barzilay and M. Elhadad. 1997. Using Lexical Chains for Text Summarization. In Proceedings of the A CL/EA CL '97 Workshop on Intelligent Scalable Text Summarization, pages 10-17, Madrid, Spain, July.",
727
+ "links": null
728
+ },
729
+ "BIBREF1": {
730
+ "ref_id": "b1",
731
+ "title": "An Overview of the FRUMP System",
732
+ "authors": [
733
+ {
734
+ "first": "G",
735
+ "middle": [],
736
+ "last": "Dejong",
737
+ "suffix": ""
738
+ }
739
+ ],
740
+ "year": 1982,
741
+ "venue": "Strategies for Natural Language Processing",
742
+ "volume": "",
743
+ "issue": "",
744
+ "pages": "149--176",
745
+ "other_ids": {},
746
+ "num": null,
747
+ "urls": [],
748
+ "raw_text": "G. DeJong. 1982. An Overview of the FRUMP System. In W.G. Lehnert and M.H. Ringle, editors, Strategies for Natural Language Pro- cessing, pages 149-176. Lawrence Erlbaum . Associates, Publishers.",
749
+ "links": null
750
+ },
751
+ "BIBREF2": {
752
+ "ref_id": "b2",
753
+ "title": "An Evaluation of Automatic Text Summarization Systems",
754
+ "authors": [
755
+ {
756
+ "first": "T",
757
+ "middle": [],
758
+ "last": "Firmin",
759
+ "suffix": ""
760
+ },
761
+ {
762
+ "first": "M",
763
+ "middle": [
764
+ "J"
765
+ ],
766
+ "last": "Chrzanowski",
767
+ "suffix": ""
768
+ }
769
+ ],
770
+ "year": 1999,
771
+ "venue": "Advances in Automatic Text Summa-~ization",
772
+ "volume": "",
773
+ "issue": "",
774
+ "pages": "325--336",
775
+ "other_ids": {},
776
+ "num": null,
777
+ "urls": [],
778
+ "raw_text": "T. Firmin and M.J. Chrzanowski. 1999. An Evaluation of Automatic Text Summariza- tion Systems. In I. Mani and M.T. Maybury,. editors, Advances in Automatic Text Summa- ~ization, pages 325-336.",
779
+ "links": null
780
+ },
781
+ "BIBREF3": {
782
+ "ref_id": "b3",
783
+ "title": "Towards a Discourse Theory of Abstracts and Abstracting. Department of English Studies",
784
+ "authors": [
785
+ {
786
+ "first": "T",
787
+ "middle": [
788
+ "R"
789
+ ],
790
+ "last": "Gibson",
791
+ "suffix": ""
792
+ }
793
+ ],
794
+ "year": 1993,
795
+ "venue": "",
796
+ "volume": "",
797
+ "issue": "",
798
+ "pages": "",
799
+ "other_ids": {},
800
+ "num": null,
801
+ "urls": [],
802
+ "raw_text": "T.R. Gibson. 1993. Towards a Discourse The- ory of Abstracts and Abstracting. Depart- ment of English Studies. University of Not- tingham.",
803
+ "links": null
804
+ },
805
+ "BIBREF4": {
806
+ "ref_id": "b4",
807
+ "title": "The Integration of Theory and Practice in the Development of Summary-Writting Strategies",
808
+ "authors": [
809
+ {
810
+ "first": "P",
811
+ "middle": [],
812
+ "last": "Grant",
813
+ "suffix": ""
814
+ }
815
+ ],
816
+ "year": 1992,
817
+ "venue": "",
818
+ "volume": "",
819
+ "issue": "",
820
+ "pages": "",
821
+ "other_ids": {},
822
+ "num": null,
823
+ "urls": [],
824
+ "raw_text": "P. Grant. 1992. The Integration of Theory and Practice in the Development of Summary- Writting Strategies. Ph.D. thesis, Universit~ de Montreal. Facult~ des ~tudes sup~rieures.",
825
+ "links": null
826
+ },
827
+ "BIBREF5": {
828
+ "ref_id": "b5",
829
+ "title": "The Decomposition of Human-Written Summary Sentences",
830
+ "authors": [
831
+ {
832
+ "first": "H",
833
+ "middle": [],
834
+ "last": "Jing",
835
+ "suffix": ""
836
+ },
837
+ {
838
+ "first": "K",
839
+ "middle": [
840
+ "R"
841
+ ],
842
+ "last": "Mckeown",
843
+ "suffix": ""
844
+ }
845
+ ],
846
+ "year": 1999,
847
+ "venue": "Proceedings of SIGIR '99. 22nd International Conference on Research and Development in Information Retrieval",
848
+ "volume": "",
849
+ "issue": "",
850
+ "pages": "129--136",
851
+ "other_ids": {},
852
+ "num": null,
853
+ "urls": [],
854
+ "raw_text": "H. Jing and K.R. McKeown. 1999. The Decom- position of Human-Written Summary Sen- tences. In M. Hearst, Gey. F., and R. Tong, editors, Proceedings of SIGIR '99. 22nd Inter- national Conference on Research and Devel- opment in Information Retrieval, pages 129- 136, University of California, Beekely, Au- gust.",
855
+ "links": null
856
+ },
857
+ "BIBREF6": {
858
+ "ref_id": "b6",
859
+ "title": "The Automatic Creation of",
860
+ "authors": [
861
+ {
862
+ "first": "H",
863
+ "middle": [
864
+ "P"
865
+ ],
866
+ "last": "Luhn",
867
+ "suffix": ""
868
+ }
869
+ ],
870
+ "year": 1958,
871
+ "venue": "Literature Abstracts. IBM Journal of Research Development",
872
+ "volume": "2",
873
+ "issue": "2",
874
+ "pages": "159--165",
875
+ "other_ids": {},
876
+ "num": null,
877
+ "urls": [],
878
+ "raw_text": "H.P. Luhn. 1958. The Automatic Creation of \" Literature Abstracts. IBM Journal of Re- search Development, 2(2):159-165.",
879
+ "links": null
880
+ },
881
+ "BIBREF7": {
882
+ "ref_id": "b7",
883
+ "title": "The TIPSTER SUM-MAC Text Summarization Evaluation",
884
+ "authors": [
885
+ {
886
+ "first": "I",
887
+ "middle": [],
888
+ "last": "Mani",
889
+ "suffix": ""
890
+ },
891
+ {
892
+ "first": "D",
893
+ "middle": [],
894
+ "last": "House",
895
+ "suffix": ""
896
+ },
897
+ {
898
+ "first": "G",
899
+ "middle": [],
900
+ "last": "Klein",
901
+ "suffix": ""
902
+ },
903
+ {
904
+ "first": "L",
905
+ "middle": [],
906
+ "last": "Hirshman",
907
+ "suffix": ""
908
+ },
909
+ {
910
+ "first": "L",
911
+ "middle": [],
912
+ "last": "Obrst",
913
+ "suffix": ""
914
+ },
915
+ {
916
+ "first": "T",
917
+ "middle": [],
918
+ "last": "Firmin",
919
+ "suffix": ""
920
+ },
921
+ {
922
+ "first": "M",
923
+ "middle": [],
924
+ "last": "Chrzanowski",
925
+ "suffix": ""
926
+ },
927
+ {
928
+ "first": "B",
929
+ "middle": [],
930
+ "last": "Sundheim",
931
+ "suffix": ""
932
+ }
933
+ ],
934
+ "year": 1998,
935
+ "venue": "",
936
+ "volume": "",
937
+ "issue": "",
938
+ "pages": "",
939
+ "other_ids": {},
940
+ "num": null,
941
+ "urls": [],
942
+ "raw_text": "I. Mani, D. House, G. Klein, L. Hirshman, L. Obrst, T. Firmin, M. Chrzanowski, and B. Sundheim. 1998. The TIPSTER SUM- MAC Text Summarization Evaluation. Tech- nical report, The Mitre Corporation.",
943
+ "links": null
944
+ },
945
+ "BIBREF8": {
946
+ "ref_id": "b8",
947
+ "title": "From Discourse Structures to Text Summaries",
948
+ "authors": [
949
+ {
950
+ "first": "D",
951
+ "middle": [],
952
+ "last": "Marcu",
953
+ "suffix": ""
954
+ }
955
+ ],
956
+ "year": 1997,
957
+ "venue": "The Proceedings of the A CL '97lEA CL '97 Workshop on Intelligent Scalable Text Summarization",
958
+ "volume": "",
959
+ "issue": "",
960
+ "pages": "82--88",
961
+ "other_ids": {},
962
+ "num": null,
963
+ "urls": [],
964
+ "raw_text": "D. Marcu. 1997. From Discourse Structures to Text Summaries. In The Proceedings of the A CL '97lEA CL '97 Workshop on Intelli- gent Scalable Text Summarization, pages 82- 88, Madrid, Spain, July 11.",
965
+ "links": null
966
+ },
967
+ "BIBREF9": {
968
+ "ref_id": "b9",
969
+ "title": "Comment Appr~cier la Qualit~ des R~sum~s Automatiques de Textes? Les Exemples des Protocoles FAN et MLUCE et leurs R~sultats sur SERAPHIN",
970
+ "authors": [
971
+ {
972
+ "first": "J-L",
973
+ "middle": [],
974
+ "last": "Minel",
975
+ "suffix": ""
976
+ },
977
+ {
978
+ "first": "S",
979
+ "middle": [],
980
+ "last": "Nugier",
981
+ "suffix": ""
982
+ },
983
+ {
984
+ "first": "G",
985
+ "middle": [],
986
+ "last": "Piat",
987
+ "suffix": ""
988
+ }
989
+ ],
990
+ "year": 1997,
991
+ "venue": "ldres Journdes Scientificques et Techniques du Rdseau Francophone de l'Ingdnierie de la Langue de I'AUPELF-UREF",
992
+ "volume": "",
993
+ "issue": "",
994
+ "pages": "15--16",
995
+ "other_ids": {},
996
+ "num": null,
997
+ "urls": [],
998
+ "raw_text": "J-L. Minel, S. Nugier, and G. Piat. 1997. Com- ment Appr~cier la Qualit~ des R~sum~s Au- tomatiques de Textes? Les Exemples des Pro- tocoles FAN et MLUCE et leurs R~sultats sur SERAPHIN. In ldres Journdes Scientific- ques et Techniques du Rdseau Francophone de l'Ingdnierie de la Langue de I'AUPELF- UREF., pages 227-232, 15-16 avril.",
999
+ "links": null
1000
+ },
1001
+ "BIBREF10": {
1002
+ "ref_id": "b10",
1003
+ "title": "R~sum~ automatique par filtrage s~mantique d'informations dans des textes",
1004
+ "authors": [
1005
+ {
1006
+ "first": "J-L",
1007
+ "middle": [],
1008
+ "last": "Minel",
1009
+ "suffix": ""
1010
+ },
1011
+ {
1012
+ "first": "J-P",
1013
+ "middle": [],
1014
+ "last": "Descl~s",
1015
+ "suffix": ""
1016
+ },
1017
+ {
1018
+ "first": "E",
1019
+ "middle": [],
1020
+ "last": "Cartier",
1021
+ "suffix": ""
1022
+ },
1023
+ {
1024
+ "first": "G",
1025
+ "middle": [],
1026
+ "last": "Crispino",
1027
+ "suffix": ""
1028
+ },
1029
+ {
1030
+ "first": "S",
1031
+ "middle": [
1032
+ "B"
1033
+ ],
1034
+ "last": "Hazez",
1035
+ "suffix": ""
1036
+ },
1037
+ {
1038
+ "first": "A",
1039
+ "middle": [],
1040
+ "last": "Jackiewicz",
1041
+ "suffix": ""
1042
+ }
1043
+ ],
1044
+ "year": 2000,
1045
+ "venue": "",
1046
+ "volume": "",
1047
+ "issue": "",
1048
+ "pages": "",
1049
+ "other_ids": {},
1050
+ "num": null,
1051
+ "urls": [],
1052
+ "raw_text": "J-L. Minel, J-P. Descl~s, E. Cartier, G. Crispino, S.B. Hazez, and A. Jack- iewicz. 2000. R~sum~ automatique par filtrage s~mantique d'informations dans des textes. TSI, X(X/2000):l-23.",
1053
+ "links": null
1054
+ },
1055
+ "BIBREF11": {
1056
+ "ref_id": "b11",
1057
+ "title": "The Identification of Important Concepts in Highly Structured Technical Papers",
1058
+ "authors": [
1059
+ {
1060
+ "first": "C",
1061
+ "middle": [
1062
+ "D"
1063
+ ],
1064
+ "last": "Paice",
1065
+ "suffix": ""
1066
+ },
1067
+ {
1068
+ "first": "P",
1069
+ "middle": [
1070
+ "A"
1071
+ ],
1072
+ "last": "Jones",
1073
+ "suffix": ""
1074
+ }
1075
+ ],
1076
+ "year": 1993,
1077
+ "venue": "Proc. of the 16th ACM-SIGIR Conference",
1078
+ "volume": "",
1079
+ "issue": "",
1080
+ "pages": "69--78",
1081
+ "other_ids": {},
1082
+ "num": null,
1083
+ "urls": [],
1084
+ "raw_text": "C.D. Paice and P.A. Jones. 1993. The Iden- tification of Important Concepts in Highly Structured Technical Papers. In R. Korfhage, E. Rasmussen, and P. Willett, editors, Proc. of the 16th ACM-SIGIR Conference, pages 69-78.",
1085
+ "links": null
1086
+ },
1087
+ "BIBREF12": {
1088
+ "ref_id": "b12",
1089
+ "title": "Generating Natural Language Summaries from Multiple On-Line Sources",
1090
+ "authors": [
1091
+ {
1092
+ "first": "D",
1093
+ "middle": [
1094
+ "R"
1095
+ ],
1096
+ "last": "Radev",
1097
+ "suffix": ""
1098
+ },
1099
+ {
1100
+ "first": "K",
1101
+ "middle": [
1102
+ "R"
1103
+ ],
1104
+ "last": "Mckeown",
1105
+ "suffix": ""
1106
+ }
1107
+ ],
1108
+ "year": 1998,
1109
+ "venue": "Computational Linguistics",
1110
+ "volume": "24",
1111
+ "issue": "3",
1112
+ "pages": "469--500",
1113
+ "other_ids": {},
1114
+ "num": null,
1115
+ "urls": [],
1116
+ "raw_text": "D.R. Radev and K.R. McKeown. 1998. Gen- erating Natural Language Summaries from Multiple On-Line Sources. Computational Linguistics, 24(3):469-500.",
1117
+ "links": null
1118
+ },
1119
+ "BIBREF13": {
1120
+ "ref_id": "b13",
1121
+ "title": "Abstracting and Indexing",
1122
+ "authors": [
1123
+ {
1124
+ "first": "J",
1125
+ "middle": [],
1126
+ "last": "Rowley",
1127
+ "suffix": ""
1128
+ }
1129
+ ],
1130
+ "year": 1982,
1131
+ "venue": "",
1132
+ "volume": "",
1133
+ "issue": "",
1134
+ "pages": "",
1135
+ "other_ids": {},
1136
+ "num": null,
1137
+ "urls": [],
1138
+ "raw_text": "J. Rowley. 1982. Abstracting and Indexing. Clive Bingley, London.",
1139
+ "links": null
1140
+ },
1141
+ "BIBREF14": {
1142
+ "ref_id": "b14",
1143
+ "title": "Rencontre Internationale sur l'extraction le Filtrage et le Rdsumd Automatique",
1144
+ "authors": [
1145
+ {
1146
+ "first": "H",
1147
+ "middle": [],
1148
+ "last": "Saggion",
1149
+ "suffix": ""
1150
+ },
1151
+ {
1152
+ "first": "G",
1153
+ "middle": [],
1154
+ "last": "Lapalme",
1155
+ "suffix": ""
1156
+ }
1157
+ ],
1158
+ "year": 1998,
1159
+ "venue": "RIFRA'98",
1160
+ "volume": "",
1161
+ "issue": "",
1162
+ "pages": "72--83",
1163
+ "other_ids": {},
1164
+ "num": null,
1165
+ "urls": [],
1166
+ "raw_text": "H. Saggion and G. Lapalme. 1998. Where does Information come from? Corpus Analysis for Automatic Abstracting. In RIFRA'98. Ren- contre Internationale sur l'extraction le Fil- trage et le Rdsumd Automatique, pages 72-83.",
1167
+ "links": null
1168
+ },
1169
+ "BIBREF15": {
1170
+ "ref_id": "b15",
1171
+ "title": "Evaluation of Content and Text Quality in the Context of Technical Text Summarization",
1172
+ "authors": [
1173
+ {
1174
+ "first": "H",
1175
+ "middle": [],
1176
+ "last": "Saggion",
1177
+ "suffix": ""
1178
+ },
1179
+ {
1180
+ "first": "G",
1181
+ "middle": [],
1182
+ "last": "Lapalme",
1183
+ "suffix": ""
1184
+ }
1185
+ ],
1186
+ "year": 2000,
1187
+ "venue": "Proceedings of RIAO'2000",
1188
+ "volume": "",
1189
+ "issue": "",
1190
+ "pages": "12--14",
1191
+ "other_ids": {},
1192
+ "num": null,
1193
+ "urls": [],
1194
+ "raw_text": "H. Saggion and G. Lapalme. 2000. Evaluation of Content and Text Quality in the Context of Technical Text Summarization. In Pro- ceedings of RIAO'2000, Paris, France, 12-14",
1195
+ "links": null
1196
+ }
1197
+ },
1198
+ "ref_entries": {
1199
+ "FIGREF0": {
1200
+ "uris": null,
1201
+ "type_str": "figure",
1202
+ "num": null,
1203
+ "text": "+ SKIP2 + show graphically + ARGUMENT + eos noun group + author + make known + preposition + research paper + DESCRIPTION + eos Author's Goal SKIP1 + goal of author + define + GOAL + eos (indicative) Goal of SKIP + goal + preposition + TOPIC + define + GOAL + eos TOPIC"
1204
+ },
1205
+ "TABREF1": {
1206
+ "type_str": "table",
1207
+ "text": "",
1208
+ "html": null,
1209
+ "num": null,
1210
+ "content": "<table><tr><td>: LISA Abstract 1955 -Source Document: \"Efficient distributed breadth-first search algo-</td></tr><tr><td>rithm.\" S.A.M. Makki. Computer Communications, 19(8) Jul 96, p628-36.</td></tr></table>"
1211
+ },
1212
+ "TABREF3": {
1213
+ "type_str": "table",
1214
+ "text": "Templates and Patterns.",
1215
+ "html": null,
1216
+ "num": null,
1217
+ "content": "<table/>"
1218
+ },
1219
+ "TABREF6": {
1220
+ "type_str": "table",
1221
+ "text": "Results of Human Judgment about Indicativeness and Text Quality are shown in",
1222
+ "html": null,
1223
+ "num": null,
1224
+ "content": "<table/>"
1225
+ }
1226
+ }
1227
+ }
1228
+ }
Full_text_JSON/prefixW/json/W00/W00-0402.json ADDED
The diff for this file is too large to render. See raw diff
 
Full_text_JSON/prefixW/json/W00/W00-0403.json ADDED
@@ -0,0 +1,1439 @@
1
+ {
2
+ "paper_id": "W00-0403",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:22.089946Z"
6
+ },
7
+ "title": "Centroid-based summarization of multiple documents: sentence extraction, utility-based evaluation, and user studies",
8
+ "authors": [
9
+ {
10
+ "first": "Dragomir",
11
+ "middle": [
12
+ "R"
13
+ ],
14
+ "last": "Radev",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "University of Michigan Ann Arbor",
19
+ "location": {
20
+ "postCode": "48103",
21
+ "region": "MI"
22
+ }
23
+ },
24
+ "email": "radev@umich.edu"
25
+ },
26
+ {
27
+ "first": "Hongyan",
28
+ "middle": [],
29
+ "last": "Jing",
30
+ "suffix": "",
31
+ "affiliation": {
32
+ "laboratory": "",
33
+ "institution": "Columbia University",
34
+ "location": {
35
+ "postCode": "10027",
36
+ "settlement": "New York",
37
+ "region": "NY"
38
+ }
39
+ },
40
+ "email": "hjing@cs.columbia.edu"
41
+ },
42
+ {
43
+ "first": "Malgorzata",
44
+ "middle": [],
45
+ "last": "Budzikowska",
46
+ "suffix": "",
47
+ "affiliation": {},
48
+ "email": ""
49
+ }
50
+ ],
51
+ "year": "",
52
+ "venue": null,
53
+ "identifiers": {},
54
+ "abstract": "We present a multi-document summarizer, called MEAD, which generates summaries using cluster centroids produced by a topic detection and tracking system. We also describe two new techniques, based on sentence utility and subsumption, which we have applied to the evaluation of both single and multiple document summaries. Finally, we describe two user studies that test our models of multi-document summarization.",
55
+ "pdf_parse": {
56
+ "paper_id": "W00-0403",
57
+ "_pdf_hash": "",
58
+ "abstract": [
59
+ {
60
+ "text": "We present a multi-document summarizer, called MEAD, which generates summaries using cluster centroids produced by a topic detection and tracking system. We also describe two new techniques, based on sentence utility and subsumption, which we have applied to the evaluation of both single and multiple document summaries. Finally, we describe two user studies that test our models of multi-document summarization.",
61
+ "cite_spans": [],
62
+ "ref_spans": [],
63
+ "eq_spans": [],
64
+ "section": "Abstract",
65
+ "sec_num": null
66
+ }
67
+ ],
68
+ "body_text": [
69
+ {
70
+ "text": "On October 12, 1999, a relatively small number of news sources mentioned in passing that Pakistani Defense Minister Gen. Pervaiz Musharraf was away visiting Sri Lanka. However, all world agencies would be actively reporting on the major events that were to happen in Pakistan in the following days: Prime Minister Nawaz Sharif announced that in Gen. Musharraf's absence, the Defense Minister had been sacked and replaced by General Zia Addin. Large numbers of messages from various sources started to inundate the newswire: about the army's occupation of the capital, the Prime Minister's ouster and his subsequent placement under house arrest, Gen. Musharraf's return to his country, his ascendancy to power, and the imposition of military control over Pakistan.",
71
+ "cite_spans": [],
72
+ "ref_spans": [],
73
+ "eq_spans": [],
74
+ "section": "Introduction",
75
+ "sec_num": "1"
76
+ },
77
+ {
78
+ "text": "The paragraph above summarizes a large amount of news from different sources. While it was not automatically generated, one can imagine the use of such automatically generated summaries. In this paper we will describe how multi-document summaries are built and evaluated.",
79
+ "cite_spans": [],
80
+ "ref_spans": [],
81
+ "eq_spans": [],
82
+ "section": "Introduction",
83
+ "sec_num": "1"
84
+ },
85
+ {
86
+ "text": "The process of identifying all articles on an emerging event is called Topic Detection and Tracking (TDT) . A large body of research in TDT has been created over the past two years [Allan et al., 98] . We will present an extension of our own research on TDT [Radev et al., 1999] to cover summarization of multidocument clusters.",
87
+ "cite_spans": [
88
+ {
89
+ "start": 77,
90
+ "end": 105,
91
+ "text": "Detection and Tracking (TDT)",
92
+ "ref_id": null
93
+ },
94
+ {
95
+ "start": 181,
96
+ "end": 195,
97
+ "text": "[Allan et al.,",
98
+ "ref_id": null
99
+ },
100
+ {
101
+ "start": 196,
102
+ "end": 199,
103
+ "text": "98]",
104
+ "ref_id": null
105
+ },
106
+ {
107
+ "start": 254,
108
+ "end": 278,
109
+ "text": "TDT [Radev et al., 1999]",
110
+ "ref_id": null
111
+ }
112
+ ],
113
+ "ref_spans": [],
114
+ "eq_spans": [],
115
+ "section": "Topic detection and multi-document summarization",
116
+ "sec_num": "1.1"
117
+ },
118
+ {
119
+ "text": "Our entry in the official TDT evaluation, called CIDR [Radev et al., 1999] , uses modified TF*IDF to produce clusters of news articles on the same event. We developed a new technique for multi-document summarization (or MDS), called centroid-based summarization (CBS) which uses as input the centroids of the clusters produced by CIDR to identify which sentences are central to the topic of the cluster, rather than the individual articles. We have implemented CBS in a system, named MEAD.",
120
+ "cite_spans": [
121
+ {
122
+ "start": 26,
123
+ "end": 74,
124
+ "text": "TDT evaluation, called CIDR [Radev et al., 1999]",
125
+ "ref_id": null
126
+ }
127
+ ],
128
+ "ref_spans": [],
129
+ "eq_spans": [],
130
+ "section": "Topic detection and multi-document summarization",
131
+ "sec_num": "1.1"
132
+ },
133
+ {
134
+ "text": "The main contributions of this paper are: the development of a centroid-based multi-document summarizer, the use of cluster-based sentence utility (CBSU) and cross-sentence informational subsumption (CSIS) for evaluation of single and multi-document summaries, two user studies that support our findings, and an evaluation of MEAD.",
135
+ "cite_spans": [
136
+ {
137
+ "start": 147,
138
+ "end": 153,
139
+ "text": "(CBSU)",
140
+ "ref_id": null
141
+ }
142
+ ],
143
+ "ref_spans": [],
144
+ "eq_spans": [],
145
+ "section": "Topic detection and multi-document summarization",
146
+ "sec_num": "1.1"
147
+ },
148
+ {
149
+ "text": "An event cluster, produced by a TDT system, consists of chronologically ordered news articles from multiple sources, which describe an event as it develops over time. Event clusters range from 2 to 10 documents from which MEAD produces summaries in the form of sentence extracts.",
150
+ "cite_spans": [],
151
+ "ref_spans": [],
152
+ "eq_spans": [],
153
+ "section": "Topic detection and multi-document summarization",
154
+ "sec_num": "1.1"
155
+ },
156
+ {
157
+ "text": "A key feature of MEAD is its use of cluster centroids, which consist of words which are central not only to one article in a cluster, but to all the articles.",
158
+ "cite_spans": [],
159
+ "ref_spans": [],
160
+ "eq_spans": [],
161
+ "section": "Topic detection and multi-document summarization",
162
+ "sec_num": "1.1"
163
+ },
164
+ {
165
+ "text": "MEAD is significantly different from previous work on multi-document summarization [Radev & McKeown, 1998; Carbonell and Goldstein, 1998; Mani and Bloedorn, 1999; , which use techniques such as graph matching, maximal marginal relevance, or language generation.",
166
+ "cite_spans": [
167
+ {
168
+ "start": 83,
169
+ "end": 106,
170
+ "text": "[Radev & McKeown, 1998;",
171
+ "ref_id": "BIBREF10"
172
+ },
173
+ {
174
+ "start": 107,
175
+ "end": 137,
176
+ "text": "Carbonell and Goldstein, 1998;",
177
+ "ref_id": "BIBREF1"
178
+ },
179
+ {
180
+ "start": 138,
181
+ "end": 162,
182
+ "text": "Mani and Bloedorn, 1999;",
183
+ "ref_id": "BIBREF5"
184
+ }
185
+ ],
186
+ "ref_spans": [],
187
+ "eq_spans": [],
188
+ "section": "Topic detection and multi-document summarization",
189
+ "sec_num": "1.1"
190
+ },
191
+ {
192
+ "text": "Finally, evaluation of multi-document summaries is a difficult problem. There is not yet a widely accepted evaluation scheme. We propose a utility-based evaluation scheme, which can be used to evaluate both single-document and multi-document summaries.",
193
+ "cite_spans": [],
194
+ "ref_spans": [],
195
+ "eq_spans": [],
196
+ "section": "Topic detection and multi-document summarization",
197
+ "sec_num": "1.1"
198
+ },
199
+ {
200
+ "text": "Cluster-based sentence utility (CBSU, or utility) refers to the degree of relevance (from 0 to 10) of a particular sentence to the general topic of the entire cluster (for a discussion of what is a topic, see [Allan et al. 1998 ]). A utility of 0 means that the sentence is not relevant to the cluster and a 10 marks an essential sentence.",
201
+ "cite_spans": [
202
+ {
203
+ "start": 31,
204
+ "end": 49,
205
+ "text": "(CBSU, or utility)",
206
+ "ref_id": null
207
+ },
208
+ {
209
+ "start": 209,
210
+ "end": 227,
211
+ "text": "[Allan et al. 1998",
212
+ "ref_id": "BIBREF0"
213
+ }
214
+ ],
215
+ "ref_spans": [],
216
+ "eq_spans": [],
217
+ "section": "Informational content of sentences 2.1 Cluster-based sentence utility (CBSU)",
218
+ "sec_num": "2"
219
+ },
220
+ {
221
+ "text": "A related notion to CBSU is cross-sentence informational subsumption (CSIS, or subsumption), which reflects that certain sentences repeat some of the information present in other sentences and may, therefore, be omitted during summarization. If the information content of sentence a (denoted as i(a)) is contained within sentence b, then a becomes informationally redundant and the content of b is said to subsume that of a:",
222
+ "cite_spans": [],
223
+ "ref_spans": [],
224
+ "eq_spans": [],
225
+ "section": "Cross-sentence informational subsumption (CSIS)",
226
+ "sec_num": "2.2"
227
+ },
228
+ {
229
+ "text": "i(a) \u2282 i(b)",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "Cross-sentence informational subsumption (CSIS)",
234
+ "sec_num": "2.2"
235
+ },
236
+ {
237
+ "text": "In the example below, (2) subsumes (1) because the crucial information in (1) is also included in (2) which presents additional content: \"the court\", \"last August\", and \"sentenced him to life\".",
238
+ "cite_spans": [],
239
+ "ref_spans": [],
240
+ "eq_spans": [],
241
+ "section": "Cross-sentence informational subsumption (CSIS)",
242
+ "sec_num": "2.2"
243
+ },
244
+ {
245
+ "text": "(1) John Doe was found guilty of the murder.",
246
+ "cite_spans": [],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "Cross-sentence informational subsumption (CSIS)",
250
+ "sec_num": "2.2"
251
+ },
252
+ {
253
+ "text": "(2) The court found John Doe guilty of the murder of Jane Doe last August and sentenced him to life.",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "Cross-sentence informational subsumption (CSIS)",
258
+ "sec_num": "2.2"
259
+ },
260
+ {
261
+ "text": "The cluster shown in Figure 1 shows subsumption links across two articles 1 about recent terrorist activities in Algeria (ALG 18853 and ALG 18854).",
262
+ "cite_spans": [],
263
+ "ref_spans": [
264
+ {
265
+ "start": 21,
266
+ "end": 29,
267
+ "text": "Figure 1",
268
+ "ref_id": "FIGREF0"
269
+ }
270
+ ],
271
+ "eq_spans": [],
272
+ "section": "Cross-sentence informational subsumption (CSIS)",
273
+ "sec_num": "2.2"
274
+ },
275
+ {
276
+ "text": "An arrow from sentence A to sentence B indicates that the information content of A is subsumed by the information content of B. Sentences 2, 4, and 5 from the first article repeat the information from sentence 1 The full text of these articles is shown in the Appendix.",
277
+ "cite_spans": [
278
+ {
279
+ "start": 210,
280
+ "end": 211,
281
+ "text": "1",
282
+ "ref_id": null
283
+ }
284
+ ],
285
+ "ref_spans": [],
286
+ "eq_spans": [],
287
+ "section": "Cross-sentence informational subsumption (CSIS)",
288
+ "sec_num": "2.2"
289
+ },
290
+ {
291
+ "text": "2 in the second article, while sentence 9 from the former article is later repeated in sentences 3 and 4 of the latter article. ",
292
+ "cite_spans": [],
293
+ "ref_spans": [],
294
+ "eq_spans": [],
295
+ "section": "Cross-sentence informational subsumption (CSIS)",
296
+ "sec_num": "2.2"
297
+ },
298
+ {
299
+ "text": "Sentences subsuming each other are said to belong to the same equivalence class. An equivalence class may contain more than two sentences within the same or different articles. In the following example, although sentences 3and 4are not exact paraphrases of each other, they can be substituted for each other without crucial loss of information and therefore belong to the same equivalence class, i.e. i(3) \u2282 i(4) and i(4) \u2282 i(3). In the user study section we will take a look at the way humans perceive CSIS and equivalence class.",
300
+ "cite_spans": [],
301
+ "ref_spans": [],
302
+ "eq_spans": [],
303
+ "section": "Equivalence classes of sentences",
304
+ "sec_num": "2.3"
305
+ },
306
+ {
307
+ "text": "(3) Eighteen decapitated bodies have been found in a mass grave in northern Algeria, press reports said Thursday. (4) Algerian newspapers have reported on Thursday that 18 decapitated bodies have been found by the authorities.",
308
+ "cite_spans": [],
309
+ "ref_spans": [],
310
+ "eq_spans": [],
311
+ "section": "Equivalence classes of sentences",
312
+ "sec_num": "2.3"
313
+ },
314
+ {
315
+ "text": "Maximal marginal relevance (or MMR) is a technique similar to CSIS and was introduced in [Carbonell and Goldstein, 1998 ]. In that paper, MMR is used to produce summaries of single documents that avoid redundancy. The authors mention that their preliminary results indicate that multiple documents on the same topic also contain redundancy but they fall short of using MMR for multi-document summarization. Their metric is used as an enhancement to a query-based summary whereas CSIS is designed for query-independent (a.k.a., generic) summaries. 3 MEAD: a centroid-based multidocument summarizer",
316
+ "cite_spans": [
317
+ {
318
+ "start": 89,
319
+ "end": 119,
320
+ "text": "[Carbonell and Goldstein, 1998",
321
+ "ref_id": "BIBREF1"
322
+ }
323
+ ],
324
+ "ref_spans": [],
325
+ "eq_spans": [],
326
+ "section": "Comparison with MMR",
327
+ "sec_num": "2.4"
328
+ },
329
+ {
330
+ "text": "We now describe the corpus used for the evaluation of MEAD, and later in this section we present MEAD's algorithm. ",
331
+ "cite_spans": [],
332
+ "ref_spans": [],
333
+ "eq_spans": [],
334
+ "section": "Comparison with MMR",
335
+ "sec_num": "2.4"
336
+ },
337
+ {
338
+ "text": "For our experiments, we prepared a small corpus consisting of a total of 558 sentences in 27 documents, organized in 6 clusters (Table 1) , all extracted by CIDR. Four of the clusters are from Usenet newsgroups. The remaining two clusters are from the official TDT corpus 2 . Among the factors for our selection of clusters are: coverage of as many news sources as possible, coverage of both TDT and non-TDT data, coverage of different types of news (e.g., terrorism, internal affairs, and environment), and diversity in cluster sizes (in our case, from 2 to 10 articles). The test corpus is used in the evaluation in such a way that each cluster is summarized at 9 different compression rates, thus giving nine times as many sample points as one would expect from the size of the corpus. Table 2 shows a sample centroid, produced by CIDR [Radev et al., 1999 ] from cluster A. The \"count\" column indicates the average number of occurrences of a word across the entire cluster. The IDF values were computed from the TDT corpus. A centroid, in this context, is a pseudo-document which consists of words which have Count*IDF scores above a predefined threshold in the documents that constitute the cluster. CIDR computes Count*IDF in an iterative fashion, updating its values as more articles are inserted in a given cluster. We hypothesize that sentences that contain the words from the centroid are more indicative of the topic of the cluster. ",
339
+ "cite_spans": [
340
+ {
341
+ "start": 839,
342
+ "end": 858,
343
+ "text": "[Radev et al., 1999",
344
+ "ref_id": "BIBREF11"
345
+ }
346
+ ],
347
+ "ref_spans": [
348
+ {
349
+ "start": 128,
350
+ "end": 137,
351
+ "text": "(Table 1)",
352
+ "ref_id": "TABREF1"
353
+ },
354
+ {
355
+ "start": 789,
356
+ "end": 796,
357
+ "text": "Table 2",
358
+ "ref_id": "TABREF3"
359
+ }
360
+ ],
361
+ "eq_spans": [],
362
+ "section": "Description of the corpus",
363
+ "sec_num": "3.1"
364
+ },
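The centroid construction described above (words whose average count across the cluster, weighted by IDF, clears a threshold) can be sketched in a few lines. This is a minimal illustration, not the CIDR implementation; the tokenizer, the default IDF of 1.0 for unseen words, and the threshold value are assumptions.

from collections import Counter

def centroid(cluster_docs, idf, threshold=2.0):
    # Average occurrence count of each word across the cluster, weighted by IDF;
    # words whose Count*IDF score clears the threshold form the centroid pseudo-document.
    counts = Counter()
    for doc in cluster_docs:
        counts.update(doc.lower().split())
    n_docs = len(cluster_docs)
    scores = {w: (c / n_docs) * idf.get(w, 1.0) for w, c in counts.items()}
    return {w: round(s, 2) for w, s in scores.items() if s >= threshold}

docs = ["the army took control of the capital",
        "the army imposed military control over pakistan"]
idf = {"army": 4.0, "control": 3.5, "capital": 3.0, "military": 3.2, "pakistan": 4.5}
print(centroid(docs, idf))  # e.g. {'army': 4.0, 'control': 3.5, 'pakistan': 2.25}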
365
+ {
366
+ "text": "MEAD decides which sentences to include in the extract by ranking them according to a set of parameters. The input to MEAD is a cluster of articles (e.g., extracted by CIDR) and a value for the compression rate r. For example, if the cluster contains a total of 50 sentences (n = 50) and the value of r is 20%, the output of MEAD will contain 10 sentences. Sentences are laid in the same order as they appear in the original documents with documents ordered chronologically. We benefit here from the time stamps associated with each document.",
367
+ "cite_spans": [],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "Centroid-based algorithm",
371
+ "sec_num": "3.3"
372
+ },
373
+ {
374
+ "text": "SCORE (s) = \u03a3 i (w c C i + w p P i + w f F i ) where i (1 \u2264 i \u2264 n)",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "Centroid-based algorithm",
379
+ "sec_num": "3.3"
380
+ },
381
+ {
382
+ "text": "is the sentence number within the cluster.",
383
+ "cite_spans": [],
384
+ "ref_spans": [],
385
+ "eq_spans": [],
386
+ "section": "Centroid-based algorithm",
387
+ "sec_num": "3.3"
388
+ },
389
+ {
390
+ "text": "INPUT: Cluster of d documents 3 with n sentences (compression rate = r)",
391
+ "cite_spans": [],
392
+ "ref_spans": [],
393
+ "eq_spans": [],
394
+ "section": "Centroid-based algorithm",
395
+ "sec_num": "3.3"
396
+ },
397
+ {
398
+ "text": "OUTPUT: (n * r) sentences from the cluster with the highest values of SCORE.",
399
+ "cite_spans": [],
400
+ "ref_spans": [],
401
+ "eq_spans": [],
402
+ "section": "Centroid-based algorithm",
403
+ "sec_num": "3.3"
404
+ },
405
+ {
406
+ "text": "The current paper evaluates two algorithms (pure centroid: w c = 1, w p = 0, w f = 0) and (lead+centroid: w c = 1, w p = 1, w f = 0).",
407
+ "cite_spans": [],
408
+ "ref_spans": [],
409
+ "eq_spans": [],
410
+ "section": "Centroid-based algorithm",
411
+ "sec_num": "3.3"
412
+ },
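The scoring and selection loop implied by the SCORE formula and the two weight settings above can be sketched as follows. The centroid feature C_i is taken here as overlap with the centroid words and the positional feature P_i as 1/(position+1); both are simplified placeholders rather than MEAD's exact feature definitions, and F_i (first-sentence overlap) is left unused.

def mead_extract(sentences, centroid_words, r=0.2, w_c=1.0, w_p=1.0, w_f=0.0):
    # sentences: list of (position_in_document, text), documents ordered chronologically.
    n = len(sentences)
    k = max(1, int(n * r))
    scored = []
    for i, (pos, text) in enumerate(sentences):
        c_i = len(set(text.lower().split()) & centroid_words)  # centroid overlap (placeholder)
        p_i = 1.0 / (pos + 1)                                   # positional (lead) value (placeholder)
        f_i = 0.0                                               # first-sentence overlap, unused here
        scored.append((w_c * c_i + w_p * p_i + w_f * f_i, i))
    chosen = sorted(i for _, i in sorted(scored, reverse=True)[:k])  # re-emit in original order
    return [sentences[i][1] for i in chosen]

sents = [(0, "pakistani army seizes power in islamabad"),
         (1, "the weather was mild on tuesday"),
         (0, "general musharraf returns to pakistan"),
         (1, "the army imposed military control over pakistan")]
print(mead_extract(sents, {"army", "pakistan", "military", "musharraf"}, r=0.5))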
413
+ {
414
+ "text": "We try to approximate CSIS by identifying sentence similarity across sentences. Its effect on MEAD is the subtraction of a redundancy penalty (R s ) for each sentence which overlaps with sentences that have higher SCORE values. The redundancy penalty is similar to the negative factor in the MMR formula [Carbonell and Goldstein, 1998 ].",
415
+ "cite_spans": [
416
+ {
417
+ "start": 304,
418
+ "end": 334,
419
+ "text": "[Carbonell and Goldstein, 1998",
420
+ "ref_id": "BIBREF1"
421
+ }
422
+ ],
423
+ "ref_spans": [],
424
+ "eq_spans": [],
425
+ "section": "Redundancy-based algorithm",
426
+ "sec_num": "3.4"
427
+ },
428
+ {
429
+ "text": "SCORE (s) = \u03a3 i (w c C i + w p P i + w f F i ) -w R R s",
430
+ "cite_spans": [],
431
+ "ref_spans": [],
432
+ "eq_spans": [],
433
+ "section": "Redundancy-based algorithm",
434
+ "sec_num": "3.4"
435
+ },
436
+ {
437
+ "text": "For each pair of sentences extracted by MEAD, we compute the cross-sentence word overlap according to the following formula:",
438
+ "cite_spans": [],
439
+ "ref_spans": [],
440
+ "eq_spans": [],
441
+ "section": "Redundancy-based algorithm",
442
+ "sec_num": "3.4"
443
+ },
444
+ {
445
+ "text": "R s = 2 * (# overlapping words) / (# words in sentence 1 + # words in sentence 2) w R = Max s (SCORE(s))",
446
+ "cite_spans": [],
447
+ "ref_spans": [],
448
+ "eq_spans": [],
449
+ "section": "Redundancy-based algorithm",
450
+ "sec_num": "3.4"
451
+ },
452
+ {
453
+ "text": "R s = 1 when the sentences are identical and R s = 0 when they have no words in common. After deducting R s , we rerank all sentences and possibly create a new sentence extract. We repeat this process until reranking doesn't result in a different extract.",
454
+ "cite_spans": [],
455
+ "ref_spans": [],
456
+ "eq_spans": [],
457
+ "section": "Redundancy-based algorithm",
458
+ "sec_num": "3.4"
459
+ },
460
+ {
461
+ "text": "The number of overlapping words in the formula is computed in such a way that if a word appears m times in one sentence and n times in another, only min (m, n) of these occurrences will be considered overlapping.",
462
+ "cite_spans": [],
463
+ "ref_spans": [],
464
+ "eq_spans": [],
465
+ "section": "Redundancy-based algorithm",
466
+ "sec_num": "3.4"
467
+ },
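A sketch of the redundancy penalty just described, including the min(m, n) treatment of repeated words; the sentence pair below is illustrative only. In MEAD this penalty, scaled by w_R, is subtracted from the score of the lower-ranked sentence and the extract is reranked until it stops changing.

from collections import Counter

def redundancy_penalty(s1, s2):
    # R_s = 2 * (# overlapping words) / (# words in s1 + # words in s2),
    # where a word occurring m times in one sentence and n times in the other
    # contributes min(m, n) overlapping occurrences.
    c1, c2 = Counter(s1.lower().split()), Counter(s2.lower().split())
    overlap = sum(min(c1[w], c2[w]) for w in c1 if w in c2)
    return 2.0 * overlap / (sum(c1.values()) + sum(c2.values()))

print(redundancy_penalty("the army took control", "the army took control"))  # 1.0 (identical)
print(redundancy_penalty("the army took control", "rain fell on tuesday"))   # 0.0 (no overlap)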
468
+ {
469
+ "text": "Summarization evaluation methods can be divided into 2 categories: intrinsic and extrinsic [Mani and Maybury, 1999] . Intrinsic evaluation measures the quality of summaries directly (e.g., by comparing them to ideal summaries). Extrinsic methods measure how well the summaries help in performing a particular task (e.g., classification). The extrinsic evaluation, also called task-based evaluation, has received more attention recently at the DARPA Summarization Evaluation Conference [Mani et al., 1998 ].",
470
+ "cite_spans": [
471
+ {
472
+ "start": 91,
473
+ "end": 115,
474
+ "text": "[Mani and Maybury, 1999]",
475
+ "ref_id": "BIBREF8"
476
+ },
477
+ {
478
+ "start": 485,
479
+ "end": 503,
480
+ "text": "[Mani et al., 1998",
481
+ "ref_id": null
482
+ }
483
+ ],
484
+ "ref_spans": [],
485
+ "eq_spans": [],
486
+ "section": "Techniques for evaluating summaries",
487
+ "sec_num": "4"
488
+ },
489
+ {
490
+ "text": "Two techniques commonly used to measure interjudge agreement and to evaluate extracts are (A), precision and recall, and (B), percent agreement. In both cases, an automatically generated summary is compared against an \"ideal\" summary. To construct the ideal summary, a group of human subjects are asked to extract sentences. Then, the sentences chosen by a majority of humans are included in the ideal summary. The precision and recall indicate the overlap between the ideal summary and the automatic summary.",
491
+ "cite_spans": [],
492
+ "ref_spans": [],
493
+ "eq_spans": [],
494
+ "section": "Single-document summaries",
495
+ "sec_num": "4.1"
496
+ },
497
+ {
498
+ "text": "We should note that [Jing et al., 1998 ] pointed out that the cut-off summary length can affect results significantly, and the assumption of a single \"ideal'' summary is problematic.",
499
+ "cite_spans": [
500
+ {
501
+ "start": 20,
502
+ "end": 38,
503
+ "text": "[Jing et al., 1998",
504
+ "ref_id": "BIBREF4"
505
+ }
506
+ ],
507
+ "ref_spans": [],
508
+ "eq_spans": [],
509
+ "section": "Single-document summaries",
510
+ "sec_num": "4.1"
511
+ },
512
+ {
513
+ "text": "We will illustrate why these two methods are not satisfactory. Suppose we want to determine which of two systems which selected summary sentences at a compression rate of 20% (Table 3) is better. Using precision and recall indicates that the performance of System 1 and System 2 is 50% and 0%, respectively. System 2 appears to perform in the worst possible way since it is not possible to differentiate between sentences S3 -S10, which are equally bad in this model. Using percent agreement, the performance is 80% and 60%, respectively, however percent agreement is highly dependent on the compression rate.",
514
+ "cite_spans": [],
515
+ "ref_spans": [
516
+ {
517
+ "start": 175,
518
+ "end": 184,
519
+ "text": "(Table 3)",
520
+ "ref_id": "TABREF4"
521
+ }
522
+ ],
523
+ "eq_spans": [],
524
+ "section": "Single-document summaries",
525
+ "sec_num": "4.1"
526
+ },
527
+ {
528
+ "text": "Ideal System1 System2 S1 + + - S2 + - - S3 - + + S4 - - + S5 - - - S6 - - - S7 - - - S8 - - - S9 - - - S10 - - -",
529
+ "cite_spans": [],
530
+ "ref_spans": [],
531
+ "eq_spans": [],
532
+ "section": "Single-document summaries",
533
+ "sec_num": "4.1"
534
+ },
535
+ {
536
+ "text": "Instead of P&R or percent agreement, one can measure the coverage of the ideal summary's utility. In the example in We therefore propose to model both interjudge agreement and system evaluation as real-valued vector matching and not as boolean (methods A and B). By giving credit for \"less than ideal'' sentences and distinguishing the degree of importance between sentences, the utility-based scheme is a more natural model to evaluate summaries.",
537
+ "cite_spans": [],
538
+ "ref_spans": [],
539
+ "eq_spans": [],
540
+ "section": "Utility-based evaluation of both single and multiple document summaries.",
541
+ "sec_num": "4.2"
542
+ },
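The utility-coverage idea can be made concrete with a small helper: a summary's score is the utility a judge assigned to its sentences, divided by the best utility attainable at the same length. The ten per-sentence utility values below are hypothetical, chosen only to mirror the shape of the Table 3 example.

def utility_coverage(utilities, selected):
    # Fraction of the maximum achievable utility (at this summary length)
    # captured by the selected sentence indices.
    k = len(selected)
    best = sum(sorted(utilities, reverse=True)[:k])
    return sum(utilities[i] for i in selected) / best

utilities = [10, 9, 1, 5, 2, 2, 1, 1, 1, 1]   # hypothetical utilities for S1..S10
print(utility_coverage(utilities, [0, 1]))    # a system selecting S1 and S2 -> 1.0
print(utility_coverage(utilities, [2, 3]))    # a system selecting S3 and S4 -> 6/19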
543
+ {
544
+ "text": "Other researchers have also suggested improvements on the precision and recall measure for summarization. [Jing et al., 1998 ] proposed to use fractional P&R. [Goldstein et al., 1999] used 11point average precision.",
545
+ "cite_spans": [
546
+ {
547
+ "start": 106,
548
+ "end": 124,
549
+ "text": "[Jing et al., 1998",
550
+ "ref_id": "BIBREF4"
551
+ },
552
+ {
553
+ "start": 159,
554
+ "end": 183,
555
+ "text": "[Goldstein et al., 1999]",
556
+ "ref_id": "BIBREF2"
557
+ }
558
+ ],
559
+ "ref_spans": [],
560
+ "eq_spans": [],
561
+ "section": "Utility-based evaluation of both single and multiple document summaries.",
562
+ "sec_num": "4.2"
563
+ },
564
+ {
565
+ "text": "Without loss of generality, suppose that three judges are asked to build extracts of a single article 4 . As an example, Table 5 shows the weights of the different sentences (note that no compression rate needs to be specified; from the data in the table, one can generate summaries at arbitrary compression rates).",
566
+ "cite_spans": [],
567
+ "ref_spans": [
568
+ {
569
+ "start": 121,
570
+ "end": 128,
571
+ "text": "Table 5",
572
+ "ref_id": "TABREF7"
573
+ }
574
+ ],
575
+ "eq_spans": [],
576
+ "section": "Interjudge agreement (J)",
577
+ "sec_num": "4.2.1"
578
+ },
579
+ {
580
+ "text": "Judge2 Judge3 Sentence 1 10 10 5 Sentence 2 8 9 8 Sentence 3 2 3 4 Sentence 4 5 6 9 In the example, with a 50% summary, Judge 1 would pick sentences 1 and 2 because they have the maximum utility as far as he is concerned. Judge 2 would select the same two sentences, while Judge 3 would pick 2 and 4 5 . The maximum utilities for each judge are as follows: 18 (= 10 + 8), 19, and 17.",
581
+ "cite_spans": [],
582
+ "ref_spans": [],
583
+ "eq_spans": [],
584
+ "section": "Judge1",
585
+ "sec_num": null
586
+ },
587
+ {
588
+ "text": "How well Judge 1's utility assignment satisfies Judge 2's utility need? Since they have both selected the same sentences, Judge 1 achieves 19/19 (1.00) of Judge 2's utility. However, Judge 1 only achieves 13/17 (0.765) of Judge 3's utility.",
589
+ "cite_spans": [],
590
+ "ref_spans": [],
591
+ "eq_spans": [],
592
+ "section": "Judge1",
593
+ "sec_num": null
594
+ },
595
+ {
596
+ "text": "We can therefore represent the cross-judge utility agreement J i,j as an asymmetric matrix (e.g., the value of J 1,2 is 0.765 while the value of J 2,1 is 13/18 or 0.722). The values J i,j of the cross-judge utility matrix for r = 50% are shown in Table 6 . Table 6 : Cross-judge utility agreement (J)",
597
+ "cite_spans": [],
598
+ "ref_spans": [
599
+ {
600
+ "start": 247,
601
+ "end": 254,
602
+ "text": "Table 6",
603
+ "ref_id": null
604
+ },
605
+ {
606
+ "start": 257,
607
+ "end": 264,
608
+ "text": "Table 6",
609
+ "ref_id": null
610
+ }
611
+ ],
612
+ "eq_spans": [],
613
+ "section": "Judge1",
614
+ "sec_num": null
615
+ },
616
+ {
617
+ "text": "We can also compute the performance of each judge (J i ) against all other judges by averaging for each Judge i all values in the matrix J i,j where i \u2260 j. These numbers indicate that Judge 3 is the outlier.",
618
+ "cite_spans": [],
619
+ "ref_spans": [],
620
+ "eq_spans": [],
621
+ "section": "Judge1",
622
+ "sec_num": null
623
+ },
624
+ {
625
+ "text": "Finally, the mean cross-judge agreement J is the average of J i for i=1..3. In the example, J = 0.841.",
626
+ "cite_spans": [],
627
+ "ref_spans": [],
628
+ "eq_spans": [],
629
+ "section": "Judge1",
630
+ "sec_num": null
631
+ },
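The cross-judge agreement computation can be reproduced directly from the Table 5 utilities; the snippet below recovers the J value reported above (the small difference is rounding).

def top_k(utilities, k):
    # Indices of the k highest-utility sentences (a judge's own r = 50% extract).
    return sorted(range(len(utilities)), key=lambda i: -utilities[i])[:k]

judges = {1: [10, 8, 2, 5], 2: [10, 9, 3, 6], 3: [5, 8, 4, 9]}  # Table 5
k = 2  # 50% of 4 sentences
values = []
for i, u_i in judges.items():
    for j, u_j in judges.items():
        if i != j:
            sel = top_k(u_i, k)
            # J_{i,j}: share of judge j's maximum utility captured by judge i's extract.
            values.append(sum(u_j[s] for s in sel) / sum(sorted(u_j, reverse=True)[:k]))
print(round(sum(values) / len(values), 3))  # ~0.84; the text reports J = 0.841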
632
+ {
633
+ "text": "J is like an upper bound on the performance of a summarizer (it can achieve a score higher than J only when it can do a better job than the judges).",
634
+ "cite_spans": [],
635
+ "ref_spans": [],
636
+ "eq_spans": [],
637
+ "section": "Judge1",
638
+ "sec_num": null
639
+ },
640
+ {
641
+ "text": "We can also similarly define a lower bound on the summarizer performance.",
642
+ "cite_spans": [],
643
+ "ref_spans": [],
644
+ "eq_spans": [],
645
+ "section": "Random performance (R)",
646
+ "sec_num": "4.2.2"
647
+ },
648
+ {
649
+ "text": "The random performance R is the average of all possible system outputs at a given compression rate, r. For example, with 4 sentences and a r = 50%, the set of all possible system outputs is {12,13,14,23,24, 34} 6 . For each of them, we can compute a system performance. For example, the system that selects sentences 1 and 4 (we label this system as {14}) performs at 15/18 (or 0.833) against Judge 1, at 16/19 against Judge 2 (or 0.842), and at 14/17 against Judge 3 (or 0.824). On average, the performance of {14} is the average of the three numbers, or 0.833.",
650
+ "cite_spans": [
651
+ {
652
+ "start": 190,
653
+ "end": 212,
654
+ "text": "{12,13,14,23,24, 34} 6",
655
+ "ref_id": null
656
+ }
657
+ ],
658
+ "ref_spans": [],
659
+ "eq_spans": [],
660
+ "section": "Random performance (R)",
661
+ "sec_num": "4.2.2"
662
+ },
663
+ {
664
+ "text": "We can compute the performance of all possible systems. The six numbers (in the order {12,13,14,23,24,34}) are 0.922, 0.627, 0.833, 0.631, 0.837, and 0.543. Their average becomes the random performance (R) of all possible systems; in this example, R = 0.732.",
665
+ "cite_spans": [],
666
+ "ref_spans": [],
667
+ "eq_spans": [],
668
+ "section": "Random performance (R)",
669
+ "sec_num": "4.2.2"
670
+ },
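The random baseline R is the same score averaged over every possible extract of the given size; with the Table 5 numbers this reproduces, up to rounding, the six per-extract values and R = 0.732 quoted in the text.

from itertools import combinations

judges = [[10, 8, 2, 5], [10, 9, 3, 6], [5, 8, 4, 9]]  # Table 5 utilities
k = 2  # r = 50% of 4 sentences

def performance(extract):
    # Mean, over judges, of the utility captured by the extract relative to each judge's maximum.
    return sum(sum(u[i] for i in extract) / sum(sorted(u, reverse=True)[:k])
               for u in judges) / len(judges)

extracts = list(combinations(range(4), k))
print([round(performance(e), 3) for e in extracts])                      # 0.922, 0.627, 0.833, ...
print(round(sum(performance(e) for e in extracts) / len(extracts), 3))   # R ~ 0.732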
671
+ {
672
+ "text": "The system performance S is one of the numbers 6 described in the previous subsection. For {13}, the value of S is 0.627 (which is lower than random). For {14}, S is 0.833, which is between R and J. In the example, only two of the six possible sentence selections, {14} and {24} are between R and J. Three others, {13}, {23}, and {34} are below R. while {12} is better than J.",
673
+ "cite_spans": [],
674
+ "ref_spans": [],
675
+ "eq_spans": [],
676
+ "section": "System performance (S)",
677
+ "sec_num": "4.2.3"
678
+ },
679
+ {
680
+ "text": "To restrict system performance (mostly) between 0 and 1, we use a mapping between R and J in such a way that when S = R, the normalized system performance, D, is equal to 0 and when S = J, D becomes 1. The corresponding linear function 7 is: Figure 2 shows the mapping between system performance S on the left (a) and normalized system performance D on the right (b). A small part of the 0-1 segment is mapped to the entire 0-1 segment; therefore the difference between two systems, performing at e.g., 0.785 and 0.812 can be significant! Example: the normalized system performance for the {14} system then becomes (0.833 -0.732) / (0.841 -0.732) or 0.927. Since the score is close to 1, the {14} system is almost as good as the interjudge agreement. The normalized system performance for the {24} system is similarly (0.837 -0.732) / (0.841 -0.732) or 0.963. Of the two systems, {24} outperforms {14}.",
681
+ "cite_spans": [],
682
+ "ref_spans": [
683
+ {
684
+ "start": 242,
685
+ "end": 250,
686
+ "text": "Figure 2",
687
+ "ref_id": null
688
+ }
689
+ ],
690
+ "eq_spans": [],
691
+ "section": "Normalized system performance (D)",
692
+ "sec_num": "4.2.4."
693
+ },
694
+ {
695
+ "text": "D = (S-R) / (J-R)",
696
+ "cite_spans": [],
697
+ "ref_spans": [],
698
+ "eq_spans": [],
699
+ "section": "Normalized system performance (D)",
700
+ "sec_num": "4.2.4."
701
+ },
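The normalization is a one-line mapping; plugging in the worked numbers from the example above recovers the quoted values for systems {14} and {24}.

def normalized_performance(S, R, J):
    # Maps S = R to 0 and S = J to 1; valid when J > R (judges agree better than chance).
    return (S - R) / (J - R)

print(round(normalized_performance(0.833, 0.732, 0.841), 3))  # system {14} -> ~0.927
print(round(normalized_performance(0.837, 0.732, 0.841), 3))  # system {24} -> ~0.963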
702
+ {
703
+ "text": "To use CSIS in the evaluation, we introduce a new parameter, E, which tells us how much to penalize a system that includes redundant information. In the example from Table 7 (arrows indicate subsumption), a summarizer with r = 20% needs to pick 2 out of 12 sentences. Suppose that it picks 1/1 and 2/1 (in bold). If E = 1, it should get full credit of 20 utility points. If E = 0, it should get no credit for the second sentence as it is subsumed by the first sentence. By varying E between 0 and 1, the evaluation may favor or ignore subsumption.",
704
+ "cite_spans": [],
705
+ "ref_spans": [
706
+ {
707
+ "start": 166,
708
+ "end": 173,
709
+ "text": "Table 7",
710
+ "ref_id": "TABREF9"
711
+ }
712
+ ],
713
+ "eq_spans": [],
714
+ "section": "Using CSIS to evaluate multi-document summaries",
715
+ "sec_num": "4.3"
716
+ },
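A minimal sketch of how the E parameter could be applied when scoring a summary against CSIS annotations: a selected sentence that is subsumed by another selected sentence has its utility multiplied by E. The exact bookkeeping (here, a single subsumed_by link per sentence) is an assumption; the two-sentence example mirrors the 1/1 and 2/1 case from Table 7.

def summary_utility(utilities, subsumed_by, selected, E=0.5):
    # Full credit for a sentence unless the summary also contains a sentence that subsumes it,
    # in which case its utility is discounted by the factor E (E = 1: full credit, E = 0: none).
    chosen = set(selected)
    return sum((E if subsumed_by.get(s) in chosen else 1.0) * utilities[s] for s in selected)

utilities = {"1/1": 10, "2/1": 10}     # utilities of the two picked sentences from Table 7
subsumed_by = {"2/1": "1/1"}           # sentence 2/1 is subsumed by sentence 1/1
print(summary_utility(utilities, subsumed_by, ["1/1", "2/1"], E=1.0))  # 20.0 (full credit)
print(summary_utility(utilities, subsumed_by, ["1/1", "2/1"], E=0.0))  # 10.0 (no credit for 2/1)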
717
+ {
718
+ "text": "Article2 Article3 Sent1 10 10 5 S e n t 2 8 9 8 S e n t 3 2 3 4 S e n t 4 5 6 9 5 User studies and system evaluation",
719
+ "cite_spans": [],
720
+ "ref_spans": [],
721
+ "eq_spans": [],
722
+ "section": "Article1",
723
+ "sec_num": null
724
+ },
725
+ {
726
+ "text": "We ran two user experiments. First, six judges were each given six clusters and asked to ascribe an importance score from 0 to 10 to each sentence within a particular cluster. Next, five judges had to indicate for each sentence which other sentence(s), if any, it subsumes 8 .",
727
+ "cite_spans": [],
728
+ "ref_spans": [],
729
+ "eq_spans": [],
730
+ "section": "Article1",
731
+ "sec_num": null
732
+ },
733
+ {
734
+ "text": "Using the techniques described in Section 0, we computed the cross-judge agreement (J) for the 6 clusters for various r (Figure 3) . Overall, interjudge agreement was quite high. An interesting drop in interjudge agreement occurs for 20-30% summaries. The drop most likely results from the fact that 10% summaries are typically easier to produce because the few most important sentences in a cluster are easier to identify. 8 We should note that both annotation tasks were quite time consuming and frustrating for the users who took anywhere from 6 to 10 hours each to complete their part. ",
735
+ "cite_spans": [
736
+ {
737
+ "start": 424,
738
+ "end": 425,
739
+ "text": "8",
740
+ "ref_id": null
741
+ }
742
+ ],
743
+ "ref_spans": [
744
+ {
745
+ "start": 120,
746
+ "end": 130,
747
+ "text": "(Figure 3)",
748
+ "ref_id": "FIGREF3"
749
+ }
750
+ ],
751
+ "eq_spans": [],
752
+ "section": "CBSU: interjudge agreement",
753
+ "sec_num": "5.1"
754
+ },
755
+ {
756
+ "text": "In the second experiment, we asked users to indicate all cases when within a cluster, a sentence is subsumed by another. The judges' data on the first seven sentences of cluster A are shown in Table 8 .",
757
+ "cite_spans": [],
758
+ "ref_spans": [
759
+ {
760
+ "start": 193,
761
+ "end": 200,
762
+ "text": "Table 8",
763
+ "ref_id": "TABREF10"
764
+ }
765
+ ],
766
+ "eq_spans": [],
767
+ "section": "CSIS: interjudge agreement",
768
+ "sec_num": "5.2"
769
+ },
770
+ {
771
+ "text": "The \"+ score\" indicates the number of judges who agree on the most frequent subsumption. The \"score\" indicates that the consensus was no subsumption. We found relatively low interjudge agreement on the cases in which at least one judge indicated evidence of subsumption. Overall, out of 558 sentences, there was full agreement (5 judges) on 292 sentences (Table 9) . Unfortunately, in 291 of these 292 sentences the agreement was that there is no subsumption. When the bar of agreement was lowered to four judges, 23 out of 406 agreements are on sentences with subsumption. Overall, out of 80 sentences with subsumption, only 24 had an agreement of four or more judges. However, in 54 cases at least three judges agreed on the presence of a particular instance of subsumption.",
772
+ "cite_spans": [],
773
+ "ref_spans": [
774
+ {
775
+ "start": 355,
776
+ "end": 364,
777
+ "text": "(Table 9)",
778
+ "ref_id": "TABREF11"
779
+ }
780
+ ],
781
+ "eq_spans": [],
782
+ "section": "CSIS: interjudge agreement",
783
+ "sec_num": "5.2"
784
+ },
785
+ {
786
+ "text": "Sentence Judge1 Judge2 Judge3 Judge4 Judge5 + score -score A1-1 - A2-1 A2-1 - A2-1 3 A1-2 A2-5 A2-5 - - A2-5 3 A1-3 - - - - A2-10 4 A1-4",
787
+ "cite_spans": [],
788
+ "ref_spans": [],
789
+ "eq_spans": [],
790
+ "section": "CSIS: interjudge agreement",
791
+ "sec_num": "5.2"
792
+ },
793
+ {
794
+ "text": "A2-10 A2-10 A2-10 -A2-10 4 A1-5 -A2-1 -A2-2 A2-4 2 A1-6 ----A2-7 4 A1-7 ----A2-8 4 In conclusion, we found very high interjudge agreement in the first experiment and moderately low agreement in the second experiment. We concede that the time necessary to do a proper job at the second task is partly to blame.",
795
+ "cite_spans": [],
796
+ "ref_spans": [],
797
+ "eq_spans": [],
798
+ "section": "CSIS: interjudge agreement",
799
+ "sec_num": "5.2"
800
+ },
801
+ {
802
+ "text": "Since the baseline of random sentence selection is already included in the evaluation formulae, we used the Lead-based method (selecting the positionally first (n*r/c) sentences from each cluster where c = number of clusters) as the baseline to evaluate our system.",
803
+ "cite_spans": [],
804
+ "ref_spans": [],
805
+ "eq_spans": [],
806
+ "section": "Evaluation of MEAD",
807
+ "sec_num": "5.3"
808
+ },
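A sketch of the Lead baseline as read here: the positionally first sentences of each document in the cluster, roughly n*r/c of them per document. The text says "c = number of clusters"; it is read here as the number of documents in the cluster, and the rounding rule is also an interpretive assumption.

def lead_baseline(cluster, r=0.2):
    # cluster: list of documents (chronological order), each a list of sentences.
    n = sum(len(doc) for doc in cluster)   # total sentences in the cluster
    c = len(cluster)                       # read as number of documents (assumption)
    per_doc = max(1, round(n * r / c))
    summary = []
    for doc in cluster:
        summary.extend(doc[:per_doc])
    return summary

cluster = [["d1 s1", "d1 s2", "d1 s3"], ["d2 s1", "d2 s2"]]
print(lead_baseline(cluster, r=0.4))  # ['d1 s1', 'd2 s1']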
809
+ {
810
+ "text": "In We then modified the MEAD algorithm to include lead information as well as centroids (see Section 0). In this case, MEAD+Lead performed better than the Lead baseline in 41 cases. We are in the process of running experiments with other SCORE formulas.",
811
+ "cite_spans": [],
812
+ "ref_spans": [],
813
+ "eq_spans": [],
814
+ "section": "Evaluation of MEAD",
815
+ "sec_num": "5.3"
816
+ },
817
+ {
818
+ "text": "It may seem that utility-based evaluation requires too much effort and is prone to low interjudge agreement. We believe that our results show that interjudge agreement is quite high. As far as the amount of effort required, we believe that the larger effort on the part of the judges is more or less compensated with the ability to evaluate summaries off-line and at variable compression rates. Alternative evaluations don't make such evaluations possible. We should concede that a utility-based approach is probably not feasible for query-based summaries as these are typically done only on-line.",
819
+ "cite_spans": [],
820
+ "ref_spans": [],
821
+ "eq_spans": [],
822
+ "section": "Discussion",
823
+ "sec_num": "5.4"
824
+ },
825
+ {
826
+ "text": "We discussed the possibility of a sentence contributing negatively to the utility of another sentence due to redundancy. We should also point out that sentences can also reinforce one another positively. For example, if a sentence mentioning a new entity is included in a summary, one might also want to include a sentence that puts the entity in the context of the rest of the article or cluster.",
827
+ "cite_spans": [],
828
+ "ref_spans": [],
829
+ "eq_spans": [],
830
+ "section": "Discussion",
831
+ "sec_num": "5.4"
832
+ },
833
+ {
834
+ "text": "We presented a new multi-document summarizer, MEAD. It summarizes clusters of news articles automatically grouped by a topic detection system. MEAD uses information from the centroids of the clusters to select sentences that are most likely to be relevant to the cluster topic.",
835
+ "cite_spans": [],
836
+ "ref_spans": [],
837
+ "eq_spans": [],
838
+ "section": "Contributions and future work",
839
+ "sec_num": "6"
840
+ },
841
+ {
842
+ "text": "We used a new utility-based technique, CBSU, for the evaluation of MEAD and of summarizers in general. We found that MEAD produces summaries that are similar in quality to the ones produced by humans. We also compared MEAD's performance to an alternative method, multi-document lead, and showed how MEAD's sentence scoring weights can be modified to produce summaries significantly better than the alternatives.",
843
+ "cite_spans": [],
844
+ "ref_spans": [],
845
+ "eq_spans": [],
846
+ "section": "Contributions and future work",
847
+ "sec_num": "6"
848
+ },
849
+ {
850
+ "text": "We also looked at a property of multi-document clusters, namely cross-sentence information subsumption (which is related to the MMR metric proposed in [Carbonell and Goldstein, 1998] ) and showed how it can be used in evaluating multidocument summaries.",
851
+ "cite_spans": [
852
+ {
853
+ "start": 151,
854
+ "end": 182,
855
+ "text": "[Carbonell and Goldstein, 1998]",
856
+ "ref_id": "BIBREF1"
857
+ }
858
+ ],
859
+ "ref_spans": [],
860
+ "eq_spans": [],
861
+ "section": "Contributions and future work",
862
+ "sec_num": "6"
863
+ },
864
+ {
865
+ "text": "All our findings are backed by the analysis of two experiments that we performed with human subjects. We found that the interjudge agreement on sentence utility is very high while the agreement on crosssentence subsumption is moderately low, although promising.",
866
+ "cite_spans": [],
867
+ "ref_spans": [],
868
+ "eq_spans": [],
869
+ "section": "Contributions and future work",
870
+ "sec_num": "6"
871
+ },
872
+ {
873
+ "text": "In the future, we would like to test our multidocument summarizer on a larger corpus and improve the summarization algorithm. We would also like to explore how the techniques we proposed here can be used for multiligual multidocument summarization.",
874
+ "cite_spans": [],
875
+ "ref_spans": [],
876
+ "eq_spans": [],
877
+ "section": "Contributions and future work",
878
+ "sec_num": "6"
879
+ },
880
+ {
881
+ "text": "The selection of Cluster E is due to an idea by the participants in the Novelty Detection Workshop, led by James Allan.",
882
+ "cite_spans": [],
883
+ "ref_spans": [],
884
+ "eq_spans": [],
885
+ "section": "",
886
+ "sec_num": null
887
+ },
888
+ {
889
+ "text": "Note that currently, MEAD requires that sentence boundaries be marked.",
890
+ "cite_spans": [],
891
+ "ref_spans": [],
892
+ "eq_spans": [],
893
+ "section": "",
894
+ "sec_num": null
895
+ },
896
+ {
897
+ "text": "We concatenate all documents in a cluster in a chronological order.5 In case of ties, we arbitrarily pick the sentence that occurs earlier in the cluster.",
898
+ "cite_spans": [],
899
+ "ref_spans": [],
900
+ "eq_spans": [],
901
+ "section": "",
902
+ "sec_num": null
903
+ },
904
+ {
905
+ "text": "There are a total of (n!) / (n(1-r))! (r*n)! system outputs.",
906
+ "cite_spans": [],
907
+ "ref_spans": [],
908
+ "eq_spans": [],
909
+ "section": "",
910
+ "sec_num": null
911
+ },
912
+ {
913
+ "text": "The formula is valid when J > R (that is, the judges agree among each other better than randomly).",
914
+ "cite_spans": [],
915
+ "ref_spans": [],
916
+ "eq_spans": [],
917
+ "section": "",
918
+ "sec_num": null
919
+ }
920
+ ],
921
+ "back_matter": [
922
+ {
923
+ "text": "We would like to thank Inderjeet Mani, Wlodek Zadrozny, Rie Kubota Ando, Joyce Chai, and Nanda Kambhatla for their valuable feedback. We would also like to thank Carl Sable, Min-Yen Kan, Dave Evans, Adam Budzikowski, and Veronika Horvath for their help with the evaluation.",
924
+ "cite_spans": [],
925
+ "ref_spans": [],
926
+ "eq_spans": [],
927
+ "section": "Acknowledgments",
928
+ "sec_num": "7"
929
+ },
930
+ {
931
+ "text": "1. Eighteen decapitated bodies have been found in a mass grave in northern Algeria, press reports said Thursday, adding that two shepherds were murdered earlier this week.2. Security forces found the mass grave on Wednesday at Chbika, near Djelfa, 275 kilometers (170 miles) south of the capital.3. It contained the bodies of people killed last year during a wedding ceremony, according to Le Quotidien Liberte.4. The victims included women, children and old men.5. Most of them had been decapitated and their heads thrown on a road, reported the Es Sahafa.6. Another mass grave containing the bodies of around 10 people was discovered recently near Algiers, in the Eucalyptus district.7. The two shepherds were killed Monday evening by a group of nine armed Islamists near the Moulay Slissen forest.8. After being injured in a hail of automatic weapons fire, the pair were finished off with machete blows before being decapitated, Le Quotidien d'Oran reported.9. Seven people, six of them children, were killed and two injured Wednesday by armed Islamists near Medea, 120 kilometers (75 miles) south of Algiers, security forces said.10. The same day a parcel bomb explosion injured 17 people in Algiers itself.11. Since early March, violence linked to armed Islamists has claimed more than 500 lives, according to press tallies.1. Algerian newspapers have reported that 18 decapitated bodies have been found by authorities in the south of the country.2. Police found the \"decapitated bodies of women, children and old men,with their heads thrown on a road'' near the town of Jelfa, 275 kilometers (170 miles) south of the capital Algiers.3. In another incident on Wednesday, seven people --including six children --were killed by terrorists, Algerian security forces said.4. Extremist Muslim militants were responsible for the slaughter of the seven people in the province of Medea, 120 kilometers (74 miles) south of Algiers.5. The killers also kidnapped three girls during the same attack, authorities said, and one of the girls was found wounded on a nearby road.6. Meanwhile, the Algerian daily Le Matin today quoted Interior Minister Abdul Malik Silal as saying that \"terrorism has not been eradicated, but the movement of the terrorists has significantly declined.'' 7. Algerian violence has claimed the lives of more than 70,000 people since the army cancelled the 1992 general elections that Islamic parties were likely to win.8. Mainstream Islamic groups, most of which are banned in the country, insist their members are not responsible for the violence against civilians. 9. Some Muslim groups have blamed the army, while others accuse \"foreign elements conspiring against Algeria.''",
932
+ "cite_spans": [],
933
+ "ref_spans": [],
934
+ "eq_spans": [],
935
+ "section": "Appendix",
936
+ "sec_num": null
937
+ }
938
+ ],
939
+ "bib_entries": {
940
+ "BIBREF0": {
941
+ "ref_id": "b0",
942
+ "title": "Topic detection and tracking pilot study: final report",
943
+ "authors": [
944
+ {
945
+ "first": "James",
946
+ "middle": [],
947
+ "last": "Allan",
948
+ "suffix": ""
949
+ },
950
+ {
951
+ "first": "Jaime",
952
+ "middle": [],
953
+ "last": "Carbonell",
954
+ "suffix": ""
955
+ },
956
+ {
957
+ "first": "George",
958
+ "middle": [],
959
+ "last": "Doddington",
960
+ "suffix": ""
961
+ },
962
+ {
963
+ "first": "Jonathan",
964
+ "middle": [],
965
+ "last": "Yamron",
966
+ "suffix": ""
967
+ },
968
+ {
969
+ "first": "Yiming",
970
+ "middle": [],
971
+ "last": "Yang",
972
+ "suffix": ""
973
+ }
974
+ ],
975
+ "year": 1998,
976
+ "venue": "Proceedings of the Broadcast News Understanding and Transcription Workshop",
977
+ "volume": "",
978
+ "issue": "",
979
+ "pages": "",
980
+ "other_ids": {},
981
+ "num": null,
982
+ "urls": [],
983
+ "raw_text": "James Allan, Jaime Carbonell, George Doddington, Jonathan Yamron, and Yiming Yang, Topic detection and tracking pilot study: final report, In Proceedings of the Broadcast News Understanding and Transcription Workshop, 1998.",
984
+ "links": null
985
+ },
986
+ "BIBREF1": {
987
+ "ref_id": "b1",
988
+ "title": "The use of MMR, diversity-based reranking for reordering documents and producing summaries",
989
+ "authors": [
990
+ {
991
+ "first": "Jaime",
992
+ "middle": [],
993
+ "last": "Carbonell",
994
+ "suffix": ""
995
+ },
996
+ {
997
+ "first": "Jade",
998
+ "middle": [],
999
+ "last": "Goldstein",
1000
+ "suffix": ""
1001
+ }
1002
+ ],
1003
+ "year": 1998,
1004
+ "venue": "Proceedings of ACM-SIGIR'98",
1005
+ "volume": "",
1006
+ "issue": "",
1007
+ "pages": "",
1008
+ "other_ids": {},
1009
+ "num": null,
1010
+ "urls": [],
1011
+ "raw_text": "Jaime Carbonell and Jade Goldstein. The use of MMR, diversity-based reranking for reordering documents and producing summaries. In Proceedings of ACM-SIGIR'98, Melbourne, Australia, August 1998.",
1012
+ "links": null
1013
+ },
1014
+ "BIBREF2": {
1015
+ "ref_id": "b2",
1016
+ "title": "Summarizing Text Documents: Sentence Selection and Evaluation Metrics",
1017
+ "authors": [
1018
+ {
1019
+ "first": "Jade",
1020
+ "middle": [],
1021
+ "last": "Goldstein",
1022
+ "suffix": ""
1023
+ },
1024
+ {
1025
+ "first": "Mark",
1026
+ "middle": [],
1027
+ "last": "Kantrowitz",
1028
+ "suffix": ""
1029
+ },
1030
+ {
1031
+ "first": "Vibhu",
1032
+ "middle": [],
1033
+ "last": "Mittal",
1034
+ "suffix": ""
1035
+ },
1036
+ {
1037
+ "first": "Jaime",
1038
+ "middle": [],
1039
+ "last": "Carbonell",
1040
+ "suffix": ""
1041
+ }
1042
+ ],
1043
+ "year": 1999,
1044
+ "venue": "Proceedings of ACM-SIGIR'99",
1045
+ "volume": "",
1046
+ "issue": "",
1047
+ "pages": "",
1048
+ "other_ids": {},
1049
+ "num": null,
1050
+ "urls": [],
1051
+ "raw_text": "Jade Goldstein, Mark Kantrowitz, Vibhu Mittal, and Jaime Carbonell, Summarizing Text Documents: Sentence Selection and Evaluation Metrics, In Proceedings of ACM-SIGIR'99, Berkeley, CA, August 1999.",
1052
+ "links": null
1053
+ },
1054
+ "BIBREF3": {
1055
+ "ref_id": "b3",
1056
+ "title": "A Proposal for Task-Based Evaluation of Text Summarization Systems",
1057
+ "authors": [
1058
+ {
1059
+ "first": "Th\u00e9r\u00e8se",
1060
+ "middle": [],
1061
+ "last": "Hand",
1062
+ "suffix": ""
1063
+ }
1064
+ ],
1065
+ "year": 1997,
1066
+ "venue": "Proceedings of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization",
1067
+ "volume": "",
1068
+ "issue": "",
1069
+ "pages": "",
1070
+ "other_ids": {},
1071
+ "num": null,
1072
+ "urls": [],
1073
+ "raw_text": "Th\u00e9r\u00e8se Hand. A Proposal for Task-Based Evaluation of Text Summarization Systems, in Mani, I., and Maybury, M., eds., Proceedings of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization, Madrid, Spain, July 1997.",
1074
+ "links": null
1075
+ },
1076
+ "BIBREF4": {
1077
+ "ref_id": "b4",
1078
+ "title": "Summarization Evaluation Methods: Experiments and Analysis",
1079
+ "authors": [
1080
+ {
1081
+ "first": "Hongyan",
1082
+ "middle": [],
1083
+ "last": "Jing",
1084
+ "suffix": ""
1085
+ },
1086
+ {
1087
+ "first": "Regina",
1088
+ "middle": [],
1089
+ "last": "Barzilay",
1090
+ "suffix": ""
1091
+ },
1092
+ {
1093
+ "first": "Kathleen",
1094
+ "middle": [],
1095
+ "last": "Mckeown",
1096
+ "suffix": ""
1097
+ },
1098
+ {
1099
+ "first": "Michael",
1100
+ "middle": [],
1101
+ "last": "Elhadad",
1102
+ "suffix": ""
1103
+ }
1104
+ ],
1105
+ "year": 1998,
1106
+ "venue": "Working Notes, AAAI Spring Symposium on Intelligent Text Summarization",
1107
+ "volume": "",
1108
+ "issue": "",
1109
+ "pages": "",
1110
+ "other_ids": {},
1111
+ "num": null,
1112
+ "urls": [],
1113
+ "raw_text": "Hongyan Jing, Regina Barzilay, Kathleen McKeown, and Michael Elhadad, Summarization Evaluation Methods: Experiments and Analysis, In Working Notes, AAAI Spring Symposium on Intelligent Text Summarization, Stanford, CA, April 1998.",
1114
+ "links": null
1115
+ },
1116
+ "BIBREF5": {
1117
+ "ref_id": "b5",
1118
+ "title": "Summarizing Similarities and Differences Among Related Documents",
1119
+ "authors": [
1120
+ {
1121
+ "first": "Inderjeet",
1122
+ "middle": [],
1123
+ "last": "Mani",
1124
+ "suffix": ""
1125
+ },
1126
+ {
1127
+ "first": "Eric",
1128
+ "middle": [],
1129
+ "last": "Bloedorn",
1130
+ "suffix": ""
1131
+ }
1132
+ ],
1133
+ "year": 1999,
1134
+ "venue": "Information Retrieval",
1135
+ "volume": "1",
1136
+ "issue": "1-2",
1137
+ "pages": "35--67",
1138
+ "other_ids": {},
1139
+ "num": null,
1140
+ "urls": [],
1141
+ "raw_text": "Inderjeet Mani and Eric Bloedorn, Summarizing Similarities and Differences Among Related Documents, Information Retrieval 1 (1-2), pages 35-67, June 1999.",
1142
+ "links": null
1143
+ },
1144
+ "BIBREF6": {
1145
+ "ref_id": "b6",
1146
+ "title": "Th\u00e9r\u00e8se Firmin, Michael Chrzanowski, and Beth Sundheim. The TIPSTER SUMMAC text summarization evaluation",
1147
+ "authors": [
1148
+ {
1149
+ "first": "Inderjeet",
1150
+ "middle": [],
1151
+ "last": "Mani",
1152
+ "suffix": ""
1153
+ },
1154
+ {
1155
+ "first": "David",
1156
+ "middle": [],
1157
+ "last": "House",
1158
+ "suffix": ""
1159
+ },
1160
+ {
1161
+ "first": "Gary",
1162
+ "middle": [],
1163
+ "last": "Klein",
1164
+ "suffix": ""
1165
+ },
1166
+ {
1167
+ "first": "Lynette",
1168
+ "middle": [],
1169
+ "last": "Hirschman",
1170
+ "suffix": ""
1171
+ },
1172
+ {
1173
+ "first": "Leo",
1174
+ "middle": [],
1175
+ "last": "Orbst",
1176
+ "suffix": ""
1177
+ }
1178
+ ],
1179
+ "year": null,
1180
+ "venue": "",
1181
+ "volume": "",
1182
+ "issue": "",
1183
+ "pages": "",
1184
+ "other_ids": {},
1185
+ "num": null,
1186
+ "urls": [],
1187
+ "raw_text": "Inderjeet Mani, David House, Gary Klein, Lynette Hirschman, Leo Orbst, Th\u00e9r\u00e8se Firmin, Michael Chrzanowski, and Beth Sundheim. The TIPSTER SUMMAC text summarization evaluation.",
1188
+ "links": null
1189
+ },
1190
+ "BIBREF8": {
1191
+ "ref_id": "b8",
1192
+ "title": "Advances in Automatic Text Summarization",
1193
+ "authors": [
1194
+ {
1195
+ "first": "Inderjeet",
1196
+ "middle": [],
1197
+ "last": "Mani",
1198
+ "suffix": ""
1199
+ },
1200
+ {
1201
+ "first": "Mark",
1202
+ "middle": [],
1203
+ "last": "Maybury",
1204
+ "suffix": ""
1205
+ }
1206
+ ],
1207
+ "year": 1999,
1208
+ "venue": "",
1209
+ "volume": "",
1210
+ "issue": "",
1211
+ "pages": "",
1212
+ "other_ids": {},
1213
+ "num": null,
1214
+ "urls": [],
1215
+ "raw_text": "Inderjeet Mani and Mark Maybury. Advances in Automatic Text Summarization. MIT Press, 1999.",
1216
+ "links": null
1217
+ },
1218
+ "BIBREF9": {
1219
+ "ref_id": "b9",
1220
+ "title": "Towards Multidocument Summarization by Reformulation: Progress and Prospects",
1221
+ "authors": [
1222
+ {
1223
+ "first": "Kathleen",
1224
+ "middle": [],
1225
+ "last": "Mckeown",
1226
+ "suffix": ""
1227
+ },
1228
+ {
1229
+ "first": "Judith",
1230
+ "middle": [],
1231
+ "last": "Klavans",
1232
+ "suffix": ""
1233
+ },
1234
+ {
1235
+ "first": "Vasileios",
1236
+ "middle": [],
1237
+ "last": "Hatzivassiloglou",
1238
+ "suffix": ""
1239
+ },
1240
+ {
1241
+ "first": "Regina",
1242
+ "middle": [],
1243
+ "last": "Barzilay",
1244
+ "suffix": ""
1245
+ },
1246
+ {
1247
+ "first": "Eleazar",
1248
+ "middle": [],
1249
+ "last": "Eskin",
1250
+ "suffix": ""
1251
+ }
1252
+ ],
1253
+ "year": 1999,
1254
+ "venue": "Proceedings of AAAI'99",
1255
+ "volume": "",
1256
+ "issue": "",
1257
+ "pages": "",
1258
+ "other_ids": {},
1259
+ "num": null,
1260
+ "urls": [],
1261
+ "raw_text": "Kathleen McKeown, Judith Klavans, Vasileios Hatzivassiloglou, Regina Barzilay, and Eleazar Eskin, Towards Multidocument Summarization by Reformulation: Progress and Prospects, In Proceedings of AAAI'99, Orlando, FL, July 1999.",
1262
+ "links": null
1263
+ },
1264
+ "BIBREF10": {
1265
+ "ref_id": "b10",
1266
+ "title": "Generating natural language summaries from multiple on-line sources",
1267
+ "authors": [
1268
+ {
1269
+ "first": "R",
1270
+ "middle": [],
1271
+ "last": "Dragomir",
1272
+ "suffix": ""
1273
+ },
1274
+ {
1275
+ "first": "Kathleen",
1276
+ "middle": [],
1277
+ "last": "Radev",
1278
+ "suffix": ""
1279
+ },
1280
+ {
1281
+ "first": "",
1282
+ "middle": [],
1283
+ "last": "Mckeown",
1284
+ "suffix": ""
1285
+ }
1286
+ ],
1287
+ "year": 1998,
1288
+ "venue": "Computational Linguistics",
1289
+ "volume": "24",
1290
+ "issue": "3",
1291
+ "pages": "469--500",
1292
+ "other_ids": {},
1293
+ "num": null,
1294
+ "urls": [],
1295
+ "raw_text": "Dragomir R. Radev and Kathleen McKeown. Generating natural language summaries from multiple on-line sources. Computational Linguistics, 24 (3), pages 469-500, September 1998.",
1296
+ "links": null
1297
+ },
1298
+ "BIBREF11": {
1299
+ "ref_id": "b11",
1300
+ "title": "A description of the CIDR system as used for TDT-2",
1301
+ "authors": [
1302
+ {
1303
+ "first": "R",
1304
+ "middle": [],
1305
+ "last": "Dragomir",
1306
+ "suffix": ""
1307
+ },
1308
+ {
1309
+ "first": "Vasileios",
1310
+ "middle": [],
1311
+ "last": "Radev",
1312
+ "suffix": ""
1313
+ },
1314
+ {
1315
+ "first": "Kathleen",
1316
+ "middle": [
1317
+ "R"
1318
+ ],
1319
+ "last": "Hatzivassiloglou",
1320
+ "suffix": ""
1321
+ },
1322
+ {
1323
+ "first": "",
1324
+ "middle": [],
1325
+ "last": "Mckeown",
1326
+ "suffix": ""
1327
+ }
1328
+ ],
1329
+ "year": 1999,
1330
+ "venue": "DARPA Broadcast News Workshop",
1331
+ "volume": "",
1332
+ "issue": "",
1333
+ "pages": "",
1334
+ "other_ids": {},
1335
+ "num": null,
1336
+ "urls": [],
1337
+ "raw_text": "Dragomir R. Radev, Vasileios Hatzivassiloglou, and Kathleen R. McKeown. A description of the CIDR system as used for TDT-2. In DARPA Broadcast News Workshop, Herndon, VA, February 1999.",
1338
+ "links": null
1339
+ }
1340
+ },
1341
+ "ref_entries": {
1342
+ "FIGREF0": {
1343
+ "type_str": "figure",
1344
+ "uris": null,
1345
+ "text": "Subsumption links across two articles: ALG 18853 and ALG 18854.",
1346
+ "num": null
1347
+ },
1348
+ "FIGREF2": {
1349
+ "type_str": "figure",
1350
+ "uris": null,
1351
+ "text": "Figure 2: Performance mapping",
1352
+ "num": null
1353
+ },
1354
+ "FIGREF3": {
1355
+ "type_str": "figure",
1356
+ "uris": null,
1357
+ "text": "Cross-judge agreement (J) on the CBSU annotation task.",
1358
+ "num": null
1359
+ },
1360
+ "TABREF1": {
1361
+ "num": null,
1362
+ "text": "",
1363
+ "type_str": "table",
1364
+ "html": null,
1365
+ "content": "<table/>"
1366
+ },
1367
+ "TABREF3": {
1368
+ "num": null,
1369
+ "text": "",
1370
+ "type_str": "table",
1371
+ "html": null,
1372
+ "content": "<table/>"
1373
+ },
1374
+ "TABREF4": {
1375
+ "num": null,
1376
+ "text": "",
1377
+ "type_str": "table",
1378
+ "html": null,
1379
+ "content": "<table/>"
1380
+ },
1381
+ "TABREF5": {
1382
+ "num": null,
1383
+ "text": "",
1384
+ "type_str": "table",
1385
+ "html": null,
1386
+ "content": "<table><tr><td>, using both evaluation</td></tr><tr><td>methods A and B, System 1 achieves 50%, whereas</td></tr><tr><td>System 2 achieves 0%. If we look at sentence utility,</td></tr><tr><td>System 1 matches 18 out of 19 utility points in the</td></tr><tr><td>ideal summary and System 2 gets 15 out of 19. In this</td></tr><tr><td>case, the performance of system 2 is not as low as</td></tr><tr><td>when using methods A and B.</td></tr></table>"
1387
+ },
1388
+ "TABREF6": {
1389
+ "num": null,
1390
+ "text": "",
1391
+ "type_str": "table",
1392
+ "html": null,
1393
+ "content": "<table/>"
1394
+ },
1395
+ "TABREF7": {
1396
+ "num": null,
1397
+ "text": "",
1398
+ "type_str": "table",
1399
+ "html": null,
1400
+ "content": "<table><tr><td>The interjudge agreement measures, to what extent</td></tr><tr><td>each judge satisfies the utility of the other judges by</td></tr><tr><td>picking the right sentences.</td></tr></table>"
1401
+ },
1402
+ "TABREF9": {
1403
+ "num": null,
1404
+ "text": "",
1405
+ "type_str": "table",
1406
+ "html": null,
1407
+ "content": "<table/>"
1408
+ },
1409
+ "TABREF10": {
1410
+ "num": null,
1411
+ "text": "",
1412
+ "type_str": "table",
1413
+ "html": null,
1414
+ "content": "<table><tr><td/><td colspan=\"2\">Cluster A</td><td colspan=\"2\">Cluster B</td><td colspan=\"2\">Cluster C</td><td colspan=\"2\">Cluster D</td><td colspan=\"2\">Cluster E</td><td colspan=\"2\">Cluster F</td></tr><tr><td># judges agreeing</td><td>+</td><td>-</td><td>+</td><td>-</td><td>+</td><td>-</td><td>+</td><td>-</td><td>+</td><td>-</td><td>+</td><td>-</td></tr><tr><td>5</td><td>0</td><td>7</td><td>0</td><td>24</td><td>0</td><td>45</td><td>0</td><td>88</td><td>1</td><td>73</td><td>0</td><td>61</td></tr><tr><td>4</td><td>1</td><td>6</td><td>3</td><td>6</td><td>1</td><td>1 0</td><td>9</td><td>3 7</td><td>8</td><td>3 5</td><td>0</td><td>1 1</td></tr><tr><td>3</td><td>3</td><td>6</td><td>4</td><td>5</td><td>4</td><td>4</td><td>2 8</td><td>2 0</td><td>5</td><td>2 3</td><td>3</td><td>7</td></tr><tr><td>2</td><td>1</td><td>1</td><td>2</td><td>1</td><td>1</td><td>0</td><td>7</td><td>0</td><td>7</td><td>0</td><td>1</td><td>0</td></tr></table>"
1415
+ },
1416
+ "TABREF11": {
1417
+ "num": null,
1418
+ "text": "",
1419
+ "type_str": "table",
1420
+ "html": null,
1421
+ "content": "<table/>"
1422
+ },
1423
+ "TABREF12": {
1424
+ "num": null,
1425
+ "text": "",
1426
+ "type_str": "table",
1427
+ "html": null,
1428
+ "content": "<table><tr><td>we show the normalized performance</td></tr><tr><td>(D) of MEAD, for the six clusters at nine</td></tr><tr><td>compression rates. MEAD performed better than</td></tr><tr><td>Lead in 29 (in bold) out of 54 cases. Note that for</td></tr><tr><td>the largest cluster, Cluster D, MEAD outperformed</td></tr><tr><td>Lead at all compression rates.</td></tr></table>"
1429
+ },
1430
+ "TABREF13": {
1431
+ "num": null,
1432
+ "text": "",
1433
+ "type_str": "table",
1434
+ "html": null,
1435
+ "content": "<table/>"
1436
+ }
1437
+ }
1438
+ }
1439
+ }
Full_text_JSON/prefixW/json/W00/W00-0404.json ADDED
The diff for this file is too large to render. See raw diff
 
Full_text_JSON/prefixW/json/W00/W00-0405.json ADDED
@@ -0,0 +1,1650 @@
1
+ {
2
+ "paper_id": "W00-0405",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:38.525224Z"
6
+ },
7
+ "title": "Multi-Document Summarization By Sentence Extraction",
8
+ "authors": [
9
+ {
10
+ "first": "Jade",
11
+ "middle": [],
12
+ "last": "Goldstein",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "Carnegie Mellon University Pittsburgh",
17
+ "location": {
18
+ "postCode": "15213",
19
+ "region": "PA",
20
+ "country": "U.S.A"
21
+ }
22
+ },
23
+ "email": ""
24
+ },
25
+ {
26
+ "first": "Vibhu",
27
+ "middle": [],
28
+ "last": "Mittal",
29
+ "suffix": "",
30
+ "affiliation": {},
31
+ "email": "mittal@jprc.com"
32
+ },
33
+ {
34
+ "first": "Jaime",
35
+ "middle": [],
36
+ "last": "Carbonell",
37
+ "suffix": "",
38
+ "affiliation": {
39
+ "laboratory": "",
40
+ "institution": "Carnegie Mellon University Pittsburgh",
41
+ "location": {
42
+ "postCode": "15213",
43
+ "region": "PA",
44
+ "country": "U.S.A"
45
+ }
46
+ },
47
+ "email": ""
48
+ },
49
+ {
50
+ "first": "Mark",
51
+ "middle": [],
52
+ "last": "Kantrowitz",
53
+ "suffix": "",
54
+ "affiliation": {},
55
+ "email": ""
56
+ }
57
+ ],
58
+ "year": "",
59
+ "venue": null,
60
+ "identifiers": {},
61
+ "abstract": "This paper discusses a text extraction approach to multidocument summarization that builds on single-document summarization methods by using additional, available information on about the document set as a whole and the relationships between the documents. Multi-document summarization differs from single in that the issues of compression, speed, redundancy and passage selection are critical in the formation of useful summaries. Our approach addresses these issues by using domainindependent techniques based mainly on fast, statistical processing, a metric for reducing redundancy and maximizing diversity in the selected passages, and a modular framework to allow easy parameterization for different genres, corpora characteristics and user requirements.",
62
+ "pdf_parse": {
63
+ "paper_id": "W00-0405",
64
+ "_pdf_hash": "",
65
+ "abstract": [
66
+ {
67
+ "text": "This paper discusses a text extraction approach to multidocument summarization that builds on single-document summarization methods by using additional, available information on about the document set as a whole and the relationships between the documents. Multi-document summarization differs from single in that the issues of compression, speed, redundancy and passage selection are critical in the formation of useful summaries. Our approach addresses these issues by using domainindependent techniques based mainly on fast, statistical processing, a metric for reducing redundancy and maximizing diversity in the selected passages, and a modular framework to allow easy parameterization for different genres, corpora characteristics and user requirements.",
68
+ "cite_spans": [],
69
+ "ref_spans": [],
70
+ "eq_spans": [],
71
+ "section": "Abstract",
72
+ "sec_num": null
73
+ }
74
+ ],
75
+ "body_text": [
76
+ {
77
+ "text": "With the continuing growth of online information, it has become increasingly important to provide improved mechanisms to find and present textual information effectively. Conventional IR systems find and rank documents based on maximizing relevance to the user query (Salton, 1970; van Rijsbergen, 1979; Buckley, 1985; Salton, 1989) . Some systems also include sub-document relevance assessments and convey this information to the user. More recently, single document summarization systems provide an automated generic abstract or a queryrelevant summary (TIPSTER, 1998a) . i However, largescale IR and summarization have not yet been truly integrated, and the functionality challenges on a summarization system are greater in a true IR or topic-detection context Allan et al., 1998) .",
78
+ "cite_spans": [
79
+ {
80
+ "start": 267,
81
+ "end": 281,
82
+ "text": "(Salton, 1970;",
83
+ "ref_id": null
84
+ },
85
+ {
86
+ "start": 282,
87
+ "end": 303,
88
+ "text": "van Rijsbergen, 1979;",
89
+ "ref_id": "BIBREF22"
90
+ },
91
+ {
92
+ "start": 304,
93
+ "end": 318,
94
+ "text": "Buckley, 1985;",
95
+ "ref_id": "BIBREF15"
96
+ },
97
+ {
98
+ "start": 319,
99
+ "end": 332,
100
+ "text": "Salton, 1989)",
101
+ "ref_id": null
102
+ },
103
+ {
104
+ "start": 555,
105
+ "end": 571,
106
+ "text": "(TIPSTER, 1998a)",
107
+ "ref_id": "BIBREF20"
108
+ },
109
+ {
110
+ "start": 764,
111
+ "end": 783,
112
+ "text": "Allan et al., 1998)",
113
+ "ref_id": "BIBREF9"
114
+ }
115
+ ],
116
+ "ref_spans": [],
117
+ "eq_spans": [],
118
+ "section": "Introduction",
119
+ "sec_num": "1"
120
+ },
121
+ {
122
+ "text": "Consider the situation where the user issues a search query, for instance on a news topic, and the retrieval system finds hundreds of closely-ranked documents in response. Many of these documents are likely to repeat much the same information, while differing in certain i Most of these were based on statistical techniques applied to various document entities; examples include frait, 1983; Kupiec et al., 1995; Paice, 1990, Klavans and Shaw, 1995; MeKeown et al., 1995; Shaw, 1995; Aon\u00a2 et al., 1997; Boguraev and Kennedy, 1997; Hovy and Lin, 1997; Mitra et al., 1997; Teufel and Moens, 1997; Barzilay and Elhadad, 1997; Baldwin and Mortbn, 1998; Radev and McKeown, 1998; Strzalkowski et al., 1998) .",
123
+ "cite_spans": [
124
+ {
125
+ "start": 379,
126
+ "end": 391,
127
+ "text": "frait, 1983;",
128
+ "ref_id": null
129
+ },
130
+ {
131
+ "start": 392,
132
+ "end": 412,
133
+ "text": "Kupiec et al., 1995;",
134
+ "ref_id": null
135
+ },
136
+ {
137
+ "start": 413,
138
+ "end": 437,
139
+ "text": "Paice, 1990, Klavans and",
140
+ "ref_id": null
141
+ },
142
+ {
143
+ "start": 438,
144
+ "end": 449,
145
+ "text": "Shaw, 1995;",
146
+ "ref_id": null
147
+ },
148
+ {
149
+ "start": 450,
150
+ "end": 471,
151
+ "text": "MeKeown et al., 1995;",
152
+ "ref_id": null
153
+ },
154
+ {
155
+ "start": 472,
156
+ "end": 483,
157
+ "text": "Shaw, 1995;",
158
+ "ref_id": null
159
+ },
160
+ {
161
+ "start": 484,
162
+ "end": 502,
163
+ "text": "Aon\u00a2 et al., 1997;",
164
+ "ref_id": null
165
+ },
166
+ {
167
+ "start": 503,
168
+ "end": 530,
169
+ "text": "Boguraev and Kennedy, 1997;",
170
+ "ref_id": "BIBREF13"
171
+ },
172
+ {
173
+ "start": 531,
174
+ "end": 550,
175
+ "text": "Hovy and Lin, 1997;",
176
+ "ref_id": null
177
+ },
178
+ {
179
+ "start": 551,
180
+ "end": 570,
181
+ "text": "Mitra et al., 1997;",
182
+ "ref_id": null
183
+ },
184
+ {
185
+ "start": 571,
186
+ "end": 594,
187
+ "text": "Teufel and Moens, 1997;",
188
+ "ref_id": "BIBREF20"
189
+ },
190
+ {
191
+ "start": 595,
192
+ "end": 622,
193
+ "text": "Barzilay and Elhadad, 1997;",
194
+ "ref_id": "BIBREF12"
195
+ },
196
+ {
197
+ "start": 623,
198
+ "end": 648,
199
+ "text": "Baldwin and Mortbn, 1998;",
200
+ "ref_id": null
201
+ },
202
+ {
203
+ "start": 649,
204
+ "end": 673,
205
+ "text": "Radev and McKeown, 1998;",
206
+ "ref_id": null
207
+ },
208
+ {
209
+ "start": 674,
210
+ "end": 700,
211
+ "text": "Strzalkowski et al., 1998)",
212
+ "ref_id": "BIBREF18"
213
+ }
214
+ ],
215
+ "ref_spans": [],
216
+ "eq_spans": [],
217
+ "section": "Introduction",
218
+ "sec_num": "1"
219
+ },
220
+ {
221
+ "text": "parts. Summaries of the individual documents would help, but are likely to be very similar to each other, unless the summarization system takes into account other summaries that have already been generated. Multidocument summarization -capable of summarizing either complete documents sets, or single documents in the context of previously summarized ones -are likely to be essential in such situations. Ideally, multi-document summaries should contain the key shared relevant information among all the documents only once, plus other information unique to some of the individual documents that are directly relevant to the user's query.",
222
+ "cite_spans": [],
223
+ "ref_spans": [],
224
+ "eq_spans": [],
225
+ "section": "Introduction",
226
+ "sec_num": "1"
227
+ },
228
+ {
229
+ "text": "Though many of the same techniques used in singledocument summarization can also be used in multidocument summarization, there are at least four significant differences:",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "Introduction",
234
+ "sec_num": "1"
235
+ },
236
+ {
237
+ "text": "1. The degree of redundancy in information contained within a group of topically-related articles is much higher than the degree of redundancy within an article, as each article is apt to describe the main point as well as necessary shared background. Hence anti-redundancy methods are more crucial.",
238
+ "cite_spans": [],
239
+ "ref_spans": [],
240
+ "eq_spans": [],
241
+ "section": "Introduction",
242
+ "sec_num": "1"
243
+ },
244
+ {
245
+ "text": "A group of articles may contain a temporal dimension, typical in a stream of news reports about an unfolding event. Here later information may override earlier more tentative or incomplete accounts.",
246
+ "cite_spans": [],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "2.",
250
+ "sec_num": null
251
+ },
252
+ {
253
+ "text": "The compression ratio (i.e. the size of the summary with respect to the size of the document set) will typically be much smaller for collections of dozens or hundreds of topically related documents than for single document summaries. The SUMMAC evaluation (TIPSTER, 1998a) tested 10% compression summaries, but in our work summarizing 200document clusters, we find that compression to the 1% or 0.1% level is required. Summarization becomes significantly more difficult when compression demands increase.",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "3.",
258
+ "sec_num": null
259
+ },
260
+ {
261
+ "text": "The co-reference problem in summarization presents even greater challenges for multidocument than for single-document summarization (Baldwin and Morton, 1998) .",
262
+ "cite_spans": [
263
+ {
264
+ "start": 132,
265
+ "end": 158,
266
+ "text": "(Baldwin and Morton, 1998)",
267
+ "ref_id": "BIBREF11"
268
+ }
269
+ ],
270
+ "ref_spans": [],
271
+ "eq_spans": [],
272
+ "section": "4.",
273
+ "sec_num": null
274
+ },
275
+ {
276
+ "text": "This paper discusses an approach to multi-document summarization that builds on previous work in single-",
277
+ "cite_spans": [],
278
+ "ref_spans": [],
279
+ "eq_spans": [],
280
+ "section": "4.",
281
+ "sec_num": null
282
+ },
283
+ {
284
+ "text": "I i l i i I ! I i i l I I ! I I ! I, I",
285
+ "cite_spans": [],
286
+ "ref_spans": [],
287
+ "eq_spans": [],
288
+ "section": "4.",
289
+ "sec_num": null
290
+ },
291
+ {
292
+ "text": "document summarization by using additional, available information about the document set as a whole, the relationships between the documents, as well as individual documents.",
293
+ "cite_spans": [],
294
+ "ref_spans": [],
295
+ "eq_spans": [],
296
+ "section": "4.",
297
+ "sec_num": null
298
+ },
299
+ {
300
+ "text": "Generating an effective summary requires the summarizer to select, evaluate, order and aggregate items of information according to their relevance to a particular subject or purpose. These tasks can either be approximated by IR techniques or done in greater depth with fuller natural language processing. Most previous work in summarization has attempted to deal with the issues by focusing more on a related, but simpler, problem. With text-span deletion the system attempts to delete \"less important\" spans of text from the original document; the text that remains is deemed a summary. Work on automated document summarization by text span extraction dates back at least to work at IBM in the fifties (Luhn, 1958) . Most of the work in sentence extraction applied statistical techniques (frequency analysis, variance analysis, etc.) to linguistic units such as tokens, names, anaphora, etc. More recently, other approaches have investigated the utility of discourse structure (Marcu, 1997) , the combination of information extraction and language generation (Klavans and Shaw, 1995; McKeown et al., 1995) , and using machine learning to find patterns in text (Teufel and Moens, 1997; Barzilay and Elhadad, 1997; Strzalkowski et al., 1998) .",
301
+ "cite_spans": [
302
+ {
303
+ "start": 703,
304
+ "end": 715,
305
+ "text": "(Luhn, 1958)",
306
+ "ref_id": null
307
+ },
308
+ {
309
+ "start": 978,
310
+ "end": 991,
311
+ "text": "(Marcu, 1997)",
312
+ "ref_id": null
313
+ },
314
+ {
315
+ "start": 1060,
316
+ "end": 1084,
317
+ "text": "(Klavans and Shaw, 1995;",
318
+ "ref_id": null
319
+ },
320
+ {
321
+ "start": 1085,
322
+ "end": 1106,
323
+ "text": "McKeown et al., 1995)",
324
+ "ref_id": null
325
+ },
326
+ {
327
+ "start": 1161,
328
+ "end": 1185,
329
+ "text": "(Teufel and Moens, 1997;",
330
+ "ref_id": "BIBREF20"
331
+ },
332
+ {
333
+ "start": 1186,
334
+ "end": 1213,
335
+ "text": "Barzilay and Elhadad, 1997;",
336
+ "ref_id": "BIBREF12"
337
+ },
338
+ {
339
+ "start": 1214,
340
+ "end": 1240,
341
+ "text": "Strzalkowski et al., 1998)",
342
+ "ref_id": "BIBREF18"
343
+ }
344
+ ],
345
+ "ref_spans": [],
346
+ "eq_spans": [],
347
+ "section": "Background and Related Work",
348
+ "sec_num": "2"
349
+ },
350
+ {
351
+ "text": "Some of these approaches to single document summarization have been extended to deal with multi-document summarization (Mani and Bloedern, 1997; TIPSTER, 1998b; Radev and McKeown, 1998; Mani and Bloedorn, 1999; McKeown et al., .!999; Stein et al., 1999) . These include comparing templates filled in by extracting information -using specialized, domain specific knowledge sources -from the doc-\"ument, and then generating natural language summaries from the templates (Radev and McKeown, 1998) , com--\u2022 paring named-entities -extracted using specialized lists between documents and selecting the most relevant section (TIPSTER, 1998b), finding co-reference chains in the document set to identify common sections of interest (TIPSTER, 1998b), or building activation networks of related lexical items (identity mappings, synonyms, hypernyms, etc.) to extract text spans from the document set (Mani and Bloedern, 1997). Another system (Stein et al., 1999) creates a multi-document summary from multiple single document summaries, an approach that can be sub-optimal in some cases, due to the fact that the process of generating the final multi-document summary takes as input the individual summaries and not the complete documents. (Particularly if the single-document summaries can contain much overlapping information.) The Columbia University system (McKeown et al., 1999) creates a multi-document summary using machine learning and statistical techniques to identify similar sections and language generation to reformulate the summary. The focus of our approach is a multi-document system that can quickly summarize large clusters of similar documents (on the order of thousands) while providing the key relevant useful information or pointers to such information. Our system (1) primarily uses only domainindependent techniques, based mainly on fast, statistical processing, (2) explicitly deals with the issue of reducing redundancy without eliminating potential relevant information, and 3contains parameterized modules, so that different genres or corpora characteristics can be taken into account easily.",
352
+ "cite_spans": [
353
+ {
354
+ "start": 119,
355
+ "end": 144,
356
+ "text": "(Mani and Bloedern, 1997;",
357
+ "ref_id": null
358
+ },
359
+ {
360
+ "start": 145,
361
+ "end": 160,
362
+ "text": "TIPSTER, 1998b;",
363
+ "ref_id": "BIBREF21"
364
+ },
365
+ {
366
+ "start": 161,
367
+ "end": 185,
368
+ "text": "Radev and McKeown, 1998;",
369
+ "ref_id": null
370
+ },
371
+ {
372
+ "start": 186,
373
+ "end": 210,
374
+ "text": "Mani and Bloedorn, 1999;",
375
+ "ref_id": null
376
+ },
377
+ {
378
+ "start": 211,
379
+ "end": 233,
380
+ "text": "McKeown et al., .!999;",
381
+ "ref_id": null
382
+ },
383
+ {
384
+ "start": 234,
385
+ "end": 253,
386
+ "text": "Stein et al., 1999)",
387
+ "ref_id": "BIBREF18"
388
+ },
389
+ {
390
+ "start": 468,
391
+ "end": 493,
392
+ "text": "(Radev and McKeown, 1998)",
393
+ "ref_id": null
394
+ },
395
+ {
396
+ "start": 932,
397
+ "end": 952,
398
+ "text": "(Stein et al., 1999)",
399
+ "ref_id": "BIBREF18"
400
+ },
401
+ {
402
+ "start": 1351,
403
+ "end": 1373,
404
+ "text": "(McKeown et al., 1999)",
405
+ "ref_id": null
406
+ },
407
+ {
408
+ "start": 1878,
409
+ "end": 1881,
410
+ "text": "(2)",
411
+ "ref_id": "BIBREF1"
412
+ }
413
+ ],
414
+ "ref_spans": [],
415
+ "eq_spans": [],
416
+ "section": "Background and Related Work",
417
+ "sec_num": "2"
418
+ },
419
+ {
420
+ "text": "There are two types of situations in which multidocument summarization would be useful: (1) the user is faced with a collection of dis-similar documents and wishes to assess the information landscape contained in the collection, or 2there is a collection of topicallyrelated documents, extracted from a larger more diverse collection as the result of a query, or a topically-cohesive cluster. In the first case, if the collection is large enough, it only makes sense to first cluster and categorize the documents (Yang et al., 1999) , and then sample from, or summarize each cohesive cluster. Hence, a \"summary\" would constitute of a visualization of the information landscape, where features could be clusters or summaries thereof. In the second case, it is possible to build a synthetic textual summary containing the main point(s) of the topic, augmented with non-redundant background information and/or query-relevant elaborations. This is the focus of our work reported here, including the necessity to eliminate redundancy among the information content of multiple related documents. Users' information seeking needs and goals vary tremendously. When a group of three people created a multi-document summarization of 10 articles about the Microsoft Trial from a given day, one summary focused on the details presented in court, one on an overall gist of the day's events, and the third on a high level view of the goals and outcome of the trial. Thus, an ideal multidocument summarization would be able to address the different levels of detail, which is difficult without natural language understanding. An interface for the summarization system needs to be able to permit the user to enter information seeking goals, via a query, a background interest profile and/or a relevance feedback mechanism.",
421
+ "cite_spans": [
422
+ {
423
+ "start": 513,
424
+ "end": 532,
425
+ "text": "(Yang et al., 1999)",
426
+ "ref_id": "BIBREF23"
427
+ }
428
+ ],
429
+ "ref_spans": [],
430
+ "eq_spans": [],
431
+ "section": "Requirements for Multi-Document Summarization",
432
+ "sec_num": "3"
433
+ },
434
+ {
435
+ "text": "Following is a list of requirements for multi-document summarization:",
436
+ "cite_spans": [],
437
+ "ref_spans": [],
438
+ "eq_spans": [],
439
+ "section": "Requirements for Multi-Document Summarization",
440
+ "sec_num": "3"
441
+ },
442
+ {
443
+ "text": "\u2022 clustering: The ability to cluster similar documents and passages to find related information.",
444
+ "cite_spans": [],
445
+ "ref_spans": [],
446
+ "eq_spans": [],
447
+ "section": "Requirements for Multi-Document Summarization",
448
+ "sec_num": "3"
449
+ },
450
+ {
451
+ "text": "\u2022 coverage: The ability to find and extract the main points across documents.",
452
+ "cite_spans": [],
453
+ "ref_spans": [],
454
+ "eq_spans": [],
455
+ "section": "Requirements for Multi-Document Summarization",
456
+ "sec_num": "3"
457
+ },
458
+ {
459
+ "text": "\u2022 anti-redundancy: The ability to minimize redundancy between passages in the summary.",
460
+ "cite_spans": [],
461
+ "ref_spans": [],
462
+ "eq_spans": [],
463
+ "section": "Requirements for Multi-Document Summarization",
464
+ "sec_num": "3"
465
+ },
466
+ {
467
+ "text": "The ability to combine text passages in a useful manner for the reader. This may include:",
468
+ "cite_spans": [],
469
+ "ref_spans": [],
470
+ "eq_spans": [],
471
+ "section": "*. summary cohesion criteria:",
472
+ "sec_num": null
473
+ },
474
+ {
475
+ "text": "-document ordering: All text segments of highest ranking document, then all segments from the next highest ranking document, etc.",
476
+ "cite_spans": [],
477
+ "ref_spans": [],
478
+ "eq_spans": [],
479
+ "section": "*. summary cohesion criteria:",
480
+ "sec_num": null
481
+ },
482
+ {
483
+ "text": "-news-story principle (rank ordering):present the most relevant and diverse information first so that the reader gets the maximal information content even if they stop reading the summary.",
484
+ "cite_spans": [],
485
+ "ref_spans": [],
486
+ "eq_spans": [],
487
+ "section": "*. summary cohesion criteria:",
488
+ "sec_num": null
489
+ },
490
+ {
491
+ "text": "-topic-cohesion: Group together the passages by topic clustering using passage similarity criteria and present the information by the cluster centroid passage rank.",
492
+ "cite_spans": [],
493
+ "ref_spans": [],
494
+ "eq_spans": [],
495
+ "section": "*. summary cohesion criteria:",
496
+ "sec_num": null
497
+ },
498
+ {
499
+ "text": "-time line ordering: Text passages ordered based on the occurrence of events in time.",
500
+ "cite_spans": [],
501
+ "ref_spans": [],
502
+ "eq_spans": [],
503
+ "section": "*. summary cohesion criteria:",
504
+ "sec_num": null
505
+ },
506
+ {
507
+ "text": "* coherence: Summaries generated should be readable and relevant to the user.",
508
+ "cite_spans": [],
509
+ "ref_spans": [],
510
+ "eq_spans": [],
511
+ "section": "*. summary cohesion criteria:",
512
+ "sec_num": null
513
+ },
514
+ {
515
+ "text": ". context: Include sufficient context so that the summary is understandable to the reader.",
516
+ "cite_spans": [],
517
+ "ref_spans": [],
518
+ "eq_spans": [],
519
+ "section": "*. summary cohesion criteria:",
520
+ "sec_num": null
521
+ },
522
+ {
523
+ "text": "\u2022 identification of source inconsistencies: Articles often have errors (such as billion reported as million, etc.); multi-document summarization must be able to recognize and report source inconsistencies.",
524
+ "cite_spans": [],
525
+ "ref_spans": [],
526
+ "eq_spans": [],
527
+ "section": "*. summary cohesion criteria:",
528
+ "sec_num": null
529
+ },
530
+ {
531
+ "text": "\u2022 summary updates: A new multi-document summary must take into account previous summaries in generating new summaries. In such cases, the system needs to be able to track and categorize events.",
532
+ "cite_spans": [],
533
+ "ref_spans": [],
534
+ "eq_spans": [],
535
+ "section": "*. summary cohesion criteria:",
536
+ "sec_num": null
537
+ },
538
+ {
539
+ "text": "\u2022 effective user interfaces:",
540
+ "cite_spans": [],
541
+ "ref_spans": [],
542
+ "eq_spans": [],
543
+ "section": "*. summary cohesion criteria:",
544
+ "sec_num": null
545
+ },
546
+ {
547
+ "text": "-Attributability: The user needs to be able to easily access the source of a given passage. This could be the single document summary.",
548
+ "cite_spans": [],
549
+ "ref_spans": [],
550
+ "eq_spans": [],
551
+ "section": "*. summary cohesion criteria:",
552
+ "sec_num": null
553
+ },
554
+ {
555
+ "text": "-Relationship: The user needs to view related passages to the text passage shown, which can highlight source inconsistencies.",
556
+ "cite_spans": [],
557
+ "ref_spans": [],
558
+ "eq_spans": [],
559
+ "section": "*. summary cohesion criteria:",
560
+ "sec_num": null
561
+ },
562
+ {
563
+ "text": "-Source Selection: The user needs to be able to select or eliminate various sources. For example, the user may want to eliminate information from some less reliable foreign news reporting sources.",
564
+ "cite_spans": [],
565
+ "ref_spans": [],
566
+ "eq_spans": [],
567
+ "section": "*. summary cohesion criteria:",
568
+ "sec_num": null
569
+ },
570
+ {
571
+ "text": "-Context: The user needs to be able to zoom in on the context surrounding the chosen passages. -Redirection: The user should be able to highlight certain parts of the synthetic summary and give a command to the system indicating that these parts are to be weighted heavily and that other parts are to be given a lesser weight.",
572
+ "cite_spans": [],
573
+ "ref_spans": [],
574
+ "eq_spans": [],
575
+ "section": "*. summary cohesion criteria:",
576
+ "sec_num": null
577
+ },
578
+ {
579
+ "text": "In the previous section we discussed the requirements for a multi-document summarization system. Depending on a user's information seeking goals, the user may want to create summaries that contain primarily the common portions of the documents (their intersection) or an overview of the entire cluster of documents (a sampling of the space that the documents span). A user may also want to have a highly readable summary, an overview of pointers (sentences or word lists) to further information, or a combination of the two. Following is a list of various methods of creating multi-document summaries by extraction:",
580
+ "cite_spans": [],
581
+ "ref_spans": [],
582
+ "eq_spans": [],
583
+ "section": "Types of Multi-Document Summarizers",
584
+ "sec_num": "4"
585
+ },
586
+ {
587
+ "text": "1. Summary from Common Sections of Documents:",
588
+ "cite_spans": [],
589
+ "ref_spans": [],
590
+ "eq_spans": [],
591
+ "section": "Types of Multi-Document Summarizers",
592
+ "sec_num": "4"
593
+ },
594
+ {
595
+ "text": "Find the important relevant parts that the cluster of documents have in common (their intersection) and use that as a summary.",
596
+ "cite_spans": [],
597
+ "ref_spans": [],
598
+ "eq_spans": [],
599
+ "section": "Types of Multi-Document Summarizers",
600
+ "sec_num": "4"
601
+ },
602
+ {
603
+ "text": "2. Summary from Common Sections and Unique Sections of Documents: Find the important relevant parts that the cluster of documents have in common and the relevant parts that are unique and use that as a summary.",
604
+ "cite_spans": [],
605
+ "ref_spans": [],
606
+ "eq_spans": [],
607
+ "section": "Types of Multi-Document Summarizers",
608
+ "sec_num": "4"
609
+ },
610
+ {
611
+ "text": "3. Centroid Document Summary: Create a single document summary from the centroid document in the cluster.",
612
+ "cite_spans": [],
613
+ "ref_spans": [],
614
+ "eq_spans": [],
615
+ "section": "Types of Multi-Document Summarizers",
616
+ "sec_num": "4"
617
+ },
618
+ {
619
+ "text": "ate a single document summary from the centroid document in the cluster and add some representation from outlier documents (passages or keyword extraction) to provide a fuller coverage of the document set. 2",
620
+ "cite_spans": [],
621
+ "ref_spans": [],
622
+ "eq_spans": [],
623
+ "section": "Centroid Document plus Outliers Summary: Cre-",
624
+ "sec_num": "4."
625
+ },
626
+ {
627
+ "text": "Latest Document plus Outliers Summary: Create a single document summary from the latest time stamped document in the cluster (most recent information) and add some representation of outlier documents to provide a fuller coverage of the document set.",
628
+ "cite_spans": [],
629
+ "ref_spans": [],
630
+ "eq_spans": [],
631
+ "section": "5.",
632
+ "sec_num": null
633
+ },
634
+ {
635
+ "text": "Find the important relevant parts that the cluster of documents have in common and the relevant parts that are unique and weight all the information by the time sequence of the documents in which they appear and use the result as a summary. This allows the more recent, often updated information to be more likely to be included in the summary.",
636
+ "cite_spans": [],
637
+ "ref_spans": [],
638
+ "eq_spans": [],
639
+ "section": "Summary from Common Sections and Unique Sections of Documents with Time Weighting Factor:",
640
+ "sec_num": "6."
641
+ },
642
+ {
643
+ "text": "There are also much more complicated types of summary extracts which involve natural language processing and/or understanding. These types of summaries include: (1) differing points of view within the document collection, (2) updates of information within the document collection, (3) updates of information from the document collection with respect to an already provided summary, 4 Naturally, an ideal multi-document summary would include a natural language generation component to create cohesive readable summaries (Radev and McKeown, 1998; McKeown et al., 1999) . Our current focus is on the extraction of the relevant passages.",
644
+ "cite_spans": [
645
+ {
646
+ "start": 519,
647
+ "end": 544,
648
+ "text": "(Radev and McKeown, 1998;",
649
+ "ref_id": null
650
+ },
651
+ {
652
+ "start": 545,
653
+ "end": 566,
654
+ "text": "McKeown et al., 1999)",
655
+ "ref_id": null
656
+ }
657
+ ],
658
+ "ref_spans": [],
659
+ "eq_spans": [],
660
+ "section": "Summary from Common Sections and Unique Sections of Documents with Time Weighting Factor:",
661
+ "sec_num": "6."
662
+ },
663
+ {
664
+ "text": "In the previous sections we discussed the requirements and types of multi-document summarization systems. This section discusses our current implementation of a multi-document summarization system which is designed to produce summaries that emphasize \"relevant novelty.\" Relevant novelty is a metric for minimizing redundancy and maximizing both relevance and diversity. A first approximation to measuring relevant novelty is to measure relevance and novelty independently and provide a linear combination as the metric. We call this linear combination \"marginal relevance\" - i.e., a text passage has high marginal relevance if it is both relevant to the query and useful for a summary, while having minimal similarity to previously selected passages. Using this metric one can maximize marginal relevance in retrieval and summarization, hence we label our method \"maximal marginal relevance\" (MMR) (Carbonell and Goldstein, 1998).",
665
+ "cite_spans": [],
666
+ "ref_spans": [],
667
+ "eq_spans": [],
668
+ "section": "System Design",
669
+ "sec_num": "5"
670
+ },
671
+ {
672
+ "text": "The Maximal Marginal Relevance Multi-Document (MMR-MD) metric is defined in Figure 1 . Sirnl and Sire2 cover some of the properties that we discussed in Section 3. 3 : For Sirnl, the first term is the cosine similarity metric for query and document. The second term computes a coverage score for the passage by whether the passage is in one or more clusters and the size of the cluster. The third term reflects the information content of the pas-.sage by taking into account both statistical and linguistic features for summary inclusion (such as query expan-.sion, position of the passage in the document and presence/absence of named-entities in the passage). The final term indicates the temporal sequence of the document in the collection allowing for more recent information to have higher weights. For Sire2, the first term uses the cosine similarity metric to compute the similarity between the passage and previously selected passages. (This helps the system to minimize the possibility of including passages similar to ones already selected.) The second term penalizes passages that are part of clusters from which other passages have already been chosen. The third term penalizes documents from which passages have already been selected; however, the penalty is inversely proportional to document length, to allow the possibility of longer documents 3Sirnn and Sirn2 as previously defined in MMR for singledocument summarization contained only the first term of each equation:",
673
+ "cite_spans": [
674
+ {
675
+ "start": 153,
676
+ "end": 165,
677
+ "text": "Section 3. 3",
678
+ "ref_id": null
679
+ }
680
+ ],
681
+ "ref_spans": [
682
+ {
683
+ "start": 76,
684
+ "end": 84,
685
+ "text": "Figure 1",
686
+ "ref_id": null
687
+ }
688
+ ],
689
+ "eq_spans": [],
690
+ "section": "System Design",
691
+ "sec_num": "5"
692
+ },
693
+ {
694
+ "text": "contributing more passages. These latter two terms allow for a fuller coverage of the clusters and documents.",
695
+ "cite_spans": [],
696
+ "ref_spans": [],
697
+ "eq_spans": [],
698
+ "section": "System Design",
699
+ "sec_num": "5"
700
+ },
701
+ {
702
+ "text": "Given the above definition, MMR-MD incrementally computes the standard relevance-ranked list - plus some additional scoring factors - when the parameter \u03bb = 1, and computes a maximal diversity ranking among the passages in the documents when \u03bb = 0. For intermediate values of \u03bb in the interval [0,1], a linear combination of both criteria is optimized. In order to sample the information space in the general vicinity of the query, small values of \u03bb can be used; to focus on multiple, potentially overlapping or reinforcing relevant passages, \u03bb can be set to a value closer to 1. We found that a particularly effective search strategy for document retrieval is to start with a small \u03bb (e.g., \u03bb = .3) in order to understand the information space in the region of the query, and then to focus on the most important parts using a reformulated query (possibly via relevance feedback) and a larger value of \u03bb (e.g., \u03bb = .7) (Carbonell and Goldstein, 1998).",
703
+ "cite_spans": [],
704
+ "ref_spans": [],
705
+ "eq_spans": [],
706
+ "section": "System Design",
707
+ "sec_num": "5"
708
+ },
709
+ {
710
+ "text": "Our multi-document summarizer works as follows:",
711
+ "cite_spans": [],
712
+ "ref_spans": [],
713
+ "eq_spans": [],
714
+ "section": "System Design",
715
+ "sec_num": "5"
716
+ },
717
+ {
718
+ "text": "\u2022 Segment the documents into passages, and index them using inverted indices (as used by the IR engine). Passages may be phrases, sentences, n-sentence chunks, or paragraphs.",
719
+ "cite_spans": [],
720
+ "ref_spans": [],
721
+ "eq_spans": [],
722
+ "section": "System Design",
723
+ "sec_num": "5"
724
+ },
725
+ {
726
+ "text": "\u2022 Identify the passages relevant to the query using cosine similarity with a threshold below which the passages are discarded.",
727
+ "cite_spans": [],
728
+ "ref_spans": [],
729
+ "eq_spans": [],
730
+ "section": "System Design",
731
+ "sec_num": "5"
732
+ },
733
+ {
734
+ "text": "\u2022 Apply the MMR-MD metric as defined above. Depending on the desired length of the summary, select a number of passages to compute passage redundancy using the cosine similarity metric and use the passage similarity scoring as a method of clustering passages. Users can select the number of passages or the amount of compression.",
735
+ "cite_spans": [],
736
+ "ref_spans": [],
737
+ "eq_spans": [],
738
+ "section": "System Design",
739
+ "sec_num": "5"
740
+ },
741
+ {
742
+ "text": "\u2022 Reassemble the selected passages into a summary document using one of the summary-cohesion criteria (see Section 3).",
743
+ "cite_spans": [],
744
+ "ref_spans": [],
745
+ "eq_spans": [],
746
+ "section": "System Design",
747
+ "sec_num": "5"
748
+ },
749
+ {
750
+ "text": "The results reported in this paper are based on the use of the SMART search engine (Buckley, 1985) to compute cosine similarities (with a SMART weighting of lnn for both queries and passages), stopwords eliminated from the indexed data and stemming turned on.",
751
+ "cite_spans": [
752
+ {
753
+ "start": 83,
754
+ "end": 98,
755
+ "text": "(Buckley, 1985)",
756
+ "ref_id": "BIBREF15"
757
+ }
758
+ ],
759
+ "ref_spans": [],
760
+ "eq_spans": [],
761
+ "section": "System Design",
762
+ "sec_num": "5"
763
+ },
764
+ {
765
+ "text": "The TIPSTER evaluation corpus provided several sets of topical clusters to which we applied MMR-MD summarization. As an example, consider a set of 200 apartheidrelated news-wire documents from the Associated Press and the Wall Street Journal, spanning the period from 1988 to 1992. We used the TIPSTER provided topic description as the query. These 200 documents were on an average 31 sentences in length, with a total of 6115 sentences. We used the sentence as our summary unit. Generating a summary 10 sentences long resulted in a (Pii, Q, Cij, Di, D) Sire1 is the similarity metric for relevance ranking Sim~ is the anti-redundancy metric D is a document collection P is the passages from the documents in that collection (e.g., ~j is passage j from document Di)",
766
+ "cite_spans": [],
767
+ "ref_spans": [
768
+ {
769
+ "start": 533,
770
+ "end": 553,
771
+ "text": "(Pii, Q, Cij, Di, D)",
772
+ "ref_id": null
773
+ }
774
+ ],
775
+ "eq_spans": [],
776
+ "section": "Discussion",
777
+ "sec_num": "6"
778
+ },
779
+ {
780
+ "text": "Q is a query or user profile R = IR (D, P, Q, 8) , i.e., the ranked list of passages from documents retrieved by an IR system, given D, P, Q and a ' relevance threshold O, below which it will not retrieve passages (O can be degree of match or number of passages) . _5\" is the subset of passages in R already selected R\\S is the set difference, i.e., the set of as yet unselected passages in R ' C is the set of passage clusters for the set of documents (7vw is the subset of clusters of (7 that contains passage Pvw (7~ is the subset of clusters that contain passages from document D~ Ikl is the number of passages in the individual cluster k IC~,~ N Cijl is the number of clusters in the intersection of (7, , , nand(Tij wi. .are weights for the terms, which can be optimized W is a word in the passage/~j type is a particular type of word, e.g., city name",
781
+ "cite_spans": [
782
+ {
783
+ "start": 36,
784
+ "end": 48,
785
+ "text": "(D, P, Q, 8)",
786
+ "ref_id": null
787
+ },
788
+ {
789
+ "start": 705,
790
+ "end": 708,
791
+ "text": "(7,",
792
+ "ref_id": "BIBREF6"
793
+ },
794
+ {
795
+ "start": 709,
796
+ "end": 710,
797
+ "text": ",",
798
+ "ref_id": null
799
+ },
800
+ {
801
+ "start": 711,
802
+ "end": 712,
803
+ "text": ",",
804
+ "ref_id": null
805
+ },
806
+ {
807
+ "start": 713,
808
+ "end": 725,
809
+ "text": "nand(Tij wi.",
810
+ "ref_id": null
811
+ }
812
+ ],
813
+ "ref_spans": [],
814
+ "eq_spans": [],
815
+ "section": "MMR-MD ~ Arg max [A(Siml",
816
+ "sec_num": null
817
+ },
818
+ {
819
+ "text": "IOil is the length of document i. Figures 2 and 3 respectively. The summary in Figure 2 clearly illustrates the need for reducing redundancy and maximizing novel information.",
820
+ "cite_spans": [],
821
+ "ref_spans": [
822
+ {
823
+ "start": 34,
824
+ "end": 49,
825
+ "text": "Figures 2 and 3",
826
+ "ref_id": "FIGREF3"
827
+ },
828
+ {
829
+ "start": 79,
830
+ "end": 87,
831
+ "text": "Figure 2",
832
+ "ref_id": "FIGREF3"
833
+ }
834
+ ],
835
+ "eq_spans": [],
836
+ "section": "MMR-MD ~ Arg max [A(Siml",
837
+ "sec_num": null
838
+ },
839
+ {
840
+ "text": "Consider for instance, the summary shown in Figure 2 . The fact that the ANC is fighting to overthrow the gov-i. wsJg10204-0176:1 CAPE TOWN, South Africa -President EW. de Klerk's proposal to repeal the major pillars of apartheid drew a generally positive response from black leaders, but African National Congress leader Nelson Mandela called on the international community to continue economic sanctions against South Africa until the government takes further steps.",
841
+ "cite_spans": [],
842
+ "ref_spans": [
843
+ {
844
+ "start": 44,
845
+ "end": 52,
846
+ "text": "Figure 2",
847
+ "ref_id": "FIGREF3"
848
+ }
849
+ ],
850
+ "eq_spans": [],
851
+ "section": "MMR-MD ~ Arg max [A(Siml",
852
+ "sec_num": null
853
+ },
854
+ {
855
+ "text": "2. AP880803-0082:25 Three Canadian anti-apartheid groups issued a statement urging the government to sever diplomatic and economic links with South Africa and aid the African National Congress, the banned group fighting the white-dominated government in South Africa.",
856
+ "cite_spans": [],
857
+ "ref_spans": [],
858
+ "eq_spans": [],
859
+ "section": "MMR-MD ~ Arg max [A(Siml",
860
+ "sec_num": null
861
+ },
862
+ {
863
+ "text": "3. AP880803-0080:25 Three Canadian anti-apartheid groups issued a statement urging the government to sever diplomatic and economic links with South Africa and aid the African National Congress, the banned group fighting the white-dominated government in South Africa.",
864
+ "cite_spans": [],
865
+ "ref_spans": [],
866
+ "eq_spans": [],
867
+ "section": "MMR-MD ~ Arg max [A(Siml",
868
+ "sec_num": null
869
+ },
870
+ {
871
+ "text": "4. AP880802-0165:23 South Africa says the ANC, the main black group fighting to overthrow South Africa's white government, has seven major military bases in Angola, and the Pretoria government wants those bases closed down.",
872
+ "cite_spans": [],
873
+ "ref_spans": [],
874
+ "eq_spans": [],
875
+ "section": "MMR-MD ~ Arg max [A(Siml",
876
+ "sec_num": null
877
+ },
878
+ {
879
+ "text": "5. AP880212-0060:14 ANGOP quoted the Angolan statement as saying the main causes of conflict in the region are South Africa's \"illegal occupation\" of Namibia, South African attacks against its black-ruled neighbors and its alleged creation of armed groups to carry out \"terrorist activities\" in those countries, and the denial of political rights to the black majority in South Africa.",
880
+ "cite_spans": [],
881
+ "ref_spans": [],
882
+ "eq_spans": [],
883
+ "section": "MMR-MD ~ Arg max [A(Siml",
884
+ "sec_num": null
885
+ },
886
+ {
887
+ "text": "6. AP880823-0069:17 The ANC is the main guerrilla group fighting to overthrow the South African government and end apartheid, the system of racial segregation in which South Africa's black majority has no vote in national affairs.",
888
+ "cite_spans": [],
889
+ "ref_spans": [],
890
+ "eq_spans": [],
891
+ "section": "MMR-MD ~ Arg max [A(Siml",
892
+ "sec_num": null
893
+ },
894
+ {
895
+ "text": "7. AP880803-0158:26 South Africa says the ANC, the main black group fighting to overthrow South Africa's whiteled government, has seven major military bases in Angola, and it wants those bases closed down.",
896
+ "cite_spans": [],
897
+ "ref_spans": [],
898
+ "eq_spans": [],
899
+ "section": "MMR-MD ~ Arg max [A(Siml",
900
+ "sec_num": null
901
+ },
902
+ {
903
+ "text": "8. AP880613-0126:15 The ANC is fighting to topple the South African government and its policy of apartheid, under which the nation's 26 million blacks have no voice in national affairs and the 5 million whites control the economy and dominate government.",
904
+ "cite_spans": [],
905
+ "ref_spans": [],
906
+ "eq_spans": [],
907
+ "section": "MMR-MD ~ Arg max [A(Siml",
908
+ "sec_num": null
909
+ },
910
+ {
911
+ "text": "9. AP880212-0060:13 The African National Congress is the main rebel movement fighting South Africa's white-led government and SWAPO is a black guerrilla group fighting for independence for Namibia, which is administered by South Africa.",
912
+ "cite_spans": [],
913
+ "ref_spans": [],
914
+ "eq_spans": [],
915
+ "section": "MMR-MD ~ Arg max [A(Siml",
916
+ "sec_num": null
917
+ },
918
+ {
919
+ "text": "I0. WSJ870129-0051:1 Secretary of State George Shultz, in a meeting with Oliver Tambo, head of the African National Congress, voiced concerns about Soviet influence on the black South African group and the ANC's use of violence in the struggle against apartheid. \u2022 ernment is mentioned seven times (sentences #2,-#4,#6-#9),\"which constitutes 70% of the sentences in the summary. Furthermore, sentence #3 is an exact duplicate of sentence #2, and sentence #7 is almost identical to sentence #4. In contrast, the summary in Figure 3 , generated using MMR-MD with a value of A set to 0.3 shows significant improvements in eliminating redundancy. The fact that the ANC is fighting to overthrow the government is mentioned only twice (sentences #3,#7), and one of these sentences has additional information in it. The new summary retained only three of the sentences from the earlier summary. Counting clearly distinct propositions in both cases, yields a 60% greater information content for the MMR-MD case, though both summaries are equivalent in length.",
920
+ "cite_spans": [],
921
+ "ref_spans": [
922
+ {
923
+ "start": 522,
924
+ "end": 530,
925
+ "text": "Figure 3",
926
+ "ref_id": "FIGREF4"
927
+ }
928
+ ],
929
+ "eq_spans": [],
930
+ "section": "MMR-MD ~ Arg max [A(Siml",
931
+ "sec_num": null
932
+ },
933
+ {
934
+ "text": "When these 200 documents were added to a set of 4 other topics of 200 documents, yielding a document-set with 1000 documents, the query relevant multi-document summarization system produced exactly the same results.",
935
+ "cite_spans": [],
936
+ "ref_spans": [],
937
+ "eq_spans": [],
938
+ "section": "MMR-MD ~ Arg max [A(Siml",
939
+ "sec_num": null
940
+ },
941
+ {
942
+ "text": "We are currently working on constructing datasetsfor experimental evaluations of multi-document summarization. In order to construct these data sets, we attempted to categorize user's information seeking goals for multidocument summarization (see Section 3). As can be seen in Figure 2 , the standard IR technique of using a query to extract relevant passages is no longer sufficient for multidocument summarization due to redundancy. In addition, query relevant extractions cannot capture temporal sequencing. The data sets will allow us to measure the effects of these, and other features, on multi-document summarization quality.",
943
+ "cite_spans": [],
944
+ "ref_spans": [
945
+ {
946
+ "start": 277,
947
+ "end": 285,
948
+ "text": "Figure 2",
949
+ "ref_id": "FIGREF3"
950
+ }
951
+ ],
952
+ "eq_spans": [],
953
+ "section": "MMR-MD ~ Arg max [A(Siml",
954
+ "sec_num": null
955
+ },
956
+ {
957
+ "text": "Specifically, we are constructing sets of 10 documents, which either contain a snapshot of an event from multiple sources or the unfoldment of an event over time.",
958
+ "cite_spans": [],
959
+ "ref_spans": [],
960
+ "eq_spans": [],
961
+ "section": "MMR-MD ~ Arg max [A(Siml",
962
+ "sec_num": null
963
+ },
964
+ {
965
+ "text": "10. wsJg10204-01761 CAPE TOWN, South Africa -President EW. de Klerk's proposal to repeal the major pillars of apartheid drew a generally positive response from black leaders, but African National Congress leader Nelson Mandela called on the international community to continue economic sanctions against South Africa until the government takes further steps. From these sets we are performing two types of experiments. In the first, we are examining how users put sentences into pre-defined clusters and how they create sentence based multi-document summaries. The result will also serve as a gold standard for system generated summaries -do our systems pick the same summary sentences as humans and are they picking sentences from the same clusters as humans? The second type Of experiment is designed to determine how users perceive the output summary quality. In this experiment, users are asked to rate the output sentences from the summarizer as good, okay or bad. For the okay or bad sentences, they are asked to provide a summary sentence from the document set that is \"better\", i.e., that makes a better set of sentences to represent the information content of the document set. We are comparing our proposed summarizer #6 in Section 4 to summarizer #1, the common portions of the document sets with no anti-redundancy and summarizer #3, single document summary of a centroid document using our single document summarizer (Goldstein et al., 1999) .",
966
+ "cite_spans": [
967
+ {
968
+ "start": 1430,
969
+ "end": 1454,
970
+ "text": "(Goldstein et al., 1999)",
971
+ "ref_id": "BIBREF17"
972
+ }
973
+ ],
974
+ "ref_spans": [],
975
+ "eq_spans": [],
976
+ "section": "MMR-MD ~ Arg max [A(Siml",
977
+ "sec_num": null
978
+ },
979
+ {
980
+ "text": "This paper presented a statistical method of generating extraction based multi-document summaries. It builds upon previous work in single-document summarization and takes into account some of the major differences between single-document and multi-document summarization: (i) the need to carefully eliminate redundant information from multiple documents, and achieve high compression ratios, (ii) take into account information about document and passage similarities, and weight different passages accordingly, and (iii) take temporal information into account. Our approach differs from others in several ways: it is completely domain-independent, is based mainly on fast, statistical processing, it attempts to maximize the novelty of the information being selected, and different genres or corpora characteristics can be taken into account easily. Since our system is not based on the use of sophisticated natural language understanding or information extraction techniques, summaries lack co-reference resolution, passages may be disjoint from one another, and in some cases may have false implicature.",
981
+ "cite_spans": [],
982
+ "ref_spans": [],
983
+ "eq_spans": [],
984
+ "section": "Conclusions and Future Work",
985
+ "sec_num": "7"
986
+ },
987
+ {
988
+ "text": "In future work, we will integrate work on multidocument summarization with work on clustering to provide summaries for clusters produced by topic detection and tracking. We also plan to investigate how to generate coherent temporally based event summaries. We will also investigate how users can effectively use multidocument summarization through interactive interfaces to browse and explore large document sets.",
989
+ "cite_spans": [],
990
+ "ref_spans": [],
991
+ "eq_spans": [],
992
+ "section": "Conclusions and Future Work",
993
+ "sec_num": "7"
994
+ }
995
+ ],
996
+ "back_matter": [
997
+ {
998
+ "text": " ",
999
+ "cite_spans": [],
1000
+ "ref_spans": [],
1001
+ "eq_spans": [],
1002
+ "section": "Conference on Research and Development in",
1003
+ "sec_num": null
1004
+ }
1005
+ ],
1006
+ "bib_entries": {
1007
+ "BIBREF0": {
1008
+ "ref_id": "b0",
1009
+ "title": "Secretary of State George Shultz, in a meeting with Oliver Tambo, head of the African National Congress, voiced concerns about Soviet influence on the black South African group and the ANC's use of violence in the struggle against apartheid",
1010
+ "authors": [],
1011
+ "year": null,
1012
+ "venue": "",
1013
+ "volume": "",
1014
+ "issue": "",
1015
+ "pages": "",
1016
+ "other_ids": {},
1017
+ "num": null,
1018
+ "urls": [],
1019
+ "raw_text": "WSJ870129-0051 1 Secretary of State George Shultz, in a meeting with Oliver Tambo, head of the African Na- tional Congress, voiced concerns about Soviet influence on the black South African group and the ANC's use of violence in the struggle against apartheid.",
1020
+ "links": null
1021
+ },
1022
+ "BIBREF1": {
1023
+ "ref_id": "b1",
1024
+ "title": "ANC: Apartheid' s Foes -The Long Struggle: The ANC Is Banned, But It Is in the Hearts of a Nation's Blacks --In South Africa, the Group Survives Assassinations, Government Crackdowns n The Black, Green and Gold",
1025
+ "authors": [],
1026
+ "year": 1988,
1027
+ "venue": "WSJ",
1028
+ "volume": "",
1029
+ "issue": "",
1030
+ "pages": "",
1031
+ "other_ids": {},
1032
+ "num": null,
1033
+ "urls": [],
1034
+ "raw_text": "wsJgg0422-0133 44 (See related story: \"ANC: Apartheid' s Foes -The Long Struggle: The ANC Is Banned, But It Is in the Hearts of a Nation's Blacks --In South Africa, the Group Survives Assassinations, Government Crackdowns n The Black, Green and Gold\" -WSJ April 22, 1988)",
1035
+ "links": null
1036
+ },
1037
+ "BIBREF2": {
1038
+ "ref_id": "b2",
1039
+ "title": "South Africa says the ANC, the main black group fighting to overthrow South Africa's whiteled government, has seven major military bases in Angola, and it wants those bases closed down",
1040
+ "authors": [],
1041
+ "year": null,
1042
+ "venue": "",
1043
+ "volume": "",
1044
+ "issue": "",
1045
+ "pages": "",
1046
+ "other_ids": {},
1047
+ "num": null,
1048
+ "urls": [],
1049
+ "raw_text": "AP880803-0158 26 South Africa says the ANC, the main black group fighting to overthrow South Africa's white- led government, has seven major military bases in Angola, and it wants those bases closed down.",
1050
+ "links": null
1051
+ },
1052
+ "BIBREF3": {
1053
+ "ref_id": "b3",
1054
+ "title": "But activist clergymen from South Africa said the pontiff should have spoken out more forcefully against their white-minority government's policies of apartheid, under which 26 million blacks have no say in national affairs",
1055
+ "authors": [],
1056
+ "year": null,
1057
+ "venue": "",
1058
+ "volume": "",
1059
+ "issue": "",
1060
+ "pages": "",
1061
+ "other_ids": {},
1062
+ "num": null,
1063
+ "urls": [],
1064
+ "raw_text": "AP880919-0052 5 But activist clergymen from South Africa said the pontiff should have spoken out more force- fully against their white-minority government's policies of apartheid, under which 26 million blacks have no say in national affairs.",
1065
+ "links": null
1066
+ },
1067
+ "BIBREF4": {
1068
+ "ref_id": "b4",
1069
+ "title": "Besides ending the emergency and lifting bans on anti-apartheid groups and individual activists, the Harare summit's conditions included the removal of all troops from South Africa's black townships, releasing all political prisoners and ending political trials and executions, and a government commitment to free political discussion",
1070
+ "authors": [],
1071
+ "year": null,
1072
+ "venue": "",
1073
+ "volume": "",
1074
+ "issue": "",
1075
+ "pages": "",
1076
+ "other_ids": {},
1077
+ "num": null,
1078
+ "urls": [],
1079
+ "raw_text": "AP890821-0092 10 Besides ending the emergency and lifting bans on anti-apartheid groups and individual ac- tivists, the Harare summit's conditions included the removal of all troops from South Africa's black townships, releasing all political prisoners and ending political trials and executions, and a government commitment to free political discussion.",
1080
+ "links": null
1081
+ },
1082
+ "BIBREF5": {
1083
+ "ref_id": "b5",
1084
+ "title": "Pretoria and the ANC remain far apart on their visions for a post-apartheid South Africa: The ANC wants a simple one-man, one-vote majority rule system, while the government claims that will lead to black domination and insists on constitutional protection of the rights of minorities",
1085
+ "authors": [],
1086
+ "year": null,
1087
+ "venue": "",
1088
+ "volume": "",
1089
+ "issue": "",
1090
+ "pages": "",
1091
+ "other_ids": {},
1092
+ "num": null,
1093
+ "urls": [],
1094
+ "raw_text": "wsJg00503-0041 11 Pretoria and the ANC remain far apart on their visions for a post-apartheid South Africa: The ANC wants a simple one-man, one-vote majority rule system, while the government claims that will lead to black domination and insists on constitutional protection of the rights of minorities, including the whites.",
1095
+ "links": null
1096
+ },
1097
+ "BIBREF6": {
1098
+ "ref_id": "b6",
1099
+ "title": "South Africa -The African National Congress suspended its 30-year armed struggle against the whiie minority government, clearing the way for the start of negotiations over a new constitution based on black-white power sharing",
1100
+ "authors": [],
1101
+ "year": null,
1102
+ "venue": "",
1103
+ "volume": "",
1104
+ "issue": "",
1105
+ "pages": "",
1106
+ "other_ids": {},
1107
+ "num": null,
1108
+ "urls": [],
1109
+ "raw_text": "WSJ900807-0037 1 JOHANNESBURG, South Africa -The African National Congress suspended its 30-year armed struggle against the whiie minority government, clearing the way for the start of negotiations over a new constitution based on black-white power sharing.",
1110
+ "links": null
1111
+ },
1112
+ "BIBREF7": {
1113
+ "ref_id": "b7",
1114
+ "title": "The African National Congress, South Africa's main black liberation group, forged its sanctions strategy as a means of pressuring the government to abandon white-minority rule",
1115
+ "authors": [],
1116
+ "year": null,
1117
+ "venue": "",
1118
+ "volume": "",
1119
+ "issue": "",
1120
+ "pages": "",
1121
+ "other_ids": {},
1122
+ "num": null,
1123
+ "urls": [],
1124
+ "raw_text": "WSJ900924-011920 The African National Congress, South Africa's main black liberation group, forged its sanc- tions strategy as a means of pressuring the government to abandon white-minority rule.",
1125
+ "links": null
1126
+ },
1127
+ "BIBREF8": {
1128
+ "ref_id": "b8",
1129
+ "title": "At a, meeting in South Africa this week, the African National Congress, the major black group",
1130
+ "authors": [],
1131
+ "year": null,
1132
+ "venue": "",
1133
+ "volume": "",
1134
+ "issue": "",
1135
+ "pages": "",
1136
+ "other_ids": {},
1137
+ "num": null,
1138
+ "urls": [],
1139
+ "raw_text": "WSJ910702-0053 36 At a, meeting in South Africa this week, the African National Congress, the major black group, is expected to take a tough line again st the white-rnn government.",
1140
+ "links": null
1141
+ },
1142
+ "BIBREF9": {
1143
+ "ref_id": "b9",
1144
+ "title": "Topic detection and tracking pilot study: Final report",
1145
+ "authors": [
1146
+ {
1147
+ "first": "Jaime",
1148
+ "middle": [],
1149
+ "last": "References James Allan",
1150
+ "suffix": ""
1151
+ },
1152
+ {
1153
+ "first": "George",
1154
+ "middle": [],
1155
+ "last": "Carbonell",
1156
+ "suffix": ""
1157
+ },
1158
+ {
1159
+ "first": "Jonathan",
1160
+ "middle": [],
1161
+ "last": "Doddington",
1162
+ "suffix": ""
1163
+ },
1164
+ {
1165
+ "first": "Yiming",
1166
+ "middle": [],
1167
+ "last": "Yamron",
1168
+ "suffix": ""
1169
+ },
1170
+ {
1171
+ "first": "",
1172
+ "middle": [],
1173
+ "last": "Yang",
1174
+ "suffix": ""
1175
+ }
1176
+ ],
1177
+ "year": 1998,
1178
+ "venue": "Proceedings of the DARPA Broadcast News Transcription and Understanding Workshop",
1179
+ "volume": "",
1180
+ "issue": "",
1181
+ "pages": "",
1182
+ "other_ids": {},
1183
+ "num": null,
1184
+ "urls": [],
1185
+ "raw_text": "References James Allan, Jaime Carbonell, George Doddington,, Jonathan Yamron, and Yiming Yang. 1998. Topic de- tection and tracking pilot study: Final report. In Pro- ceedings of the DARPA Broadcast News Transcription and Understanding Workshop.",
1186
+ "links": null
1187
+ },
1188
+ "BIBREF10": {
1189
+ "ref_id": "b10",
1190
+ "title": "A scalable summarization system using robust NLP",
1191
+ "authors": [
1192
+ {
1193
+ "first": "M",
1194
+ "middle": [
1195
+ "E"
1196
+ ],
1197
+ "last": "Chinatsu Aone",
1198
+ "suffix": ""
1199
+ },
1200
+ {
1201
+ "first": "J",
1202
+ "middle": [],
1203
+ "last": "Okurowski",
1204
+ "suffix": ""
1205
+ },
1206
+ {
1207
+ "first": "B",
1208
+ "middle": [],
1209
+ "last": "Gorlinsky",
1210
+ "suffix": ""
1211
+ },
1212
+ {
1213
+ "first": "",
1214
+ "middle": [],
1215
+ "last": "Larsen",
1216
+ "suffix": ""
1217
+ }
1218
+ ],
1219
+ "year": 1997,
1220
+ "venue": "Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scalable Text Summarization",
1221
+ "volume": "",
1222
+ "issue": "",
1223
+ "pages": "66--73",
1224
+ "other_ids": {},
1225
+ "num": null,
1226
+ "urls": [],
1227
+ "raw_text": "Chinatsu Aone, M. E. Okurowski, J. Gorlinsky, and B. Larsen. 1997. A scalable summarization sys- tem using robust NLP. In Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scalable Text Summarization, pages 66-73, Madrid, Spain.",
1228
+ "links": null
1229
+ },
1230
+ "BIBREF11": {
1231
+ "ref_id": "b11",
1232
+ "title": "Dynamic coreference-based summarization",
1233
+ "authors": [
1234
+ {
1235
+ "first": "Breck",
1236
+ "middle": [],
1237
+ "last": "Baldwin",
1238
+ "suffix": ""
1239
+ },
1240
+ {
1241
+ "first": "Thomas",
1242
+ "middle": [
1243
+ "S"
1244
+ ],
1245
+ "last": "Morton",
1246
+ "suffix": ""
1247
+ }
1248
+ ],
1249
+ "year": 1998,
1250
+ "venue": "Proceedings of the Third Conference on Empirical Methods in Natural Language Processing",
1251
+ "volume": "",
1252
+ "issue": "",
1253
+ "pages": "",
1254
+ "other_ids": {},
1255
+ "num": null,
1256
+ "urls": [],
1257
+ "raw_text": "Breck Baldwin and Thomas S. Morton. 1998. Dy- namic coreference-based summarization. In Proceed- ings of the Third Conference on Empirical Methods in Natural Language Processing (EMNLP-3), Granada, Spain, June.",
1258
+ "links": null
1259
+ },
1260
+ "BIBREF12": {
1261
+ "ref_id": "b12",
1262
+ "title": "Using lexical chains for text summarization",
1263
+ "authors": [
1264
+ {
1265
+ "first": "Regina",
1266
+ "middle": [],
1267
+ "last": "Barzilay",
1268
+ "suffix": ""
1269
+ },
1270
+ {
1271
+ "first": "Michael",
1272
+ "middle": [],
1273
+ "last": "Elhadad",
1274
+ "suffix": ""
1275
+ }
1276
+ ],
1277
+ "year": 1997,
1278
+ "venue": "Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scalable Text Summarization",
1279
+ "volume": "",
1280
+ "issue": "",
1281
+ "pages": "10--17",
1282
+ "other_ids": {},
1283
+ "num": null,
1284
+ "urls": [],
1285
+ "raw_text": "Regina Barzilay and Michael Elhadad. 1997. Using lex- ical chains for text summarization. In Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scal- able Text Summarization, pages 10-17, Madrid, Spain.",
1286
+ "links": null
1287
+ },
1288
+ "BIBREF13": {
1289
+ "ref_id": "b13",
1290
+ "title": "Salience based content characterization of text documents",
1291
+ "authors": [
1292
+ {
1293
+ "first": "Branimir",
1294
+ "middle": [],
1295
+ "last": "Boguraev",
1296
+ "suffix": ""
1297
+ },
1298
+ {
1299
+ "first": "Chris",
1300
+ "middle": [],
1301
+ "last": "Kennedy",
1302
+ "suffix": ""
1303
+ }
1304
+ ],
1305
+ "year": 1997,
1306
+ "venue": "Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scalable Text Summarization",
1307
+ "volume": "",
1308
+ "issue": "",
1309
+ "pages": "2--9",
1310
+ "other_ids": {},
1311
+ "num": null,
1312
+ "urls": [],
1313
+ "raw_text": "Branimir Boguraev and Chris Kennedy. 1997. Salience based content characterization of text documents. In Proceedings of the ACL'97/EACL'97 Workshop on Intelligent Scalable Text Summarization, pages 2-9,.",
1314
+ "links": null
1315
+ },
1316
+ "BIBREF15": {
1317
+ "ref_id": "b15",
1318
+ "title": "Implementation of the SMART information retrieval system",
1319
+ "authors": [
1320
+ {
1321
+ "first": "Chris",
1322
+ "middle": [],
1323
+ "last": "Buckley",
1324
+ "suffix": ""
1325
+ }
1326
+ ],
1327
+ "year": 1985,
1328
+ "venue": "",
1329
+ "volume": "",
1330
+ "issue": "",
1331
+ "pages": "",
1332
+ "other_ids": {},
1333
+ "num": null,
1334
+ "urls": [],
1335
+ "raw_text": "Chris Buckley. 1985. Implementation of the SMART in- formation retrieval system. Technical Report TR 85- 686, Cornell University.",
1336
+ "links": null
1337
+ },
1338
+ "BIBREF16": {
1339
+ "ref_id": "b16",
1340
+ "title": "The use of MMR, diversity-based reranking for reordering documents and producing summaries",
1341
+ "authors": [
1342
+ {
1343
+ "first": "Jaime",
1344
+ "middle": [
1345
+ "G"
1346
+ ],
1347
+ "last": "Carbonell",
1348
+ "suffix": ""
1349
+ },
1350
+ {
1351
+ "first": "Jade",
1352
+ "middle": [],
1353
+ "last": "Goldstein",
1354
+ "suffix": ""
1355
+ }
1356
+ ],
1357
+ "year": 1998,
1358
+ "venue": "Proceedings of SIGIR-98",
1359
+ "volume": "",
1360
+ "issue": "",
1361
+ "pages": "",
1362
+ "other_ids": {},
1363
+ "num": null,
1364
+ "urls": [],
1365
+ "raw_text": "Jaime G. Carbonell and Jade Goldstein. 1998. The use of MMR, diversity-based reranking for reordering documents and producing summaries. In Proceedings of SIGIR-98, Melbourne, Australia, August.",
1366
+ "links": null
1367
+ },
1368
+ "BIBREF17": {
1369
+ "ref_id": "b17",
1370
+ "title": "The use of mmr and diversity-based reranking in document reranking and summarization",
1371
+ "authors": [
1372
+ {
1373
+ "first": "Jade",
1374
+ "middle": [],
1375
+ "last": "Goldstein",
1376
+ "suffix": ""
1377
+ },
1378
+ {
1379
+ "first": "Jaime",
1380
+ "middle": [],
1381
+ "last": "Carbonell",
1382
+ "suffix": ""
1383
+ },
1384
+ {
1385
+ "first": ";",
1386
+ "middle": [],
1387
+ "last": "Vibhu",
1388
+ "suffix": ""
1389
+ },
1390
+ {
1391
+ "first": "O",
1392
+ "middle": [],
1393
+ "last": "Mittal",
1394
+ "suffix": ""
1395
+ },
1396
+ {
1397
+ "first": "\u2022",
1398
+ "middle": [],
1399
+ "last": "Jaime",
1400
+ "suffix": ""
1401
+ },
1402
+ {
1403
+ "first": "G",
1404
+ "middle": [],
1405
+ "last": "Carbonell",
1406
+ "suffix": ""
1407
+ }
1408
+ ],
1409
+ "year": 1995,
1410
+ "venue": "Proceedings of the 14th Twente Workshop on Language Technology in Multimedia Information Retrieval",
1411
+ "volume": "",
1412
+ "issue": "",
1413
+ "pages": "329--331",
1414
+ "other_ids": {},
1415
+ "num": null,
1416
+ "urls": [],
1417
+ "raw_text": "Jade Goldstein and Jaime Carbonell. 1998. The use of mmr and diversity-based reranking in document reranking and summarization. In Proceedings of the 14th Twente Workshop on Language Technology in Multimedia Information Retrieval, pages 152-166, Enschede, the Netherlands, December. Jade Goldstein, Mark Kantrowitz, Vibhu O. Mittal, and \u2022 Jaime G. Carbonell. 1999. Summarizing Text Doc- uments: Sentence Selection and Evaluation Metrics. Irf Proceedings of the 22nd International ACM SIGIR James Shaw. 1995. Conciseness through aggregation in text generation. In Proceedings of 33rd Association for Computational Linguistics, pages 329-331.",
1418
+ "links": null
1419
+ },
1420
+ "BIBREF18": {
1421
+ "ref_id": "b18",
1422
+ "title": "Summarizing Multiple Documents Using Text Extraction and Interactive Clustering",
1423
+ "authors": [
1424
+ {
1425
+ "first": "C",
1426
+ "middle": [],
1427
+ "last": "Gees",
1428
+ "suffix": ""
1429
+ },
1430
+ {
1431
+ "first": "Tomek",
1432
+ "middle": [],
1433
+ "last": "Stein",
1434
+ "suffix": ""
1435
+ },
1436
+ {
1437
+ "first": "G",
1438
+ "middle": [],
1439
+ "last": "Strzalkowski",
1440
+ "suffix": ""
1441
+ },
1442
+ {
1443
+ "first": "Canada",
1444
+ "middle": [
1445
+ "Tomek"
1446
+ ],
1447
+ "last": "Bowden Wise ; Waterloo",
1448
+ "suffix": ""
1449
+ },
1450
+ {
1451
+ "first": "Jin",
1452
+ "middle": [],
1453
+ "last": "Strzalkowski",
1454
+ "suffix": ""
1455
+ },
1456
+ {
1457
+ "first": "Bowden",
1458
+ "middle": [],
1459
+ "last": "Wang",
1460
+ "suffix": ""
1461
+ },
1462
+ {
1463
+ "first": "",
1464
+ "middle": [],
1465
+ "last": "Wise",
1466
+ "suffix": ""
1467
+ }
1468
+ ],
1469
+ "year": 1998,
1470
+ "venue": "Proceedings of PacLing-99: The Pacific Rim Conference on Computational Linguistics",
1471
+ "volume": "",
1472
+ "issue": "",
1473
+ "pages": "26--30",
1474
+ "other_ids": {},
1475
+ "num": null,
1476
+ "urls": [],
1477
+ "raw_text": "Gees C. Stein, Tomek Strzalkowski, and G. Bowden Wise. 1999. Summarizing Multiple Documents Us- ing Text Extraction and Interactive Clustering. In Pro- ceedings of PacLing-99: The Pacific Rim Conference on Computational Linguistics, pages 200-208, Water- loo, Canada. Tomek Strzalkowski, Jin Wang, and Bowden Wise. 1998. A robust practical text summarization system. In AAAI Intelligent Text Summarization Workshop, pages 26-30, Stanford, CA, March.",
1478
+ "links": null
1479
+ },
1480
+ "BIBREF19": {
1481
+ "ref_id": "b19",
1482
+ "title": "Automatic Summarizing of English Texts",
1483
+ "authors": [
1484
+ {
1485
+ "first": "I",
1486
+ "middle": [],
1487
+ "last": "Tait",
1488
+ "suffix": ""
1489
+ }
1490
+ ],
1491
+ "year": 1983,
1492
+ "venue": "",
1493
+ "volume": "",
1494
+ "issue": "",
1495
+ "pages": "",
1496
+ "other_ids": {},
1497
+ "num": null,
1498
+ "urls": [],
1499
+ "raw_text": "I. Tait. 1983. Automatic Summarizing of English Texts. Ph.D. thesis, University of Cambridge, Cam- bridge, UK.",
1500
+ "links": null
1501
+ },
1502
+ "BIBREF20": {
1503
+ "ref_id": "b20",
1504
+ "title": "Tipster text phase III 18-month workshop notes",
1505
+ "authors": [
1506
+ {
1507
+ "first": "Simone",
1508
+ "middle": [],
1509
+ "last": "Teufel",
1510
+ "suffix": ""
1511
+ },
1512
+ {
1513
+ "first": "Marc",
1514
+ "middle": [],
1515
+ "last": "Moens",
1516
+ "suffix": ""
1517
+ }
1518
+ ],
1519
+ "year": 1997,
1520
+ "venue": "ACL/EACL-97 Workshop on Intelligent Scalable Text Summarization",
1521
+ "volume": "",
1522
+ "issue": "",
1523
+ "pages": "58--65",
1524
+ "other_ids": {},
1525
+ "num": null,
1526
+ "urls": [],
1527
+ "raw_text": "Simone Teufel and Marc Moens. 1997. Sentence ex- traction as a classification task. In ACL/EACL-97 Workshop on Intelligent Scalable Text Summarization, pages 58-65, Madrid, Spain, July. TIPSTER. 1998a. Tipster text phase III 18-month work- shop notes, May. Fairfax, VA.",
1528
+ "links": null
1529
+ },
1530
+ "BIBREF21": {
1531
+ "ref_id": "b21",
1532
+ "title": "Tipster text phase III 24-month workshop notes",
1533
+ "authors": [
1534
+ {
1535
+ "first": "",
1536
+ "middle": [],
1537
+ "last": "Tipster",
1538
+ "suffix": ""
1539
+ }
1540
+ ],
1541
+ "year": 1998,
1542
+ "venue": "",
1543
+ "volume": "",
1544
+ "issue": "",
1545
+ "pages": "",
1546
+ "other_ids": {},
1547
+ "num": null,
1548
+ "urls": [],
1549
+ "raw_text": "TIPSTER. 1998b. Tipster text phase III 24-month work- shop notes, October. Baltimore, MD.",
1550
+ "links": null
1551
+ },
1552
+ "BIBREF22": {
1553
+ "ref_id": "b22",
1554
+ "title": "Information Retrieval",
1555
+ "authors": [
1556
+ {
1557
+ "first": "J",
1558
+ "middle": [],
1559
+ "last": "Charles",
1560
+ "suffix": ""
1561
+ },
1562
+ {
1563
+ "first": "",
1564
+ "middle": [],
1565
+ "last": "Van Rijsbergen",
1566
+ "suffix": ""
1567
+ }
1568
+ ],
1569
+ "year": 1979,
1570
+ "venue": "",
1571
+ "volume": "",
1572
+ "issue": "",
1573
+ "pages": "",
1574
+ "other_ids": {},
1575
+ "num": null,
1576
+ "urls": [],
1577
+ "raw_text": "Charles J. van Rijsbergen. 1979. Information Retrieval. Butterworths, London.",
1578
+ "links": null
1579
+ },
1580
+ "BIBREF23": {
1581
+ "ref_id": "b23",
1582
+ "title": "Learning approaches for topic detection and tracking . news events",
1583
+ "authors": [
1584
+ {
1585
+ "first": "Yiming",
1586
+ "middle": [],
1587
+ "last": "Yang",
1588
+ "suffix": ""
1589
+ },
1590
+ {
1591
+ "first": "Tom",
1592
+ "middle": [],
1593
+ "last": "Pierce",
1594
+ "suffix": ""
1595
+ },
1596
+ {
1597
+ "first": "Jaime",
1598
+ "middle": [
1599
+ "13"
1600
+ ],
1601
+ "last": "Carbonell",
1602
+ "suffix": ""
1603
+ }
1604
+ ],
1605
+ "year": 1998,
1606
+ "venue": "Proceedings of the 21th Ann lnt ACM SI-G1R Conference on Research and Development in Information Retrieval ( SIGIR'98)",
1607
+ "volume": "14",
1608
+ "issue": "",
1609
+ "pages": "32--43",
1610
+ "other_ids": {},
1611
+ "num": null,
1612
+ "urls": [],
1613
+ "raw_text": "Yiming Yang, Tom Pierce, and Jaime 13. Carbonell. 1998. A study on retrospective and on-line event de- tection. In Proceedings of the 21th Ann lnt ACM SI- G1R Conference on Research and Development in In- formation Retrieval ( SIGIR'98), pages 28-36. :Yiming Yang, Jaime G. Carbonell, Ralf D. Brown, Tom Pierce, Brian T. Archibald, and Xin Liu. 1999. Learning approaches for topic detection and tracking . news events. IEEE Intelligent Systems, Special Issue on Applications of Intelligent Information Retrieval, 14(4):32-43, July/August.",
1614
+ "links": null
1615
+ }
1616
+ },
1617
+ "ref_entries": {
1618
+ "FIGREF0": {
1619
+ "type_str": "figure",
1620
+ "text": "the development of an event or subtopic of 2This is similar to the approach of Textwise fHPSTER, 1998b), whose multi-document summary consists of the most relevant paragraph and specialized word lists. e.g., death tolls) over time, and (5) a comparative development of an event.",
1621
+ "num": null,
1622
+ "uris": null
1623
+ },
1624
+ "FIGREF1": {
1625
+ "type_str": "figure",
1626
+ "text": ") -(1 -A) max Sirn2 (Pij, Pnm, C, S, Di))] .j, Q, Cij, Di, D) = wl *(Pij'Q)+w2*coverage(Pij, Cij)+wa*content(Pij)+w4*tirne_sequenee(Di, D) Sim2 ( Pij, Pare, C, S, Di ) = tOa * ( ff ij \" Pnm) + rob * clusters_selected( (7ij , S) + we * documents_selected( Di , S) ~ov~r~ge(Pi~,C) = ~ wk * Ikl ,a=tim, ) -timestamp( Di ) time_sequ_ence ( Di, D) = timestamp( Dmaxtime ) -tiraestamp( D,nintime ) clusters_selected(C~, S) = IC~ n L.J cv=l v,w:P,,,~ES documents_selected(Di, S) = ~ = where",
1627
+ "num": null,
1628
+ "uris": null
1629
+ },
1630
+ "FIGREF2": {
1631
+ "type_str": "figure",
1632
+ "text": "of 0.2% and a character compression of 0.3%, approximately two orders of magnitude different with compression ratios used in single document summarization. The results of summarizing this document set with a value of A set to I (effectively query relevance, but no MMR-MD) and A set to 0.3 (both query relevance and MMR-MD anti-redundancy) are shown in",
1633
+ "num": null,
1634
+ "uris": null
1635
+ },
1636
+ "FIGREF3": {
1637
+ "type_str": "figure",
1638
+ "text": "Sample multi-document summary with A = 1, news-story-principle ordering (rank order)",
1639
+ "num": null,
1640
+ "uris": null
1641
+ },
1642
+ "FIGREF4": {
1643
+ "type_str": "figure",
1644
+ "text": "Sample multi-document summary with A = 0.3, time-line ordering",
1645
+ "num": null,
1646
+ "uris": null
1647
+ }
1648
+ }
1649
+ }
1650
+ }
Full_text_JSON/prefixW/json/W00/W00-0406.json ADDED
@@ -0,0 +1,926 @@
1
+ {
2
+ "paper_id": "W00-0406",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:39.494371Z"
6
+ },
7
+ "title": "Text Summarizer in Use: Lessons Learned from Real World Deployment and Evaluation",
8
+ "authors": [
9
+ {
10
+ "first": "Mary",
11
+ "middle": [
12
+ "Ellen"
13
+ ],
14
+ "last": "Okurowski",
15
+ "suffix": "",
16
+ "affiliation": {},
17
+ "email": ""
18
+ },
19
+ {
20
+ "first": "Harold",
21
+ "middle": [],
22
+ "last": "Wilson",
23
+ "suffix": "",
24
+ "affiliation": {},
25
+ "email": ""
26
+ },
27
+ {
28
+ "first": "Joaquin",
29
+ "middle": [],
30
+ "last": "Urbina",
31
+ "suffix": "",
32
+ "affiliation": {},
33
+ "email": ""
34
+ },
35
+ {
36
+ "first": "Tony",
37
+ "middle": [],
38
+ "last": "Taylor",
39
+ "suffix": "",
40
+ "affiliation": {
41
+ "laboratory": "",
42
+ "institution": "SRA Corp",
43
+ "location": {
44
+ "addrLine": "4939 Elkridge Landing Suite #195 Linthicum",
45
+ "postCode": "21090",
46
+ "region": "MD"
47
+ }
48
+ },
49
+ "email": ""
50
+ },
51
+ {
52
+ "first": "Ruth",
53
+ "middle": [],
54
+ "last": "Colvin",
55
+ "suffix": "",
56
+ "affiliation": {},
57
+ "email": ""
58
+ },
59
+ {
60
+ "first": "Frank",
61
+ "middle": [],
62
+ "last": "Krapcho",
63
+ "suffix": "",
64
+ "affiliation": {},
65
+ "email": ""
66
+ }
67
+ ],
68
+ "year": "",
69
+ "venue": null,
70
+ "identifiers": {},
71
+ "abstract": "",
72
+ "pdf_parse": {
73
+ "paper_id": "W00-0406",
74
+ "_pdf_hash": "",
75
+ "abstract": [],
76
+ "body_text": [
77
+ {
78
+ "text": "Much of the historical and current summarization literature has been technologycentered with the questions posed and answered having implications for technology development.",
79
+ "cite_spans": [],
80
+ "ref_spans": [],
81
+ "eq_spans": [],
82
+ "section": "Introduction",
83
+ "sec_num": "1.0"
84
+ },
85
+ {
86
+ "text": "Though commercial summarization products have appeared in the market place and developers continue to explore new summarization areas, few papers have been user-centered, examining summarization technology in-use. In this ~aaper, we show how applied work and the knowledge gleaned about technology in-use \"can temper theoretical considerations and \u2022 motivate as well as direct development likely to result in higher return on investment.",
87
+ "cite_spans": [],
88
+ "ref_spans": [],
89
+ "eq_spans": [],
90
+ "section": "Introduction",
91
+ "sec_num": "1.0"
92
+ },
93
+ {
94
+ "text": "The importance of understanding the function a summary serves for users is widely acknowledged, and seminal works defining summary types by functions (Paice, 1990; Sparck-Jones, 1993) are frequently cited by developers. Task orientation defines extrinsic technology assessments, and the research literature on how to assess performance for machine generated summaries in an experimental task scenario has grown ( Brandow et al., 1994; Morris et al., 1999; Jing et al., 1998; Merlino and Maybury, 1999; Wasson, 1998; Tombros et al., 1998; Firmin and Chrzanowski, 1999; and Mani et al., 1999) . An increasing number of research papers on summarization systems now also describe some type of extrinsic evaluative task (e.g. Salton et al., 1999; Strzalkowski et al., 1998) . A number of factors (i.e. characteristics of summaries, documents, users, and tasks) have surfaced which have implications for technology use. More \" research assessing technology (or any aspect of it) in-use on a user's own data even in a development mode along the lines of McKeown et al. (1998) is needed. While experimentation designs involving subjects performing short term controlled tasks may yield results of statistical \u2022significance, generalizability to the user community is limited.",
95
+ "cite_spans": [
96
+ {
97
+ "start": 150,
98
+ "end": 163,
99
+ "text": "(Paice, 1990;",
100
+ "ref_id": "BIBREF4"
101
+ },
102
+ {
103
+ "start": 164,
104
+ "end": 183,
105
+ "text": "Sparck-Jones, 1993)",
106
+ "ref_id": "BIBREF5"
107
+ },
108
+ {
109
+ "start": 411,
110
+ "end": 434,
111
+ "text": "( Brandow et al., 1994;",
112
+ "ref_id": null
113
+ },
114
+ {
115
+ "start": 435,
116
+ "end": 455,
117
+ "text": "Morris et al., 1999;",
118
+ "ref_id": "BIBREF3"
119
+ },
120
+ {
121
+ "start": 456,
122
+ "end": 474,
123
+ "text": "Jing et al., 1998;",
124
+ "ref_id": null
125
+ },
126
+ {
127
+ "start": 475,
128
+ "end": 501,
129
+ "text": "Merlino and Maybury, 1999;",
130
+ "ref_id": null
131
+ },
132
+ {
133
+ "start": 502,
134
+ "end": 515,
135
+ "text": "Wasson, 1998;",
136
+ "ref_id": null
137
+ },
138
+ {
139
+ "start": 516,
140
+ "end": 537,
141
+ "text": "Tombros et al., 1998;",
142
+ "ref_id": null
143
+ },
144
+ {
145
+ "start": 538,
146
+ "end": 567,
147
+ "text": "Firmin and Chrzanowski, 1999;",
148
+ "ref_id": null
149
+ },
150
+ {
151
+ "start": 568,
152
+ "end": 590,
153
+ "text": "and Mani et al., 1999)",
154
+ "ref_id": null
155
+ },
156
+ {
157
+ "start": 721,
158
+ "end": 741,
159
+ "text": "Salton et al., 1999;",
160
+ "ref_id": "BIBREF6"
161
+ },
162
+ {
163
+ "start": 742,
164
+ "end": 768,
165
+ "text": "Strzalkowski et al., 1998)",
166
+ "ref_id": null
167
+ },
168
+ {
169
+ "start": 1062,
170
+ "end": 1068,
171
+ "text": "(1998)",
172
+ "ref_id": null
173
+ }
174
+ ],
175
+ "ref_spans": [],
176
+ "eq_spans": [],
177
+ "section": "Background",
178
+ "sec_num": "2.0"
179
+ },
180
+ {
181
+ "text": "In addition, the level of user support text. summarization systems should provide also continues to be speculative. More interest lies in new areas of inquiry like visualization and browsing techniques (e.g., Boguraev et al., 1998) , multi-document summarization ( e.g., McKeown and Radev, 1995) , multi-media summarization (e.g., Merlino and Maybury, 1999) , summarization of documents with graphics (e.g., Futrelle, 1998) and multi-lingual summarization (e.g., Cowie, 1998) . But systematic user studies on interface support, applicability of proposed summarization features, or on the real-world use of demonstration and prototype systems or even commercial systems have not materialized.",
182
+ "cite_spans": [
183
+ {
184
+ "start": 209,
185
+ "end": 231,
186
+ "text": "Boguraev et al., 1998)",
187
+ "ref_id": "BIBREF2"
188
+ },
189
+ {
190
+ "start": 271,
191
+ "end": 295,
192
+ "text": "McKeown and Radev, 1995)",
193
+ "ref_id": null
194
+ },
195
+ {
196
+ "start": 331,
197
+ "end": 357,
198
+ "text": "Merlino and Maybury, 1999)",
199
+ "ref_id": null
200
+ },
201
+ {
202
+ "start": 408,
203
+ "end": 423,
204
+ "text": "Futrelle, 1998)",
205
+ "ref_id": null
206
+ },
207
+ {
208
+ "start": 463,
209
+ "end": 475,
210
+ "text": "Cowie, 1998)",
211
+ "ref_id": null
212
+ }
213
+ ],
214
+ "ref_spans": [],
215
+ "eq_spans": [],
216
+ "section": "Background",
217
+ "sec_num": "2.0"
218
+ },
219
+ {
220
+ "text": "This paper presents a user study of a summarization system and provides insights on a number of technical issues relevant to the summarization R&D community that arise in, the context of use, concerning technology performance and user support. We describe initial stages in the insertion of the SRA summarizer in which (1) a large scale beta test was conducted, and (2) analysis of tool usage data, user surveys and observations, and user requirements is leading to system enhancements and more effective summarization technology insertion. In our user study, we begin with a brief description of the task and technology (3.1). We then describe the beta test methodology (3.2) and :analysis of tool usage data (3.3). We focus on what we learned in our user-centered . approach about how technology performance in a task and user support affect user \u2022 acceptance (3.4) and what significant technology-related modifications resulted and what studies are in progress to measure tool efficacy, summarization effectiveness, and the impact of training on tool use (3.5). Though work to enhance the text summarization system is underway, we focus in this paper on user-centered issues. Our work is predicated on the belief that there is no substitute for user generated data to guide tool enhancement.",
221
+ "cite_spans": [],
222
+ "ref_spans": [],
223
+ "eq_spans": [],
224
+ "section": "Overview",
225
+ "sec_num": "3.0"
226
+ },
227
+ {
228
+ "text": "The task is indicative. Our users rely on machine generated summaries (single document, either generic or query-based, with user adjustment of compression rates) to judge relevance of full documents to their information need. As an information analyst,\" our typical user routinely scans summaries to stay current with fields of interest and enhance domain knowledge. This scanning task is one of many jobs an analyst performs to support report writing for customers in other Government agencies. Our goal is to generate summaries that accelerate eliminating or selecting documents without misleading or causing a user to access the original text unnecessarily.",
229
+ "cite_spans": [],
230
+ "ref_spans": [],
231
+ "eq_spans": [],
232
+ "section": "Task and Technology",
233
+ "sec_num": "3.1"
234
+ },
235
+ {
236
+ "text": "The system in this user study is a version of the SRA sentence extraction system described in Aone et al. (1997 Aone et al. ( , 1998 Aone et al. ( , 1999 . Users retrieve documents from a database of multiple text collections of reports and press. Documents are generally written in a journalistic style and average 2,000 characters in length. The number of documents in a batch may vary from a few to hundreds. Batches of retrieved texts may be routinely routed to our summary server or uploaded by the user. The system is web-.based and provides the capability to tailor summary output by creating multiple summary set-ups. User options include: number of sentences viewed, summary type applied and sorting, other information viewed (e.g. title, date), and high frequency document terms and named entities viewed. Users can save, print or view full text originals with summaries appended.",
237
+ "cite_spans": [
238
+ {
239
+ "start": 94,
240
+ "end": 111,
241
+ "text": "Aone et al. (1997",
242
+ "ref_id": null
243
+ },
244
+ {
245
+ "start": 112,
246
+ "end": 132,
247
+ "text": "Aone et al. ( , 1998",
248
+ "ref_id": "BIBREF0"
249
+ },
250
+ {
251
+ "start": 133,
252
+ "end": 153,
253
+ "text": "Aone et al. ( , 1999",
254
+ "ref_id": "BIBREF1"
255
+ }
256
+ ],
257
+ "ref_spans": [],
258
+ "eq_spans": [],
259
+ "section": "Task and Technology",
260
+ "sec_num": "3.1"
261
+ },
262
+ {
263
+ "text": "Viewed originals highlight extracted sentences.",
264
+ "cite_spans": [],
265
+ "ref_spans": [],
266
+ "eq_spans": [],
267
+ "section": "Task and Technology",
268
+ "sec_num": "3.1"
269
+ },
270
+ {
271
+ "text": "All system use is voluntary. Our users are customers and, if dissatisfied, may elect to scan data without our technology.",
272
+ "cite_spans": [],
273
+ "ref_spans": [],
274
+ "eq_spans": [],
275
+ "section": "Task and Technology",
276
+ "sec_num": "3.1"
277
+ },
278
+ {
279
+ "text": "In the fall of 1998, 90+ users were recruited primarily through an IR system news group and provided access to the SRA system summarizer to replace their full text review process of scanning concatenated files. Procedural (how-to) training was optional, but I I I I I I approximately 70 users opted to receive a oneon-one hands-on demonstration (about fortyfive minutes in length) on texts that the new user had retrieved. The beta testing took place over a six month period. With no stipulation on the length of participation, many users simply tried out the system a limited number of times. Initial feedback gave us a clear picture of the likelihood of continued use. Our relatively low retention rate highlighted the fact that the experimental conditions in previous summary experiments may be misleading and masked factors that do not surface until users use a system in a daily work in a real-world setting.",
280
+ "cite_spans": [],
281
+ "ref_spans": [],
282
+ "eq_spans": [],
283
+ "section": "Beta Test Methodology",
284
+ "sec_num": "3.2"
285
+ },
286
+ {
287
+ "text": "Usage data were collected for all system users and analyzed through web logs.\" These logs were a record of what users did on their actual work data. For each user, our logs provided a rich source of information: number of summary batches, number of documents in each, whether documents were viewed, and set up features--summary type, summary lines viewed, number of indicator (high frequency signature terms) lines viewed, number of entity (persons, places, organizations-) lines viewed, query terms). Table 1 below illustrates the type of representative data collected, questions of interest, and findings. Usage data indicated that about half the population selected generic and the other half querybased summaries. (Note: The default set-up was the generic summarization.)",
288
+ "cite_spans": [],
289
+ "ref_spans": [
290
+ {
291
+ "start": 502,
292
+ "end": 509,
293
+ "text": "Table 1",
294
+ "ref_id": "TABREF0"
295
+ }
296
+ ],
297
+ "eq_spans": [],
298
+ "section": "Analysis of Tool Usage Data",
299
+ "sec_num": "3.3"
300
+ },
301
+ {
302
+ "text": "The hypothesis of equal median number of sentences available for viewing sentences was tested. The number of sentences viewed with generic summary type (3) is significantly different from either query-based (5) or lead (6).",
303
+ "cite_spans": [],
304
+ "ref_spans": [],
305
+ "eq_spans": [],
306
+ "section": "Analysis of Tool Usage Data",
307
+ "sec_num": "3.3"
308
+ },
309
+ {
310
+ "text": "Users tended to retain indicator and entity preferences when tailoring capabilities. (But users generallymodified a default set-up in which both preferences have a line viewed.) training and use data A chi-squared test for independence between training and use reflected a significant relationship (p value close to 0) i.e., training did impact the user's decision to use the system. However, training did not make a difference across the three user profile types. A Fisher Exact test on a 3x2 contingency table revealed that the relative numbers of trained and untrained users at the three user profile types were the same (p-value= 0.1916) i.e., training and type are independent. t",
311
+ "cite_spans": [],
312
+ "ref_spans": [],
313
+ "eq_spans": [],
314
+ "section": "Analysis of Tool Usage Data",
315
+ "sec_num": "3.3"
316
+ },
317
+ {
318
+ "text": "As we began to analyze the data, we realized that we had only a record of use, but were not sure of what motivated the use patterns. Therefore, the team supplemented tool usage data with an or/-line survey and one-on-one observations to help us understand and analyze the user behavior. These additional data points motivated much of our work described in 3.5. Throughout the six month cycle we also collected and categorized user requirements.",
319
+ "cite_spans": [],
320
+ "ref_spans": [],
321
+ "eq_spans": [],
322
+ "section": "I I I I I",
323
+ "sec_num": null
324
+ },
325
+ {
326
+ "text": "Insight 1: For user acceptance, technology \u2022 performance must go beyond a good suthmary. It requires an understanding of the users\" work practices.",
327
+ "cite_spans": [],
328
+ "ref_spans": [],
329
+ "eq_spans": [],
330
+ "section": "Insights on Text Summarization \u2022 3.4.1 Technology Performance",
331
+ "sec_num": "3.4"
332
+ },
333
+ {
334
+ "text": "We learned that many factors in the task environment affect technology performance and user acceptance. Underpinning much work in summarization is the view that summaries are time savers. Mani et al. (1999) report that summaries at a low compression rate reduced decision making time by 40% (categorization) and 50% (adhoc) with relevance asessments almost as accurate as the full text. Although evaluators acknowledge the role of data presentation ( e.g., Firmin and Chrzanowski, 1999; Merlino and Maybury, 1999) , most studies use summary system output as the metric for evaluation. The question routinely posed seems to be \"Do summaries save the user time without loss in accuracy?\" However, we confirmed observations on the integration of summarization and retrieval technologies of McKeown et al. (1998) and learned that users are not likely to consider using summaries as a time saver unless the summaries are efficiently accessed. For our users a tight coupling of retrieval and summarization is pre-requisite. Batches automatically routed to the summary server available for user review were preferred over those requiring the user to upload files for summarization. Users pointed out that the uploading took more time then they were willing to spend.",
335
+ "cite_spans": [
336
+ {
337
+ "start": 200,
338
+ "end": 206,
339
+ "text": "(1999)",
340
+ "ref_id": null
341
+ },
342
+ {
343
+ "start": 457,
344
+ "end": 486,
345
+ "text": "Firmin and Chrzanowski, 1999;",
346
+ "ref_id": null
347
+ },
348
+ {
349
+ "start": 487,
350
+ "end": 513,
351
+ "text": "Merlino and Maybury, 1999)",
352
+ "ref_id": null
353
+ },
354
+ {
355
+ "start": 787,
356
+ "end": 808,
357
+ "text": "McKeown et al. (1998)",
358
+ "ref_id": null
359
+ }
360
+ ],
361
+ "ref_spans": [],
362
+ "eq_spans": [],
363
+ "section": "Insights on Text Summarization \u2022 3.4.1 Technology Performance",
364
+ "sec_num": "3.4"
365
+ },
366
+ {
367
+ "text": "User needs and their work practices often constrain how technology is applied. For example, McKeown et al. (1998) focused on the needs of physicians who want to examine only data for patients with similar characteristics to their own patients, and Wasson (1998) focused on the needs of news information customers who want to retrieve documents likely to be on-topic. We too II I I I I I l I I I I! I I I I I I I I I I I I I I I I I discovered that the user needs affect their interest in summarization technology, but from a more general perspective. Text REtrieval Conferences (e.g., Harman, 1996) have baselined system performance in terms of two types of tasks--routing or ad-h.oc. In our environment the ad-hoc users were less likely .to want a summary. They simply wanted an answer to a question and did not want to review summaries. If too many documents were retrieved, they would simply craft a more effective query.",
368
+ "cite_spans": [
369
+ {
370
+ "start": 92,
371
+ "end": 113,
372
+ "text": "McKeown et al. (1998)",
373
+ "ref_id": null
374
+ },
375
+ {
376
+ "start": 617,
377
+ "end": 630,
378
+ "text": "Harman, 1996)",
379
+ "ref_id": null
380
+ }
381
+ ],
382
+ "ref_spans": [
383
+ {
384
+ "start": 374,
385
+ "end": 463,
386
+ "text": "II I I I I I l I I I I! I I I I I I I I I I I I I I I I I",
387
+ "ref_id": null
388
+ }
389
+ ],
390
+ "eq_spans": [],
391
+ "section": "Insights on Text Summarization \u2022 3.4.1 Technology Performance",
392
+ "sec_num": "3.4"
393
+ },
394
+ {
395
+ "text": "Measuring the efficiency gains with a real population was quite problematic for technology in-use. We faced a number of challenges. Note that in experimental conditions, subjects perform, on full and reduced versions. One challenge was to baseline non-intrusively the current (nonsummary) full text review process. A second was to measure both accuracy and efficiency gains for users performing on the job. These challenges were further exacerbated by the fact that users in an indicative task primarily use a summary to eliminate most documents. They have developed effective skimming and scanning techniques and are already quite efficient at this task. .",
396
+ "cite_spans": [],
397
+ "ref_spans": [],
398
+ "eq_spans": [],
399
+ "section": "Insights on Text Summarization \u2022 3.4.1 Technology Performance",
400
+ "sec_num": "3.4"
401
+ },
402
+ {
403
+ "text": "In short, our experience showed that technologists deploying single document summarization capability are likely be constrained by the following factors: \u2022 \u2022 the ease of technology use \u00b0 the type of user information need \u2022 how effective the user performs the task without the technology.",
404
+ "cite_spans": [],
405
+ "ref_spans": [],
406
+ "eq_spans": [],
407
+ "section": "Insights on Text Summarization \u2022 3.4.1 Technology Performance",
408
+ "sec_num": "3.4"
409
+ },
410
+ {
411
+ "text": "Insight 2: Users require more than just a good summary. They require the right level of technology support,",
412
+ "cite_spans": [],
413
+ "ref_spans": [],
414
+ "eq_spans": [],
415
+ "section": "User Support",
416
+ "sec_num": "3.4.2"
417
+ },
418
+ {
419
+ "text": "Although the bulk of the research work still continues to focus on summarization algorithms, we now appreciate the importance of user support to text summarization use. The SRA software was quite robust and fast.",
420
+ "cite_spans": [],
421
+ "ref_spans": [],
422
+ "eq_spans": [],
423
+ "section": "User Support",
424
+ "sec_num": "3.4.2"
425
+ },
426
+ {
427
+ "text": "The task of judging relevance with a summary (even a machine generated one) instead of the full text version does not require a user to acquire a fundamentally different work practice. Yet our system was not apparently sufficiently supporting tool navigation. One of the reasons was that our on-line help was not developed from a user perspective and was rarely accessed. Another was that browse and view features did not maximize performance. For example, the interface employed a scroll bar for viewing summaries rather than more effective Next Or Previous buttons. Users frequently asked the same questions, but we were answering them individually. Terminology clear to the technologists was not understood by users. We also noticed that though there were requirements for improvement of summarization quality, many requirements were associated with these user support issues.",
428
+ "cite_spans": [],
429
+ "ref_spans": [],
430
+ "eq_spans": [],
431
+ "section": "53",
432
+ "sec_num": null
433
+ },
434
+ {
435
+ "text": "One of the more unexpected findings was the under-utilization of tailoring features. The system offered the user many ways to tailor summaries to their individual needs, yet most users simply relied on default set-ups. Observations revealed little understanding of the configurable features and how these features corresponded to user needs to say nothing of how the algorithm worked. Some users did not understand the difference between the two summary types or sorting effects with query-based summary selection. Non-traditional summary types--indicators and named entities--did not appear to help render a relevance judgment. We came to understand that just because technologists sees the value to these features does not mean that a user will or that the features, in fact, have utility.",
436
+ "cite_spans": [],
437
+ "ref_spans": [],
438
+ "eq_spans": [],
439
+ "section": "53",
440
+ "sec_num": null
441
+ },
442
+ {
443
+ "text": "On technology performance, we learned that \u2022 seamless integration with an IR system was preferred \u2022 users with static queries were more likely customers for a summary service \u2022 gains in efficiency are hard to measure for a task already efficiently performed in a real-world situations. In response, we have established a summary service in which retrieval results are directly routed to our summary server and await the user. We plan to integrate the summarization tool into the IR system. (Uploading batches and then submission to the server is still an option.) We also abandoned the naive idea that data overload equates to summarization requirements and realized that the technology does not apply to all users. We have more effectively selected users by profiling characteristics of active ,users (e.g. daily document viewing work practice, document volume, static query use, etc.) and have prioritized deployment to that population which could most benefit from it.",
444
+ "cite_spans": [],
445
+ "ref_spans": [],
446
+ "eq_spans": [],
447
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
448
+ "sec_num": "3.5"
449
+ },
450
+ {
451
+ "text": "In order to demonstrate tool ~summarization efficiency, we needed to :baseline full-text review. We considered, but . rejected a number of options--user self-report and timing, observations, and even the \u2022 creation of a viewing tool to monitor and document full text review. Instead, we ba, selined full text scanning through information retrieval logs for a subgroup of users by tracking per document viewing time for a month period. These users submit the same queries daily and view their documents through the IR system browser. For the heaviest system users, 75% of the documents were viewed in under 20 seconds per document, but note that users vary widely with a tendency to spend a much longer browse time on a relatively small number of documents. We then identified a subgroup of these users and attempted to deploy the summarizer to this baseline group to compare scanning time required over a similar time frame. We are currently analyzing these data.",
452
+ "cite_spans": [],
453
+ "ref_spans": [],
454
+ "eq_spans": [],
455
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
456
+ "sec_num": "3.5"
457
+ },
458
+ {
459
+ "text": "System in a work environment is considered a good indicator of tool utility, but we wanted some gauge of summary quality and also anticipated user concerns about an emerging technology like automatic text summarization.",
460
+ "cite_spans": [],
461
+ "ref_spans": [],
462
+ "eq_spans": [],
463
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
464
+ "sec_num": "3.5"
465
+ },
466
+ {
467
+ "text": "We compromised and selected a method to measure the effectiveness of our summaries that serves a dual purpose--our users gain confidence in the utility of the summaries and we can collect and measure the effectiveness of the generic summaries for some of our users on their data.",
468
+ "cite_spans": [],
469
+ "ref_spans": [],
470
+ "eq_spans": [],
471
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
472
+ "sec_num": "3.5"
473
+ },
474
+ {
475
+ "text": "We initially piloted and now have incorporated a data collection procedure into our software. In our on-line training, we guide users to explore tool capabilities through a series of experiments or tasks. In the first of these tasks, a user is asked to submit a batch for summarization, then for each of five to seven user-selected summaries to record answers to the question:",
476
+ "cite_spans": [],
477
+ "ref_spans": [],
478
+ "eq_spans": [],
479
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
480
+ "sec_num": "3.5"
481
+ },
482
+ {
483
+ "text": "\"Is this document likely to be relevant to me?\"(based on the summary) ~.yes no Then, the user was directed to open the original documents for each of the summaries and record answers to the question:",
484
+ "cite_spans": [],
485
+ "ref_spans": [],
486
+ "eq_spans": [],
487
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
488
+ "sec_num": "3.5"
489
+ },
490
+ {
491
+ "text": "\"Is the document relevant to me?\" (after reading the original text) yes no In a prototype collection effort, we asked users to review the first ten documents, but in follow-on interviews the users recommended review of fewer documents. We understand the limits this places on interpreting our data. Also, the on-line training is optional so we are not able to collect these data for all our users uniformly.",
492
+ "cite_spans": [],
493
+ "ref_spans": [],
494
+ "eq_spans": [],
495
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
496
+ "sec_num": "3.5"
497
+ },
498
+ {
499
+ "text": "Most of the users tested exhibited both high recall and precision, with six users judging relevance correctly for all documents (in Table 2 below). The False Negative error was high for only one user, while the majority of-the users exhibited no False Negative I I I I I I I I I I I I I I I I I I I Table 2 errors, a worse error to commit than wasting time viewing irrelevant data, False Positive. Across all the users, 79% of all relevant documents and 81% of the irrelevant documents were accurately categorized by examination of the summary.",
500
+ "cite_spans": [],
501
+ "ref_spans": [
502
+ {
503
+ "start": 132,
504
+ "end": 139,
505
+ "text": "Table 2",
506
+ "ref_id": null
507
+ },
508
+ {
509
+ "start": 252,
510
+ "end": 328,
511
+ "text": "Negative I I I I I I I I I I I I I I I I I I I Table 2",
512
+ "ref_id": null
513
+ }
514
+ ],
515
+ "eq_spans": [],
516
+ "section": "Technology-related Modifications 3.5.1 User-centered Changes to Technology Work Practices",
517
+ "sec_num": "3.5"
518
+ },
519
+ {
520
+ "text": "On user support, we learned that User our system did not effectively support user tool navigation \u2022 our users did not fully exploit system tailorable features In response, we addressed user support needs from three different angles, each of which we discuss below: incorporation of Electronic Performance Support Systems, design and implementation of procedural on-line training and guided discovery training, and user analysis of summary quality. Electronic Performance Support Systems (EPSS) is a widely acknowledged strategy for on the job performance support. Defined as \"an optimized body of coordinated on-line methods and resources that enable and maintain a person's or an organization's performance,\" EPSS interventions range from simple help systems to intelligent wizard-types of support. (Villachica and Stone, 1999; Gery 1991) . We elected to incorporate EPSS rather than cl~issroom instruction. Based on an analysis of tool usage data, user requirements, and user observations, experts in interface design and technology performance support prototyped an EPSS enhanced interface. Active system users reviewed these changes before implementation. The on-line perfomance support available at all times includes system feature procedures, a term glossary, FAQ, and a new interface design.",
521
+ "cite_spans": [
522
+ {
523
+ "start": 800,
524
+ "end": 828,
525
+ "text": "(Villachica and Stone, 1999;",
526
+ "ref_id": null
527
+ },
528
+ {
529
+ "start": 829,
530
+ "end": 839,
531
+ "text": "Gery 1991)",
532
+ "ref_id": null
533
+ }
534
+ ],
535
+ "ref_spans": [],
536
+ "eq_spans": [],
537
+ "section": "User-centered Changes in Support",
538
+ "sec_num": "3.5.2"
539
+ },
540
+ {
541
+ "text": "With incorporation of the EPSS, we also addressed the under-utilization of the configurable features. Although simple technologies with few options such as traditional telephones do not require conceptual system understanding for effective use, more complex systems with multiple options are often underutilized when supported with procedural training alone. We decided to incorporate both procedural training in a \"Getting Started\" tutorial and conceptual training in \"The Lab.\" In \"Getting Started\", users learn basic system actions (e.g., creating set-ups, submitting batches for summarization, viewing summaries). \"The Labi', on the other hand, supports guided discovery training in which users explore the system through a series of experiments in which they use their own data against various tool options and record their observations. Given our own experience with underutilization and research reporting difficulties with unguided exploratory learning (Hsu et al., 1993; Tuovinen and Sweller, 1999) , we built on the work of de Mul and Van Oostendorf (1996) and Van Oostendorf and de Mul (1999) and their finding that task-oriented exploratory support leads to more effective learning of computer systems. We created a series of experiments that the user conducts to discover how the summarization technology can best meet their needs. For example, users are directed to change summary length and to determine for themselves how the variation. affects their ability to judge relevance using their data.",
542
+ "cite_spans": [
543
+ {
544
+ "start": 961,
545
+ "end": 979,
546
+ "text": "(Hsu et al., 1993;",
547
+ "ref_id": null
548
+ },
549
+ {
550
+ "start": 980,
551
+ "end": 1007,
552
+ "text": "Tuovinen and Sweller, 1999)",
553
+ "ref_id": null
554
+ },
555
+ {
556
+ "start": 1037,
557
+ "end": 1066,
558
+ "text": "Mul and Van Oostendorf (1996)",
559
+ "ref_id": null
560
+ }
561
+ ],
562
+ "ref_spans": [],
563
+ "eq_spans": [],
564
+ "section": "User-centered Changes in Support",
565
+ "sec_num": "3.5.2"
566
+ },
567
+ {
568
+ "text": "In February, we conducted a study of. two groups, one with the EPSS and \"Getting Starting\" Tutorial and a second with the same level of support and additionally \"The Lab\". Earlier work by Kieras and Bovair (1984) compared straight procedural training with conceptual training and showed that the conceptually trained users made more efficient use of system features. The goal of our study was to determine just what level of training support the summarization technology requires for effective use. Through surveys, we planned to collect attitudes toward the tool and training and through web logs, tool usage data and option trials. We also planned to assess the users' understanding of the features and benefits of the tool. We are currently analyzing these data.",
569
+ "cite_spans": [
570
+ {
571
+ "start": 188,
572
+ "end": 212,
573
+ "text": "Kieras and Bovair (1984)",
574
+ "ref_id": null
575
+ }
576
+ ],
577
+ "ref_spans": [],
578
+ "eq_spans": [],
579
+ "section": "User-centered Changes in Support",
580
+ "sec_num": "3.5.2"
581
+ },
582
+ {
583
+ "text": "In addition to the EPSS and the on-line training, we developed a method for taking into account user assessment of our summary quality in a systematic way. User feedback on summarization quality during the beta test was far too general and uneven. We recruited two users to join our technology team and become informed rather than the typical naive users. They designed an analysis tool through which they database problematic machine generated summaries and assign them to errortype categories. Though we expected users to address issues like summary coherence, they have identified categories like the following:",
584
+ "cite_spans": [],
585
+ "ref_spans": [],
586
+ "eq_spans": [],
587
+ "section": "User-centered Changes in Support",
588
+ "sec_num": "3.5.2"
589
+ },
590
+ {
591
+ "text": "\u2022 sentence identification errors \u2022 formatting errors \u2022 sentence extraction due to the \"rare\" word phenomena \u2022 sentence extraction in \"long\" documents \u2022 failure to identify abstracts when available We expect that this approach can complement a technology-driven one by helping us prioritize changes we need based on methodical data collection and analysis.",
592
+ "cite_spans": [],
593
+ "ref_spans": [],
594
+ "eq_spans": [],
595
+ "section": "User-centered Changes in Support",
596
+ "sec_num": "3.5.2"
597
+ },
598
+ {
599
+ "text": "Our experience with text summarization technology in-use has been quite sobering. In this paper, we have shown how beta testing an emerging technology has helped us to understand that for technology to enhance job performance many factors besides the algorithm need to be addressed. Aone, C., Gorlinsky, J. and Okurowski, M.E. 1997 ",
600
+ "cite_spans": [
601
+ {
602
+ "start": 283,
603
+ "end": 331,
604
+ "text": "Aone, C., Gorlinsky, J. and Okurowski, M.E. 1997",
605
+ "ref_id": null
606
+ }
607
+ ],
608
+ "ref_spans": [],
609
+ "eq_spans": [],
610
+ "section": "Summary",
611
+ "sec_num": "4.0"
612
+ }
613
+ ],
614
+ "back_matter": [
615
+ {
616
+ "text": "from the 1998 AAAI Spring Symposium Technical Report SS-98-06), pages 109-118.Brandow, R., Mitze, K. and Rau, L. 1994. Automatic condensation of electronic publications by sentence selection.Information Processing and Management, 31(5) :675-685.Cowie, J., Mahesh, K., Nirenburg, S. and. Zajac, R., 1998 ",
617
+ "cite_spans": [
618
+ {
619
+ "start": 91,
620
+ "end": 118,
621
+ "text": "Mitze, K. and Rau, L. 1994.",
622
+ "ref_id": null
623
+ },
624
+ {
625
+ "start": 203,
626
+ "end": 229,
627
+ "text": "Processing and Management,",
628
+ "ref_id": null
629
+ },
630
+ {
631
+ "start": 230,
632
+ "end": 235,
633
+ "text": "31(5)",
634
+ "ref_id": null
635
+ },
636
+ {
637
+ "start": 256,
638
+ "end": 302,
639
+ "text": "Mahesh, K., Nirenburg, S. and. Zajac, R., 1998",
640
+ "ref_id": null
641
+ }
642
+ ],
643
+ "ref_spans": [],
644
+ "eq_spans": [],
645
+ "section": "annex",
646
+ "sec_num": null
647
+ }
648
+ ],
649
+ "bib_entries": {
650
+ "BIBREF0": {
651
+ "ref_id": "b0",
652
+ "title": "Trainable scalable summarization using robust NLP and machine learning",
653
+ "authors": [
654
+ {
655
+ "first": "C",
656
+ "middle": [],
657
+ "last": "Aone",
658
+ "suffix": ""
659
+ },
660
+ {
661
+ "first": "J",
662
+ "middle": [],
663
+ "last": "Gorlinsky",
664
+ "suffix": ""
665
+ },
666
+ {
667
+ "first": "M",
668
+ "middle": [
669
+ "E"
670
+ ],
671
+ "last": "Okurowski",
672
+ "suffix": ""
673
+ }
674
+ ],
675
+ "year": 1998,
676
+ "venue": "Coling-A CL 98",
677
+ "volume": "",
678
+ "issue": "",
679
+ "pages": "62--66",
680
+ "other_ids": {},
681
+ "num": null,
682
+ "urls": [],
683
+ "raw_text": "Aone, C., Gorlinsky, J. and Okurowski, M.E. 1998. Trainable scalable summarization using robust NLP and machine learning. In Coling-A CL 98. Montreal, Quebec, Canada, pages 62-66.",
684
+ "links": null
685
+ },
686
+ "BIBREF1": {
687
+ "ref_id": "b1",
688
+ "title": "A trainable summarizer with knowledge acquired from robust NLP techniques",
689
+ "authors": [
690
+ {
691
+ "first": "C",
692
+ "middle": [],
693
+ "last": "Aone",
694
+ "suffix": ""
695
+ },
696
+ {
697
+ "first": "J",
698
+ "middle": [],
699
+ "last": "Gorlinsky",
700
+ "suffix": ""
701
+ },
702
+ {
703
+ "first": "B",
704
+ "middle": [],
705
+ "last": "Larsen",
706
+ "suffix": ""
707
+ },
708
+ {
709
+ "first": "M",
710
+ "middle": [
711
+ "E"
712
+ ],
713
+ "last": "Okurowski",
714
+ "suffix": ""
715
+ }
716
+ ],
717
+ "year": 1999,
718
+ "venue": "Advances in Automatic Text Summarization",
719
+ "volume": "",
720
+ "issue": "",
721
+ "pages": "71--80",
722
+ "other_ids": {},
723
+ "num": null,
724
+ "urls": [],
725
+ "raw_text": "Aone, C., Gorlinsky, J., Larsen, B. and Okurowski, M.E. 1999. A trainable summarizer with knowledge acquired from robust NLP techniques. In Mani, I. and Maybury, M. (eds.), Advances in Automatic Text Summarization. pages 71- 80, Cambridge, Massachusetts: MIT Press.",
726
+ "links": null
727
+ },
728
+ "BIBREF2": {
729
+ "ref_id": "b2",
730
+ "title": "Dynamic presentation of document content for rapid on-line skimming. Intelligent Text Summarization. (Papers Summarization",
731
+ "authors": [
732
+ {
733
+ "first": "B",
734
+ "middle": [],
735
+ "last": "Boguraev",
736
+ "suffix": ""
737
+ },
738
+ {
739
+ "first": "C",
740
+ "middle": [],
741
+ "last": "Kennedy",
742
+ "suffix": ""
743
+ },
744
+ {
745
+ "first": "R",
746
+ "middle": [],
747
+ "last": "Bellamey",
748
+ "suffix": ""
749
+ },
750
+ {
751
+ "first": "S",
752
+ "middle": [],
753
+ "last": "Brawer",
754
+ "suffix": ""
755
+ },
756
+ {
757
+ "first": "Y",
758
+ "middle": [
759
+ "Y"
760
+ ],
761
+ "last": "Wong",
762
+ "suffix": ""
763
+ },
764
+ {
765
+ "first": "J",
766
+ "middle": [],
767
+ "last": "Swartz",
768
+ "suffix": ""
769
+ }
770
+ ],
771
+ "year": 1998,
772
+ "venue": "",
773
+ "volume": "",
774
+ "issue": "",
775
+ "pages": "391--401",
776
+ "other_ids": {},
777
+ "num": null,
778
+ "urls": [],
779
+ "raw_text": "Boguraev, B., Kennedy, C., Bellamey, R., Brawer, S., Wong, Y.Y. and Swartz, J. 1998. Dynamic presentation of document content for rapid on-line skimming. Intelligent Text Summarization. (Papers Summarization. pages 391-401, Cambridge, Massachusetts: MIT Press.",
780
+ "links": null
781
+ },
782
+ "BIBREF3": {
783
+ "ref_id": "b3",
784
+ "title": "The effects and limitations of automated text condensing on reading comprehension performance",
785
+ "authors": [
786
+ {
787
+ "first": "A",
788
+ "middle": [],
789
+ "last": "Morris",
790
+ "suffix": ""
791
+ },
792
+ {
793
+ "first": "G",
794
+ "middle": [],
795
+ "last": "Kasper",
796
+ "suffix": ""
797
+ },
798
+ {
799
+ "first": "Adams",
800
+ "middle": [],
801
+ "last": "",
802
+ "suffix": ""
803
+ },
804
+ {
805
+ "first": "D",
806
+ "middle": [],
807
+ "last": "",
808
+ "suffix": ""
809
+ }
810
+ ],
811
+ "year": 1999,
812
+ "venue": "Advances in Automatic Text Summarization",
813
+ "volume": "",
814
+ "issue": "",
815
+ "pages": "305--323",
816
+ "other_ids": {},
817
+ "num": null,
818
+ "urls": [],
819
+ "raw_text": "Morris, A., Kasper, G., and Adams, D. 1999. The effects and limitations of automated text condensing on reading comprehension performance. In Mani, I. and Maybury, M. (eds.), Advances in Automatic Text Summarization. pages 305-323, Cambridge, Massachusetts: MIT Press.",
820
+ "links": null
821
+ },
822
+ "BIBREF4": {
823
+ "ref_id": "b4",
824
+ "title": "Constructing literature abstracts by computer: Techniques and prospects. Information Processing and~ Management",
825
+ "authors": [
826
+ {
827
+ "first": "C",
828
+ "middle": [
829
+ "D"
830
+ ],
831
+ "last": "Paice",
832
+ "suffix": ""
833
+ }
834
+ ],
835
+ "year": 1990,
836
+ "venue": "",
837
+ "volume": "26",
838
+ "issue": "",
839
+ "pages": "171--186",
840
+ "other_ids": {},
841
+ "num": null,
842
+ "urls": [],
843
+ "raw_text": "Paice, C.D., 1990. Constructing literature abstracts by computer: Techniques and prospects. Information Processing and~ Management, 26(1): 171-186.",
844
+ "links": null
845
+ },
846
+ "BIBREF5": {
847
+ "ref_id": "b5",
848
+ "title": "What might be in a summary?",
849
+ "authors": [
850
+ {
851
+ "first": "K",
852
+ "middle": [],
853
+ "last": "Sparck-Jones",
854
+ "suffix": ""
855
+ }
856
+ ],
857
+ "year": 1993,
858
+ "venue": "Information Retrieval 93: Von der Modellierung zur Anwendung",
859
+ "volume": "",
860
+ "issue": "",
861
+ "pages": "9--26",
862
+ "other_ids": {},
863
+ "num": null,
864
+ "urls": [],
865
+ "raw_text": "Sparck-Jones, K. 1993. What might be in a summary? In Information Retrieval 93: Von der Modellierung zur Anwendung, pages 9-26.",
866
+ "links": null
867
+ },
868
+ "BIBREF6": {
869
+ "ref_id": "b6",
870
+ "title": "Automatic text structuring and summarization",
871
+ "authors": [
872
+ {
873
+ "first": "G",
874
+ "middle": [],
875
+ "last": "Salton",
876
+ "suffix": ""
877
+ },
878
+ {
879
+ "first": "A",
880
+ "middle": [],
881
+ "last": "Singhal",
882
+ "suffix": ""
883
+ },
884
+ {
885
+ "first": "M",
886
+ "middle": [],
887
+ "last": "Mitra",
888
+ "suffix": ""
889
+ },
890
+ {
891
+ "first": "C",
892
+ "middle": [],
893
+ "last": "Buckely",
894
+ "suffix": ""
895
+ }
896
+ ],
897
+ "year": 1999,
898
+ "venue": "Advances in Automatic Text Summarization",
899
+ "volume": "",
900
+ "issue": "",
901
+ "pages": "",
902
+ "other_ids": {},
903
+ "num": null,
904
+ "urls": [],
905
+ "raw_text": "Salton, G., Singhal, A., Mitra, M. and Buckely, C. 1999. Automatic text structuring and summarization. In Mani, I. and Maybury, M. (eds.), Advances in Automatic Text Summarization. pages 342- : 355, Cambridge, Massachusetts: MIT Press.",
906
+ "links": null
907
+ }
908
+ },
909
+ "ref_entries": {
910
+ "TABREF0": {
911
+ "text": "",
912
+ "html": null,
913
+ "num": null,
914
+ "content": "<table><tr><td colspan=\"2\">Questions</td><td>Data</td><td>Finding</td></tr><tr><td>Were</td><td colspan=\"3\">documents number of sum-Users routinely accessed our system to read</td></tr><tr><td colspan=\"2\">summarized?</td><td>mary events</td><td>machine generated summaries.</td></tr><tr><td colspan=\"2\">Did the users select</td><td colspan=\"2\">type of summary</td></tr><tr><td colspan=\"2\">generic or query-</td><td/></tr><tr><td colspan=\"2\">based summaries?</td><td/></tr><tr><td colspan=\"2\">Is there a difference</td><td colspan=\"2\">number of sen-</td></tr><tr><td>among</td><td>summary</td><td colspan=\"2\">tences viewed by</td></tr><tr><td colspan=\"2\">t3~pes for the num-</td><td>summary</td><td>types</td></tr><tr><td colspan=\"2\">ber of sentences</td><td>(generic,</td><td>query-</td></tr><tr><td>viewed?</td><td/><td>based, lead)</td></tr><tr><td colspan=\"2\">Do users choose to</td><td colspan=\"2\">indicator/entity</td></tr><tr><td colspan=\"2\">use indicators and</td><td>preferences</td><td>for</td></tr><tr><td colspan=\"2\">entities when tailor-</td><td colspan=\"2\">non-default set-ups</td></tr><tr><td colspan=\"2\">ing browsing capa-</td><td>(on or off)</td></tr><tr><td>bility?</td><td/><td/></tr></table>",
915
+ "type_str": "table"
916
+ },
917
+ "TABREF1": {
918
+ "text": "",
919
+ "html": null,
920
+ "num": null,
921
+ "content": "<table><tr><td/><td/><td/><td>i</td></tr><tr><td/><td/><td/><td>i</td></tr><tr><td colspan=\"3\">Questions</td><td>Data</td><td>Finding</td></tr><tr><td colspan=\"3\">Does training make</td><td>tool</td></tr><tr><td colspan=\"3\">a difference on sys-</td></tr><tr><td colspan=\"3\">tem use or user pro-</td></tr><tr><td colspan=\"3\">file type? Users</td></tr><tr><td>were</td><td colspan=\"2\">categorized</td></tr><tr><td colspan=\"3\">(advanced, interme-</td></tr><tr><td colspan=\"3\">diate, novice) on</td></tr><tr><td colspan=\"3\">the basis of usage</td></tr><tr><td colspan=\"3\">features with Harti-</td></tr><tr><td>gan's</td><td/><td>K-Means</td></tr><tr><td colspan=\"2\">clustering</td><td>algo-</td></tr><tr><td>rithm.</td><td/><td/></tr></table>",
922
+ "type_str": "table"
923
+ }
924
+ }
925
+ }
926
+ }
Full_text_JSON/prefixW/json/W00/W00-0407.json ADDED
@@ -0,0 +1,996 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0407",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:30.829108Z"
6
+ },
7
+ "title": "Evaluation of Phrase-Representation Summarization based on Information Retrieval Task",
8
+ "authors": [
9
+ {
10
+ "first": "Mamiko",
11
+ "middle": [],
12
+ "last": "Oka",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "",
16
+ "institution": "UEDA Industry Solutions Company",
17
+ "location": {
18
+ "settlement": "Yoshihiro"
19
+ }
20
+ },
21
+ "email": "oka.mamiko@fujixerox.co.jp"
22
+ }
23
+ ],
24
+ "year": "",
25
+ "venue": null,
26
+ "identifiers": {},
27
+ "abstract": "We have developed an improved task-based evaluation method of summarization, the accuracy of which is increased by specifying the details of the task including background stories, and by assigning ten subjects per summary sample. The method also serves precision/recall pairs for a variety of situations by introducing multiple levels of relevance assessment. The method is applied to prove phrase-represented summary is most effective to select relevant documents from information retrieval results.",
28
+ "pdf_parse": {
29
+ "paper_id": "W00-0407",
30
+ "_pdf_hash": "",
31
+ "abstract": [
32
+ {
33
+ "text": "We have developed an improved task-based evaluation method of summarization, the accuracy of which is increased by specifying the details of the task including background stories, and by assigning ten subjects per summary sample. The method also serves precision/recall pairs for a variety of situations by introducing multiple levels of relevance assessment. The method is applied to prove phrase-represented summary is most effective to select relevant documents from information retrieval results.",
34
+ "cite_spans": [],
35
+ "ref_spans": [],
36
+ "eq_spans": [],
37
+ "section": "Abstract",
38
+ "sec_num": null
39
+ }
40
+ ],
41
+ "body_text": [
42
+ {
43
+ "text": "Summaries are often used to select relevant documents from information retrieval results. The goal of summarization for such \"indicative\" use is to serve fast and accurate judgement. We have developed the concept of the \"at-a-glance\" summary, and its realization in the Japanese language -\"phrase-representation summarizatiola\" -to achieve this goal (Ueda, et al. 2000) . We have conducted an evaluation experiment to verify the effectiveness of this summarization method.",
44
+ "cite_spans": [
45
+ {
46
+ "start": 350,
47
+ "end": 369,
48
+ "text": "(Ueda, et al. 2000)",
49
+ "ref_id": "BIBREF6"
50
+ }
51
+ ],
52
+ "ref_spans": [],
53
+ "eq_spans": [],
54
+ "section": "Introduction",
55
+ "sec_num": null
56
+ },
57
+ {
58
+ "text": "There are two strategies for evaluating summarization systems: intrinsic and extrinsic (Jing, et al. 1998) . Intrinsic methods measure a system's quality mainly by comparing the system's output with an \"ideal\" summary. Extrinsic methods measure a system's performance in a particular task. The aim of the phrase-representation summarization method is fast and accurate judgement in selecting documents in information retrieval. Thus, we adopted a task-based method to evaluate whether the goal was achieved. Task-based evaluation has recently drawn the attention in thq summarization field, because the assumption that there is only one \"ideal\" summary is considered to be incorrect, and some experiments on information retrieval were reported (Jing, et al. 1998) (Mani, et al. 1998) (Mochizuki and Okunura 1999) . However, there is no standard evaluation method, and we consider that there are some shortcomings in the existing methods. Thus, we have developed an improved evaluation method and carried out a relatively large experiment.",
59
+ "cite_spans": [
60
+ {
61
+ "start": 87,
62
+ "end": 106,
63
+ "text": "(Jing, et al. 1998)",
64
+ "ref_id": "BIBREF1"
65
+ },
66
+ {
67
+ "start": 744,
68
+ "end": 763,
69
+ "text": "(Jing, et al. 1998)",
70
+ "ref_id": "BIBREF1"
71
+ },
72
+ {
73
+ "start": 764,
74
+ "end": 783,
75
+ "text": "(Mani, et al. 1998)",
76
+ "ref_id": "BIBREF2"
77
+ },
78
+ {
79
+ "start": 784,
80
+ "end": 812,
81
+ "text": "(Mochizuki and Okunura 1999)",
82
+ "ref_id": null
83
+ }
84
+ ],
85
+ "ref_spans": [],
86
+ "eq_spans": [],
87
+ "section": "Introduction",
88
+ "sec_num": null
89
+ },
90
+ {
91
+ "text": "In this paper, we first give an overview of the phrase-representation summarization method. We then consider the evaluation method and show the result of an experiment based on the improved method to demonstrate the effectiveness of phrase-representation summarization.",
92
+ "cite_spans": [],
93
+ "ref_spans": [],
94
+ "eq_spans": [],
95
+ "section": "Introduction",
96
+ "sec_num": null
97
+ },
98
+ {
99
+ "text": "Most automatic summarization systems adopt the \"sentence extraction\" method, which gives a score to every sentence based on such characteristics as the frequency of a word or the position where it appears, and selects sentences with high scores. In such a way, long and complex sentences tend to be extracted. However, a long and complex sentence is difficult to read and understand, and therefore it is not a suitable unit to compose a summary for use in selecting documents.",
100
+ "cite_spans": [],
101
+ "ref_spans": [],
102
+ "eq_spans": [],
103
+ "section": "Phrase-Representation Summarization",
104
+ "sec_num": "1"
105
+ },
106
+ {
107
+ "text": "To avoid the burden of reading such long and complex sentences, we have developed the phrase-representation summarization method, which represents the outline of a document by a series of short and simple expressions (\"phrases\") that contain key concepts. We use the word \"phrase\" to representthe simplicity characteristic I in a word.",
108
+ "cite_spans": [],
109
+ "ref_spans": [],
110
+ "eq_spans": [],
111
+ "section": "Phrase-Representation Summarization",
112
+ "sec_num": "1"
113
+ },
114
+ {
115
+ "text": "The phrase-represented summary has the following characteristics.",
116
+ "cite_spans": [],
117
+ "ref_spans": [],
118
+ "eq_spans": [],
119
+ "section": "Phrase-Representation Summarization",
120
+ "sec_num": "1"
121
+ },
122
+ {
123
+ "text": "(1) At-a-glance comprehension Because each unit is short and simple, the user is able to grasp the meaning at a glance.",
124
+ "cite_spans": [],
125
+ "ref_spans": [],
126
+ "eq_spans": [],
127
+ "section": "Phrase-Representation Summarization",
128
+ "sec_num": "1"
129
+ },
130
+ {
131
+ "text": "(2) Adequate informativeness Unlike extracted sentences, phrases created by this method are not accompanied by information unnecessary for relevance judgement.",
132
+ "cite_spans": [],
133
+ "ref_spans": [],
134
+ "eq_spans": [],
135
+ "section": "Phrase-Representation Summarization",
136
+ "sec_num": "1"
137
+ },
138
+ {
139
+ "text": "(3) Wide coverage of topics Units composing a summary are relatively . short, and point various positions of the original text. Therefore, even a generic summary includes various topics written in a J document. ,~ phrase-represented summary is generated as follows.",
140
+ "cite_spans": [],
141
+ "ref_spans": [],
142
+ "eq_spans": [],
143
+ "section": "Phrase-Representation Summarization",
144
+ "sec_num": "1"
145
+ },
146
+ {
147
+ "text": "1. Syntactic analysis to extract the relationships between words 2. Selection of an important relation (two word sequences connected by an arc) as a \"core\" 3. Addition of relations necessary for the unity of the phrase's meaning (e.g., essential cases) 4. Generation of the surface phrase from the selected relations An important relation is selected by considering both the importance of a word and that of a relation between words. For example, predicate-argument relations are considered important and noun-modifier relations are given low importance scores. Steps [2] to [4] are repeated until specified amount of phrases are obtained. Before selecting a new \"core,\" the stores for the already selected words are decreased to suppress overuse of the same words. Fig. 1 shows a sample summary created from a news article 2 put on WWW. The underlined words constitute the core relation of each phrase.",
148
+ "cite_spans": [],
149
+ "ref_spans": [
150
+ {
151
+ "start": 766,
152
+ "end": 772,
153
+ "text": "Fig. 1",
154
+ "ref_id": null
155
+ }
156
+ ],
157
+ "eq_spans": [],
158
+ "section": "Phrase-Representation Summarization",
159
+ "sec_num": "1"
160
+ },
161
+ {
162
+ "text": "The word \"phrase\" as used here is not used in the linguistic sense, but an expression for \"short\" and \"simple.\" In Japanese, there is no rigid linguistic distinction between a \"phrase\" mad a \"clause.\" 2 The original text in Japanese and its outline in English can be seen in the following URL. http://www, fuiixerox. In this experiment, we compare the effectiveness of phrase-represented summaries to summaries created by other commonly used summarization methods. From the viewpoint of the phraserepresented summary, we focus the comparison of the units that constitute summaries. The units to be compared with phrases are sentences (created by the sentence extraction method) and words (by the keyword enumeration method). We also compare \"leading fixed-length characters,\" which are often used as substitutes for summaries by WWW search engines. The generation method for each summary is described as follows.",
163
+ "cite_spans": [],
164
+ "ref_spans": [],
165
+ "eq_spans": [],
166
+ "section": "Phrase-Representation Summarization",
167
+ "sec_num": "1"
168
+ },
169
+ {
170
+ "text": "(A) Leading fixed-length characters: extract the first 80 characters of the document body.",
171
+ "cite_spans": [],
172
+ "ref_spans": [],
173
+ "eq_spans": [],
174
+ "section": "Phrase-Representation Summarization",
175
+ "sec_num": "1"
176
+ },
177
+ {
178
+ "text": "(B) Sentence extraction summarization: select important sentences from a document. The importance score of each sentence is calculated from the simple sum of the importance scores of the words in a sentence (Zechner 1996 (Mani, et al. ' ') Accuracy 1998). Hand (1997) details the proposed taskbased evaluation under TIPSTER. Jing, et al. (1998) describe how various parameters affect the evaluation result through a relatively large task-based experiment. Evaluation conferences like SUMMAC are not yet held for Japanese summarization systems 4. Mochizuki and Okumura (1999) applied the SUMMAC methodology to Japanese summarization methods for the first time. Most previous experiments are concerned with SUMMAC, accordingly the methods resemble each other.",
179
+ "cite_spans": [
180
+ {
181
+ "start": 207,
182
+ "end": 220,
183
+ "text": "(Zechner 1996",
184
+ "ref_id": "BIBREF7"
185
+ },
186
+ {
187
+ "start": 221,
188
+ "end": 236,
189
+ "text": "(Mani, et al. '",
190
+ "ref_id": null
191
+ },
192
+ {
193
+ "start": 256,
194
+ "end": 267,
195
+ "text": "Hand (1997)",
196
+ "ref_id": "BIBREF0"
197
+ },
198
+ {
199
+ "start": 325,
200
+ "end": 344,
201
+ "text": "Jing, et al. (1998)",
202
+ "ref_id": "BIBREF1"
203
+ },
204
+ {
205
+ "start": 546,
206
+ "end": 574,
207
+ "text": "Mochizuki and Okumura (1999)",
208
+ "ref_id": "BIBREF4"
209
+ }
210
+ ],
211
+ "ref_spans": [],
212
+ "eq_spans": [],
213
+ "section": "Phrase-Representation Summarization",
214
+ "sec_num": "1"
215
+ },
216
+ {
217
+ "text": "~Fhe framework of task-based evaluation on information retrieval is shown in Fig. 2 .",
218
+ "cite_spans": [],
219
+ "ref_spans": [
220
+ {
221
+ "start": 77,
222
+ "end": 83,
223
+ "text": "Fig. 2",
224
+ "ref_id": null
225
+ }
226
+ ],
227
+ "eq_spans": [],
228
+ "section": "Framework of Evaluation",
229
+ "sec_num": "2.3"
230
+ },
231
+ {
232
+ "text": "Task-based evaluation in general consists of the following three steps:",
233
+ "cite_spans": [],
234
+ "ref_spans": [],
235
+ "eq_spans": [],
236
+ "section": "Framework of Evaluation",
237
+ "sec_num": "2.3"
238
+ },
239
+ {
240
+ "text": "(l) Data preparation: Assume an information need, create a query for the information need, and prepare simulated search results with different types of summaries. (2) Relevance assessment: Using the summades, human subjects assess the relevance of the search results to the assumed information needs. (3) Measuring performance: Measure the accuracy of the subjects' assessment by comparing the subjects' judgement with the correct relevance. The assessment process is also timed.",
241
+ "cite_spans": [],
242
+ "ref_spans": [],
243
+ "eq_spans": [],
244
+ "section": "Framework of Evaluation",
245
+ "sec_num": "2.3"
246
+ },
247
+ {
248
+ "text": "4 It is planning to be held in 2000. Further information is in the following URL. http://www.rd.nacsis.acAp/-ntcadm/workshop/ann2pen.html",
249
+ "cite_spans": [],
250
+ "ref_spans": [],
251
+ "eq_spans": [],
252
+ "section": "Framework of Evaluation",
253
+ "sec_num": "2.3"
254
+ },
255
+ {
256
+ "text": "We designed our evaluation method through detailed examination of previous work. The consideration points are compared to the SUMMAC ad hoc task (Table l) . A section number will be found in the \"*\" column if we made an improvement. Details will be discussed in the section indicated by the number in the next chapter.",
257
+ "cite_spans": [],
258
+ "ref_spans": [
259
+ {
260
+ "start": 145,
261
+ "end": 154,
262
+ "text": "(Table l)",
263
+ "ref_id": null
264
+ }
265
+ ],
266
+ "eq_spans": [],
267
+ "section": "Fig.2: Framework of Task-Based Evaluation",
268
+ "sec_num": null
269
+ },
270
+ {
271
+ "text": "To assess the relevance accurately, the situation of information retrieval should be realistic enough for the subjects to feel as if they really want to know about a given question. The previous experiments gave only a short description of a topic. We consider it is not sufficiently specific and the interpretation of a question must varied with the subjects.",
272
+ "cite_spans": [],
273
+ "ref_spans": [],
274
+ "eq_spans": [],
275
+ "section": "Description of Questions",
276
+ "sec_num": "3.1"
277
+ },
278
+ {
279
+ "text": "We selected two topics (\"moon cake\" and \"journey in Malay. Peninsula\") and assumed three questions. To indicate to the subjects, we set detailed situation including the motivation to know about that or the use of the information obtained for each question. This method satisfies the restriction \"to limit the variation in assessment between readers\" in the MLUCE Protocol (Minel, et ai. 1997 ).",
280
+ "cite_spans": [
281
+ {
282
+ "start": 372,
283
+ "end": 391,
284
+ "text": "(Minel, et ai. 1997",
285
+ "ref_id": null
286
+ }
287
+ ],
288
+ "ref_spans": [],
289
+ "eq_spans": [],
290
+ "section": "Description of Questions",
291
+ "sec_num": "3.1"
292
+ },
293
+ {
294
+ "text": "For each topic, ten documents are selected from search results by major WWW search engines, so that more than five relevant documents are included for each question. The topics, the outline of the questions, the queries for WWW search, and the number of relevant documents are shown in Table 2 . The description of Question-a2 that was given to the subjects is shown in Fig. 3 .",
295
+ "cite_spans": [],
296
+ "ref_spans": [
297
+ {
298
+ "start": 286,
299
+ "end": 293,
300
+ "text": "Table 2",
301
+ "ref_id": "TABREF3"
302
+ },
303
+ {
304
+ "start": 370,
305
+ "end": 376,
306
+ "text": "Fig. 3",
307
+ "ref_id": "FIGREF0"
308
+ }
309
+ ],
310
+ "eq_spans": [],
311
+ "section": "Description of Questions",
312
+ "sec_num": "3.1"
313
+ },
314
+ {
315
+ "text": "One day just after the mid-autumn festival, my colleague Mr. A brought some moon cakes to the office. He said that one of his Chinese friends had given them to him. They rooked so new to us that we shared and ate them at a coffee break. Chinese eat moon cakes at the mid-autumn festival while Japanese have dumplings then. Someone asked a question why Chinese ate moon cakes, to-which nobody gave the answer. Some cakes tasted sweet as we expected; some were stuffed with salty fillings like roasted pork. Ms. B said that there were over fifty kinds of filling. Her story made me think of a question:",
316
+ "cite_spans": [],
317
+ "ref_spans": [],
318
+ "eq_spans": [],
319
+ "section": "Description of Questions",
320
+ "sec_num": "3.1"
321
+ },
322
+ {
323
+ "text": "What kinds of filling are there for moon cakes sold at the mid-autumn festival in Chinese society? ",
324
+ "cite_spans": [],
325
+ "ref_spans": [],
326
+ "eq_spans": [],
327
+ "section": "Description of Questions",
328
+ "sec_num": "3.1"
329
+ },
330
+ {
331
+ "text": "In the previous experiments, one to three subjects were assigned to each summary sample. Because the judgement must vary with the subjects even if a detailed situation is given, we assigned ten subjects per summary sample to reduce the influence of each person's assessment. The only requirement for subjects is that they should be familiar with WWW search process.",
332
+ "cite_spans": [],
333
+ "ref_spans": [],
334
+ "eq_spans": [],
335
+ "section": "Number of Subjects per Summary Sample",
336
+ "sec_num": "3.2"
337
+ },
338
+ {
339
+ "text": "In the previous experiments, a subject reads a summary and judges whether it is relevant or irrelevant. However, a summary sometimes does not give enough information for relevance judgement. In actual information retrieval situations, selecting criteria vary depending on the question, the motivation, and other circumstances. We will not examine dubious documents if sufficient information is obtained or we do not have sufficient time, and we will examine dubious documents when an exhaustive survey is required. Thus, here we introduce four relevance levels L0 to L3 to simulate various cases in the experiment. L3, L2, and L1 are considered relevant, the confidence becomes lower in order. To reduce the variance of interpretation by subjects, we define each level as follows.",
340
+ "cite_spans": [],
341
+ "ref_spans": [],
342
+ "eq_spans": [],
343
+ "section": "Relevance Levels",
344
+ "sec_num": "3.3"
345
+ },
346
+ {
347
+ "text": "L3: The answer to the given question is found .in a summary. L2: A clue to the answer is found in a summary. Ll:Apparent clues are not found, but it is probable that the answer is contained in the whole document. L0: A summary is not relevant to the question at all. If these are applied to the case of the fare of the Malay Railway, the criteria will be interpreted as follows.",
348
+ "cite_spans": [],
349
+ "ref_spans": [],
350
+ "eq_spans": [],
351
+ "section": "Relevance Levels",
352
+ "sec_num": "3.3"
353
+ },
354
+ {
355
+ "text": "L3:An expression like \"the berth charge of the second class is about RMI5\" is in a summary. L2: An expression like \"I looked into the fare of the train\" is in a summary. LI:A summary describes about a trip by the Malay Railway, but the fare is not referred in it.",
356
+ "cite_spans": [],
357
+ "ref_spans": [],
358
+ "eq_spans": [],
359
+ "section": "Relevance Levels",
360
+ "sec_num": "3.3"
361
+ },
362
+ {
363
+ "text": "In the previous experiments, precision and recall are used to measure accuracy. There are two drawbacks to these measurements: (1) the variance of the subjects' assessment makes the measure inaccurate, and (2) performance of each summary sample is not measured. Precision and recall are widely used to measure information retrieval performance. In the evaluation of summarization, they are calculated as follows. I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I Documents that are actually relevant in S Precision = Documents that are assessed relevant by a subject (S) Documents that are assessed relevant by a subject Recall = Relevant documents",
364
+ "cite_spans": [],
365
+ "ref_spans": [
366
+ {
367
+ "start": 413,
368
+ "end": 550,
369
+ "text": "I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I Documents that are actually",
370
+ "ref_id": null
371
+ }
372
+ ],
373
+ "eq_spans": [],
374
+ "section": "Measures of Accuracy",
375
+ "sec_num": "3.4"
376
+ },
377
+ {
378
+ "text": "In the previous experiments, the assessment standard was not fixed, and some subjects tended to make the relevant set broader and others narrower. The variance reduces the significance of the average precision and recall value. Because we introduced four relevance levels and showed the assessment criteria to the subjects, we can assume three kinds of relevance document sets: L3 only, L3 + L2, and L3 + L2 + L1. The set composed only of the documents with L3 assessment should have a high precision score. This case represents a user wants to know only high-probability information, for example, the user is hurried, or just one answer is sufficient. The set including L1 documents should get a high recall score. This case represents a user wants to know any information concerned with a specific question.",
379
+ "cite_spans": [],
380
+ "ref_spans": [],
381
+ "eq_spans": [],
382
+ "section": "I I",
383
+ "sec_num": null
384
+ },
385
+ {
386
+ "text": "Precision and recall represent the performance of a summarization method for certain question, however they do not indicate the reason why the method presents higher or lower performance. To find the reasons and improve a summarization method based on them, it is useful to analyze quality and performance connected together for each summary sample. Measuring each summary's performance is necessary for such analysis. Therefore, we introduce the relevance score, which represents the correspondence between the subject judgement and the correct document relevance. The score of each pair of subject judgement and document relevance is shown in Table 3 .",
387
+ "cite_spans": [],
388
+ "ref_spans": [
389
+ {
390
+ "start": 645,
391
+ "end": 652,
392
+ "text": "Table 3",
393
+ "ref_id": "TABREF4"
394
+ }
395
+ ],
396
+ "eq_spans": [],
397
+ "section": "I I",
398
+ "sec_num": null
399
+ },
400
+ {
401
+ "text": "By averaging scores of all subjects for every sample, summary's performances are compared. By averaging scores of all summary samples for every summarization method, method's performances are compared. ",
402
+ "cite_spans": [],
403
+ "ref_spans": [],
404
+ "eq_spans": [],
405
+ "section": "I I",
406
+ "sec_num": null
407
+ },
408
+ {
409
+ "text": "The precision and recall are shown in Fig. 4 , and the F-measure is shown in Fig. 5 . The F-measure is the balanced score of precision and recall, calculated as follows:",
410
+ "cite_spans": [],
411
+ "ref_spans": [
412
+ {
413
+ "start": 38,
414
+ "end": 44,
415
+ "text": "Fig. 4",
416
+ "ref_id": null
417
+ },
418
+ {
419
+ "start": 77,
420
+ "end": 83,
421
+ "text": "Fig. 5",
422
+ "ref_id": null
423
+ }
424
+ ],
425
+ "eq_spans": [],
426
+ "section": "Precision and Recall",
427
+ "sec_num": "4.1.1"
428
+ },
429
+ {
430
+ "text": "Figures 4 and 5 show that the phraserepresented summary (C) presents the highest performance. It satisfies both the high precision and the high recall requirements. Because there are various situations in WWW searches, phrase-representation sumtnarization is considered suitable in any cases.",
431
+ "cite_spans": [],
432
+ "ref_spans": [],
433
+ "eq_spans": [],
434
+ "section": "Precision and Recall",
435
+ "sec_num": "4.1.1"
436
+ },
437
+ {
438
+ "text": "The relevance score for each question is shown in Fig. 6 . The phrase-represented summary (C) gets the highest score on average, and the best in Question-a2 and Question-b. For Question-al, though all summaries get poor scores, the sentence extraction summary (B) is the best among them.",
439
+ "cite_spans": [],
440
+ "ref_spans": [
441
+ {
442
+ "start": 50,
443
+ "end": 56,
444
+ "text": "Fig. 6",
445
+ "ref_id": null
446
+ }
447
+ ],
448
+ "eq_spans": [],
449
+ "section": "Relevance Score",
450
+ "sec_num": "4.1.2"
451
+ },
452
+ {
453
+ "text": "The time required to assess relevance is shown in Fig. 7 . The time for Question-a is a sum of the times for Questions al and a2. In the Question-a case, phrase-represented summary (C) requires the shortest time. For Question-b, leading fixedlength characters (A) requires the shortest time, and this result is different from the intuition. This requires further examination. I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I ",
454
+ "cite_spans": [],
455
+ "ref_spans": [
456
+ {
457
+ "start": 50,
458
+ "end": 56,
459
+ "text": "Fig. 7",
460
+ "ref_id": null
461
+ },
462
+ {
463
+ "start": 376,
464
+ "end": 480,
465
+ "text": "I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I",
466
+ "ref_id": null
467
+ }
468
+ ],
469
+ "eq_spans": [],
470
+ "section": "Time",
471
+ "sec_num": "4.2"
472
+ },
473
+ {
474
+ "text": "Here we analyze the experiment result from multiple viewpoint,s: the constituent unit of summaries and .the characteristics of questions and documents in Section 5.1 and 5.2. We then discuss advantages of our experimental method in Section 5.3, and language dependency o f the experiment result in Section 5.4.",
475
+ "cite_spans": [],
476
+ "ref_spans": [],
477
+ "eq_spans": [],
478
+ "section": "Discussion",
479
+ "sec_num": "5"
480
+ },
481
+ {
482
+ "text": "The units that constitute a summary may affect the judging process; if the unit is long, the number of units appeared in a summary may decrease and the summary contains fewer key concepts in the original document. We counted the number of the summaries that contain the clues to the given questions (see Table 4 ). The average numbers are 0.3, 2.0, 4.3 and 4.7 for (A) fixed-length characters, (B) sentences, (C) phrases and (D) words, respectively. The phraserepresented summary (C) and the keyword enumeration summary (D) widely cover the topics, and they are about twice as wide as the sentence extraction summary (B). The leading fixed-length characters (A) contain very few clues and this fact supports that this summary presents the worst performance (see Section 4.1).",
483
+ "cite_spans": [],
484
+ "ref_spans": [
485
+ {
486
+ "start": 304,
487
+ "end": 311,
488
+ "text": "Table 4",
489
+ "ref_id": null
490
+ }
491
+ ],
492
+ "eq_spans": [],
493
+ "section": "Comparison of Constituent Unit",
494
+ "sec_num": "5.1"
495
+ },
496
+ {
497
+ "text": "In order to compare a summary's performance with a summary's quality, we calculate the average relevance score of summaries that contain clues. These scores are also shown in Table 4 . The average score represents the informativeness of each summary. Table 4 shows that the sentence extraction summary (B) and the phrase-represented summary (C) get , relatively high scores, but vary with the question. T h i s is because sentences and phrases are sufficiently informative in most cases, but sentences tend to contain unnecessary information, and phrases tend to lack necessary information. The keyword enumeration summary (D) gets a relatively low score. This is because a word is not sufficiently informative t o enable judgement of whether it is clue to the answer, and relations among words are lacked.",
498
+ "cite_spans": [],
499
+ "ref_spans": [
500
+ {
501
+ "start": 175,
502
+ "end": 182,
503
+ "text": "Table 4",
504
+ "ref_id": null
505
+ },
506
+ {
507
+ "start": 251,
508
+ "end": 258,
509
+ "text": "Table 4",
510
+ "ref_id": null
511
+ }
512
+ ],
513
+ "eq_spans": [],
514
+ "section": "Comparison of Constituent Unit",
515
+ "sec_num": "5.1"
516
+ },
517
+ {
518
+ "text": "These analyses support the two characteristics of the phrase-represented summaries described in Chapter 1, that is, adequate informativeness and wide coverage of topics.",
519
+ "cite_spans": [],
520
+ "ref_spans": [],
521
+ "eq_spans": [],
522
+ "section": "Comparison of Constituent Unit",
523
+ "sec_num": "5.1"
524
+ },
525
+ {
526
+ "text": "The most suitable summarization method may depend on the type of question and/or document. In the experiment results (see Section 4.1.2), the sentence extraction summary (B) and the phraserepresented summary (C) get the highest relevance score. Therefore, here we focus on those two summarization methods and consider the influence of questions and documents.",
527
+ "cite_spans": [],
528
+ "ref_spans": [],
529
+ "eq_spans": [],
530
+ "section": "Influence of Question and Document",
531
+ "sec_num": "5.2"
532
+ },
533
+ {
534
+ "text": "In selecting questions, we consider two factors may affect performance. One is which unit an answer is expressed in. Another is whether clue words easily come to mind.",
535
+ "cite_spans": [],
536
+ "ref_spans": [],
537
+ "eq_spans": [],
538
+ "section": "Influence of Question and Document",
539
+ "sec_num": "5.2"
540
+ },
541
+ {
542
+ "text": "If an answer is expressed as a relation of a predicate to its arguments, the phraserepresentation summarization may be suitable. Question-a2 and Question-b are of this case. If an answer is expressed as compound relations, e.g., reason-consequence relations or causeresult relations, the sentence extraction summarization may be required. And, if an answer is expressed in complex relations of sentences, any summarization method of the four is not suitable. Questions that ask historical background or complicated procedures are examples of this kind, e.g., Question-al.",
543
+ "cite_spans": [],
544
+ "ref_spans": [],
545
+ "eq_spans": [],
546
+ "section": "Influence of Question and Document",
547
+ "sec_num": "5.2"
548
+ },
549
+ {
550
+ "text": "As for another factor, if clue words easily come to mind, the phrase-represented summary is suitable for any unit in which an answer is expressed. This is because the clues are found more easily in short phrases than in long sentences.",
551
+ "cite_spans": [],
552
+ "ref_spans": [],
553
+ "eq_spans": [],
554
+ "section": "Influence of Question and Document",
555
+ "sec_num": "5.2"
556
+ },
557
+ {
558
+ "text": "In selecting documents, whether a question is relevant to the main topic of a document affects the performance, because we use generic summaries. B y sentence extraction summarization, the answer is extracted as a summary only when the question is relevant to the main topic. Phrase-represented summary is able to cover topics more widely, for example, one of the main topics or detailed description of each topic (see Section 5.1). Because the characteristic of the document is independent of the question, which summaries cannot be predicted, and thus the phrase-represented summary will give better results.",
559
+ "cite_spans": [],
560
+ "ref_spans": [],
561
+ "eq_spans": [],
562
+ "section": "Influence of Question and Document",
563
+ "sec_num": "5.2"
564
+ },
565
+ {
566
+ "text": "Through these discussions, we conclude that the phrase-representation summarization is suitable for various cases, while the sentence extraction summarization is for only some restricted cases. Though the samples of questions and documents are relatively few in our experiment, it is sufficient to show the effectiveness of the phrase-representation summarization.",
567
+ "cite_spans": [],
568
+ "ref_spans": [],
569
+ "eq_spans": [],
570
+ "section": "Influence of Question and Document",
571
+ "sec_num": "5.2"
572
+ },
573
+ {
574
+ "text": "Our experimental method has the following advantages.",
575
+ "cite_spans": [],
576
+ "ref_spans": [],
577
+ "eq_spans": [],
578
+ "section": "A d v a n t a g e s of our Experimental Method",
579
+ "sec_num": "5.3"
580
+ },
581
+ {
582
+ "text": "(1) More exact assessment (2)Serves precision/recall pairs for a variety of situations (3)Helps further analysis of problems of a summarization method",
583
+ "cite_spans": [],
584
+ "ref_spans": [],
585
+ "eq_spans": [],
586
+ "section": "A d v a n t a g e s of our Experimental Method",
587
+ "sec_num": "5.3"
588
+ },
589
+ {
590
+ "text": "Our experimental method provides more exact relevance assessment in the following ways. (a) More detailed description of a question We asked the subjects to assess the relevance of full documents to each question after the experiment. Result shows that 93% of the subject judgements match the assumed relevance, while only 69% match in the same kind of assessment in SUMMAC. The percentage that all judgements per document agreed the assumed relevance is 33%, while only 17% in SUMMAC. This is because the subjects comprehended the questions correctly by given detailed information about the situation. (b) More subjects assigned per summary sample We assigned ten subjects to each summary sample, while only one or two subjects were used in SUMMAC. We examined the difference of judgement between the average of ten subjects and the first subject of the ten. Result \"shows that 47% of the first subject's judgement differ more than one level from the average. This proves that the assessment varies from one subject to another, even if a detailed situation is given. (c) Finer levels of relevance We introduced four levels of relevance, by which ambiguity of relevance can be expressed better.",
591
+ "cite_spans": [],
592
+ "ref_spans": [],
593
+ "eq_spans": [],
594
+ "section": "More Exact Assessment",
595
+ "sec_num": "5.3.1"
596
+ },
597
+ {
598
+ "text": "According to the four levels of relevance, we assume three kinds of relevance document sets. This enables to plot the PR curve.",
599
+ "cite_spans": [],
600
+ "ref_spans": [],
601
+ "eq_spans": [],
602
+ "section": "Serves precision~recall pairs for a varie(F of situations",
603
+ "sec_num": "5.3.2"
604
+ },
605
+ {
606
+ "text": "In evaluation conferences like SUMMAC, various summarization methods that are developed for different purposes must be compared. Using such a PR curve, each method can be compared in a criterion that matches its purpose.",
607
+ "cite_spans": [],
608
+ "ref_spans": [],
609
+ "eq_spans": [],
610
+ "section": "Serves precision~recall pairs for a varie(F of situations",
611
+ "sec_num": "5.3.2"
612
+ },
613
+ {
614
+ "text": "We have introduced the relevance score, which allows each summary to be evaluated. Using this score, we can analyze the extrinsic evaluation result and the intrinsic evaluation result connected together, for example, an evaluation result based on information retrieval task and that based on Q & A task using the same questions. Through such analyses, the text quality of summaries or the adequate informativeness can be examined. We ourselves got a lot of benefit from the analysis to find problems and improve the quality of the summary.",
615
+ "cite_spans": [],
616
+ "ref_spans": [],
617
+ "eq_spans": [],
618
+ "section": "Helps further analysis of problems of a summarization method",
619
+ "sec_num": "5.3.3"
620
+ },
621
+ {
622
+ "text": "Though experiment method may be applied to any other languages, we must consider t h e possibility that our result depends on the language characteristics. Japanese text is written by mixing several kinds of characters; Kana characters (Hiragana and Katakana) and Kanji (Chinese) characters, and alphabetic characters are also used. Kanji characters are mainly used to represent concept words and Hiragana characters are used for function words. The fact that they play the different roles makes it easy to find the full words. Also Kanji is a kind of ideogram and each character has its own meaning. Thus, most words can be expressed by 1 to 3 Kanji characters to make short phrases (15 -20 characters) sufficiently informative.",
623
+ "cite_spans": [],
624
+ "ref_spans": [],
625
+ "eq_spans": [],
626
+ "section": "L a n g u a g e dependency",
627
+ "sec_num": "5.4"
628
+ },
629
+ {
630
+ "text": "Though the basic algorithm to create phraserepresented summary itself can be applied to other languages by replacing its analysis component and generation component, similar experiment in that language is required to prove the effectiveness of the phrase-represented summary.",
631
+ "cite_spans": [],
632
+ "ref_spans": [],
633
+ "eq_spans": [],
634
+ "section": "L a n g u a g e dependency",
635
+ "sec_num": "5.4"
636
+ },
637
+ {
638
+ "text": "We proposed an improved method of task-based evaluation on information retrieval. This method can be used to evaluate the performance of summarization methods more accurately than is possible by the methods used in previous work. We carried out a relatively large experiment using this method, the results of which show that phrase-representation summarization is effective to select relevant documents from information retrieval results.",
639
+ "cite_spans": [],
640
+ "ref_spans": [],
641
+ "eq_spans": [],
642
+ "section": "Conclusion",
643
+ "sec_num": null
644
+ },
645
+ {
646
+ "text": "A c k n o w l e d g e m e n t s",
647
+ "cite_spans": [],
648
+ "ref_spans": [],
649
+ "eq_spans": [],
650
+ "section": "Conclusion",
651
+ "sec_num": null
652
+ },
653
+ {
654
+ "text": "We would like to thank our company members who gave valuable suggestions and participated in the experiment.",
655
+ "cite_spans": [],
656
+ "ref_spans": [],
657
+ "eq_spans": [],
658
+ "section": "Conclusion",
659
+ "sec_num": null
660
+ },
661
+ {
662
+ "text": "* precision * recall \"-F-measure = precision + recall",
663
+ "cite_spans": [],
664
+ "ref_spans": [],
665
+ "eq_spans": [],
666
+ "section": "",
667
+ "sec_num": null
668
+ }
669
+ ],
670
+ "back_matter": [],
671
+ "bib_entries": {
672
+ "BIBREF0": {
673
+ "ref_id": "b0",
674
+ "title": "A Proposal for Task-based Evaluation of Text Summarization Systems",
675
+ "authors": [
676
+ {
677
+ "first": "T",
678
+ "middle": [
679
+ "F"
680
+ ],
681
+ "last": "Hand",
682
+ "suffix": ""
683
+ }
684
+ ],
685
+ "year": 1997,
686
+ "venue": "Proceedings of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization",
687
+ "volume": "",
688
+ "issue": "",
689
+ "pages": "31--38",
690
+ "other_ids": {},
691
+ "num": null,
692
+ "urls": [],
693
+ "raw_text": "Hand, T. F. (1997). \"A Proposal for Task-based Evaluation of Text Summarization Systems.\" In Proceedings of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization, pp31-38.",
694
+ "links": null
695
+ },
696
+ "BIBREF1": {
697
+ "ref_id": "b1",
698
+ "title": "Summarization Evaluation Methods: Experiments and Analysis",
699
+ "authors": [
700
+ {
701
+ "first": "H",
702
+ "middle": [],
703
+ "last": "Jing",
704
+ "suffix": ""
705
+ },
706
+ {
707
+ "first": "R",
708
+ "middle": [],
709
+ "last": "Barzilay",
710
+ "suffix": ""
711
+ },
712
+ {
713
+ "first": "K",
714
+ "middle": [],
715
+ "last": "Mckeown",
716
+ "suffix": ""
717
+ },
718
+ {
719
+ "first": "M",
720
+ "middle": [],
721
+ "last": "Elhadad",
722
+ "suffix": ""
723
+ }
724
+ ],
725
+ "year": 1998,
726
+ "venue": "Intelligent Text Summarization",
727
+ "volume": "",
728
+ "issue": "",
729
+ "pages": "51--59",
730
+ "other_ids": {},
731
+ "num": null,
732
+ "urls": [],
733
+ "raw_text": "Jing, H., Barzilay, R., McKeown, K. and Elhadad, M. (1998). \"Summarization Evaluation Methods: Experiments and Analysis.\" In Intelligent Text Summarization. pp51-59. AAAI Press.",
734
+ "links": null
735
+ },
736
+ "BIBREF2": {
737
+ "ref_id": "b2",
738
+ "title": "The TIPSTER SUMMAC Text Summarization Evaluation",
739
+ "authors": [
740
+ {
741
+ "first": "I",
742
+ "middle": [],
743
+ "last": "Mani",
744
+ "suffix": ""
745
+ },
746
+ {
747
+ "first": "D",
748
+ "middle": [],
749
+ "last": "House",
750
+ "suffix": ""
751
+ },
752
+ {
753
+ "first": "G",
754
+ "middle": [],
755
+ "last": "Klein",
756
+ "suffix": ""
757
+ },
758
+ {
759
+ "first": "L",
760
+ "middle": [],
761
+ "last": "Hirschman",
762
+ "suffix": ""
763
+ },
764
+ {
765
+ "first": "L",
766
+ "middle": [],
767
+ "last": "Obrst",
768
+ "suffix": ""
769
+ },
770
+ {
771
+ "first": "T",
772
+ "middle": [],
773
+ "last": "Firmin",
774
+ "suffix": ""
775
+ },
776
+ {
777
+ "first": "M",
778
+ "middle": [],
779
+ "last": "Chizanowski",
780
+ "suffix": ""
781
+ },
782
+ {
783
+ "first": "B",
784
+ "middle": [],
785
+ "last": "Sundheim",
786
+ "suffix": ""
787
+ }
788
+ ],
789
+ "year": 1998,
790
+ "venue": "",
791
+ "volume": "",
792
+ "issue": "",
793
+ "pages": "",
794
+ "other_ids": {},
795
+ "num": null,
796
+ "urls": [],
797
+ "raw_text": "Mani, I., House, D., Klein,G., Hirschman, L.,Obrst, L., Firmin, T., Chizanowski, M., and Sundheim, B. (1998). \"'The TIPSTER SUMMAC Text Summari- zation Evaluation.'\" Technical Report MTR 98W0000138, MITRE Technical Report.",
798
+ "links": null
799
+ },
800
+ "BIBREF3": {
801
+ "ref_id": "b3",
802
+ "title": "How to Appreciate the Quality of Automatic Text Summarization? Examples of FAN and MLUCE Protocols and their Results on SERAPHIN",
803
+ "authors": [
804
+ {
805
+ "first": "J.-L",
806
+ "middle": [],
807
+ "last": "Minel",
808
+ "suffix": ""
809
+ },
810
+ {
811
+ "first": "S",
812
+ "middle": [],
813
+ "last": "Nugier",
814
+ "suffix": ""
815
+ },
816
+ {
817
+ "first": "G",
818
+ "middle": [],
819
+ "last": "Piat",
820
+ "suffix": ""
821
+ }
822
+ ],
823
+ "year": 1997,
824
+ "venue": "Proc. of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization",
825
+ "volume": "",
826
+ "issue": "",
827
+ "pages": "25--30",
828
+ "other_ids": {},
829
+ "num": null,
830
+ "urls": [],
831
+ "raw_text": "Minel, J.-L., Nugier, S. and Piat, G. (1997). \"'How to Appreciate the Quality of Automatic Text Summa- rization? Examples of FAN and MLUCE Protocols and their Results on SERAPHIN.\" In Proc. of the ACL/EACL'97 Workshop on Intelligent Scalable Text Summarization, pp.25-30.",
832
+ "links": null
833
+ },
834
+ "BIBREF4": {
835
+ "ref_id": "b4",
836
+ "title": "Evaluation of Summarization Methods based on Information Retrieval Task",
837
+ "authors": [
838
+ {
839
+ "first": "",
840
+ "middle": [],
841
+ "last": "Mochizuki",
842
+ "suffix": ""
843
+ },
844
+ {
845
+ "first": "M",
846
+ "middle": [],
847
+ "last": "Okumura",
848
+ "suffix": ""
849
+ }
850
+ ],
851
+ "year": 1999,
852
+ "venue": "Notes of SIGNL of the Information Processing Society of Japan",
853
+ "volume": "",
854
+ "issue": "",
855
+ "pages": "41--48",
856
+ "other_ids": {},
857
+ "num": null,
858
+ "urls": [],
859
+ "raw_text": "Mochizuki, H and Okumura, M. (1999). \"Evaluation of Summarization Methods based on Information Retrieval Task.\" In Notes of SIGNL of the Information Processing Society of Japan, 99-NL- 132, pp41-48. (In Japanese)",
860
+ "links": null
861
+ },
862
+ "BIBREF5": {
863
+ "ref_id": "b5",
864
+ "title": "Automatic Text Processing: The Transformation, Analysis, and Retrieval ot",
865
+ "authors": [
866
+ {
867
+ "first": "G",
868
+ "middle": [],
869
+ "last": "Salton",
870
+ "suffix": ""
871
+ }
872
+ ],
873
+ "year": 1989,
874
+ "venue": "",
875
+ "volume": "",
876
+ "issue": "",
877
+ "pages": "",
878
+ "other_ids": {},
879
+ "num": null,
880
+ "urls": [],
881
+ "raw_text": "Salton, G. (1989). Automatic Text Processing: The Transformation, Analysis, and Retrieval ot\" \"-Information by Computer. Addison-Wesley Publishing Company, Inc.",
882
+ "links": null
883
+ },
884
+ "BIBREF6": {
885
+ "ref_id": "b6",
886
+ "title": "At-a-glance\" Summary: Phrase-representation Summarization Method",
887
+ "authors": [
888
+ {
889
+ "first": "Y",
890
+ "middle": [],
891
+ "last": "Ueda",
892
+ "suffix": ""
893
+ },
894
+ {
895
+ "first": "M",
896
+ "middle": [],
897
+ "last": "Oka",
898
+ "suffix": ""
899
+ },
900
+ {
901
+ "first": "T",
902
+ "middle": [],
903
+ "last": "Koyama",
904
+ "suffix": ""
905
+ },
906
+ {
907
+ "first": "T",
908
+ "middle": [],
909
+ "last": "Miyauchi",
910
+ "suffix": ""
911
+ }
912
+ ],
913
+ "year": 2000,
914
+ "venue": "Toward the",
915
+ "volume": "",
916
+ "issue": "",
917
+ "pages": "",
918
+ "other_ids": {},
919
+ "num": null,
920
+ "urls": [],
921
+ "raw_text": "Ueda, Y., Oka, M., Koyama, T. and Miyauchi, T. (2000). \"Toward the \"At-a-glance\" Summary: Phrase-representation Summarization Method.\" submitted to COLING2000.",
922
+ "links": null
923
+ },
924
+ "BIBREF7": {
925
+ "ref_id": "b7",
926
+ "title": "Fast Generation of Abstracts from General Domain Text Corpora by Extracting Relevant Sentences",
927
+ "authors": [
928
+ {
929
+ "first": "K",
930
+ "middle": [],
931
+ "last": "Zechner",
932
+ "suffix": ""
933
+ }
934
+ ],
935
+ "year": 1996,
936
+ "venue": "Proc. of COLING-96",
937
+ "volume": "",
938
+ "issue": "",
939
+ "pages": "986--989",
940
+ "other_ids": {},
941
+ "num": null,
942
+ "urls": [],
943
+ "raw_text": "Zechner, K. (1996). \"Fast Generation of Abstracts from General Domain Text Corpora by Extracting Relevant Sentences.\" In Proc. of COLING-96, pp. 986-989.",
944
+ "links": null
945
+ }
946
+ },
947
+ "ref_entries": {
948
+ "FIGREF0": {
949
+ "uris": null,
950
+ "text": "An example of question (Question-a2)",
951
+ "type_str": "figure",
952
+ "num": null
953
+ },
954
+ "FIGREF1": {
955
+ "uris": null,
956
+ "text": ":0)~:. ~ff6rmance.measurmgph.ase.:.':, :...-. \u2022 :~\". . i .,. ..... :. ...... :.:.'.~:. '.::2:7::~,::,~ '~ :\u00a2~.~:~:~ Measure of accuracy Precision and recall Precision and recall ]",
957
+ "type_str": "figure",
958
+ "num": null
959
+ },
960
+ "FIGREF2": {
961
+ "uris": null,
962
+ "text": "Fig. 4: Precision.& Recall",
963
+ "type_str": "figure",
964
+ "num": null
965
+ },
966
+ "TABREF0": {
967
+ "text": "eo.ip/release/2000/0224..purcha .se.html (in Japanese) ... acauire chemical toner business 3 Fuji Xerox ... acouires chemical toner business of Nippon Carbide Industries Co., Inc .... ... new chemical toner that contributes to reduce cost in laser nrinters and to lower energy consumption ... ... strengthen...supplies business ... manufacturing facilities of Hayatsuki Plant, ... ... uniform...each particle ...",
968
+ "type_str": "table",
969
+ "num": null,
970
+ "html": null,
971
+ "content": "<table><tr><td colspan=\"2\">Fig. 1\" A sample summary</td><td/><td/></tr><tr><td>2 Evaluation Method</td><td/><td/><td/></tr><tr><td>2.1 Summarization</td><td>Methods</td><td>to</td><td>be</td></tr><tr><td>Compared</td><td/><td/><td/></tr></table>"
972
+ },
973
+ "TABREF2": {
974
+ "text": "",
975
+ "type_str": "table",
976
+ "num": null,
977
+ "html": null,
978
+ "content": "<table><tr><td>Experimental Method</td></tr></table>"
979
+ },
980
+ "TABREF3": {
981
+ "text": "",
982
+ "type_str": "table",
983
+ "num": null,
984
+ "html": null,
985
+ "content": "<table><tr><td>: Topics and Questions</td></tr><tr><td>5</td></tr></table>"
986
+ },
987
+ "TABREF4": {
988
+ "text": "",
989
+ "type_str": "table",
990
+ "num": null,
991
+ "html": null,
992
+ "content": "<table><tr><td>: Relevance Score</td></tr></table>"
993
+ }
994
+ }
995
+ }
996
+ }
Full_text_JSON/prefixW/json/W00/W00-0408.json ADDED
@@ -0,0 +1,640 @@
1
+ {
2
+ "paper_id": "W00-0408",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:02.627883Z"
6
+ },
7
+ "title": "A Comparison of Rankings Produced by Summarization Evaluation Measures",
8
+ "authors": [
9
+ {
10
+ "first": "Robert",
11
+ "middle": [
12
+ "L"
13
+ ],
14
+ "last": "Donaway",
15
+ "suffix": "",
16
+ "affiliation": {},
17
+ "email": ""
18
+ },
19
+ {
20
+ "first": "Kevin",
21
+ "middle": [
22
+ "W"
23
+ ],
24
+ "last": "Drummey",
25
+ "suffix": "",
26
+ "affiliation": {},
27
+ "email": ""
28
+ },
29
+ {
30
+ "first": "Laura",
31
+ "middle": [
32
+ "A"
33
+ ],
34
+ "last": "Mather",
35
+ "suffix": "",
36
+ "affiliation": {
37
+ "laboratory": "La Jolla Research Lab Britannica.com, Inc",
38
+ "institution": "",
39
+ "location": {
40
+ "addrLine": "3253 Holiday Ct. Suite 208 La Jolla",
41
+ "postCode": "92037",
42
+ "region": "CA"
43
+ }
44
+ },
45
+ "email": "mather@us@corn"
46
+ }
47
+ ],
48
+ "year": "",
49
+ "venue": null,
50
+ "identifiers": {},
51
+ "abstract": "evaluation measures produce a ranking of all possible extract summaries of a document. Recall-based evaluation measures, which depend on costly human-generated ground truth summaries, produce uncorrelated rankings when ground truth is varied. This paper proposes using sentence-rank-based and content-based measures for evaluating extract summaries, and compares these with recall-based evaluation measures. Content-based measures increase the correlation of rankings induced by synonymous ground truths, and exhibit other desirable properties.",
52
+ "pdf_parse": {
53
+ "paper_id": "W00-0408",
54
+ "_pdf_hash": "",
55
+ "abstract": [
56
+ {
57
+ "text": "evaluation measures produce a ranking of all possible extract summaries of a document. Recall-based evaluation measures, which depend on costly human-generated ground truth summaries, produce uncorrelated rankings when ground truth is varied. This paper proposes using sentence-rank-based and content-based measures for evaluating extract summaries, and compares these with recall-based evaluation measures. Content-based measures increase the correlation of rankings induced by synonymous ground truths, and exhibit other desirable properties.",
58
+ "cite_spans": [],
59
+ "ref_spans": [],
60
+ "eq_spans": [],
61
+ "section": "Abstract",
62
+ "sec_num": null
63
+ }
64
+ ],
65
+ "body_text": [
66
+ {
67
+ "text": "The bulk of active research in the automatic text summarization community centers on developing algorithms to produce extract summaries, e. g. (Schwarz, 1990) , (Paice and Jones, .1993), (Kupiec et al., 1995) , (Marcu, 1997) , (Strzalkowski et al., 1998), and (Goldstein et :al., 1999 ). Yet understanding how to evaluate their output has received less attention. In. \u2022 1997, TIPSTER sponsored a conference (SUM-MAC) where various text summarization algorithms were evaluated for their performance in various tasks (Mani et al., 1999; Firmin and Chrzanowski, 1999) . While extrinsic evaluation measures such as these are often very Concrete, the act of designing the task and scoring the results of the task introduces bias and subject-based variability. These factors may confound the comparison of summarization algorithms. Machine-generated summaries also may be evaluated intrinsically by comparing them with \"ideal\" human-generated summaries. However, there is often little agreement as to what constitutes the ideal summary of a document.",
68
+ "cite_spans": [
69
+ {
70
+ "start": 143,
71
+ "end": 158,
72
+ "text": "(Schwarz, 1990)",
73
+ "ref_id": null
74
+ },
75
+ {
76
+ "start": 161,
77
+ "end": 171,
78
+ "text": "(Paice and",
79
+ "ref_id": null
80
+ },
81
+ {
82
+ "start": 172,
83
+ "end": 208,
84
+ "text": "Jones, .1993), (Kupiec et al., 1995)",
85
+ "ref_id": null
86
+ },
87
+ {
88
+ "start": 211,
89
+ "end": 224,
90
+ "text": "(Marcu, 1997)",
91
+ "ref_id": null
92
+ },
93
+ {
94
+ "start": 227,
95
+ "end": 259,
96
+ "text": "(Strzalkowski et al., 1998), and",
97
+ "ref_id": null
98
+ },
99
+ {
100
+ "start": 260,
101
+ "end": 284,
102
+ "text": "(Goldstein et :al., 1999",
103
+ "ref_id": null
104
+ },
105
+ {
106
+ "start": 407,
107
+ "end": 416,
108
+ "text": "(SUM-MAC)",
109
+ "ref_id": null
110
+ },
111
+ {
112
+ "start": 515,
113
+ "end": 534,
114
+ "text": "(Mani et al., 1999;",
115
+ "ref_id": null
116
+ },
117
+ {
118
+ "start": 535,
119
+ "end": 564,
120
+ "text": "Firmin and Chrzanowski, 1999)",
121
+ "ref_id": null
122
+ }
123
+ ],
124
+ "ref_spans": [],
125
+ "eq_spans": [],
126
+ "section": "Introduction",
127
+ "sec_num": "1"
128
+ },
129
+ {
130
+ "text": "Both intrinsic and extrinsic methods require time consuming, expert human input in order to evaluate summaries. While the traditional methods have many advantages, they are costly, and human assessors cannot always agree on summary quality. If a numerical measure were available which did not depend on human judgement, researchers and developers would be able to immediately assess the effect of modifications to summary generation algorithms\u2022 Also, such a measure might be free of the bias that is introduced by human assessment. This paper investigates the properties of various numerical measures for evaluating the quality of generic, indicative document summaries.",
131
+ "cite_spans": [],
132
+ "ref_spans": [],
133
+ "eq_spans": [],
134
+ "section": "Introduction",
135
+ "sec_num": "1"
136
+ },
137
+ {
138
+ "text": "As explained by Mani et al. (1999) , a generic summary is not topic-related, but \"is aimed at a broad readership community\" and an indicative summary tells \"what topics are addressed in the source text, and thus can be used to alert the user as to source content.\" Section 2 discusses the properties of numerical evaluation measures, points out several drawbacks associated with intrinsic measures and introduces new measures developed by the authors. An experiment was devised to compare the new evaluation measures with the traditional ones. The design of this experiment is discussed in Section 3 and its results are presented in Section 4. The final section includes conclusions and a statement of the future work related to these evaluation measures.",
139
+ "cite_spans": [
140
+ {
141
+ "start": 16,
142
+ "end": 34,
143
+ "text": "Mani et al. (1999)",
144
+ "ref_id": null
145
+ }
146
+ ],
147
+ "ref_spans": [],
148
+ "eq_spans": [],
149
+ "section": "Introduction",
150
+ "sec_num": "1"
151
+ },
152
+ {
153
+ "text": "An evaluation measure produces a numerical score which can be used to compare different summaries of the same document. The scores are used to assess summary quality across a collection of test documents in order to produce an average for an algorithm or system. However, it must be emphasized that the scores are {}9 most significant when considered per document. For example, two different summaries of a document may have been produced by two different summarization algorithms. Presumably, the summary with the higher score indicates that the system which produced it performed better than the other system. Obviously, if one system consistently produces higher scores than another system, its average score will be higher, and one has reason to believe that it is a better system. Thus, the important feature of any summary evaluation measure is not the value of its score, but rather the ranking its score imposes on a set of extracts of a document.",
154
+ "cite_spans": [],
155
+ "ref_spans": [],
156
+ "eq_spans": [],
157
+ "section": "Evaluation Measures",
158
+ "sec_num": "2"
159
+ },
160
+ {
161
+ "text": "To compare two evaluation measures, whose scores may have very different ranges and distributions, one must compare the order in which the measures rank various summaries of a document. For instance, suppose a summary scoring function Y is completely dependent upon the output of another scoring function X, such as Y --2 X. Since Y is an increasing function of X, both X and Y will produce the same ranking of any set of summaries. However, the scores produced by Y will have a very different distribution than those of X and the two sets of scores will not be correlated since the dependence of Y on X is non-linear. Therefore, in order to compare the scores two different measures assign to a set of summaries, one must compare the ranks . they assign, not the actual scores.",
162
+ "cite_spans": [],
163
+ "ref_spans": [],
164
+ "eq_spans": [],
165
+ "section": "Evaluation Measures",
166
+ "sec_num": "2"
167
+ },
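A minimal illustration of the point in the paragraph above, using hypothetical scores that are not from the paper: a monotone transform such as Y = 2**X changes the score distribution without changing the induced ranking, which is why rankings rather than raw scores are compared.

# Sketch: a monotone transform of scores preserves the induced ranking.
scores_x = [0.10, 0.35, 0.20, 0.90]        # hypothetical scores from measure X
scores_y = [2 ** x for x in scores_x]      # Y = 2**X, an increasing function of X

def ranking(scores):
    # Rank summaries from best (1) to worst by descending score.
    order = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    ranks = [0] * len(scores)
    for rank, index in enumerate(order, start=1):
        ranks[index] = rank
    return ranks

assert ranking(scores_x) == ranking(scores_y)  # same ranking, different score scales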
168
+ {
169
+ "text": "The ranks assigned by an evaluation measure produce equivalence classes of extract summaries; each rank equivalence class contains summaries which received the same score. When a measure produces the same score for two different summaries of a document, there is a tie, and the equivalence class will contain more than one summary. All summaries in an equivalence class must share the same rank; let this rank be the midrank of the range of ranks that would have be assigned if each score were distinct. An evaluation measure should posses the following properties: (i) higher-ranking summaries are more effective or are of higher quality than lower-ranking summaries, and (ii) all of the summaries in a rank equivalence class are moreor-less equally effective.",
170
+ "cite_spans": [],
171
+ "ref_spans": [],
172
+ "eq_spans": [],
173
+ "section": "Evaluation Measures",
174
+ "sec_num": "2"
175
+ },
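One way to realize the midrank convention described above (a sketch, not the authors' implementation): scipy's rankdata with method='average' gives every summary in a tied group the mean of the ranks that group spans.

from scipy.stats import rankdata

# Hypothetical scores for five summaries; two pairs are tied.
scores = [0.8, 0.5, 0.5, 0.3, 0.8]

# rankdata ranks ascending, so negate to make the best score rank first;
# method='average' assigns tied summaries the midrank of their range.
midranks = rankdata([-s for s in scores], method="average")
print(midranks)   # the two 0.8 scores share midrank 1.5, the two 0.5 scores share 3.5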
176
+ {
177
+ "text": "The following sections contrast the ranking properties of three types of evaluation measures: recall-based measures, a sentence-rank-based measure and content-based measures. These types of measures are defined, their properties are described and their use is explained.",
178
+ "cite_spans": [],
179
+ "ref_spans": [],
180
+ "eq_spans": [],
181
+ "section": "Evaluation Measures",
182
+ "sec_num": "2"
183
+ },
184
+ {
185
+ "text": "Recall-based evaluation measures are intrinsic. They compare machine-generated summaries with sentences previously extracted by human assessors or judges. From each document, the judges extract sentences that they believe make up the best extract summary of the document. A summary of a document generated by a summarization algorithm is typically compared to one of these \"ground truth\" summaries by counting the number of sentences the ground truth summary and the algorithm's summary have in common. Thus, the more sentences a summary has recalled from the ground truth, the higher its score will be. See work by Goldstein et al. (1999) and Jing et al. (1998) for examples of the use of this measure.",
186
+ "cite_spans": [
187
+ {
188
+ "start": 616,
189
+ "end": 639,
190
+ "text": "Goldstein et al. (1999)",
191
+ "ref_id": null
192
+ },
193
+ {
194
+ "start": 644,
195
+ "end": 662,
196
+ "text": "Jing et al. (1998)",
197
+ "ref_id": null
198
+ }
199
+ ],
200
+ "ref_spans": [],
201
+ "eq_spans": [],
202
+ "section": "Recall-Based Evaluation Measures",
203
+ "sec_num": "2.1"
204
+ },
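A minimal sketch of the sentence-overlap computation described above, with hypothetical sentence indices: recall is simply the fraction of ground-truth sentences that the machine extract recovers.

# Hypothetical sentence indices for a ground truth extract and a machine extract.
ground_truth = {1, 2, 13}   # M = 3 sentences chosen by a judge
extract = {1, 2, 5}         # K = 3 sentences chosen by a system

J = len(extract & ground_truth)   # sentence agreement with the ground truth
precision = J / len(extract)      # J / K
recall = J / len(ground_truth)    # J / M
print(J, precision, recall)       # J = 2, precision = recall = 2/3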
205
+ {
206
+ "text": "The recall-based measures introduce a bias since they are based on the Opinions of a small number of assessors. It is widely acknowledged (Jing et al., 1998; Kupiec et al., 1995; Voorhees, 1998 ) that assessor agreement is typically quite low. There are at least two sources of this disagreement. First, it is possible that one human assessor will pick a particular sentence for inclusion in their summary when the content of another sentence or set of sentences is approximately equivalent. Jing et al. (1998) agree: \"...precision and recall are not the best measures for computing document quality. This is due to the fact that a small change in the summary output (e.g., replacing one sentence with an equally good equivalent which happens not to match majority opinion [of the assessors]) can dramatically affect a system's score.\" We call this source of summary disagreement 'disagreement due to synonymy.' Here is an example of two human-generated extracts from the same 1991 Wall Street Journal article which contain different sentences, but still seem to be describing an article about violin playing in a film: EXTRACT 2: Journalistic ethics forbid me from saying if I think \"Prince of Tides\" is as good as \"Citizen Kane,\" but I don't think it's wrong to reveal that the film has some very fine violin playing. But moviegoers will hear Mr. Zuckerman cast off the languor that too often makes him seem like the most bored of great violinists. With each of these pieces, Mr. Zuckerman takes over the movie and shows what it means to play his instrument with supreme dash.",
207
+ "cite_spans": [
208
+ {
209
+ "start": 138,
210
+ "end": 157,
211
+ "text": "(Jing et al., 1998;",
212
+ "ref_id": null
213
+ },
214
+ {
215
+ "start": 158,
216
+ "end": 178,
217
+ "text": "Kupiec et al., 1995;",
218
+ "ref_id": null
219
+ },
220
+ {
221
+ "start": 179,
222
+ "end": 193,
223
+ "text": "Voorhees, 1998",
224
+ "ref_id": null
225
+ }
226
+ ],
227
+ "ref_spans": [],
228
+ "eq_spans": [],
229
+ "section": "Recall-Based Evaluation Measures",
230
+ "sec_num": "2.1"
231
+ },
232
+ {
233
+ "text": "Another source of disagreement can arise from judges' differing opinions about the true focus of the original document. In other words, judges disagree on what the document is about. We call this second source 'disagreement due to focus.' Here is a human-generated extract of the same article which seems to differ in focus:",
234
+ "cite_spans": [],
235
+ "ref_spans": [],
236
+ "eq_spans": [],
237
+ "section": "Recall-Based Evaluation Measures",
238
+ "sec_num": "2.1"
239
+ },
240
+ {
241
+ "text": "EXTRACT 3: Columbia Pictures has delayed the New York City and Los Angeles openings of \"Prince Of Tides\" for a week. So Gothamites and Angelenos, along with the rest of the country, will have to wait until Christmas Day to see this film version of the Pat Conroy novel about a Southern football coach (Nick Nolte) dallying with a Jewish female psychotherapist (Barbra Streisand) in the Big Apple. Perhaps the postponement is a sign that the studio is looking askance at this expensive product directed and co-produced \".by its female lead.",
242
+ "cite_spans": [],
243
+ "ref_spans": [],
244
+ "eq_spans": [],
245
+ "section": "Recall-Based Evaluation Measures",
246
+ "sec_num": "2.1"
247
+ },
248
+ {
249
+ "text": "Whatever the source, disagreements at the sentence level are prevalent.",
250
+ "cite_spans": [],
251
+ "ref_spans": [],
252
+ "eq_spans": [],
253
+ "section": "Recall-Based Evaluation Measures",
254
+ "sec_num": "2.1"
255
+ },
256
+ {
257
+ "text": "This has serious implications for measures based on a single opinion, when a slightly different opinion would result in a significantly different score (and rank) for many summaries.",
258
+ "cite_spans": [],
259
+ "ref_spans": [],
260
+ "eq_spans": [],
261
+ "section": "Recall-Based Evaluation Measures",
262
+ "sec_num": "2.1"
263
+ },
264
+ {
265
+ "text": "For example, consider the following threesentence ground truth extract of a 37-sentence 1994 Los Angeles Times article from the TREC collection. It contains sentences 1, 2 and 13.",
266
+ "cite_spans": [],
267
+ "ref_spans": [],
268
+ "eq_spans": [],
269
+ "section": "Recall-Based Evaluation Measures",
270
+ "sec_num": "2.1"
271
+ },
272
+ {
273
+ "text": "(1) Clinton Trade Initiative Sinks Under G-7 Criticism. (2) President Clinton came to the high-profile Group of Seven summit to demonstrate new strength in foreign policy but instead watched his premier initiative sink Saturday under a wave of sharp criticism. (13) The negative reaction to the president's trade proposal came as a jolt after administration officials had built it up under the forward-looking name of \"Markets 2000\" and had portrayed it as evidence of his interest in leading the other nations to more open trade practices.",
274
+ "cite_spans": [],
275
+ "ref_spans": [],
276
+ "eq_spans": [],
277
+ "section": "Recall-Based Evaluation Measures",
278
+ "sec_num": "2.1"
279
+ },
280
+ {
281
+ "text": "An extract that replaces sentence 13 with sentence 5:",
282
+ "cite_spans": [],
283
+ "ref_spans": [],
284
+ "eq_spans": [],
285
+ "section": "Recall-Based Evaluation Measures",
286
+ "sec_num": "2.1"
287
+ },
288
+ {
289
+ "text": "(5) In its most elementary form, it would have set up a one-year examination of impediments to world trade, but it would have also set an agenda for liberalizing trade rules in entirely new areas, such as financial services, telecommunications and investment.",
290
+ "cite_spans": [],
291
+ "ref_spans": [],
292
+ "eq_spans": [],
293
+ "section": "Recall-Based Evaluation Measures",
294
+ "sec_num": "2.1"
295
+ },
296
+ {
297
+ "text": "will receive the same recall score as one which replaces sentence 13 with sentence 32:",
298
+ "cite_spans": [],
299
+ "ref_spans": [],
300
+ "eq_spans": [],
301
+ "section": "Recall-Based Evaluation Measures",
302
+ "sec_num": "2.1"
303
+ },
304
+ {
305
+ "text": "(32) Most nations have yet to go through this process, which they hope to complete by January.",
306
+ "cite_spans": [],
307
+ "ref_spans": [],
308
+ "eq_spans": [],
309
+ "section": "Recall-Based Evaluation Measures",
310
+ "sec_num": "2.1"
311
+ },
312
+ {
313
+ "text": "These two alternative summaries both have the same recall rank, but are obviously of very different quality. Considered quantitatively, the only important component of either precision or recall is the 'sentence agreement' J, the number of sentences a summary has in common with the ground truth summary. Following Goldstein et al. (1999), let M be the number of sentences in a ground truth extract summary and let K be the number of sentences in a summary to be evaluated. With precision P = J/K and recall R = J/M as usual, and F1 = 2PR/(P + R), elementary algebra shows that F1 = 2J/(M+K). Often, a fixed summary length K is used. (In terms of word count, this represents varying compression rates.) When a particular ground truth of a given document is chosen, then precision, recall and F1 are all constant multiples of J. As such, these measures produce different scores, but the same ranking of all the K-sentence extracts from the document. Since only this ranking is of interest, it is not necessary to examine more than one of P, R and F1.",
314
+ "cite_spans": [
315
+ {
316
+ "start": 315,
317
+ "end": 338,
318
+ "text": "Goldstein et al. (1999)",
319
+ "ref_id": null
320
+ }
321
+ ],
322
+ "ref_spans": [],
323
+ "eq_spans": [],
324
+ "section": "Recall-Based Evaluation Measures",
325
+ "sec_num": "2.1"
326
+ },
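The algebra above can be checked directly. In this sketch (toy values of M and K, not the paper's data), precision, recall and F1 are all increasing functions of the agreement J, so for a fixed ground truth and fixed extract length they order candidate extracts identically.

# For a ground truth with M sentences and extracts of fixed length K,
# P = J/K, R = J/M and F1 = 2J/(M+K) all grow with the agreement J,
# so they induce the same ranking of K-sentence extracts.
M, K = 3, 3
for J in range(0, min(M, K) + 1):
    P = J / K
    R = J / M
    F1 = 2 * J / (M + K)   # equals 2*P*R/(P + R) whenever J > 0
    print(J, P, R, F1)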
327
+ {
328
+ "text": "The sentence agreement J can only take on integer values between 0 and M, so J, P, R, and F1 are all discrete variables. Therefore, although there may be thousands of possible extract summaries of a document, only M + 1 different scores are possible. This will obviously create a large number of ties in rankings produced by the P, R, and F1 scores, and will greatly increase the probability that radically different summaries will be given the same score and rank. On the other hand, two summaries which express the same ideas using different sentences will be given very different scores. Both of these problems are illustrated by the example above. Furthermore, if a particular ground truth includes a large proportion of the document's sentences (perhaps it is ~ very concise document), shorter summaries will likely include only sentences which appear in the ground truth. Consequently, even a randomly selected collection of sentences might obtain the largest possible score. Thus, recall-based measures are likely to violate both properties (i) and (ii), discussed at the beginning of Section 2. These inherent weaknesses in recall-based measures will be further explored in Section 4.",
329
+ "cite_spans": [],
330
+ "ref_spans": [],
331
+ "eq_spans": [],
332
+ "section": "Recall-Based Evaluation Measures",
333
+ "sec_num": "2.1"
334
+ },
335
+ {
336
+ "text": "Measure One way to produce ground truth summaries is to ask judges to rank the sentences of a docu-.ment in order of their importance in a generic, indicative summary. This is often a difficult task for which it is nearly impossible to obtain consistent results. However, sentences which appear early in a document are often more indicative of the content of the document than are other sentences. This is particularly true in newspaper articles, whose authors frequently try, to give the main points in the first paragraph (Brandow et al., 1995) . Similarly, adjacent sentences are more likely to be related to each other than to those which appear further away in the text. Thus, sentence position alone may be an effective way to rank the importance of sentences.",
337
+ "cite_spans": [
338
+ {
339
+ "start": 524,
340
+ "end": 546,
341
+ "text": "(Brandow et al., 1995)",
342
+ "ref_id": null
343
+ }
344
+ ],
345
+ "ref_spans": [],
346
+ "eq_spans": [],
347
+ "section": "A Sentence-Rank-Based",
348
+ "sec_num": "2.2"
349
+ },
350
+ {
351
+ "text": "To account for sentence importance within a ground truth, a summary comparison measure was developed which treats an extract as a ranking of the sentences of the document. For example, a document with five sentences can be expressed as (1, 2, 3, 4, 5). A particular extract may include sentences 2 and 3. Then if sentence 2 is more important than sentence 3, the sentence ranks are given by (4, 1, 2, 4, 4). Sentences 1, 4, and 5 all rank fourth, since 4 is the midrank of ranks 3, 4 and 5. Such rank vectors can be compared using Kendall's tau statistic (Sheskin, 1997), thus quantifying a summary's agreement with a particular ground truth. As will be shown in Section 4, sentence rank measures result in a smaller number of ties than do recall-based evaluation measures.",
352
+ "cite_spans": [
353
+ {
354
+ "start": 556,
355
+ "end": 571,
356
+ "text": "(Sheskin, 1997)",
357
+ "ref_id": null
358
+ }
359
+ ],
360
+ "ref_spans": [],
361
+ "eq_spans": [],
362
+ "section": "A Sentence-Rank-Based",
363
+ "sec_num": "2.2"
364
+ },
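A sketch of this comparison using the paper's five-sentence example; scipy's kendalltau handles the tied midranks, and the ground-truth rank vector used here is hypothetical.

from scipy.stats import kendalltau

# Five-sentence document; an extract keeps sentences 2 and 3, with sentence 2
# judged more important, so the excluded sentences share the midrank 4.
extract_ranks = [4, 1, 2, 4, 4]

# A hypothetical ground-truth ranking of the same five sentences.
ground_truth_ranks = [4, 1, 4, 2, 4]

tau, p_value = kendalltau(extract_ranks, ground_truth_ranks)
print(round(tau, 3))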
365
+ {
366
+ "text": "Although it is also essentially recall-based, the sentence rank measure has another slight advantage over recall. Suppose a ground truth summary of a 20-sentence document consists of sentences {2, 3, 5}. The machine-generated summaries consisting of sentences {2, 3, 4} and {2, 3, 9} would receive the same recall score, but {2, 3, 4} would receive a higher tau score (5 is closer to 4 than to 9). Of course, this higher score may not be warranted if the content of sentence 9 is more similar to that of sentence 5.",
367
+ "cite_spans": [],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "A Sentence-Rank-Based",
371
+ "sec_num": "2.2"
372
+ },
373
+ {
374
+ "text": "The use of the tau statistic may be more appropriate for ground truths produced by classifying all of the sentences of the original document in terms of their importance to an indicative summary. Perhaps four different categories could be used, ranging from 'very important' to 'not important.' This would allow comparison of a ranking with four equivalence classes (representing the document) to one with just two equivalence classes (representing inclusion and exclusion from the summary to be evaluated).",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "A Sentence-Rank-Based",
379
+ "sec_num": "2.2"
380
+ },
381
+ {
382
+ "text": "Since indicative summaries alert users to document content, any measure that evaluates the quality of an indicative summary ought to consider the similarity of the content of the summary to the content of the full document. This consideration should be independent of exactly which sentences are chosen for the summary. The content of the summary need only capture the general ideas of the original document. If human-generated extracts are available, machine-generated extracts may be evaluated alternatively by comparing their contents to these ground truths. This section defines content-based measures by comparing the term frequency (tf) vectors of extracts to tf vectors of the full text or to tf vectors of a ground truth extract. When the texts and summaries are tokenized and token aliases are determined by a thesaurus, sumrriaries that disagree due to synonymy are likely to have similarly-distributed i i 11 I I I l I I i I l I I I I I I I I I I i I i I i I I i ! term frequencies. Also, summaries which happen to use synonyms appearing infrequently in the text will not be penalized in a summaryto-full-document comparison. Note that term frequencies can always be used to compare an extract with its full text, since the two will always have terms in common, but without a thesaurus or some form of term aliasing, term frequencies cannot be used to compare abstracts with extracts.",
383
+ "cite_spans": [],
384
+ "ref_spans": [
385
+ {
386
+ "start": 913,
387
+ "end": 1008,
388
+ "text": "i i 11 I I I l I I i I l I I I I I I I I I I i I i I i I I i !",
389
+ "ref_id": "TABREF2"
390
+ }
391
+ ],
392
+ "eq_spans": [],
393
+ "section": "Content-Based Measures",
394
+ "sec_num": "2.3"
395
+ },
396
+ {
397
+ "text": "The vector space model of information retrieval as described by Salton (1989) uses the inner product of document vectors to measure the content similarity sim(d1, d2) of two documents d1 and d2. Geometrically, this similarity measure gives the cosine of the angle between the two document vectors. Since cos(0) = 1, documents with high cosine similarity are deemed similar. We apply this concept to summary evaluation by computing document-summary content similarity sim(d, s) or ground truth-summary content similarity sim(g, s).",
398
+ "cite_spans": [
399
+ {
400
+ "start": 64,
401
+ "end": 77,
402
+ "text": "Salton (1989)",
403
+ "ref_id": null
404
+ },
405
+ {
406
+ "start": 470,
407
+ "end": 476,
408
+ "text": "(d, s)",
409
+ "ref_id": null
410
+ },
411
+ {
412
+ "start": 524,
413
+ "end": 530,
414
+ "text": "(g, s)",
415
+ "ref_id": null
416
+ }
417
+ ],
418
+ "ref_spans": [],
419
+ "eq_spans": [],
420
+ "section": "Content-Based Measures",
421
+ "sec_num": "2.3"
422
+ },
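A minimal sketch of the summary-document cosine comparison; the whitespace tokenizer below is a simplification of the token aliasing the paper describes, and the two strings are toy inputs.

import math
from collections import Counter

def tf_cosine(text_a, text_b):
    # Term-frequency vectors from a naive whitespace tokenization
    # (the paper uses token aliasing; this is a simplification).
    a, b = Counter(text_a.lower().split()), Counter(text_b.lower().split())
    dot = sum(a[t] * b[t] for t in a)
    norm = math.sqrt(sum(v * v for v in a.values())) * \
           math.sqrt(sum(v * v for v in b.values()))
    return dot / norm if norm else 0.0

document = "the bank merger joins two banks into one larger bank"
summary = "two banks merge into one bank"
print(round(tf_cosine(summary, document), 3))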
423
+ {
424
+ "text": "Note that when comparing a summary with its document, a prior human assessment is not necessary. This may serve to eliminate the ambiguity of a human assessor's bias towards certain types of summaries or sentences. However, the scores produced by such evaluation measures cannot be used reliably to compare summaries of drastically different lengths, since a much longer summary is more likely than a short summary to produce a term frequency .vector which is similar to the full document's \"tf vector, despite the normalization of the two vectors. (This contrasts with the bias of recall towards short summaries.)",
425
+ "cite_spans": [],
426
+ "ref_spans": [],
427
+ "eq_spans": [],
428
+ "section": "Content-Based Measures",
429
+ "sec_num": "2.3"
430
+ },
431
+ {
432
+ "text": "This similarity measure can be enhanced in a number of ways. For example, using term frequency counts for a large corpus of documents, term weighting (such as log-entropy (Dumais, 1991) or tf-idf (Salton, 1989)) can be used to weight the terms in the document and summary vectors. This may improve the performance of the similarity measure by increasing the weights of content-indicative terms and decreasing the weights of those terms that are not indicative of content. It is demonstrated in Section 4 that term weighting caused a significant increase in the correlation of the rankings produced by different ground truths; however, it is not clear that this weighting increases the scores of high quality summaries.",
433
+ "cite_spans": [],
434
+ "ref_spans": [],
435
+ "eq_spans": [],
436
+ "section": "Content-Based Measures",
437
+ "sec_num": "2.3"
438
+ },
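As one hypothetical realization of such weighting (a sketch, not the paper's exact scheme), scikit-learn's TfidfVectorizer can be fit on a background corpus and used to weight both document and summary vectors before the cosine comparison; the toy corpus below stands in for the 103-document collection.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Toy corpus standing in for the 103-document collection used for weighting.
corpus = [
    "clinton trade initiative sinks under criticism",
    "violin playing in the film prince of tides",
    "bank merger creates larger financial institution",
]
vectorizer = TfidfVectorizer().fit(corpus)

document = corpus[0]
summary = "trade initiative criticized at summit"
doc_vec = vectorizer.transform([document])
sum_vec = vectorizer.transform([summary])
print(cosine_similarity(sum_vec, doc_vec)[0, 0])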
439
+ {
440
+ "text": "There are two potential problems with usingthe cosine measure to evaluate the performance of a summarization algorithm. First of all, it is likely that the summary vector will be very sparse compared to the document vector since the summary will probably contain many fewer terms than the document. Second, it is possible that the summary will use key terms that are not used often in the document. For example, a document about the merger of two banks, may use the term \"bank\" frequently, and use the related (yet not exactly synonymous) term \"financial institution\" only a few times. It is pos-silJle that a high quality extract would have a low cosine similarity with the full document if it contained only those few sentences that use the term \"financial institution\" instead of \"bank.\" Both of these problems can be addressed with another common tool in information retrieval: latent semantic indexing or LSI (Deerwester et al., 1990) .",
441
+ "cite_spans": [
442
+ {
443
+ "start": 914,
444
+ "end": 939,
445
+ "text": "(Deerwester et al., 1990)",
446
+ "ref_id": null
447
+ }
448
+ ],
449
+ "ref_spans": [],
450
+ "eq_spans": [],
451
+ "section": "Content-Based Measures",
452
+ "sec_num": "2.3"
453
+ },
454
+ {
455
+ "text": "LSI is a method of reducing the dimension of the vector space model using the singular value decomposition. Given a corpus of documents, create a term-by-document matrix A where each row corresponds to a term in the document set and each column corresponds to a document. Thus, the columns of A represent all the documents from the corpus, expressed in a particular term-weighting scheme. (In our testing, the document vectors' entries are the relative frequencies of the terms.) Compute the singular value decomposition (SVD) of this matrix (for details see Golub and van Loan (1989) ). Retain some number of the largest singular values of A and discard the rest. In general, removing singular values serves as a dimension reduction technique. While the SVD computation may be time-consuming when the corpus is large, it needs to be performed only once to produce a new term-document matrix and a projection matrix. To calculate the similarity of a summary and a document, the summary vector s must also be mapped to this low-dimensional space. This involves computing a vector-matrix product, which can be done quickly.",
456
+ "cite_spans": [
457
+ {
458
+ "start": 559,
459
+ "end": 584,
460
+ "text": "Golub and van Loan (1989)",
461
+ "ref_id": null
462
+ }
463
+ ],
464
+ "ref_spans": [],
465
+ "eq_spans": [],
466
+ "section": "Content-Based Measures",
467
+ "sec_num": "2.3"
468
+ },
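A numpy sketch of the procedure described above, with a toy term-by-document matrix; the summary vector is folded into the reduced space with the same projection used for documents before taking the cosine.

import numpy as np

# Term-by-document matrix A (rows = terms, columns = documents), toy values.
A = np.array([
    [2, 0, 1, 0],
    [1, 1, 0, 0],
    [0, 2, 0, 1],
    [0, 0, 1, 2],
], dtype=float)

# Truncated SVD: keep the k largest singular values.
k = 2
U, s, Vt = np.linalg.svd(A, full_matrices=False)
Uk, sk = U[:, :k], s[:k]

def project(term_vector):
    # Map a term-space vector (document or summary) into the k-dim LSI space.
    return np.diag(1.0 / sk) @ Uk.T @ term_vector

def cosine(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

doc = A[:, 0]                              # a full document's term vector
summ = np.array([1.0, 0.0, 0.0, 0.0])      # a summary's term vector
print(round(cosine(project(summ), project(doc)), 3))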
469
+ {
470
+ "text": "The effect of using the scaled, reduceddimension document and summary vectors is two-fold. First, each coordinate of both the document and summary vector will contribute to the overall similarity of the summary to the document (unlike the original vector space model, where only terms that occur in the summary contribute to the cosine similarity score). Second, the purpose of using LSI is to reduce the effect of near-synonymy on the similarity score. If a term occurs infrequently in the document but is highly indicative of the content of the document, as in the case where the infrequent term is synonymous with a frequent term, the summary will be penalized less in the reduceddimension model for using the infrequent termthan it would be in the original vector space model. This reduction in penalty occurs because LSI essentially averages the weights of, terms that co-occur frequently with other terms (both \"bank\" and \"financial institution\" often occur with the term \"account\"). This should improve the accuracy of the cosine similarity measure for determining the quality of a summary of a document.",
471
+ "cite_spans": [],
472
+ "ref_spans": [],
473
+ "eq_spans": [],
474
+ "section": "Content-Based Measures",
475
+ "sec_num": "2.3"
476
+ },
477
+ {
478
+ "text": "This section describes the experiment that tests how well these summary evaluation metrics perform. Fifteen documents from the Text Retrieval Conference (TREC) collection were used in the experiment. These documents are part of a corpus of 103 newspaper articles. Each of the documents was tokenized by a language processing algorithm, which performed token aliasing. In our experiments, the term set was comprised of all the aliases appearing in the full corpus of 103 documents. This corpus was used for the purposes of term weighting. Four expert judges created extract summaries (ground truths) for each of the documents. A list of the first 15 documents, along with some of their numerical features is found in Table 1 . The judges were instructed to select as many sentences as were necessary to make an \"ideal\" indicative extract summary of the document. In terms of the count of sentences in the ground truth, the lengths of the summaries varied from document to document. Ground truth compression rates were generally between 10 and 20 percent. The inter-assessor agreement also varied, but was often quite high. We measured this by calculating the average pairwise recall in the collection of four ground truths.",
479
+ "cite_spans": [],
480
+ "ref_spans": [
481
+ {
482
+ "start": 716,
483
+ "end": 723,
484
+ "text": "Table 1",
485
+ "ref_id": "TABREF2"
486
+ }
487
+ ],
488
+ "eq_spans": [],
489
+ "section": "Experimental Design",
490
+ "sec_num": "3"
491
+ },
492
+ {
493
+ "text": "A suite of summary evaluation measures {Ek } which produce a score for a summary was developed. These measures may depend on none, one, or all of the collection of ground truth summaries {gj}. Measures which do not depend on ground truth compute the summarydocument similarity sire (s, d) . Content-based measures which depend on a single ground truth gi compute the summary-ground truth similarity sim (s, gi) . A measure which depends on all of the ground truths gl,.-.,ga, computes a summary's similarity with each ground truth separately and averages these values. Table 2 enumerates the 28 different evaluation measures that were compared in this experiment. Note that the Recall and Kendall measures require a ground truth. In this study, the measures will be used to evaluate extract summaries of a fixed sentence length K. In all of our tests, K = 3 for reasons of scale which will become clear. A summary length of three sentences represents varying proportions of the number of sentences in the full text document, but this length was usually comparable to the lengths of the humangenerated ground truths. For each document, the collection {Sj} was generated. This is the set of all possible K-sentence extracts from the document. If the document has N sentences total, there will be N choose K N) N! g = K! (/~L g)! extracts in the exhaustive collection {Sj}. The focus now is only on the set of all possible summaries and the evaluation measures, and not On any particular summarization algorithm. For each document, each of the measures in {Ek} was used to rank the sets {Sj}. (Note that the measures which do notdepend on ground truths could, in fact, be used to generate summaries if it were possible to produce and rank the exhaustive set of fixed-length summaries in real time. Despite the authors' access to impressive computing power, the process took several hours for each document!) The next section compares these different rankings of the exhaustive set of extracts for each document. Calculate their Spearman rank correlation coef-'ficient. When two evaluation measures produce nearly the same ranking of the summary set, the rank correlation will be near 1 and a scatterplot of the two rankings will show points nearly ly-\u2022 ing on a line with slope 1. When there is little correlation between two rankings, the statistic will be near 0 and the scatterplot will appear to have randomly-distributed points. A negative correlation indicates that one ranking often reverses the rankings of the other and in this case a rank scatterplot will show points nearly lying on a line with negative slope. Table 3 compares the Spearman correlation of the rankings produced by a specific pair of ground truths. The first row contains the correlations of two highly similar ground truth extracts of document 14. Both of these extracts consisted of three sentences; two of the sentences were common to both extracts. Not surprisingly, the correlation is high regardless of what measure produced the rankings. The second row demonstrates an increase (across the row) in correlation between rankings produced by two different ground truth summaries of document 8. These two ground truths did not disagree in focus, but did disagree due to synonymy --they contain just one sentence in common. In general, the correlation among the rankings produced by synonymous ground truths was increased most by using the SVD content-based comparison. Figure 1 illustrates the correlation increase graphically for this pair of ground truths. 
By contrast, the third row of Table 3 displays a decrease (across the row) in correlation between rankings produced by two different ground truths. In this case, the two ground truths disagreed in .focus: they are Extracts 2 and 3 contrasted in Section 2.1. Again, the correlation among the rankings produced by the four ground truths was decreased most by using a weighted content-based comparison such as tf-idf or SVD. These patterns were typical for rankings produced by ground truths which differed in focus, allaying the fear that applying the SVD weighting would produce correlated rankings based on any two ground truths. Of course, the lack of correlation among recall-based rankings whenever ground truths did not contain exactly the same sentences implies that a different collection of extracts would rank highly if one ground truth were replaced with the other. This effect would surely carry through to system averages across a set of documents. To exemplify the size of this effect, for each document, the summaries which scored highest using one ground truth were scored (using recall) against a second ground truth. With the first ground truths, these high-scoring summaries averaged over 75% recall; using the second ground truths, the same summaries averaged just over 25% recall. Thus, by simply changing judges, an automatic system which produced these summaries would appear to have a very different success rate. This dispar-. ity is lessened when content-based measures are used, but the outcomes are still disparate. Evidence suggests that the content-based measures which do not rely on a ground truth \u2022 may be an acceptable substitute to those which do'.-Over the set of 15 documents, the average within-document inter-assessor correlation is 0.61 using term frequency, 0.72 using tf-idf, and 0.67 using SVD. The average correlation of the ground truth dependent measures with those that perform summary-document comparisons is 0.48 using term frequency, 0.70 using tf-idf, and 0.56 using SVD. This means that on average, the rankings based on single ground truths are only slightly more correlated to each other than they are to the rankings that do not depend on any ground truth. As noted in Section 2.1, the recall-based measures exhibit unfavorable scoring properties. Figure 2 shows the histogram of scores assigned to the exhaustive summary set for doc- !l ,,,,,,,,-,,,: .... amwi ll,, I , . ,,. ~.., ~%.~,,r~, Each of these measures was based on the same ground truth summary of this document, which contained four sentences. Clearly, the measures based on a more sophisticated parsing method have a much greater ability to discriminate between summaries. By contrast, the recail metric can assign one of only four scores to a length 3 summary, based on the value of Ji Elementary combinatorics shows that 4 extracts will receive the highest possible score (and thus will rank first), 126 summaries will rank second, 840summaries will rank third, and 1330 summaries will rank last (with a score of 0). This accounts for all of the 2300 three-sentence extracts that, are possible. It seems very unlikely that all of the second-ranking summaries are equally effective. The histogram depicting this distribution is shown at the top of Figure 2 . This is followed by the histograms for the Kendall metric, and the content-based metrics using term frequency, tf-idf, and SVD weighted vectors, respectively. 
The tf-idf and SVD weighted measures produced a very fine distribution of scores, particularly near the top of the range. That is, these metrics are able to distinguish between different high-scoring summaries. These patterns in the score histograms were typical across the 15 documents.",
494
+ "cite_spans": [
495
+ {
496
+ "start": 282,
497
+ "end": 288,
498
+ "text": "(s, d)",
499
+ "ref_id": null
500
+ },
501
+ {
502
+ "start": 403,
503
+ "end": 410,
504
+ "text": "(s, gi)",
505
+ "ref_id": null
506
+ }
507
+ ],
508
+ "ref_spans": [
509
+ {
510
+ "start": 569,
511
+ "end": 576,
512
+ "text": "Table 2",
513
+ "ref_id": "TABREF3"
514
+ },
515
+ {
516
+ "start": 2619,
517
+ "end": 2626,
518
+ "text": "Table 3",
519
+ "ref_id": "TABREF4"
520
+ },
521
+ {
522
+ "start": 3446,
523
+ "end": 3454,
524
+ "text": "Figure 1",
525
+ "ref_id": "FIGREF0"
526
+ },
527
+ {
528
+ "start": 3566,
529
+ "end": 3573,
530
+ "text": "Table 3",
531
+ "ref_id": "TABREF4"
532
+ },
533
+ {
534
+ "start": 5837,
535
+ "end": 5845,
536
+ "text": "Figure 2",
537
+ "ref_id": null
538
+ },
539
+ {
540
+ "start": 5924,
541
+ "end": 5957,
542
+ "text": "!l ,,,,,,,,-,,,: .... amwi ll,, I",
543
+ "ref_id": null
544
+ },
545
+ {
546
+ "start": 6803,
547
+ "end": 6811,
548
+ "text": "Figure 2",
549
+ "ref_id": null
550
+ }
551
+ ],
552
+ "eq_spans": [],
553
+ "section": "Experimental Design",
554
+ "sec_num": "3"
555
+ },
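Two of the computations in this passage can be sketched directly: the size of the exhaustive extract set (with N = 25 sentences and K = 3 this reproduces the 2300 extracts and the 4/126/840/1330 split quoted above for a four-sentence ground truth), and the Spearman correlation between the rankings two measures induce on the same extracts (the scores below are hypothetical).

from math import comb
from scipy.stats import spearmanr

# Exhaustive set of K-sentence extracts from an N-sentence document.
N, K, M = 25, 3, 4            # 25 sentences, 3-sentence extracts, 4-sentence ground truth
print(comb(N, K))             # 2300 possible extracts

# Number of extracts attaining each possible agreement J with the ground truth.
for J in range(K, -1, -1):
    print(J, comb(M, J) * comb(N - M, K - J))   # 4, 126, 840, 1330 for J = 3, 2, 1, 0

# Spearman rank correlation between the rankings two measures induce on the
# same extract set (hypothetical scores for five extracts).
scores_measure_1 = [0.9, 0.4, 0.7, 0.2, 0.6]
scores_measure_2 = [0.8, 0.5, 0.9, 0.1, 0.4]
rho, p_value = spearmanr(scores_measure_1, scores_measure_2)
print(round(rho, 2))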
556
+ {
557
+ "text": "Conclusions and Future Work: There is wide variation in the rankings produced by recall scores from non-identical ground truths. This difference in scores is reflected in averages computed across documents. The low inter-assessor correlation of ranks based on recall measures is distressing, and indicates that these measures cannot be effectively used to compare performances of summarization systems. Measures which gauge content similarity produce more highly correlated rankings whenever ground truths do not disagree in focus. Content-based measures assign different rankings when ground truths do disagree in focus. In addition, these measures provide a finer grained score with which to compare summaries. Moreover, the content-based measures which rely on a ground truth are only slightly more correlated to each other than they are to the measures which perform summary-document comparisons. This suggests that the effectiveness of summarization algorithms could be measured without the use of human judges. Since the cosine measure is easy to calculate, feedback of summary quality can be almost instantaneous.",
558
+ "cite_spans": [],
559
+ "ref_spans": [],
560
+ "eq_spans": [],
561
+ "section": "5",
562
+ "sec_num": null
563
+ },
564
+ {
565
+ "text": "The properties of these content-based measures need to be further investigated. For example, it is not clear that content-based measures satisfy properties (i) and (ii), discussed in Section 2. Also, while they do produce far fewer ties than either recall or tau, such a fine distinction in summary quality is probably not justified. When human-generated ground truths are available, perhaps some combination of recall and the content-based measures could be used. For instance, whenever recall is not perfect, the content of the non-overlapping sentences could be compared with the missed ground truth sentences. Also, the effects of compression rate,",
566
+ "cite_spans": [],
567
+ "ref_spans": [],
568
+ "eq_spans": [],
569
+ "section": "5",
570
+ "sec_num": null
571
+ }
572
+ ],
573
+ "back_matter": [
574
+ {
575
+ "text": "summary length, and document style are not known.The authors are currently performing further experiments to see if users prefer summaries that rank highly with content-based measures over other summaries. Also, the outcomes of extrinsic evaluation techniques will be compared with each of these scoring methods. In other words, do the high-ranking summaries help users to perform various tasks better than lower-ranking summaries do?",
576
+ "cite_spans": [],
577
+ "ref_spans": [],
578
+ "eq_spans": [],
579
+ "section": "annex",
580
+ "sec_num": null
581
+ },
582
+ {
583
+ "text": "The authors would like to thank Mary Ellen Okurowski and Duncan Buell for their sup-j port, encouragement, and advice throughout this project. Thanks go also to Tomek Strzalkowski, Inderjeet Mani, Donna Harman, and Hal Wilson for their suggestions of how to improve the design of the experiment. We greatly appreciate the fine editing advice Oksana Lassowsky provided. Finally, we are especially grateful to the four expert judges, Benay, Ed, MEO, and Toby, who produced our ground truth summaries. ",
584
+ "cite_spans": [],
585
+ "ref_spans": [],
586
+ "eq_spans": [],
587
+ "section": "Acknowledgements",
588
+ "sec_num": "6"
589
+ }
590
+ ],
591
+ "bib_entries": {},
592
+ "ref_entries": {
593
+ "FIGREF0": {
594
+ "uris": null,
595
+ "num": null,
596
+ "type_str": "figure",
597
+ "text": "Both Ms. Streisand's film husband, played by Jeroen Krabbe, and her film son, played by her real son Jason Gould, are, for the purposes of the screenplay, violinists. The actual sound what might be called a fiddle over -was produced off camera by Pinchas Zucker-violin program in \"Prince of Tides\" eliminates the critic's usual edge and makes everyone fall back on his basic pair of ears."
598
+ },
599
+ "FIGREF1": {
600
+ "uris": null,
601
+ "num": null,
602
+ "type_str": "figure",
603
+ "text": "Figure 1: Synonymy: Content-based Measures Increase Rank Correlation"
604
+ },
605
+ "FIGREF2": {
606
+ "uris": null,
607
+ "num": null,
608
+ "type_str": "figure",
609
+ "text": "five different measures."
610
+ },
611
+ "FIGREF3": {
612
+ "uris": null,
613
+ "num": null,
614
+ "type_str": "figure",
615
+ "text": "~ ~'~T GT4 ~ ~),dlllh,,.,,,,,,,..,.. ......"
616
+ },
617
+ "TABREF2": {
618
+ "num": null,
619
+ "text": "",
620
+ "content": "<table><tr><td colspan=\"6\">: Test Document &amp; Summary Statistics</td></tr><tr><td/><td>Sent.</td><td/><td colspan=\"2\">Token Gnd. Truth</td><td>Gnd. Truth</td></tr><tr><td colspan=\"2\">TREC File Name Count</td><td/><td colspan=\"2\">Count Sent. Cnt.</td><td>Avg. Recall</td></tr><tr><td>WSJ911211-0057</td><td>34</td><td/><td>667</td><td>3, 4, 12, 3</td><td>44%</td></tr><tr><td>wsJg00608-0126</td><td>34</td><td/><td>603</td><td>4, 4, 9, 3</td><td>54%</td></tr><tr><td>WSJ900712-0047</td><td>\" 18</td><td/><td>364</td><td>2, 3, 5, 2</td><td>7S%</td></tr><tr><td>latwp940604.0027</td><td>23</td><td/><td>502</td><td>4, 5, 5, 4</td><td>69%</td></tr><tr><td>latwp940621.0116</td><td>27</td><td/><td>579</td><td>12, 11, 10</td><td>84%</td></tr><tr><td>latwp940624.0094</td><td>17</td><td/><td>460</td><td>5, 5, 5, 4</td><td>79%</td></tr><tr><td>latwp940707.0400</td><td>33</td><td/><td>503</td><td>6, 9, 8, 8</td><td>52%</td></tr><tr><td>latwp940709.0051</td><td>37</td><td/><td>877</td><td>3, 5, 5, 4</td><td>53%</td></tr><tr><td>latwp940713.0013</td><td>34</td><td/><td>702</td><td>9, 4, 5, 8</td><td>35%</td></tr><tr><td>latwp940713.0014</td><td>30</td><td/><td>528</td><td>6, 5, 7, 5</td><td>88%</td></tr><tr><td>latwp940721.0080</td><td>28</td><td/><td>793</td><td>3, 3, 5, 2</td><td>88%</td></tr><tr><td>latwp940725.0030</td><td>36</td><td/><td>690</td><td>9, 2, 7, 5</td><td>45%</td></tr><tr><td>latwp940725.0128</td><td>18</td><td/><td>438</td><td>6, 3, 5, 5</td><td>63%</td></tr><tr><td>latwp940729.0109</td><td>25</td><td/><td>682</td><td>4, 3, 4,3</td><td>96%</td></tr><tr><td>' l'atwp940801.0010</td><td>28</td><td>I</td><td>474</td><td>4, 5, 4, 5</td><td>43%</td></tr></table>",
621
+ "type_str": "table",
622
+ "html": null
623
+ },
624
+ "TABREF3": {
625
+ "num": null,
626
+ "text": "",
627
+ "content": "<table><tr><td/><td colspan=\"3\">: Evaluation Measures</td><td/><td/><td/></tr><tr><td>Similarity</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Measure</td><td>Details</td><td/><td/><td/><td/><td/></tr><tr><td>Recall</td><td>JJ M , Ji = # ( s N gi )</td><td colspan=\"2\">N.A. E1</td><td>E2</td><td>E3</td><td>E4</td><td>E5</td></tr><tr><td>Kendall Tau</td><td>see Section 2.2</td><td colspan=\"2\">N.A. E6</td><td>E7</td><td>Es</td><td colspan=\"2\">E9 ElO</td></tr><tr><td>tf Cosine \"tf-idf Cosine</td><td>sirn(s, d or gi) o, tf voctors sim(s,-) on tf-idf weighted vectors</td><td colspan=\"6\">Ell E12 E13 E14 EIS E16 El7 ElS El9 E20 E21 E22</td></tr><tr><td colspan=\"2\">SVD tf Cosine sire(s, .) o, iow-ai ..... to,s</td><td>E23</td><td colspan=\"5\">E24 E25 E26 E27] E2s</td></tr></table>",
628
+ "type_str": "table",
629
+ "html": null
630
+ },
631
+ "TABREF4": {
632
+ "num": null,
633
+ "text": "Correlation of Ground Truths Depends on Level of DisagreementIt recall i tan I tf cosine I tf-idf I SVD I",
634
+ "content": "<table><tr><td>Agree Sentences</td><td>0.87 0.96</td><td>0.95</td><td>0.87</td><td>0.99</td></tr><tr><td>Disagree synonymy</td><td>0.34 0.37</td><td>0.53</td><td>0.72</td><td>0.96</td></tr><tr><td>Disagree focus</td><td>0.22 0.31</td><td>0.32</td><td colspan=\"2\">0.20 -0.29 i</td></tr></table>",
635
+ "type_str": "table",
636
+ "html": null
637
+ }
638
+ }
639
+ }
640
+ }
Full_text_JSON/prefixW/json/W00/W00-0409.json ADDED
The diff for this file is too large to render. See raw diff
 
Full_text_JSON/prefixW/json/W00/W00-0410.json ADDED
@@ -0,0 +1,979 @@
1
+ {
2
+ "paper_id": "W00-0410",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:44.090778Z"
6
+ },
7
+ "title": "Using Summarization for Automatic Briefing Generation",
8
+ "authors": [
9
+ {
10
+ "first": "Inderjeet",
11
+ "middle": [
12
+ "Mani Kristian"
13
+ ],
14
+ "last": "Concepcion",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "The MITRE Corporation",
19
+ "location": {
20
+ "addrLine": "W640 11493 Sunset Hills Road",
21
+ "postCode": "22090",
22
+ "settlement": "Reston",
23
+ "region": "VA",
24
+ "country": "USA"
25
+ }
26
+ },
27
+ "email": ""
28
+ },
29
+ {
30
+ "first": "Linda",
31
+ "middle": [],
32
+ "last": "Van Guilder",
33
+ "suffix": "",
34
+ "affiliation": {
35
+ "laboratory": "",
36
+ "institution": "The MITRE Corporation",
37
+ "location": {
38
+ "addrLine": "W640 11493 Sunset Hills Road",
39
+ "postCode": "22090",
40
+ "settlement": "Reston",
41
+ "region": "VA",
42
+ "country": "USA"
43
+ }
44
+ },
45
+ "email": ""
46
+ }
47
+ ],
48
+ "year": "",
49
+ "venue": null,
50
+ "identifiers": {},
51
+ "abstract": "We describe a system which automatically generates multimedia briefings from highlevel outlines. The system uses summarization in content selection and creation, and in helping form a coherent narrative for the briefing. The approach does not require a domain knowledge base.",
52
+ "pdf_parse": {
53
+ "paper_id": "W00-0410",
54
+ "_pdf_hash": "",
55
+ "abstract": [
56
+ {
57
+ "text": "We describe a system which automatically generates multimedia briefings from highlevel outlines. The system uses summarization in content selection and creation, and in helping form a coherent narrative for the briefing. The approach does not require a domain knowledge base.",
58
+ "cite_spans": [],
59
+ "ref_spans": [],
60
+ "eq_spans": [],
61
+ "section": "Abstract",
62
+ "sec_num": null
63
+ }
64
+ ],
65
+ "body_text": [
66
+ {
67
+ "text": "Document production is an important function in many organizations. In addition to instruction manuals, reports, courseware, system documentation, etc., briefings are a very common type of document product, often used in slide form as a visual accompaniment to a talk. Since so much time is spent by so many people in producing briefings, often under serious time constraints, any method to reduce the amount of time spent on briefing production could yield great gains in productivity.",
68
+ "cite_spans": [],
69
+ "ref_spans": [],
70
+ "eq_spans": [],
71
+ "section": "Introduction",
72
+ "sec_num": "1"
73
+ },
74
+ {
75
+ "text": "Briefings involve a high degree of condensation of information (e.g., no more than a few points, perhaps bul,leted, per slide), and they typically contain multimedia information. Many briefings have a stereotypical structure, dictated in part by the business rules of the organisation. For example, a commander may present a daily or weekly brief to her superiors, which is more in the nature of a routine update of activities since the last briefing; or she may provide an action brief, which is triggered by a particular situation, and which consists of a situation update followed by arguments recommending a particular course of action. Further, the process of constructing a briefing may involve certain stereotypical activities, including culling information from particular sources, such as messages, news, web pages, previous briefings, etc. Thus, while part of the briefing content may be created anew by the briefing author 1, other parts of the briefing may be constructed from existing information sources. However, information in those sources need not necessarily be in the same form as needed by the briefing.",
76
+ "cite_spans": [],
77
+ "ref_spans": [],
78
+ "eq_spans": [],
79
+ "section": "Introduction",
80
+ "sec_num": "1"
81
+ },
82
+ {
83
+ "text": "All these characteristics of briefings make them attractive as an application of automatic summarization, which is aimed at producing a condensed, task-tailored representation of salient content in information sources. Often, the background information being used in a slide is quite considerable; the author needs to identify what's salient, presenting it in a succinct manner so as to fit on the slide, perhaps creating a graphic or other multimedia clip to do so. Automatic summarization; by definition, has a clear role to play here. A briefing usually involves a sequence of slides; as the summary becomes longer, it needs to form a coherent narrative, built around the prescribed structure. Finally, a briefing must strive, to the extent possible, to be persuasive and vivid, so that the point gets across. This in turn presents a further challenge for summarization: the ability to generate smoothly narrated, coherent summaries. I The noun \"author\" is used throughout the paper to designate a human author.",
84
+ "cite_spans": [],
85
+ "ref_spans": [],
86
+ "eq_spans": [],
87
+ "section": "Introduction",
88
+ "sec_num": "1"
89
+ },
90
+ {
91
+ "text": "It is therefore worthwhile investigating whether combining automatic summarization with intelligent multimedia presentation techniques can make the briefing generation amenable to full automation. In other words, the author should be able to use a computer program to generate an initial briefing, which she can then edit and revise as needed. The briefing can then be presented by the author if desired, or else directly by the computer (particularly useful if the briefing is being sent to someone else). The starting point for this process would be a high-\" level outline of the briefing on the part of the author. The outline would include references to particular information sources that had to be, summarized in particular ways. If a program were able to take such outlines and generate briefings which didn't require extensive postediting to massage into a state deemed acceptable for the task at hand, the program could be regarded as a worthwhile time saving tool.",
92
+ "cite_spans": [],
93
+ "ref_spans": [],
94
+ "eq_spans": [],
95
+ "section": "Introduction",
96
+ "sec_num": "1"
97
+ },
98
+ {
99
+ "text": "Our work forms part of a larger DARPA-funded project aimed at improving analysis and decision-making in crisis situations by providing tools that allow analysts to collaborate to develop structured arguments in support of particular conclusions and to help predict likely future scenarios. These arguments, along with background evidence, are packaged together as briefing s to high-level decision-makers. In leveraging automatic methods along the lines suggested above to generate briefings, our approach needs to allow the analyst to take on as much of the briefing authoring as she wants to (e.g., it may take time for her to adapt to or trust the machine, or she may want the machine to present just part of the briefing). The analyst's organisation usually will instantiate one of several templates dictating the high-level structure of a briefing; for example, a briefing may always have to begin with an executive summary. The summarization methods also need to be relatively domain-independent, given that the subject matter of crises are somewhat unpredictable; an analyst in a crisis situation is likely to be inundated with large numbers of crisis-related news and intelligence reports from many different sources. This means that we cannot require that a domain knowledge base be available to help the briefing generation process.",
100
+ "cite_spans": [],
101
+ "ref_spans": [],
102
+ "eq_spans": [],
103
+ "section": "Approach",
104
+ "sec_num": "2"
105
+ },
106
+ {
107
+ "text": "Given these task requirements, we have adopted an approach that is flexible about accommodating different degrees of author involvement, that is relatively neutral about the rhetorical theory underlying the briefing structure (since a template may be provided by others), and that is domain-independent. In our approach, the author creates the briefing outline, which is then fleshed out further by the system based on information in the outline. The system fills out some content by invoking specified summarizers; it also makes decisions, when needed, about output media type; it introduces narrative elements to improve the coherence of the briefing; and finally, it assembles the final presentation, making decisions about spatial layout in the process.",
108
+ "cite_spans": [],
109
+ "ref_spans": [],
110
+ "eq_spans": [],
111
+ "section": "Approach",
112
+ "sec_num": "2"
113
+ },
114
+ {
115
+ "text": "A briefing is represented as a tree. The structure of the tree represents the rhetorical structure of the briefing. Each node has a label, which offers a brief textual description of the node. Each leaf node has an associated goal, which, when realized, provides content for that node. There are two kinds of goals: content-level goals and narrative-level goals. Content-level goals are also of two kinds: retrieve goals, which retrieve existing media objects of a particular type (text, audio, image, audio, video) satisfying some description, and create goals, which create new media objects of these types using programs (called summarization filters). Narrative-level goals introduce descriptions of content at other nodes: they include captions and running text for media objects, and segues, which are rhetorical moves describing a transition to a node.",
116
+ "cite_spans": [],
117
+ "ref_spans": [],
118
+ "eq_spans": [],
119
+ "section": "Approach",
120
+ "sec_num": "2"
121
+ },
122
+ {
123
+ "text": "Ordering relations reflecting temporal and spatial layout are defined on nodes in the tree. Two coarse-grained relations, seq for precedence, and par for simultaneity, are used to specify a temporal ordering on the nodes in the tree. As an example, temporal constraints for a (tiny) tree of 9 nodes may be expressed as: <ordering> <seq> <par>7</par> <par>8</par> <par>3</par> <par>4 5</par> <par>6</par> <par>l 9</par> <par>2</par> </seq> </ordering> The tree representation, along with the temporal constraints, can be rendered in text as XML; we refer to the XML representation as a script. The overall architecture of our system is shown in Figure 1 , The user creates the briefing outline in the form of a script, by using a GUI. The briefing generator takes the script as input. The Script Validator applies an XML parser to the script, to check for syntactic correctness. It then builds a tree representation for the script, which represents the briefing outline, with temporal constraints attached to the leaves of the tree.",
124
+ "cite_spans": [],
125
+ "ref_spans": [
126
+ {
127
+ "start": 644,
128
+ "end": 652,
129
+ "text": "Figure 1",
130
+ "ref_id": null
131
+ }
132
+ ],
133
+ "eq_spans": [],
134
+ "section": "Approach",
135
+ "sec_num": "2"
136
+ },
137
+ {
138
+ "text": "Next, a Content Creator takes the input tree and expands it by introducing narrative-level goals including segues to content nodes, and rtmning text and captions describing media objects at content nodes. Running text and short captions are generated from meta-information associated with media objects, by using shallow text generation methods (canned text). The end result of content selection (which has an XML representation callod a ground script) is that the complete tree has been fully specified, with all the create and retrieve goals fully specified , with all the output media types decided. The Content Creator is thus responsible for both content selection and creation, in terms of tree structure and node content.",
139
+ "cite_spans": [],
140
+ "ref_spans": [],
141
+ "eq_spans": [],
142
+ "section": "Approach",
143
+ "sec_num": "2"
144
+ },
145
+ {
146
+ "text": "Then, a Content Executor executes all the create and retrieve goals. This is a very simple step, resulting in the generation of all the media objects in the presentation, except for the audio files for speech to be synthesized. Thus, this step results in realization of the content at the leaves of the tree.",
147
+ "cite_spans": [],
148
+ "ref_spans": [],
149
+ "eq_spans": [],
150
+ "section": "Approach",
151
+ "sec_num": "2"
152
+ },
153
+ {
154
+ "text": "Finally, the Presentation Generator takes the tree which is output from Content Execution, along with its temporal ordering constraints, and generates the spatial layout of the presentation. If no spatial layout constraints are specified (the default is to not specify these), the system allocates space using a simple method based on the temporal layout for nodes which have spatial manifestations. Speech synthesis is also carried out here. Once the tree is augmented with spatial layout constraints, it is translated by the Presentation Generator into SMIL 2 (Synchronized Multimedia Integration Language) (SMIL 99), a W3C-developod extension of HTML that can be played by standard multimedia players (such as Real 3 and Grins 4. This step thus presents the realized content, synthesizing it into a multimedia presentation laid out spatially and temporally.",
155
+ "cite_spans": [],
156
+ "ref_spans": [],
157
+ "eq_spans": [],
158
+ "section": "Approach",
159
+ "sec_num": "2"
160
+ },
161
+ {
162
+ "text": "This particular architecture, driven by the above project requirements, does not use planning as an overall problem-solving strategy, as planning requires domain knowledge. It therefore differs from traditional intelligent multimedia presentation planners, e.g., (Wahlster etal. 93) . Nevertheless, the system does make a number of intelligent decisions in organizing and coordinating presentation decisions. These are discussed next, after which we turn to the main point of the paper, namely the leveraging of summarization in automatic briefing generation.",
163
+ "cite_spans": [
164
+ {
165
+ "start": 263,
166
+ "end": 282,
167
+ "text": "(Wahlster etal. 93)",
168
+ "ref_id": null
169
+ }
170
+ ],
171
+ "ref_spans": [],
172
+ "eq_spans": [],
173
+ "section": "Approach",
174
+ "sec_num": "2"
175
+ },
176
+ {
177
+ "text": "The author of a briefing may choose to flesh out as little of the tree as desired, with the caveat that the temporal ordering relations for nonnarrative nodes need to be provided by her. When a media object is generated at a node by a create goal, the running text and captions are generated by the system. The motivation for this is obvious: when a summarization filter (which is a program under our control) is generating a media object, we can often provide sufficient recta-information about that object to generate a short caption and some running text. By default, all segues and spatial layout relations are also specified by the system, so the author does not have to know about these unless she wants to. Finally, the decision as to when to produce audio, when not specified by the author, is left to the system. When summarization filters are used (for create goals), the media type of the output is specified as a parameter to the filter. This media type may be converted to some other type by the system, e.g., text to speech conversion using Festival (Taylor et al. 98) . By default, all narrative nodes attempt to realize their goals as a speech media type, using rules based on text length and tnmcatability to less than 250 bytes to decide when to use text-to-speech. The truncation algorithm is based on dropping syntactic constituents, using a method similar to (Mani et al. 99) . Captions are always realized, in addition, as text (i.e., they have a text realization and a. possible audio realization).",
178
+ "cite_spans": [
179
+ {
180
+ "start": 1064,
181
+ "end": 1082,
182
+ "text": "(Taylor et al. 98)",
183
+ "ref_id": null
184
+ },
185
+ {
186
+ "start": 1380,
187
+ "end": 1396,
188
+ "text": "(Mani et al. 99)",
189
+ "ref_id": null
190
+ }
191
+ ],
192
+ "ref_spans": [],
193
+ "eq_spans": [],
194
+ "section": "Intelligent Multimedia Presentation Generation",
195
+ "sec_num": "3"
196
+ },
197
+ {
198
+ "text": "Spatial layout is decided in the Presentation Generator, after all the individual media objects are created along with their temporal constraints by the Content Executor. The layout algorithm walks through the temporal ordering in sequence, allocating a segment to each set of objects that is designated to occur simultaneously (grouped by par in the temporal constraints). Each segment can have up to 4 frames, in each of which a media object is displayed (thus, no more than 4 media objects can be displayed at the same time). Since media objects declared to be simultaneous (using par) in the temporal constraints will go together in a separate segment, the temporal constraints determine what elements are grouped together in a segment. The layout within a segment handles two special cases. Captions are placed directly undemeath their associated media object. Running text, when realized as text, is placed beside the media object being described, so that they are paired together visually. Thus, coherence of a segment is influenced mainly by the temporal constraints (which have been fleshed out by the Content Creator to include narrative nodes), with further handling of special cases. Of course, an individual summarization filter may choose to coordinate component multimedia objects in particular ways in the course of generating a composite multimedia object.",
199
+ "cite_spans": [],
200
+ "ref_spans": [],
201
+ "eq_spans": [],
202
+ "section": "Intelligent Multimedia Presentation Generation",
203
+ "sec_num": "3"
204
+ },
205
+ {
206
+ "text": "Details such as duration and onset of particular frames are specified in the translation to SMIL. Duration is determined by the number of frames present in a segment, unless there is an audio media object in the segment (this media object may have a spatial representation, e.g., as an audio icon, or it may not). If an audio media object occurs in a frame, the duration of all media objects in that frame is equal to the length of all the audio files in the segment. If there is no audio present in a segment, the duration is ot seconds (or has a default value of 5) times the number of frames created.",
207
+ "cite_spans": [],
208
+ "ref_spans": [],
209
+ "eq_spans": [],
210
+ "section": "Intelligent Multimedia Presentation Generation",
211
+ "sec_num": "3"
212
+ },
213
+ {
214
+ "text": "As mentioned above, create goals are satisfied by summarization filters, which create new media objects summarizing information sources. These programs are called summarization filters because in the course of condensing information, they take input information and turn it into some more abstract and useful representation, filtering out unimportant information. Such filters provide a novel way of carrying out content selection and creation for automated presentation generation.",
215
+ "cite_spans": [],
216
+ "ref_spans": [],
217
+ "eq_spans": [],
218
+ "section": "Summarization Filters",
219
+ "sec_num": "4"
220
+ },
221
+ {
222
+ "text": "Our approach relies on component-based software composition, i.e., assembly of software units that have contractually specified interfaces that can be independently deployed and reused. The idea of assembling complex language processing programs out of simpler ones is hardly new; however, by employing current industry standards to specify the interaction between the components, we simultaneously increase the robustness of the system, ensure the reusability of individual components and create a more fully plug-and-play capability. Among the core technology standards that support this plug-and-play component assembly capability are (a) Java interfaces, used to specify functions that all summarization components must implement in order to be used in the system, (b) the JavaBeans standard, which allows the parameters and methods of individual components to be inspected by the system and revealed to the users (c) the XML markup standard, which we have adopted as an intercomponent communication language. Using these technologies, legacy or third-party summarizers are incorporated into the system by \"wrapping\" them so as to meet the interface specification of the system. These technologies also make possible a graphical environment to assemble and configure complex summarization filters from individual summarization components.",
223
+ "cite_spans": [],
224
+ "ref_spans": [],
225
+ "eq_spans": [],
226
+ "section": "Summarization Filters",
227
+ "sec_num": "4"
228
+ },
229
+ {
230
+ "text": "Among the most important wins over the traditional \"piping\" approach to filter assembly is the ability to impose build-time restrictions on the component assembly, disallowing \"illegal\" compositions, e.g. component X cannot provide input to component Y unless X's output type corresponds to Y's input type.",
231
+ "cite_spans": [],
232
+ "ref_spans": [],
233
+ "eq_spans": [],
234
+ "section": "Summarization Filters",
235
+ "sec_num": "4"
236
+ },
237
+ {
238
+ "text": "Build-time restrictions such as these play a clear role in increasing the overall robustness of the run-time summarization system. Another build-time win lies in the ability of JavaBeans to be serialized, i.e., written to disk in such a way as to preserve ~he state of its parameters settings, ensuring that every component in the system can be configured and run at different times independently of whether the component provides a parameter file facility.",
239
+ "cite_spans": [],
240
+ "ref_spans": [],
241
+ "eq_spans": [],
242
+ "section": "Summarization Filters",
243
+ "sec_num": "4"
244
+ },
245
+ {
246
+ "text": "Establishing the standard functions required of a summarization filter is challenging on several fronts. One class of functions required by the interface is necessary to handle the technicalities of exchanging information between otherwise discrete components.",
247
+ "cite_spans": [],
248
+ "ref_spans": [],
249
+ "eq_spans": [],
250
+ "section": "Summarization Filters",
251
+ "sec_num": "4"
252
+ },
253
+ {
254
+ "text": "This set includes functions for discovering a component's input and output types, for handling messages, exceptions and events passed between components and for interpreting XML based on one or more system-wide document type definitions (DTDs). The other, more interesting set of functions gets to the core of summarization functionality. Selecting these functions involves identifying parameters likely to be broadly applicable across most or all summarizers and finding ways to group them and/or to generalize them. This is desirable in order to reduce the burden on the end user of understanding the subtle differences between the various settings in the summarizers available to her.",
255
+ "cite_spans": [],
256
+ "ref_spans": [],
257
+ "eq_spans": [],
258
+ "section": "Summarization Filters",
259
+ "sec_num": "4"
260
+ },
261
+ {
262
+ "text": "An. example of the difficulty inherent in this endeavor is provided by the compression (summary length divided by source length) vs.",
263
+ "cite_spans": [],
264
+ "ref_spans": [],
265
+ "eq_spans": [],
266
+ "section": "Summarization Filters",
267
+ "sec_num": "4"
268
+ },
269
+ {
270
+ "text": "reduction (l's complementof compression) vs. target length paradigm. Different summarizers will implement one or more of these. The wrapper maps from the high-level interface function, where the application/user can specify either compression or target length, but not both, to the individual summarizer's representation. Thus, a user doesn't need to know which representation(s) a particular summarizer uses for reduction/compression.",
271
+ "cite_spans": [],
272
+ "ref_spans": [],
273
+ "eq_spans": [],
274
+ "section": "Summarization Filters",
275
+ "sec_num": "4"
276
+ },
277
+ {
278
+ "text": "A vanilla summarization Bean includes the following functionality, which every summarizer must be able to provide methods for:",
279
+ "cite_spans": [],
280
+ "ref_spans": [],
281
+ "eq_spans": [],
282
+ "section": "Summarization Filters",
283
+ "sec_num": "4"
284
+ },
285
+ {
286
+ "text": "source: documents to be summarized (this can be a single document, or a collection) reduction-rate: either summary size/source size, or target length audience: user-focused or generic (user-focused requires the specification of a bag of terms, which can be of different types) output-type: specific data formats (specified by DTDs)",
287
+ "cite_spans": [],
288
+ "ref_spans": [],
289
+ "eq_spans": [],
290
+ "section": "Summarization Filters",
291
+ "sec_num": "4"
292
+ },
293
+ {
294
+ "text": "The above are parameters which we expect all summarizers to support. More specialized summarizer beans can be constructed to reflect groupings of summarizers. Among other parameters are output-fluency, which specifies whether a textual summary is to be made up of passages (sentences, paras, blocks), named entities, lists of words, phrases, or topics, etc. Given that definitions of summarization in more theoretical terms have not been entirely satisfactory (Mani 2000) , it is worth noting that the above vanilla Bean provides an operational definition of what a summarizer is.",
295
+ "cite_spans": [
296
+ {
297
+ "start": 460,
298
+ "end": 471,
299
+ "text": "(Mani 2000)",
300
+ "ref_id": "BIBREF5"
301
+ }
302
+ ],
303
+ "ref_spans": [],
304
+ "eq_spans": [],
305
+ "section": "Summarization Filters",
306
+ "sec_num": "4"
307
+ },
308
+ {
309
+ "text": "text, and segues. The captions and running text, when not provided by the filters, are provided by the script input. In the case of retrieve goals, the objects may not have any meta-information, in which case a default caption and running-text is generated. Clearly, a system's explanatory narrative will be enhanced by the availability of rich meta-information.",
310
+ "cite_spans": [],
311
+ "ref_spans": [],
312
+ "eq_spans": [],
313
+ "section": "Summarization Filters",
314
+ "sec_num": "4"
315
+ },
316
+ {
317
+ "text": "The segues are provided by the system. For example, an item with a label \"A biography of bin Laden\" could result in a generated segue \"Here is a biography of bin Laden\". The Content Creator, when providing content for narrative nodes, uses a variety of different canned text patterns. For the above example, the pattern would be \"Here is @6.label\", where 6 is the number of a non-narrative node, with label being its label.",
318
+ "cite_spans": [],
319
+ "ref_spans": [],
320
+ "eq_spans": [],
321
+ "section": "Summarization Filters",
322
+ "sec_num": "4"
323
+ },
324
+ {
325
+ "text": "In addition to its practical utility in the ability to assimilate, combine and reuse components in different combinations, and to do so within a GUI, this approach is interesting because it allows powerful summarization functions to be created by composing together simpler tools. (Note that this is different from automatically finding the best combination, which our system does not address). For example, Figure 2 illustrates a complex filter created by using a GUI to compose together a named entity extractor, a date extractor, a component which discovers significant associations between the two and writes the result to a table, and a visualizer which plots the results as a graph. The resulting summarizer takes in a large collection of documents, and produces as a summary a graph (a jpeg) of salient named entity mentions over time. Each of its components can be easily reused within the filter composition system to build other summarizers.",
326
+ "cite_spans": [],
327
+ "ref_spans": [
328
+ {
329
+ "start": 408,
330
+ "end": 416,
331
+ "text": "Figure 2",
332
+ "ref_id": null
333
+ }
334
+ ],
335
+ "eq_spans": [],
336
+ "section": "Figure 2: Summarization Filter Composition",
337
+ "sec_num": null
338
+ },
339
+ {
340
+ "text": "3 Coda \"This briefing has aszessed aspects of the situation in Peru. Overall, the crisis appears to be worsening.\"",
341
+ "cite_spans": [],
342
+ "ref_spans": [],
343
+ "eq_spans": [],
344
+ "section": "Narrative Summarization",
345
+ "sec_num": "5"
346
+ },
347
+ {
348
+ "text": "As mentioned above, the system can construct a narrative to accompany the briefing. Narrative nodes are generated to cover captions, running ",
349
+ "cite_spans": [],
350
+ "ref_spans": [],
351
+ "eq_spans": [],
352
+ "section": "Figure 3: Input Script",
353
+ "sec_num": null
354
+ },
355
+ {
356
+ "text": "All segue nodes are by default generated automatically by the system, based on node labels. We always introduce a segue node at the beginning of the presentation (called a preamble node), which provides a segue covering the \"crown\" of the tree, i.e., all nodes upto a particular depth d from the root (d=2) are marked with segue nodes. A segue node is also produced at the end (called a coda). (Both preamble and segue can of course be specified by the author if desired).",
357
+ "cite_spans": [],
358
+ "ref_spans": [],
359
+ "eq_spans": [],
360
+ "section": "Figure 4: Ground Script",
361
+ "sec_num": null
362
+ },
363
+ {
364
+ "text": "For introducing intervening segue nodes, we use the following algorithm based on the distance between nodes and the height in the tree, We traverse the non-narrative leaves of the tree in their temporal order, evaluating each pair of adjacent nodes A and B where A precedes B temporally. A segue is introduced between nodes A and B if either (a) the maximum of the 2 distances from A and B to their least common ancestor is greater than 3 nodes or (b) the sum of the 2 distances from A and B to the least common ancestor is greater than 4 nodes. This is less intrusive than introducing segues at random or between every pair of successive nodes, and appears to perform better than introducing a segue at each depth of the tree.",
365
+ "cite_spans": [],
366
+ "ref_spans": [],
367
+ "eq_spans": [],
368
+ "section": "Figure 4: Ground Script",
369
+ "sec_num": null
370
+ },
371
+ {
372
+ "text": "We currently have a working version of the system with a variety of different single and multi-document summarization filters. Figure 3 shows an input script created by an author (the scripts in Figure 3 and 4 are schematic representations of the scripts, rather than the raw XML). The script includes two create goals, one with a single-document generic summarization filter, the other with a multi-document userfocused summarization filter. Figure 4 shows the ground script which was created automatically by the Content Creator component. Note the addition of media type specifications, the introduction of narrative nodes, and the extension of the temporal constraints. The final presentation generated is shown in Figure 5 .",
373
+ "cite_spans": [],
374
+ "ref_spans": [
375
+ {
376
+ "start": 127,
377
+ "end": 135,
378
+ "text": "Figure 3",
379
+ "ref_id": null
380
+ },
381
+ {
382
+ "start": 195,
383
+ "end": 203,
384
+ "text": "Figure 3",
385
+ "ref_id": null
386
+ },
387
+ {
388
+ "start": 443,
389
+ "end": 451,
390
+ "text": "Figure 4",
391
+ "ref_id": null
392
+ },
393
+ {
394
+ "start": 719,
395
+ "end": 727,
396
+ "text": "Figure 5",
397
+ "ref_id": null
398
+ }
399
+ ],
400
+ "eq_spans": [],
401
+ "section": "An Example",
402
+ "sec_num": "6"
403
+ },
404
+ {
405
+ "text": "Here we show screen dumps of the six SMIL segments produced, with the audio if any for each segment indicated in this paper next to an audio icon.",
406
+ "cite_spans": [],
407
+ "ref_spans": [],
408
+ "eq_spans": [],
409
+ "section": "An Example",
410
+ "sec_num": "6"
411
+ },
412
+ {
413
+ "text": "The summarization filters have incorporated several summarizers, including some that have been evaluated in the DARPA SUMMAC conference . These carry out both single-document and multi-document summarization, and include a preliminary biographical summarizer we have developed.",
414
+ "cite_spans": [],
415
+ "ref_spans": [],
416
+ "eq_spans": [],
417
+ "section": "Status",
418
+ "sec_num": "7"
419
+ },
420
+ {
421
+ "text": "The running text for the biography table in the second-last segment of Figure 5 is produced from meta-information in the table XML generated by the biographical summarizer. The production method for running text uses canned text which should work for any input table conforming to that DTD.",
422
+ "cite_spans": [],
423
+ "ref_spans": [
424
+ {
425
+ "start": 71,
426
+ "end": 79,
427
+ "text": "Figure 5",
428
+ "ref_id": null
429
+ }
430
+ ],
431
+ "eq_spans": [],
432
+ "section": "Status",
433
+ "sec_num": "7"
434
+ },
435
+ {
436
+ "text": "The summarization filters are. being tested as part of a DARPA situated test with end-users. The briefing generator itself has been used internally to generate numerous briefings, and has been demonstrated as part of the DARPA system. We also expect to carry out an evaluation to assess the extent to which the automation described here provides efficiency gains in briefing production.",
437
+ "cite_spans": [],
438
+ "ref_spans": [],
439
+ "eq_spans": [],
440
+ "section": "Status",
441
+ "sec_num": "7"
442
+ },
443
+ {
444
+ "text": "There is a fair amount of work on automatic authoring of multimedia presentations, e.g., (Wahlster et al. 93) , (Dalai et al. 96) , (Mittal et al. 95) , (Andre and Rist 97) 5. These efforts differ from ours in two ways: first, unlike us, they are not open-domain; and, second, they don't use summarization components. While such efforts are extremely sophisticated compared to us in multimedia presentation planning and fine-grained coordination and synchronization capabilities, many of the components used in those efforts are clearly applicable to our work. For example, (Andre and Rist 96) include methods for leveraging lifelike characters in this process; these characters can be leveraged in our work as well, to help personify the computer narrator. In addition, our captions, which are very short, rely on canned text based on node labels in the initial script, or based on shallow meta-information generated by the summarization filter (in XML) along with the created media object. (Mittal et al. 95) describe a variety of strategies for generation of longer, more explanatory captions, some of which may be exploited in our work by deepening the level of recta-information, at least for summarization components developed by us.",
445
+ "cite_spans": [
446
+ {
447
+ "start": 89,
448
+ "end": 109,
449
+ "text": "(Wahlster et al. 93)",
450
+ "ref_id": null
451
+ },
452
+ {
453
+ "start": 112,
454
+ "end": 129,
455
+ "text": "(Dalai et al. 96)",
456
+ "ref_id": null
457
+ },
458
+ {
459
+ "start": 132,
460
+ "end": 150,
461
+ "text": "(Mittal et al. 95)",
462
+ "ref_id": null
463
+ },
464
+ {
465
+ "start": 992,
466
+ "end": 1010,
467
+ "text": "(Mittal et al. 95)",
468
+ "ref_id": null
469
+ }
470
+ ],
471
+ "ref_spans": [],
472
+ "eq_spans": [],
473
+ "section": "Related Work",
474
+ "sec_num": "8"
475
+ },
476
+ {
477
+ "text": "In our ability to leverage automatic summarization, our work should be clearly distinguished from work which attempts to format a summary (from an XML representation) into something akin to a Powerpoint briefing, e.g., (Nagao and Hasida 98) . Our work, by contrast, is focused on using summarization in generating briefings from an abstract outline.",
478
+ "cite_spans": [
479
+ {
480
+ "start": 219,
481
+ "end": 240,
482
+ "text": "(Nagao and Hasida 98)",
483
+ "ref_id": null
484
+ }
485
+ ],
486
+ "ref_spans": [],
487
+ "eq_spans": [],
488
+ "section": "Related Work",
489
+ "sec_num": "8"
490
+ },
491
+ {
492
+ "text": "We have described methods for leveraging automatic summarization in the automatic generation of multimedia briefings. This work has taken an open-domain approach, in order to meet the requirements of the DARPA application we are involved with. We believe there is a stronger role that NL generation can play in the narrative aspects of our briefings, which currently rely for the most part on canned text. Our future work on description merging in biographical summaries, and on introducing referring expressions into the narrative nodes, would in effect take advantage of more powerful generation methods, without sacrificing opendomain capabilities. This may require much richer meta-information specifications than the ones we currently use.",
493
+ "cite_spans": [],
494
+ "ref_spans": [],
495
+ "eq_spans": [],
496
+ "section": "Conclusion",
497
+ "sec_num": "9"
498
+ },
499
+ {
500
+ "text": "Finally, we have begun the design of the Script Creator GUI (the only component in Figure l remaining to be built). This will allow the author to create scripts for the briefing generator (instead of editing templates by hand), by laying out icons for media objects in temporal order. A user will be able to select a \"standard\" briefing template from a menu, and then view it in a briefing/template structure editor. The user can then provide content by adding annotations to any node in the briefing template. Here is the latest document summary.",
501
+ "cite_spans": [],
502
+ "ref_spans": [
503
+ {
504
+ "start": 83,
505
+ "end": 91,
506
+ "text": "Figure l",
507
+ "ref_id": null
508
+ }
509
+ ],
510
+ "eq_spans": [],
511
+ "section": "Conclusion",
512
+ "sec_num": "9"
513
+ },
514
+ {
515
+ "text": "This briefing has assessed aspects of the situation in Peru. Overall, the crisis appears to be worsening. I I I I I I I I I I I I I I I I I ",
516
+ "cite_spans": [],
517
+ "ref_spans": [
518
+ {
519
+ "start": 106,
520
+ "end": 155,
521
+ "text": "I I I I I I I I I I I I I I I I I",
522
+ "ref_id": null
523
+ }
524
+ ],
525
+ "eq_spans": [],
526
+ "section": "Conclusion",
527
+ "sec_num": "9"
528
+ },
529
+ {
530
+ "text": "h. ttp://www.w3.org/AudioVideo/ 3 www.real.com 4 www.oratrix.com _J",
531
+ "cite_spans": [],
532
+ "ref_spans": [],
533
+ "eq_spans": [],
534
+ "section": "",
535
+ "sec_num": null
536
+ }
537
+ ],
538
+ "back_matter": [],
539
+ "bib_entries": {
540
+ "BIBREF0": {
541
+ "ref_id": "b0",
542
+ "title": "Towards a New Generation of Hypermedia Systems: Extending Automated Presentation Design for Hypermedia",
543
+ "authors": [
544
+ {
545
+ "first": "E",
546
+ "middle": [],
547
+ "last": "Andre",
548
+ "suffix": ""
549
+ },
550
+ {
551
+ "first": "T",
552
+ "middle": [],
553
+ "last": "Rist",
554
+ "suffix": ""
555
+ }
556
+ ],
557
+ "year": 1997,
558
+ "venue": "",
559
+ "volume": "",
560
+ "issue": "",
561
+ "pages": "",
562
+ "other_ids": {},
563
+ "num": null,
564
+ "urls": [],
565
+ "raw_text": "Andre, E. and Rist, T. (1997) Towards a New Generation of Hypermedia Systems: Extending Automated Presentation Design for Hypermedia.",
566
+ "links": null
567
+ },
568
+ "BIBREF1": {
569
+ "ref_id": "b1",
570
+ "title": "Topics in Natural Interactive Systems 1. The Maersk Me-Kinney Moiler Institute for Production Technology",
571
+ "authors": [
572
+ {
573
+ "first": "L",
574
+ "middle": [],
575
+ "last": "Dybkjaer",
576
+ "suffix": ""
577
+ }
578
+ ],
579
+ "year": null,
580
+ "venue": "Proceedings of the Third Spoken Dialogue and Discourse Workshop",
581
+ "volume": "",
582
+ "issue": "",
583
+ "pages": "10--27",
584
+ "other_ids": {},
585
+ "num": null,
586
+ "urls": [],
587
+ "raw_text": "L. Dybkjaer, ed., Proceedings of the Third Spoken Dialogue and Discourse Workshop, Topics in Natural Interactive Systems 1. The Maersk Me- Kinney Moiler Institute for Production Technology, Odense University, Denmark, pp. 10- 27.",
588
+ "links": null
589
+ },
590
+ "BIBREF2": {
591
+ "ref_id": "b2",
592
+ "title": "Negotiation for Automated Generation of Temporal MultimediaPresentations",
593
+ "authors": [
594
+ {
595
+ "first": "M",
596
+ "middle": [],
597
+ "last": "Dalai",
598
+ "suffix": ""
599
+ },
600
+ {
601
+ "first": "S",
602
+ "middle": [],
603
+ "last": "Feiner",
604
+ "suffix": ""
605
+ },
606
+ {
607
+ "first": "K",
608
+ "middle": [],
609
+ "last": "Mckeown",
610
+ "suffix": ""
611
+ },
612
+ {
613
+ "first": "S",
614
+ "middle": [],
615
+ "last": "Pan",
616
+ "suffix": ""
617
+ },
618
+ {
619
+ "first": "M",
620
+ "middle": [],
621
+ "last": "Zhou",
622
+ "suffix": ""
623
+ },
624
+ {
625
+ "first": "T",
626
+ "middle": [],
627
+ "last": "Hollerer",
628
+ "suffix": ""
629
+ },
630
+ {
631
+ "first": "J",
632
+ "middle": [],
633
+ "last": "Shaw",
634
+ "suffix": ""
635
+ },
636
+ {
637
+ "first": "Y",
638
+ "middle": [],
639
+ "last": "Feng",
640
+ "suffix": ""
641
+ },
642
+ {
643
+ "first": "J",
644
+ "middle": [],
645
+ "last": "Fromer",
646
+ "suffix": ""
647
+ }
648
+ ],
649
+ "year": 1996,
650
+ "venue": "Proceedings of ACM Multimedia '96",
651
+ "volume": "",
652
+ "issue": "",
653
+ "pages": "",
654
+ "other_ids": {},
655
+ "num": null,
656
+ "urls": [],
657
+ "raw_text": "Dalai, M., Feiner, S., McKeown, K., Pan, S., Zhou, M., Hollerer, T., Shaw, J., Feng, Y., and Fromer, J. (1996) Negotiation for Automated Generation of Temporal MultimediaPresentations. Proceedings of ACM Multimedia '96.",
658
+ "links": null
659
+ },
660
+ "BIBREF3": {
661
+ "ref_id": "b3",
662
+ "title": "Improving Summaries by Revising Them",
663
+ "authors": [
664
+ {
665
+ "first": "1",
666
+ "middle": [],
667
+ "last": "Mani",
668
+ "suffix": ""
669
+ },
670
+ {
671
+ "first": "B",
672
+ "middle": [],
673
+ "last": "Gates",
674
+ "suffix": ""
675
+ },
676
+ {
677
+ "first": "E",
678
+ "middle": [],
679
+ "last": "Bloedorn",
680
+ "suffix": ""
681
+ }
682
+ ],
683
+ "year": 1999,
684
+ "venue": "Proceedings of the 37 ~ Annual Meeting of the Association for Computational Lihguistics",
685
+ "volume": "",
686
+ "issue": "",
687
+ "pages": "558--565",
688
+ "other_ids": {},
689
+ "num": null,
690
+ "urls": [],
691
+ "raw_text": "Mani, 1., Gates, B., and Bloedorn, E. (1999) Improving Summaries by Revising Them. Proceedings of the 37 ~ Annual Meeting of the Association for Computational Lihguistics, College Park, MD, pp. 558-565.",
692
+ "links": null
693
+ },
694
+ "BIBREF4": {
695
+ "ref_id": "b4",
696
+ "title": "The TIPSTER SUMMA C Text Summarization Evaluation",
697
+ "authors": [
698
+ {
699
+ "first": "1",
700
+ "middle": [],
701
+ "last": "Mani",
702
+ "suffix": ""
703
+ },
704
+ {
705
+ "first": "T",
706
+ "middle": [],
707
+ "last": "Firmin",
708
+ "suffix": ""
709
+ },
710
+ {
711
+ "first": "D",
712
+ "middle": [],
713
+ "last": "House",
714
+ "suffix": ""
715
+ },
716
+ {
717
+ "first": "G",
718
+ "middle": [],
719
+ "last": "Klein",
720
+ "suffix": ""
721
+ },
722
+ {
723
+ "first": "B",
724
+ "middle": [],
725
+ "last": "Sundheim",
726
+ "suffix": ""
727
+ },
728
+ {
729
+ "first": "L",
730
+ "middle": [],
731
+ "last": "Hirschman",
732
+ "suffix": ""
733
+ }
734
+ ],
735
+ "year": 1999,
736
+ "venue": "Proceedings of EACL'99",
737
+ "volume": "",
738
+ "issue": "",
739
+ "pages": "77--85",
740
+ "other_ids": {},
741
+ "num": null,
742
+ "urls": [],
743
+ "raw_text": "Mani, 1., Firmin, T., House, D., Klein, G., Sundheim, B., and Hirschman, L. (1999) The TIPSTER SUMMA C Text Summarization Evaluation. Proceedings of EACL'99, Bergen, Norway, pp. 77- 85.",
744
+ "links": null
745
+ },
746
+ "BIBREF5": {
747
+ "ref_id": "b5",
748
+ "title": "Automatic Text Summarization",
749
+ "authors": [
750
+ {
751
+ "first": "",
752
+ "middle": [],
753
+ "last": "Mani",
754
+ "suffix": ""
755
+ }
756
+ ],
757
+ "year": 2000,
758
+ "venue": "",
759
+ "volume": "",
760
+ "issue": "",
761
+ "pages": "",
762
+ "other_ids": {},
763
+ "num": null,
764
+ "urls": [],
765
+ "raw_text": "Mani, 1. (2000)Automatic Text Summarization. John Benjamins Publishing Company. To appear.",
766
+ "links": null
767
+ },
768
+ "BIBREF6": {
769
+ "ref_id": "b6",
770
+ "title": "Generating Explanatory Captions for Information Graphics",
771
+ "authors": [
772
+ {
773
+ "first": "V",
774
+ "middle": [],
775
+ "last": "Mittal",
776
+ "suffix": ""
777
+ },
778
+ {
779
+ "first": "S",
780
+ "middle": [],
781
+ "last": "Roth",
782
+ "suffix": ""
783
+ },
784
+ {
785
+ "first": "J",
786
+ "middle": [],
787
+ "last": "Moore",
788
+ "suffix": ""
789
+ },
790
+ {
791
+ "first": "J",
792
+ "middle": [],
793
+ "last": "Mattis",
794
+ "suffix": ""
795
+ },
796
+ {
797
+ "first": "G",
798
+ "middle": [],
799
+ "last": "Carenini",
800
+ "suffix": ""
801
+ }
802
+ ],
803
+ "year": 1995,
804
+ "venue": "Proceedings of the International Joint Conference on Artificial Intelligence (IJCAr95)",
805
+ "volume": "",
806
+ "issue": "",
807
+ "pages": "1276--1283",
808
+ "other_ids": {},
809
+ "num": null,
810
+ "urls": [],
811
+ "raw_text": "Mittal, V., Roth, S., Moore, J., Mattis, J., and Carenini, G. (1995) Generating Explanatory Captions for Information Graphics. Proceedings of the International Joint Conference on Artificial Intelligence (IJCAr95), pp. 1276-1283.",
812
+ "links": null
813
+ },
814
+ "BIBREF7": {
815
+ "ref_id": "b7",
816
+ "title": "Automatic Text Summarization Based on the Global Document Annotation",
817
+ "authors": [
818
+ {
819
+ "first": "K",
820
+ "middle": [],
821
+ "last": "Nagao",
822
+ "suffix": ""
823
+ },
824
+ {
825
+ "first": "K",
826
+ "middle": [],
827
+ "last": "Hasida",
828
+ "suffix": ""
829
+ },
830
+ {
831
+ "first": "K",
832
+ "middle": [],
833
+ "last": "",
834
+ "suffix": ""
835
+ }
836
+ ],
837
+ "year": 1998,
838
+ "venue": "Proceedings of COLING'98",
839
+ "volume": "",
840
+ "issue": "",
841
+ "pages": "917--921",
842
+ "other_ids": {},
843
+ "num": null,
844
+ "urls": [],
845
+ "raw_text": "Nagao, K. and K. Hasida, K. (1998) Automatic Text Summarization Based on the Global Document Annotation. Proceedings of COLING'98, Montreal, pp. 917-921.",
846
+ "links": null
847
+ },
848
+ "BIBREF8": {
849
+ "ref_id": "b8",
850
+ "title": "Multilingual\" Authoring using Feedback Texts",
851
+ "authors": [
852
+ {
853
+ "first": "R",
854
+ "middle": [],
855
+ "last": "Power",
856
+ "suffix": ""
857
+ },
858
+ {
859
+ "first": "D",
860
+ "middle": [],
861
+ "last": "Scott",
862
+ "suffix": ""
863
+ }
864
+ ],
865
+ "year": 1998,
866
+ "venue": "Proceedings of COLING'98",
867
+ "volume": "",
868
+ "issue": "",
869
+ "pages": "1053--1059",
870
+ "other_ids": {},
871
+ "num": null,
872
+ "urls": [],
873
+ "raw_text": "Power, R. and Scott, D. (1998) Multilingual\" Authoring using Feedback Texts. Proceedings of COLING'98, Montreal, pp. 1053-1059.",
874
+ "links": null
875
+ },
876
+ "BIBREF9": {
877
+ "ref_id": "b9",
878
+ "title": "The architecture of the Festival Speech Synthesis System",
879
+ "authors": [
880
+ {
881
+ "first": "P",
882
+ "middle": [],
883
+ "last": "Taylor",
884
+ "suffix": ""
885
+ },
886
+ {
887
+ "first": "A",
888
+ "middle": [],
889
+ "last": "Black",
890
+ "suffix": ""
891
+ },
892
+ {
893
+ "first": "R",
894
+ "middle": [],
895
+ "last": "Caley",
896
+ "suffix": ""
897
+ }
898
+ ],
899
+ "year": 1998,
900
+ "venue": "Proceedings of the Third ESCA Workshop on Speech Synthesis",
901
+ "volume": "",
902
+ "issue": "",
903
+ "pages": "147--151",
904
+ "other_ids": {},
905
+ "num": null,
906
+ "urls": [],
907
+ "raw_text": "Taylor, P., Black, A., and Caley, R. (1998) The architecture of the Festival Speech Synthesis System. Proceedings of the Third ESCA Workshop on Speech Synthesis, Jenolan Caves, Australia, pp. 147-151.",
908
+ "links": null
909
+ },
910
+ "BIBREF10": {
911
+ "ref_id": "b10",
912
+ "title": "Plan-Based Integration of Natural Language and Graphics Generation",
913
+ "authors": [
914
+ {
915
+ "first": "W",
916
+ "middle": [],
917
+ "last": "Wahlster",
918
+ "suffix": ""
919
+ },
920
+ {
921
+ "first": "E",
922
+ "middle": [],
923
+ "last": "Andre",
924
+ "suffix": ""
925
+ },
926
+ {
927
+ "first": "W",
928
+ "middle": [],
929
+ "last": "Finkler",
930
+ "suffix": ""
931
+ },
932
+ {
933
+ "first": "H.-J",
934
+ "middle": [],
935
+ "last": "Profitlich",
936
+ "suffix": ""
937
+ },
938
+ {
939
+ "first": "T",
940
+ "middle": [],
941
+ "last": "Rist",
942
+ "suffix": ""
943
+ }
944
+ ],
945
+ "year": 1993,
946
+ "venue": "AI Journal",
947
+ "volume": "",
948
+ "issue": "",
949
+ "pages": "",
950
+ "other_ids": {},
951
+ "num": null,
952
+ "urls": [],
953
+ "raw_text": "Wahlster, W., Andre, E., Finkler, W., Profitlich, H.- J., and Rist, T. (1993) Plan-Based Integration of Natural Language and Graphics Generation. AI Journal, 63.",
954
+ "links": null
955
+ }
956
+ },
957
+ "ref_entries": {
958
+ "FIGREF0": {
959
+ "num": null,
960
+ "type_str": "figure",
961
+ "uris": null,
962
+ "text": "Figure 1: System Architecture"
963
+ },
964
+ "FIGREF1": {
965
+ "num": null,
966
+ "type_str": "figure",
967
+ "uris": null,
968
+ "text": "Figure 5: Presentation"
969
+ },
970
+ "TABREF1": {
971
+ "text": "Victor Polay, also known as Comandante Rolando, is the Tupac Amaru founder, a Peruvian guerrilla commander, a former rebel leader, and the Tupac Amaru rebels' top leader. He studied in both France and Spain. His wife is Rosa Polay and his mother is Otilia Campos de Polay. His associates include Alan Garcia.",
972
+ "content": "<table><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td colspan=\"2\">Peru Action Brief \u2022 Exeeadv\u00a2 Smmmu'y</td><td colspan=\"2\">! ;!</td><td/></tr><tr><td/><td>o Hypothesis</td><td/><td/><td/><td/></tr><tr><td>I</td><td colspan=\"2\">\u2022 Situation Assessmml o Ehromdo.e~ of ]~','\u00a2nls o Biographles \u2022 SWuctm-ed A~,mneats</td><td colspan=\"2\">:i ii :,~ :~</td><td/></tr><tr><td>I</td><td>\u2022 .4Jtentadve Views \u2022 Derision,</td><td/><td colspan=\"2\">'i ~:i</td><td/></tr><tr><td>i</td><td colspan=\"3\">assessment. This will cover an overview of the .&lt;e In this briefing I will go over the situation</td><td/><td>\"e Next, a biography of Victor Polay.</td></tr><tr><td/><td colspan=\"3\">chronology of events and a profile of Victor</td><td/><td/></tr><tr><td>I</td><td>Polay.</td><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td colspan=\"2\">::. Here is an overview of the chronology of events.</td><td/><td/><td/></tr><tr><td>I</td><td>I I: (3qN-Peruvian cebel~ releet~e 2 bo,tages I III I II 3; JUOOUC ZOO hOS~flge~ ~.1~ d tn51cle the h~</td><td>-Dec. IS~h 0 ~' Japeme:~e</td><td>Illlll</td><td>~ii</td><td>i ::~</td></tr><tr><td/><td colspan=\"2\">Jt~loan=edor Boclhl=a koki, vhece Tupec Jtz~l~u rebel= were</td><td/><td colspan=\"2\">~!</td></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td>The user will</td></tr><tr><td>I</td><td/><td/><td/><td/><td>have a choice of saving the edit version in template form, or in SMIL or possibly Microsoft Powerpoint format.</td></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr><tr><td>I</td><td/><td/><td/><td/><td/></tr></table>",
973
+ "type_str": "table",
974
+ "html": null,
975
+ "num": null
976
+ }
977
+ }
978
+ }
979
+ }
Full_text_JSON/prefixW/json/W00/W00-0500.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0500",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:21.338068Z"
6
+ },
7
+ "title": "",
8
+ "authors": [],
9
+ "year": "",
10
+ "venue": null,
11
+ "identifiers": {},
12
+ "abstract": "",
13
+ "pdf_parse": {
14
+ "paper_id": "W00-0500",
15
+ "_pdf_hash": "",
16
+ "abstract": [],
17
+ "body_text": [],
18
+ "back_matter": [],
19
+ "bib_entries": {},
20
+ "ref_entries": {}
21
+ }
22
+ }
Full_text_JSON/prefixW/json/W00/W00-0501.json ADDED
@@ -0,0 +1,818 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0501",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:34:12.302030Z"
6
+ },
7
+ "title": "When is an Embedded MT System \"Good Enough\" for Filtering?",
8
+ "authors": [
9
+ {
10
+ "first": "Clare",
11
+ "middle": [
12
+ "R"
13
+ ],
14
+ "last": "Voss",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "Army Research Laboratory Adelphi",
18
+ "institution": "",
19
+ "location": {
20
+ "postCode": "20783",
21
+ "region": "MD"
22
+ }
23
+ },
24
+ "email": "voss@arl.mil"
25
+ },
26
+ {
27
+ "first": "Carol",
28
+ "middle": [],
29
+ "last": "Van Ess-Dykema",
30
+ "suffix": "",
31
+ "affiliation": {},
32
+ "email": ""
33
+ }
34
+ ],
35
+ "year": "",
36
+ "venue": null,
37
+ "identifiers": {},
38
+ "abstract": "This paper proposes an end-to-end process analysis template with replicable measures to evaluate the filtering performance of a Scan-OCR-MT system. Preliminary results 1 across three language-specific FALCon 2 systems show that, with one exception, the derived measures consistently yield the same performance ranking: Haitian Creole at the low end, Arabic in the middle, and Spanish at the high end.",
39
+ "pdf_parse": {
40
+ "paper_id": "W00-0501",
41
+ "_pdf_hash": "",
42
+ "abstract": [
43
+ {
44
+ "text": "This paper proposes an end-to-end process analysis template with replicable measures to evaluate the filtering performance of a Scan-OCR-MT system. Preliminary results 1 across three language-specific FALCon 2 systems show that, with one exception, the derived measures consistently yield the same performance ranking: Haitian Creole at the low end, Arabic in the middle, and Spanish at the high end.",
45
+ "cite_spans": [],
46
+ "ref_spans": [],
47
+ "eq_spans": [],
48
+ "section": "Abstract",
49
+ "sec_num": null
50
+ }
51
+ ],
52
+ "body_text": [
53
+ {
54
+ "text": "How do people quickly determine whether a particular foreign language text document is relevant to their interest when they do not understand that foreign language? FALCon, our embedded MT system, has been designed to assist an English-speaking person in filtering, i.e., deciding which foreign language documents are worth having an expert translator process further. In this paper, we seek to determine when such systems are \"good enough\" for filtering. We define \"filtering\" to be a forced-choice decision-making process on individual documents, where each document is assigned a single value, either a \"yes, relevant\" or a \"no, irrelevant\" by the system user) The singl edocument relevance assessment is performed For a more extensive report of our work, see Voss and Van Ess-Dykema (2000) . 2 FALCon (Forward Area Language CONverter) is a laptop-based embedded MT system integrated at the Army Research Laboratory for field use. (Fisher and Voss, 1997) 3 See the report entitled \"Mulfilingual Information Management: Current Levels and Future Abilities\" for other definitions of filtering, available at http://www.cs.cmu.edu/People/ref/mlim/. independent of the content of other documents in the processing collection.",
55
+ "cite_spans": [
56
+ {
57
+ "start": 763,
58
+ "end": 793,
59
+ "text": "Voss and Van Ess-Dykema (2000)",
60
+ "ref_id": null
61
+ },
62
+ {
63
+ "start": 934,
64
+ "end": 957,
65
+ "text": "(Fisher and Voss, 1997)",
66
+ "ref_id": "BIBREF1"
67
+ }
68
+ ],
69
+ "ref_spans": [],
70
+ "eq_spans": [],
71
+ "section": "The Filtering Problem",
72
+ "sec_num": "1"
73
+ },
74
+ {
75
+ "text": "When Church and Hovy (1993) introduced the notion that \"crummy\" MT engines could be put to good use on tasks less-demanding than publication-quality translation, MT research efforts did not typically evaluate system performance in the context of specific tasks. (Sparck Jones and Galliers, 1996) . In the last few years, however, the Church and Hovy insight has led to innovative experiments, like those reported by Resnik (1997) , Pomarede et al. (1998) , and Taylor and White (1998) , using task-based evaluation methods. Most recently, research on task-based evaluation has been. proposed within TIDES, a recent DARPA initiative whose goals include enabling Englishspeaking individuals to access, correlate, and interpret multilingual sources of information (DARPA, 1999; Harmon, 1999) .",
76
+ "cite_spans": [
77
+ {
78
+ "start": 5,
79
+ "end": 27,
80
+ "text": "Church and Hovy (1993)",
81
+ "ref_id": "BIBREF0"
82
+ },
83
+ {
84
+ "start": 270,
85
+ "end": 295,
86
+ "text": "Jones and Galliers, 1996)",
87
+ "ref_id": "BIBREF6"
88
+ },
89
+ {
90
+ "start": 416,
91
+ "end": 429,
92
+ "text": "Resnik (1997)",
93
+ "ref_id": "BIBREF4"
94
+ },
95
+ {
96
+ "start": 432,
97
+ "end": 454,
98
+ "text": "Pomarede et al. (1998)",
99
+ "ref_id": "BIBREF3"
100
+ },
101
+ {
102
+ "start": 461,
103
+ "end": 484,
104
+ "text": "Taylor and White (1998)",
105
+ "ref_id": "BIBREF7"
106
+ },
107
+ {
108
+ "start": 761,
109
+ "end": 774,
110
+ "text": "(DARPA, 1999;",
111
+ "ref_id": null
112
+ },
113
+ {
114
+ "start": 775,
115
+ "end": 788,
116
+ "text": "Harmon, 1999)",
117
+ "ref_id": "BIBREF2"
118
+ }
119
+ ],
120
+ "ref_spans": [],
121
+ "eq_spans": [],
122
+ "section": "The Filtering Problem",
123
+ "sec_num": "1"
124
+ },
125
+ {
126
+ "text": "This paper introduces a method of assessing when an embedded MT system is \"good enough\" for the filtering of hard-copy foreign language (FL) documents by individuals with no knowledge of that language. We describe preliminary work developing measures on system-internal components that assess: (i) the flow of words relevant to the filtering task and domain through the steps of document processing in our embedded MT system, and (ii) the level of \"noise,\" i.e., processing errors, passing through the system. We present an analysis template that displays the processing steps, the sequence of document versions, and the basic measures of our evaluation method. After tracing the processing of Spanish, Arabic, and Haitian Creole parallel texts that is recorded in the analysis templates, we discuss our preliminary results on the filtering performance of the three language-specific embedded MT systems from this process flow. An Embedded MT System Design 4",
127
+ "cite_spans": [],
128
+ "ref_spans": [],
129
+ "eq_spans": [],
130
+ "section": "The Filtering Problem",
131
+ "sec_num": "1"
132
+ },
133
+ {
134
+ "text": "Our three systems process documents using a sequence of three software modules. First, the Scan software module creates an online bitmap image in real-time as the user feeds the document into the page-feed scanner-. 5 Second, the optical character recognition (OCR) software converts that image to character text and, third, the machine translation (MT) software converts the foreign language character text to English, where it may be stored to disk or displayed on screen directly to the user. The user interface only requires that the user push one or two buttons to carry out all of the system's processing on an individual document.",
135
+ "cite_spans": [],
136
+ "ref_spans": [],
137
+ "eq_spans": [],
138
+ "section": "The Filtering Problem",
139
+ "sec_num": "1"
140
+ },
141
+ {
142
+ "text": "We tested three separate language-specific embedded MT systems for Spanish, Arabic and Haitian Creole. These systems differ in their 4 We use \"embedded MT systems\" as defined in Voss and Reeder (1998) . 5 We chose a small scanner for portability of the system. Substituting in a flatbed scanner would not affect performance.",
143
+ "cite_spans": [
144
+ {
145
+ "start": 178,
146
+ "end": 200,
147
+ "text": "Voss and Reeder (1998)",
148
+ "ref_id": null
149
+ }
150
+ ],
151
+ "ref_spans": [],
152
+ "eq_spans": [],
153
+ "section": "The Filtering Problem",
154
+ "sec_num": "1"
155
+ },
156
+ {
157
+ "text": "OCR and MT components, but otherwise they share the same software, Omnipage's Paperport for scanning and Windows95 as the operating system. 6",
158
+ "cite_spans": [],
159
+ "ref_spans": [],
160
+ "eq_spans": [],
161
+ "section": "The Filtering Problem",
162
+ "sec_num": "1"
163
+ },
164
+ {
165
+ "text": "3 Approach",
166
+ "cite_spans": [],
167
+ "ref_spans": [],
168
+ "eq_spans": [],
169
+ "section": "The Filtering Problem",
170
+ "sec_num": "1"
171
+ },
172
+ {
173
+ "text": "As we sought to measure the performance of each component in the systems, it quickly became apparent that not all available measures may be equally applicable for our filtering task. For example, counting the number of source language (SL) characters correctly OCR-ed may be overly specific: as discussed below, we only need to make use of the number of SL words that are correctly OCR-ed. In the sections to follow, we describe those measures that have been most informative for the task of filtering.",
174
+ "cite_spans": [],
175
+ "ref_spans": [],
176
+ "eq_spans": [],
177
+ "section": "The Filtering Problem",
178
+ "sec_num": "1"
179
+ },
180
+ {
181
+ "text": "We use three types of information in our evaluation of the end-to-end embedded MT systems that we have available to us: transformation processes, document versions, and basic count measures. The transformation processes are listed vertically in the diamonds on the left side of figure 1. Starting with the hardcopy original document, each process transforms its input text and creates a new version. These document versions are listed vertically in the boxes in the second column of the figure. For each version, we compute one or more basic count measures on the words in that version's text. That is, for each process, there is an associated document version and for each document version, there are associated basic count measures. These count measures shown as A. through M. are defined in figure 2 below.",
182
+ "cite_spans": [],
183
+ "ref_spans": [],
184
+ "eq_spans": [],
185
+ "section": "Analysis Template",
186
+ "sec_num": null
187
+ },
188
+ {
189
+ "text": "For each end-to-end system and language pair, we follow two separate passes in creating analysis files from scanned-in bitmap images. The first pass is for end-to-end Scan-OCR-MT evaluation: \"OCR\" the original document, then MT the resulting OCR-output file. The second pass is for Ground Truth-MT evaluation: \"ground-truth\" (GT) the original document, then MT the resulting GT-ed output file.",
190
+ "cite_spans": [],
191
+ "ref_spans": [],
192
+ "eq_spans": [],
193
+ "section": "Two-Pass Evaluation",
194
+ "sec_num": null
195
+ },
196
+ {
197
+ "text": "A.",
198
+ "cite_spans": [],
199
+ "ref_spans": [],
200
+ "eq_spans": [],
201
+ "section": "Two-Pass Evaluation",
202
+ "sec_num": null
203
+ },
204
+ {
205
+ "text": "B.",
206
+ "cite_spans": [],
207
+ "ref_spans": [],
208
+ "eq_spans": [],
209
+ "section": "Two-Pass Evaluation",
210
+ "sec_num": null
211
+ },
212
+ {
213
+ "text": "\"~ C. :",
214
+ "cite_spans": [],
215
+ "ref_spans": [],
216
+ "eq_spans": [],
217
+ "section": "Measures image doe",
218
+ "sec_num": null
219
+ },
220
+ {
221
+ "text": "# \"words\" lost/added~ [ t i,,OCR oo , J I SPANISH S CAN/ GT/ OCR/ MT MT H. _I. ..................... # dosLed class i \u2022 # incorrect i L ............. M~ ..................... t,-wor~-A-7~.--)l rol~,~it i I *ords in TL I.",
222
+ "cite_spans": [],
223
+ "ref_spans": [],
224
+ "eq_spans": [],
225
+ "section": "Measures image doe",
226
+ "sec_num": null
227
+ },
228
+ {
229
+ "text": "The two passes represent the \"worst\" and \"best\" cases respectively for filtering within each of the three embedded MT systems. By \"ground truth\" versions of the document, we mean online duplicated versions that match, character-for-character, the input text.",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "Figure2 Comparison of",
234
+ "sec_num": null
235
+ },
236
+ {
237
+ "text": "We intentionally chose low-performance OCR software (for each language) to simulate a \"worst case\" performance by our systems, enabling us to compare them with the ideal high-performance ground-truth input to simulate a \"best case\" performance.",
238
+ "cite_spans": [],
239
+ "ref_spans": [],
240
+ "eq_spans": [],
241
+ "section": "Figure2 Comparison of",
242
+ "sec_num": null
243
+ },
244
+ {
245
+ "text": "Texts from the Center for Disease Control In order to compare the three language-specific systems, we had to find a corpus in a domain well-defined for filtering 7 that included parallel texts in Spanish, Arabic, and Haitian Creole. We found parallel corpora for these and many other",
246
+ "cite_spans": [],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "Figure2 Comparison of",
250
+ "sec_num": null
251
+ },
252
+ {
253
+ "text": "ARABIC SCAN/OCR/MT GT/MT HAITIAN CREOLE SCAN/OCR/MT GT/MT Language-Specific System Results",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "Figure2 Comparison of",
258
+ "sec_num": null
259
+ },
260
+ {
261
+ "text": "languages at a website of the Center for Disease Control (CDC). 8",
262
+ "cite_spans": [],
263
+ "ref_spans": [],
264
+ "eq_spans": [],
265
+ "section": "Figure2 Comparison of",
266
+ "sec_num": null
267
+ },
268
+ {
269
+ "text": "We chose a paragraph from the chicken pox/varicella bulletin, page 2, for each of our three languages. This passage contains narrative full-length sentences and minimizes the OCR complications that arise with variable layouts. Our objective for selecting this input paragraph was to illustrate our methodology in a tractable way for multiple languages. Our next step will be to increase the amount of data analyzed for each language.",
270
+ "cite_spans": [],
271
+ "ref_spans": [],
272
+ "eq_spans": [],
273
+ "section": "Figure2 Comparison of",
274
+ "sec_num": null
275
+ },
276
+ {
277
+ "text": "We fill out one analysis template for each document tested in a language-specific system. Example templates with the basic count",
278
+ "cite_spans": [],
279
+ "ref_spans": [],
280
+ "eq_spans": [],
281
+ "section": "Analyses",
282
+ "sec_num": "4"
283
+ },
284
+ {
285
+ "text": "II II 7",
286
+ "cite_spans": [],
287
+ "ref_spans": [],
288
+ "eq_spans": [],
289
+ "section": "Analyses",
290
+ "sec_num": "4"
291
+ },
292
+ {
293
+ "text": "Filtering judgments are well-defined when multiple readers of a text in a domain agree on the \"yes, relevant\" status of the text.",
294
+ "cite_spans": [],
295
+ "ref_spans": [],
296
+ "eq_spans": [],
297
+ "section": "Analyses",
298
+ "sec_num": "4"
299
+ },
300
+ {
301
+ "text": "8 See http://www.immunize.org/vis/index.htm. The texts are \"Vaccine Information Statements\" describing basic medical symptoms that individuals should know about in advance of being vaccinated.",
302
+ "cite_spans": [],
303
+ "ref_spans": [],
304
+ "eq_spans": [],
305
+ "section": "Analyses",
306
+ "sec_num": "4"
307
+ },
308
+ {
309
+ "text": "measures 9 are presented in figure 2 for each of the three embedded MT systems that we tested. Notice that in figure 2 we distinguish valid words of a language from OCR-generated strings of characters that we identify as \"words.\" The latter \"words\" may include any of the following: wordstrings with OCR-induced spelling changes (valid or invalid for the specific language), wordstrings duplicating misspellings in the source document, and words accurately OCR-ed. \"Words\" may also be lost in the MT process (see F.). 10 The wide, block arrow in figure 2 connects E. and G. because they are both based on the MT output document. (We do not compute a sum for these counts because the E \"words\" are in the SL and the G words are in the TL.) The open class words (see H.) are nouns, verbs, adjectives, and adverbs. Closed class words (see I.) include all parts of speech not listed as open class categories.",
310
+ "cite_spans": [],
311
+ "ref_spans": [],
312
+ "eq_spans": [],
313
+ "section": "Analyses",
314
+ "sec_num": "4"
315
+ },
316
+ {
317
+ "text": "In this methodology, we track the content words that ultimately contribute to the final filtering decision. Clearly for other tasks, such as summarization or information extraction, other measures may be more appropriate. The basic count measures A. through M. are preliminary and will require refinement as more data sets are tested. From these basic count measures, we define four derived percentage measures in section 5 and summarize these cases across our three systems in figure 3 of that section.",
318
+ "cite_spans": [],
319
+ "ref_spans": [],
320
+ "eq_spans": [],
321
+ "section": "Analyses",
322
+ "sec_num": "4"
323
+ },
324
+ {
325
+ "text": "\"Worst\" case (Scan-OCR-MT pass) As can be seen in figure 2, not all of the original 80 Spanish words in the source document retain their correct spelling after being OCR-ed. Only 26 OCR-ed \"words\" are found in the MT lexicon, i.e., recognized as valid Spanish words. Forty-nine of the OCR-ed \"words\" are treated as \"not found words\" (NFWs) by the MT engine, even though they may in fact be actual Spanish words. Five other OCR-ed \"words\" are lost in 9 The following formulas summarize the relations among the count measures: A = B+C; B = D+E+F; G = H+I; H = J+K; J = L+M. 10 For example, we found that the word la in the Spanish text was not present in the TL output, i.e., the English equivalent the did not appear in the English translation. the MT process. Thus, the OCR process reduced the number of Spanish words that the MT engine could accept as input by more than 60%.",
326
+ "cite_spans": [],
327
+ "ref_spans": [],
328
+ "eq_spans": [],
329
+ "section": "Embedded Spanish MT System Test",
330
+ "sec_num": "4.1"
331
+ },
332
+ {
333
+ "text": "Of the remaining 40% that generated 29 English words, we found that 5 were \"filter-relevant\" as follows. The MT engine ignored 49 post-OCR Spanish \"words\" and working from the remaining 26 Spanish words, generated 29 English words. 11 Seventeen were open class words and 12 were closed class words. Nearly all of the open class words were translated correctly or were semantically appropriate for the domain (16 out of 17). From this correct set of 16 open class words, 5 were domain-relevant and 9 were not. That is, 5 of the 29 generated English words, or 17%, were semantically related and domain relevant words, i.e., triggers for filtering judgments.",
334
+ "cite_spans": [],
335
+ "ref_spans": [],
336
+ "eq_spans": [],
337
+ "section": "Embedded Spanish MT System Test",
338
+ "sec_num": "4.1"
339
+ },
340
+ {
341
+ "text": "\"Best\" case (GT-MT pass) The MT engine generated 77 English words from the 80 original Spanish words. Thirty-eight, or half of the 77, were open class words; 39 were closed class words. All of the 38 open class words were correctly translated or semantically related to the preferred translation. And half of those, 17, were domain-relevant. Thus, the 77 English words generated by the MT engine contained 17 \"filter-relevant\" words, or 22%.",
342
+ "cite_spans": [],
343
+ "ref_spans": [],
344
+ "eq_spans": [],
345
+ "section": "Embedded Spanish MT System Test",
346
+ "sec_num": "4.1"
347
+ },
348
+ {
349
+ "text": "Surprisingly the GT-MT pass only yields a 5% improvement in filtering judgments over the Scan-OCR-MT pass, even though the OCR itself reduced the number of Spanish words that the MT engine could accept as input by more than 60%. We must be cautious in interpreting the significance of this comparison, given the single, short paragraph used only for illustrating our methodology.",
350
+ "cite_spans": [],
351
+ "ref_spans": [],
352
+ "eq_spans": [],
353
+ "section": "Comparing the Two Passes",
354
+ "sec_num": null
355
+ },
356
+ {
357
+ "text": "\"Worst\" case (Scan-OCR-MT pass) The OCR process converted the original 84 Arabic words into 88 \"words\". Of the original 84 Arabic words in the source document, only 55 retain their correct spelling after being OCR-ed and are found in the MT lexicon, i.e., recognized as valid Arabic words. Ten of the other OCR-ed \"words\" are treated as NFWs by the MT engine. The remaining 23 OCR-ed \"words\", a mixture of original words and OCR-induced strings, are not found in the Arabic MT lexicon. Thus, the OCR process reduced the number of original Arabic words that the MT engine could accept as input by slightly more than 65%.",
358
+ "cite_spans": [],
359
+ "ref_spans": [],
360
+ "eq_spans": [],
361
+ "section": "Embedded Arabic MT System Test",
362
+ "sec_num": "4.2"
363
+ },
364
+ {
365
+ "text": "Of the remaining 35% that generated 70 English words, we found that 7 were \"filter-relevant\" as follows. The MT lexicon did not contain 10 post-OCR Arabic \"words\" and working from the remaining 55 Arabic words, the MT engine generated 70 English words. 12 Thirty of the 70 were open class words and 40 were closed class words. Only one-third of the open class words were translated correctly or were semantically appropriate for the domain (10 out of 30). From this correct set of 10 open class words, 7 were domain-relevant and 3 were not. Thus, this pass yields 7 of the 70 generated English words, or 10%, as semantically related and domain-relevant words for filtering judgments.",
366
+ "cite_spans": [],
367
+ "ref_spans": [],
368
+ "eq_spans": [],
369
+ "section": "Embedded Arabic MT System Test",
370
+ "sec_num": "4.2"
371
+ },
372
+ {
373
+ "text": "\"Best\" case (GT-MT pass) Of the 84 original Arabic words, even with the GT as input, 28 were not found in the MT lexicon, reflecting the engine's emerging status and the need for further development. Two others were not found in the Arabic MT lexicon, leaving 54 remaining words as input to the MT engine. The MT engine generated 68 English words from these 54 words. Thirty-one of the 68 were open class words; 37 were closed class words. Of the open class words, 25 were translated correctly or semantically related. And 8 of those 25 were domain-relevant. Thus, the 68 English words generated by the MT engine contained 8 \"filter-relevant\" words, or 12%.",
374
+ "cite_spans": [],
375
+ "ref_spans": [],
376
+ "eq_spans": [],
377
+ "section": "Embedded Arabic MT System Test",
378
+ "sec_num": "4.2"
379
+ },
380
+ {
381
+ "text": "The GT-MT pass yields a 2% improvement in filtering judgments over the Scan-OCR-MT pass, even though the OCR itself reduced the 12 This expansion rate is consistent with the rule-of-thumb that Arabic linguists have for every one Arabic word yielding on average 1.3 words in English. number of Arabic words that the MT engine could accept as input by about 65%.",
382
+ "cite_spans": [],
383
+ "ref_spans": [],
384
+ "eq_spans": [],
385
+ "section": "Comparing the Two Passes",
386
+ "sec_num": null
387
+ },
388
+ {
389
+ "text": "One of the interesting findings about OCR-ed Arabic \"words\" was the presence of \"false positives,\" inaccurately OCR-ed source document words that were nonetheless valid in Arabic. That is, we found instances of valid Arabic words in the OCR output that appeared as different words in the original document. 13",
390
+ "cite_spans": [],
391
+ "ref_spans": [],
392
+ "eq_spans": [],
393
+ "section": "Comparing the Two Passes",
394
+ "sec_num": null
395
+ },
396
+ {
397
+ "text": "\"Worst\" case (Scan-OCR-MT pass) In the template for the 76-word Haitian Creole source document, we see that 27 words were lost in the OCR process, leaving only 49 in the post-OCR document. Of those 49, only 20 exhibit their correct spelling after being OCR-ed and are found in the MT lexicon. Twenty-nine of the 49 OCR-ed \"words\" are not found (NFWs) by the MT engine. The OCR process reduced the number of original Haitian Creole words acceptable by the MT engine from 76 to 20, or 74%.",
398
+ "cite_spans": [],
399
+ "ref_spans": [],
400
+ "eq_spans": [],
401
+ "section": "Embedded Haitian MT System Test",
402
+ "sec_num": "4.3"
403
+ },
404
+ {
405
+ "text": "Of the remaining 26% that generated 22 English words, we found that none were \"filter-relevant,\" i.e., 0%, as follows. The MT engine ignored 29 post-OCR \"words\" and working from the remaining 20 Haitian words, generated 22 English words. Ten were open class words and 12 were closed class words. Only 2 out of the 10 open class words were translated correctly or were semantically appropriate for the domain. From this correct set of 2 open class words, none were domain-relevant. The human would be unable to use this final document version to make his or her filtering relevance judgments.",
406
+ "cite_spans": [],
407
+ "ref_spans": [],
408
+ "eq_spans": [],
409
+ "section": "Embedded Haitian MT System Test",
410
+ "sec_num": "4.3"
411
+ },
412
+ {
413
+ "text": "\"Best\" case (GT-MT pass) The MT engine generated 63 English words from the 76 original Haitian Creole words. Thirty of the 63 were open class words; 33 were closed class words. Only 11 of the 30 open class words were correctly translated or semantically related. Of those 11 words, 3 were domain-relevant. So, from the 63 generated English words, only 3 were \"filter-relevant\", or 5%.",
414
+ "cite_spans": [],
415
+ "ref_spans": [],
416
+ "eq_spans": [],
417
+ "section": "Embedded Haitian MT System Test",
418
+ "sec_num": "4.3"
419
+ },
420
+ {
421
+ "text": "With an OCR package not trained for this specific language and an MT engine from a research effort, the embedded MT system with these components does not assist the human on the filtering task. And even with the ground-truth input, the MT engine is not sufficiently robust to produce useful translations of valid Haitian Creole words.",
422
+ "cite_spans": [],
423
+ "ref_spans": [],
424
+ "eq_spans": [],
425
+ "section": "Comparing the Two Passes",
426
+ "sec_num": null
427
+ },
428
+ {
429
+ "text": "In figure 3, where we compare the three language-specific systems, we make use of four measures derived from the basic counts, A. through M., as defined in figure 2.",
430
+ "cite_spans": [],
431
+ "ref_spans": [],
432
+ "eq_spans": [],
433
+ "section": "Cross-System Results",
434
+ "sec_num": "5"
435
+ },
436
+ {
437
+ "text": "This measure on the GT pass in all 3 systems gives us the proportion of words in the original SL document that are in the individual MT lexicons. The Spanish lexicon is strong for the domain of our document (W = 95%). The measures for Arabic and Haitian Creole reflect the fact that their MT lexicons are still under development (W = 64% and 79%, respectively). This measure on the OCR pass, given the corresponding measure on the GT pass as a baseline, captures the degradation introduced by the Scan-OCR processing of the document. From figure 3 we see that the Spanish system loses approximately 55% of its original document words going into the MT engine (95% minus 40%), the Haitian Creole 53% (79% minus 26%), and the Arabic 29% (64% minus 35%). Recall that the Spanish and Haitian Creole systems included the same OCR software, which may account for the similar level of performance here. This software was not available to us for Arabic.",
438
+ "cite_spans": [],
439
+ "ref_spans": [],
440
+ "eq_spans": [],
441
+ "section": "W. Original Doeument-MT Word Recall % of original SL document words translatable by the MT engine after being OCR-ed. (D/A)",
442
+ "sec_num": null
443
+ },
444
+ {
445
+ "text": "This measure is intended to assess whether a system can be used for filtering broad-level topics (in contrast to domains with specialized vocabulary that we discuss below). Here we see evidence for two patterns that recur in the two measures below. First, the GT pass---with one exception---exhibits better performance than the OCR pass. Second, there is a ranking of the systems with Haitian Creole at the low end, Arabic in the middle, and Spanish at the high end. We will need more data to determine the significance of the one exception (55% versus 49%).",
446
+ "cite_spans": [],
447
+ "ref_spans": [],
448
+ "eq_spans": [],
449
+ "section": "X. MT Semantic Adequacy % of TL words generated by MT engine that are open class & semantically adequate in their translation (J/G )",
450
+ "sec_num": null
451
+ },
452
+ {
453
+ "text": "In all of the systems there was a slight gain in domain-relevant filtering performance from the OCR pass to the GT pass. We can rank the systems with the Haitian Creole at the low end, the Arabic in the middle, and the Spanish at the high end: the measures in both the OCR and GT passes in Haitian Creole are lower than in the Arabic, which are lower than in the Spanish. Only the Spanish documents, but not the Arabic or Haitian Creole ones, when machine translated in either pass were judged domain-relevant by five people during an informal test. 14 Thus, our data suggests that the Spanish system's lower bound (OCR pass) of 17% on this measure is needed for filtering.",
454
+ "cite_spans": [],
455
+ "ref_spans": [],
456
+ "eq_spans": [],
457
+ "section": "Y. MT Domain-Relevant Adequacy % of TL words generated by MT engine that are open class, semantically adequate in their translation, and domain-relevant (L/G)",
458
+ "sec_num": null
459
+ },
460
+ {
461
+ "text": "The same pattern emerges with this measure. In each system there is an improvement in performance stepping from the OCR pass to the GT pass. Across systems we see the same ranking, with the OCR and GT passes of the Haitian Creole falling below the Arabic which falls below the Spanish.",
462
+ "cite_spans": [],
463
+ "ref_spans": [],
464
+ "eq_spans": [],
465
+ "section": "Z. MT Open Class Semantic Adequacy % of open class TL words generated by MT engine that are semantically adequate in their translation (J/H)",
466
+ "sec_num": null
467
+ },
468
+ {
469
+ "text": "Our main contribution has been the proposal of an end-to-end process analysis template and a replicable evaluation methodology. We present measures to evaluate filtering performance and preliminary results on Spanish, Arabic and Haitian Creole FALCon systems.",
470
+ "cite_spans": [],
471
+ "ref_spans": [],
472
+ "eq_spans": [],
473
+ "section": "Conclusion and Future Work",
474
+ "sec_num": null
475
+ },
476
+ {
477
+ "text": "The cross-system comparisons using the measures presented, with one exception, yielded the following expected rankings: (i) the GT-MT pass exhibits better performance than the Scan-OCR-MT pass and (ii) the Haitian Creole system is at the low end, Arabic is in the middle, and Spanish is at the high end.",
478
+ "cite_spans": [],
479
+ "ref_spans": [],
480
+ "eq_spans": [],
481
+ "section": "Conclusion and Future Work",
482
+ "sec_num": null
483
+ },
484
+ {
485
+ "text": "Our long-term objective is to compare the results of the system-internal \"measures of performance\" (MOPs) presented here with results we still need from system-external \"measures of effectiveness\" (MOEs). 15 MOE-based methods evaluate (i) baseline unaided human performance, (ii) human performance using a new system and (iii) human expert performance. From this comparison we will be able to determine whether these two independently derived sets of measures are replicable and validate each other. So far, we have only addressed our original question, \"when is an embedded MT system good enough for filtering?\" in terms of MOPs. We found that, for our particular passage in the medical domain, documents need to reach at least 17% on our derived measure Y., MT domain-relevant adequacy (recall the discussion of derived measure Y. in section 5).",
486
+ "cite_spans": [],
487
+ "ref_spans": [],
488
+ "eq_spans": [],
489
+ "section": "Conclusion and Future Work",
490
+ "sec_num": null
491
+ },
492
+ {
493
+ "text": "Given that all but one process step (\"ID wrong TL words\" as shown in figure 1 where a human stick figure appears) in filling the template can be automated, the next phase of this work will be to create a software tool to speed up and systematize this process, improving our system evaluation by increasing the number of 15 See Roche and Watts (1991) for definitions of these terms. documents that can be regularly Used to test each new system and reducing the burden on the operational linguists who assist us for the one critical step. Currently available tools for parallel text processing, including text alignment software, may provide new user interface options as well, improving the interactive assessment process and possibly extending the input set to include transcribed speech.",
494
+ "cite_spans": [
495
+ {
496
+ "start": 327,
497
+ "end": 349,
498
+ "text": "Roche and Watts (1991)",
499
+ "ref_id": "BIBREF5"
500
+ }
501
+ ],
502
+ "ref_spans": [],
503
+ "eq_spans": [],
504
+ "section": "Conclusion and Future Work",
505
+ "sec_num": null
506
+ },
507
+ {
508
+ "text": "See Voss and Van Ess-Dykema (2000) for a description of the products used.",
509
+ "cite_spans": [],
510
+ "ref_spans": [],
511
+ "eq_spans": [],
512
+ "section": "",
513
+ "sec_num": null
514
+ },
515
+ {
516
+ "text": "This occurred because the MT engine was not using a word-for-word scheme. The Spanish verb debo is translated into 2 English words, I must. As we will note further on, different languages have different expansion rates into English.",
517
+ "cite_spans": [],
518
+ "ref_spans": [],
519
+ "eq_spans": [],
520
+ "section": "",
521
+ "sec_num": null
522
+ },
523
+ {
524
+ "text": "As a result, the number of words in the two passes can differ. As we see in figure 2 in the Scan-OCR-MT pass, there were 55 SL words translated but, in the GT-MT pass, only 54 SL words in the original text.",
525
+ "cite_spans": [],
526
+ "ref_spans": [],
527
+ "eq_spans": [],
528
+ "section": "",
529
+ "sec_num": null
530
+ },
531
+ {
532
+ "text": "We are in the process of running an experiment to validate the protocol for establishing domain-relevant judgments as part of our research in measures of effectiveness (MOEs) for task-based evaluation.",
533
+ "cite_spans": [],
534
+ "ref_spans": [],
535
+ "eq_spans": [],
536
+ "section": "",
537
+ "sec_num": null
538
+ }
539
+ ],
540
+ "back_matter": [
541
+ {
542
+ "text": "We would like to acknowledge Lisa Decrozant (Army Research Laboratory) and Brian Branagan (Department of Defense) for language expertise and Francis Fisher (Army Research Laboratory) for systems engineering expertise.",
543
+ "cite_spans": [],
544
+ "ref_spans": [],
545
+ "eq_spans": [],
546
+ "section": "Acknowledgements",
547
+ "sec_num": null
548
+ }
549
+ ],
550
+ "bib_entries": {
551
+ "BIBREF0": {
552
+ "ref_id": "b0",
553
+ "title": "Good Applications for Crummy Machine Translation",
554
+ "authors": [
555
+ {
556
+ "first": "K",
557
+ "middle": [],
558
+ "last": "Church",
559
+ "suffix": ""
560
+ },
561
+ {
562
+ "first": "E",
563
+ "middle": [],
564
+ "last": "Hovy",
565
+ "suffix": ""
566
+ }
567
+ ],
568
+ "year": 1993,
569
+ "venue": "DARPA 1999. Translingual Information Detection, Extraction, and Summarization (TIDES) Initiative",
570
+ "volume": "8",
571
+ "issue": "",
572
+ "pages": "239--258",
573
+ "other_ids": {},
574
+ "num": null,
575
+ "urls": [],
576
+ "raw_text": "Church, K. and Hovy, E. 1993. Good Applications for Crummy Machine Translation. Machine Translation, Volume 8, pages 239 -258. DARPA 1999. Translingual Information Detection, Extraction, and Summarization (TIDES) Initiative. http://www.darpa.mil/ito/research/tides/index.html",
577
+ "links": null
578
+ },
579
+ "BIBREF1": {
580
+ "ref_id": "b1",
581
+ "title": "FALCon, an MT System Support Tool for Non-linguists",
582
+ "authors": [
583
+ {
584
+ "first": "F",
585
+ "middle": [],
586
+ "last": "Fisher",
587
+ "suffix": ""
588
+ },
589
+ {
590
+ "first": "C",
591
+ "middle": [
592
+ "R"
593
+ ],
594
+ "last": "Voss",
595
+ "suffix": ""
596
+ }
597
+ ],
598
+ "year": 1997,
599
+ "venue": "Proceedings of the Advanced Information Processing and Analysis Conference",
600
+ "volume": "",
601
+ "issue": "",
602
+ "pages": "",
603
+ "other_ids": {},
604
+ "num": null,
605
+ "urls": [],
606
+ "raw_text": "Fisher, F. and Voss, C. R. 1997. \"FALCon, an MT System Support Tool for Non-linguists.\" In Proceedings of the Advanced Information Processing and Analysis Conference. McLean,VA.",
607
+ "links": null
608
+ },
609
+ "BIBREF2": {
610
+ "ref_id": "b2",
611
+ "title": "Presentation at TIDES Planning Workshop",
612
+ "authors": [
613
+ {
614
+ "first": "D",
615
+ "middle": [],
616
+ "last": "Harmon",
617
+ "suffix": ""
618
+ }
619
+ ],
620
+ "year": 1999,
621
+ "venue": "",
622
+ "volume": "",
623
+ "issue": "",
624
+ "pages": "",
625
+ "other_ids": {},
626
+ "num": null,
627
+ "urls": [],
628
+ "raw_text": "Harmon, D. 1999. \"A Framework for Evaluation in TIDES.\" Presentation at TIDES Planning Workshop, with link at http://www.dyncorp- is.com/darpa/meetings/fides99jul/agenda.html, July 28-30, Leesburg, VA.",
629
+ "links": null
630
+ },
631
+ "BIBREF3": {
632
+ "ref_id": "b3",
633
+ "title": "Sparse Training Data and EBMT",
634
+ "authors": [
635
+ {
636
+ "first": "J.-M",
637
+ "middle": [],
638
+ "last": "Pomarede",
639
+ "suffix": ""
640
+ },
641
+ {
642
+ "first": "K",
643
+ "middle": [],
644
+ "last": "Taylor",
645
+ "suffix": ""
646
+ },
647
+ {
648
+ "first": "C",
649
+ "middle": [],
650
+ "last": "Van Ess-Dykema",
651
+ "suffix": ""
652
+ }
653
+ ],
654
+ "year": 1998,
655
+ "venue": "Proceedings of the Workshop on Embedded MT Systems: Design, Construction, and Evaluation of Systems with an MT Component held in conjunction with the Association for Machine Translation in the Americas (AMTA'98",
656
+ "volume": "",
657
+ "issue": "",
658
+ "pages": "",
659
+ "other_ids": {},
660
+ "num": null,
661
+ "urls": [],
662
+ "raw_text": "Pomarede, J.-M., Taylor, K., and Van Ess-Dykema, C. 1998. Sparse Training Data and EBMT. In Proceedings of the Workshop on Embedded MT Systems: Design, Construction, and Evaluation of Systems with an MT Component held in conjunction with the Association for Machine Translation in the Americas (AMTA'98),",
663
+ "links": null
664
+ },
665
+ "BIBREF4": {
666
+ "ref_id": "b4",
667
+ "title": "Evaluating Multilingual Gisting of Web Pages",
668
+ "authors": [
669
+ {
670
+ "first": "P",
671
+ "middle": [
672
+ "A"
673
+ ],
674
+ "last": "Langhorne",
675
+ "suffix": ""
676
+ },
677
+ {
678
+ "first": "",
679
+ "middle": [],
680
+ "last": "October",
681
+ "suffix": ""
682
+ },
683
+ {
684
+ "first": "P",
685
+ "middle": [],
686
+ "last": "Resnik",
687
+ "suffix": ""
688
+ }
689
+ ],
690
+ "year": 1997,
691
+ "venue": "Working Notes of the AAA1 Spring Symposium on Natural Language Processing for the Worm Wide Web",
692
+ "volume": "",
693
+ "issue": "",
694
+ "pages": "",
695
+ "other_ids": {},
696
+ "num": null,
697
+ "urls": [],
698
+ "raw_text": "Langhorne, PA, October. Resnik, P. 1997. Evaluating Multilingual Gisting of Web Pages. In Working Notes of the AAA1 Spring Symposium on Natural Language Processing for the Worm Wide Web, Palo Alto, CA.",
699
+ "links": null
700
+ },
701
+ "BIBREF5": {
702
+ "ref_id": "b5",
703
+ "title": "Choosing Analytic Measures",
704
+ "authors": [
705
+ {
706
+ "first": "J",
707
+ "middle": [
708
+ "G"
709
+ ],
710
+ "last": "Roche",
711
+ "suffix": ""
712
+ },
713
+ {
714
+ "first": "B",
715
+ "middle": [
716
+ "D"
717
+ ],
718
+ "last": "Watts",
719
+ "suffix": ""
720
+ }
721
+ ],
722
+ "year": 1991,
723
+ "venue": "The Journal of Strategic Studies",
724
+ "volume": "14",
725
+ "issue": "",
726
+ "pages": "165--209",
727
+ "other_ids": {},
728
+ "num": null,
729
+ "urls": [],
730
+ "raw_text": "Roche, J. G. and Watts, B. D. 1991. Choosing Analytic Measures. The Journal of Strategic Studies, Volume 14, pages 165-209, June.",
731
+ "links": null
732
+ },
733
+ "BIBREF6": {
734
+ "ref_id": "b6",
735
+ "title": "Evaluating Natural Language Processing Systems",
736
+ "authors": [
737
+ {
738
+ "first": "Sparck",
739
+ "middle": [],
740
+ "last": "Jones",
741
+ "suffix": ""
742
+ },
743
+ {
744
+ "first": "K",
745
+ "middle": [],
746
+ "last": "Galliers",
747
+ "suffix": ""
748
+ },
749
+ {
750
+ "first": "J",
751
+ "middle": [],
752
+ "last": "",
753
+ "suffix": ""
754
+ }
755
+ ],
756
+ "year": 1996,
757
+ "venue": "",
758
+ "volume": "",
759
+ "issue": "",
760
+ "pages": "",
761
+ "other_ids": {},
762
+ "num": null,
763
+ "urls": [],
764
+ "raw_text": "Sparck Jones, K. and Galliers, J. 1996. Evaluating Natural Language Processing Systems. Springer- Verlag Publishers, Berlin, Germany.",
765
+ "links": null
766
+ },
767
+ "BIBREF7": {
768
+ "ref_id": "b7",
769
+ "title": "Predicting What MT is Good for: User Judgments and Task Performance",
770
+ "authors": [
771
+ {
772
+ "first": "K",
773
+ "middle": [],
774
+ "last": "Taylor",
775
+ "suffix": ""
776
+ },
777
+ {
778
+ "first": "J",
779
+ "middle": [],
780
+ "last": "White",
781
+ "suffix": ""
782
+ }
783
+ ],
784
+ "year": 1998,
785
+ "venue": "Proceedings of the Third",
786
+ "volume": "",
787
+ "issue": "",
788
+ "pages": "",
789
+ "other_ids": {},
790
+ "num": null,
791
+ "urls": [],
792
+ "raw_text": "Taylor, K. and White, J. 1998. Predicting What MT is Good for: User Judgments and Task Performance. In Proceedings of the Third",
793
+ "links": null
794
+ }
795
+ },
796
+ "ref_entries": {
797
+ "FIGREF1": {
798
+ "num": null,
799
+ "text": "Figure 1 Analysis Template",
800
+ "uris": null,
801
+ "type_str": "figure"
802
+ },
803
+ "FIGREF2": {
804
+ "num": null,
805
+ "text": "Figure 3 Summary of Language-Specific Results (percentages)",
806
+ "uris": null,
807
+ "type_str": "figure"
808
+ },
809
+ "TABREF0": {
810
+ "num": null,
811
+ "type_str": "table",
812
+ "text": "",
813
+ "html": null,
814
+ "content": "<table><tr><td/><td/><td>Arabic</td><td colspan=\"2\">Haitian Creole</td></tr><tr><td/><td>OCR GT</td><td>OCR GT</td><td colspan=\"2\">OCR GT</td></tr><tr><td>W.</td><td>40 95</td><td>35 64</td><td colspan=\"2\">26 79</td></tr><tr><td>X.</td><td>55 49</td><td>14 37</td><td colspan=\"2\">9 17</td></tr><tr><td>Y.</td><td>17 22</td><td>10 12</td><td>0</td><td>5</td></tr><tr><td>Z.</td><td>94 100</td><td>33 67</td><td colspan=\"2\">20 33</td></tr></table>"
815
+ }
816
+ }
817
+ }
818
+ }
Full_text_JSON/prefixW/json/W00/W00-0502.json ADDED
@@ -0,0 +1,797 @@
1
+ {
2
+ "paper_id": "W00-0502",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:58.354088Z"
6
+ },
7
+ "title": "Task Tolerance of MT Output in Integrated Text Processes",
8
+ "authors": [
9
+ {
10
+ "first": "John",
11
+ "middle": [
12
+ "S"
13
+ ],
14
+ "last": "White",
15
+ "suffix": "",
16
+ "affiliation": {},
17
+ "email": "white_john@prc.com"
18
+ },
19
+ {
20
+ "first": "Jennifer",
21
+ "middle": [
22
+ "B"
23
+ ],
24
+ "last": "Doyon",
25
+ "suffix": "",
26
+ "affiliation": {},
27
+ "email": "doyon__jennifer@prc.com"
28
+ },
29
+ {
30
+ "first": "Susan",
31
+ "middle": [
32
+ "W"
33
+ ],
34
+ "last": "Talbott",
35
+ "suffix": "",
36
+ "affiliation": {},
37
+ "email": "talbott_susan@prc.com"
38
+ },
39
+ {
40
+ "first": "Litton",
41
+ "middle": [],
42
+ "last": "Prc",
43
+ "suffix": "",
44
+ "affiliation": {},
45
+ "email": ""
46
+ }
47
+ ],
48
+ "year": "",
49
+ "venue": null,
50
+ "identifiers": {},
51
+ "abstract": "The importance of machine translation (MT) in the stream of text-handling processes has become readily apparent in many current production settings as well as in research programs such as the Translingual Information Detection, Extraction, and Summarization (TIDES) program. The MT Proficiency Scale project has developed a means of baselining the inherent \"tolerance\" that a text-handling task has for raw MT output, and thus how good the output must be in order to be of use to that task. This method allows for a prediction of how useful a particular system can be in a text-handling process stream, whether in integrated, MTembedded processes, or less integrated userintensive processes.",
52
+ "pdf_parse": {
53
+ "paper_id": "W00-0502",
54
+ "_pdf_hash": "",
55
+ "abstract": [
56
+ {
57
+ "text": "The importance of machine translation (MT) in the stream of text-handling processes has become readily apparent in many current production settings as well as in research programs such as the Translingual Information Detection, Extraction, and Summarization (TIDES) program. The MT Proficiency Scale project has developed a means of baselining the inherent \"tolerance\" that a text-handling task has for raw MT output, and thus how good the output must be in order to be of use to that task. This method allows for a prediction of how useful a particular system can be in a text-handling process stream, whether in integrated, MTembedded processes, or less integrated userintensive processes.",
58
+ "cite_spans": [],
59
+ "ref_spans": [],
60
+ "eq_spans": [],
61
+ "section": "Abstract",
62
+ "sec_num": null
63
+ }
64
+ ],
65
+ "body_text": [
66
+ {
67
+ "text": "Issues of evaluation have been pre-eminent in MT since its beginning, yet there are no measures or metrics which are universally accepted as standard or adequate. This is in part because, at present, different evaluation methods are required to measure different attributes of MT, depending on what a particular stakeholder needs to know (e.g., Arnold 1993) . A venture capitalist who wants to invest in an MT start-up needs to know a different set of attributes about the system than does a developer who needs to see if the most recent software changes improved (or degraded) the system. Users need to know another set of metrics, namely those associated with whether the MT system in situ improves or degrades the other tasks in their overall process. Task-based evaluation of this sort is of particular value because of the recently envisioned role of MT as an embedded part of production processes rather than a stand-alone translator's tool. In this context, MT can be measured in terms of its effect on the \"downstream\" tasks, i.e., the tasks that a user or system performs on the output of the MT.",
68
+ "cite_spans": [
69
+ {
70
+ "start": 345,
71
+ "end": 357,
72
+ "text": "Arnold 1993)",
73
+ "ref_id": "BIBREF1"
74
+ }
75
+ ],
76
+ "ref_spans": [],
77
+ "eq_spans": [],
78
+ "section": "Introduction",
79
+ "sec_num": "1"
80
+ },
81
+ {
82
+ "text": "'The assertion that usefulness could be gauged by tasks to which output might be applied has been used for systems and for processes (JEIDA 1992 , Albisser 1993 , and also particular theoretical approaches (Church and Hovy 1991) . However, the potential for rapidly adaptable systems for which MT could be expected to run without human intervention, and to interact flexibly with automated extraction, summarization, filtering, and document detection calls for an evaluation method that measures usefulness across several different downstream tasks.",
83
+ "cite_spans": [
84
+ {
85
+ "start": 133,
86
+ "end": 144,
87
+ "text": "(JEIDA 1992",
88
+ "ref_id": null
89
+ },
90
+ {
91
+ "start": 145,
92
+ "end": 160,
93
+ "text": ", Albisser 1993",
94
+ "ref_id": "BIBREF0"
95
+ },
96
+ {
97
+ "start": 206,
98
+ "end": 228,
99
+ "text": "(Church and Hovy 1991)",
100
+ "ref_id": "BIBREF3"
101
+ }
102
+ ],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "Introduction",
106
+ "sec_num": "1"
107
+ },
108
+ {
109
+ "text": "The U.S. government MT Functional Proficiency Scale project has conducted methodology research that has resulted in a ranking of text-handling tasks by their tolerance to MT output.",
110
+ "cite_spans": [],
111
+ "ref_spans": [],
112
+ "eq_spans": [],
113
+ "section": "Introduction",
114
+ "sec_num": "1"
115
+ },
116
+ {
117
+ "text": "When an MT system's output is mapped onto this scale, the set of tasks for which the output is useful, or not useful, can be predicted. The method used to develop the scale can also be used to map a particular system onto the scale.",
118
+ "cite_spans": [],
119
+ "ref_spans": [],
120
+ "eq_spans": [],
121
+ "section": "Introduction",
122
+ "sec_num": "1"
123
+ },
124
+ {
125
+ "text": "Development of the scale required the identification of the text-handling tasks members of a user community perform, and then the development of exercises to test output from several MT systems (Japanese-to-English). The ease with which users can perform these exercises on the corpus reflects the tolerance that the tasks have for MT output of varying quality. The following sections detail the identification of text-handling tasks, the evaluation corpus, exercise development, and inference of the proficiency scale from the apparent tolerance of the downstream text-handling tasks.",
126
+ "cite_spans": [],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "Introduction",
130
+ "sec_num": "1"
131
+ },
132
+ {
133
+ "text": "In order to determine the suitability of MT output for text-handling tasks, it was necessary to interview users of text-handling tools to identify the tasks they actually perform with translated material. It was necessary also to compile a corpus of translations and create exercises to measure the usefulness of the translations.",
134
+ "cite_spans": [],
135
+ "ref_spans": [],
136
+ "eq_spans": [],
137
+ "section": "Proficiency Scale Development",
138
+ "sec_num": "2"
139
+ },
140
+ {
141
+ "text": "Expert user judgments were needed to ensure confidence in the resulting proficiency scale. The users who provided these judgments work monolingually on document collections that include translated material. Preliminary interviews were conducted with 17 users. During the preliminary interviews, users completed questionnaires providing information identifying the text-handling tasks that ultimately formed the proficiency scale.",
142
+ "cite_spans": [],
143
+ "ref_spans": [],
144
+ "eq_spans": [],
145
+ "section": "Task Identification",
146
+ "sec_num": "2.1"
147
+ },
148
+ {
149
+ "text": "For a 1994 evaluation effort, the Defense Advanced Research Projects Agency (DARPA) Machine Translation Initiative developed a corpus of 100 general news texts taken from Japanese newswires.",
150
+ "cite_spans": [],
151
+ "ref_spans": [],
152
+ "eq_spans": [],
153
+ "section": "Corpus Composition",
154
+ "sec_num": "2.2"
155
+ },
156
+ {
157
+ "text": "These texts were translated into English and were incorporated into what is now known as the \"3Q94\" evaluation. A subset of these translations was used for the MT Functional Proficiency Scale project.",
158
+ "cite_spans": [],
159
+ "ref_spans": [],
160
+ "eq_spans": [],
161
+ "section": "Corpus Composition",
162
+ "sec_num": "2.2"
163
+ },
164
+ {
165
+ "text": "The 100 3Q94 Japanese source texts were translated into six English output versions, four from commercial and research MT systems (Systran (SY), Pivot (P), Lingstat (L), and Pangloss (PN)), and two from professional expert translations (E) used as baseline and control for the 3Q94 evaluations. Translations were selected from all of these sets for the proficiency scale corpus. For the purpose of validating the project's results, two additional systems' translations were added to its corpus. These included translations from a current version of Systran (SY2) and Typhoon (TY).",
166
+ "cite_spans": [],
167
+ "ref_spans": [],
168
+ "eq_spans": [],
169
+ "section": "Corpus Composition",
170
+ "sec_num": "2.2"
171
+ },
172
+ {
173
+ "text": "The user exercises were designed to determine if users could successfully accomplish their regular tasks with translations of varying qualities, by eliciting judgments that indicated the usefulness of these translations. A variety of human factors issues were relevant to the development of the exercise sets. Since the texts to be seen by the users were general news texts, it was unlikely they would be relevant to the users' usual domains of interest . This issue was handled by selecting texts related to domains that were thought to be similar, but broader, than those typically handled by users . Additionally, the simple elicitation of a judgment (to a question such as \"can you do your job with this text\") is possibly biased by a predisposition to cooperate (Taylor and White 1998 ). Therefore, it was necessary to develop two complementary sets of exercises: the snap judgment exercise and the task-specific exercises. Detailed definitions of these two exercises can be found in Kathryn B. Taylor and John S. White's paper \"Predicting What MT is Good for: User Judgments and Task Performance\" in the Proceedings of the Third Conference of the Association for Machine Translation in the Americas, AMTA '98.",
174
+ "cite_spans": [
175
+ {
176
+ "start": 767,
177
+ "end": 789,
178
+ "text": "(Taylor and White 1998",
179
+ "ref_id": "BIBREF9"
180
+ }
181
+ ],
182
+ "ref_spans": [],
183
+ "eq_spans": [],
184
+ "section": "Exercise Definitions",
185
+ "sec_num": "2.3"
186
+ },
187
+ {
188
+ "text": "The user responses for the snap judgment exercise are shown in Exhibit 1. In the snap judgment exercise, the users were asked to look at 15 translations and categorize each as being of a good enough quality to successfully complete their text-handling task, i.e., \"YES\" or \"Y,\" or if they could not use the translation to perform their task, i.e., \"NO\" or \"N.\" The top row of Exhibit 1 lists the 15 translations by their document identification codes. Each document identification code includes a document number followed by the code of the MT system that produced it (MT system codes can be found in the Corpus Composition section above).",
189
+ "cite_spans": [],
190
+ "ref_spans": [],
191
+ "eq_spans": [],
192
+ "section": "Compilation of Responses",
193
+ "sec_num": "3.1"
194
+ },
195
+ {
196
+ "text": "The first column of Exhibit 1 contains a list of the users who participated in the snap judgment exercise separated by which text-handling task they performed. The users' responses of \"Y\" or \"N\" appear under each of the translations' document identification codes by user. The snap judgment scores for each of the text handling tasks was calculated as the percentage of \"Ys\" for the corpus of 15 translations by all users performing that task.",
197
+ "cite_spans": [],
198
+ "ref_spans": [],
199
+ "eq_spans": [],
200
+ "section": "Compilation of Responses",
201
+ "sec_num": "3.1"
202
+ },
203
+ {
204
+ "text": "The user responses and results for the gisting exercise are shown in Exhibit 2. In the gisting exercise, each user was asked to rate decision points in a translation on a 1-5 scale. The top row of Exhibit 2 lists the seven documents seen by the users by their document identification codes.",
205
+ "cite_spans": [],
206
+ "ref_spans": [],
207
+ "eq_spans": [],
208
+ "section": "Compilation of Responses",
209
+ "sec_num": "3.1"
210
+ },
211
+ {
212
+ "text": "The first column of Exhibit 2 contains a list of users who participated in the gisting exercise.",
213
+ "cite_spans": [],
214
+ "ref_spans": [],
215
+ "eq_spans": [],
216
+ "section": "Compilation of Responses",
217
+ "sec_num": "3.1"
218
+ },
219
+ {
220
+ "text": "User ratings averaged for each translation appear under each of the translation codes for each of the users.",
221
+ "cite_spans": [],
222
+ "ref_spans": [],
223
+ "eq_spans": [],
224
+ "section": "Compilation of Responses",
225
+ "sec_num": "3.1"
226
+ },
227
+ {
228
+ "text": "The scores for each of the translations were calculated by totaling a user's ratings and dividing that total by the number of decision points contained in the document.",
229
+ "cite_spans": [],
230
+ "ref_spans": [],
231
+ "eq_spans": [],
232
+ "section": "Compilation of Responses",
233
+ "sec_num": "3.1"
234
+ },
235
+ {
236
+ "text": "The user responses and results for the triage exercise are shown in Exhibit 3. In the triage exercise, each user was asked to order three separate stacks of translations by their relevance to a problem statement. The top row of Exhibit 3 lists the 15 translations seen by the users by their document identification codes. The first column of Exhibit 3 contains a list of users who participated in the triage exercise. User responses of ordinal number rankings appear under each of the document identification codes by user. Each of the category rankings was scored by comparing its results to that of a ground truth ranking of the same translations.",
237
+ "cite_spans": [],
238
+ "ref_spans": [],
239
+ "eq_spans": [],
240
+ "section": "Compilation of Responses",
241
+ "sec_num": "3.1"
242
+ },
243
+ {
244
+ "text": "The user responses and results for the extraction exercise are shown in Exhibit 4. In the extraction exercise, each user was asked to identify named entities in each translation: persons, locations, organizations, dates, times, and money/percent. This extraction exercise was modeled after the \"Named Entity\" task of the Message Understanding Conference (MUC) (Chinchor and Dungca, 1995) . Exhibit 4 contains two charts. The top row of both charts contain a list of users who participated in the extraction exercise. The first column of both charts lists seven documents seen by the users by their document identification codes. In the top chart, recall scores appear under each of the users for each translation. In the bottom chart, precision scores appear under each of the users for each translation.",
245
+ "cite_spans": [
246
+ {
247
+ "start": 360,
248
+ "end": 387,
249
+ "text": "(Chinchor and Dungca, 1995)",
250
+ "ref_id": "BIBREF2"
251
+ }
252
+ ],
253
+ "ref_spans": [],
254
+ "eq_spans": [],
255
+ "section": "Compilation of Responses",
256
+ "sec_num": "3.1"
257
+ },
258
+ {
259
+ "text": "Recall was calculated by the number of possible named entities in a translation the user identified. Precision was calculated by the number of items the user identified as being named entities that were actually named entities.",
260
+ "cite_spans": [],
261
+ "ref_spans": [],
262
+ "eq_spans": [],
263
+ "section": "Compilation of Responses",
264
+ "sec_num": "3.1"
265
+ },
266
+ {
267
+ "text": "The user responses and results for the filtering exercise are shown in Exhibit 5. In the filtering exercise, each user was asked to look at 15 documents to determine if a document fit into any one of the three categories of Crime, Economics, or Government and Politics, i.e., \"YES\" or \"Y,\" none of the three categories, i.e., \"NO\" or \"N,\" or if they could not make a decision either way, i.e., \"CANNOT BE DETERMINED\" or \"CBD.\" Exhibit 5 contains two charts. The top row of both charts lists the 15 translations seen by the users by their document identification codes. The first column of both charts contains a list of users who participated in the filtering exercise. The users' responses of \"Y,\" \"N,\" or \"CBD\" appear under each of the translations' document identification codes by user. The results of the filtering exercise were calculated with the measure of recall.",
268
+ "cite_spans": [],
269
+ "ref_spans": [],
270
+ "eq_spans": [],
271
+ "section": "Compilation of Responses",
272
+ "sec_num": "3.1"
273
+ },
274
+ {
275
+ "text": "Recall was calculated by the number of translated documents related to the three categories of Crime, Economics, and Government and Politics the user identified.",
276
+ "cite_spans": [],
277
+ "ref_spans": [],
278
+ "eq_spans": [],
279
+ "section": "Compilation of Responses",
280
+ "sec_num": "3.1"
281
+ },
282
+ {
283
+ "text": "The user responses and results for the detection exercise are shown in Exhibit 6. In the detection exercise, each user was asked to look at 15 documents to determine if the document belonged to the category of Crime (C), the category of Economics (E), the category of Government and Politics (G&P), none of the three categories, i.e., \"NO\" or \"N,\" or if they could not make a decision either way, i.e., \"CANNOT BE DETERMINED\" or \"CBD.\"",
284
+ "cite_spans": [],
285
+ "ref_spans": [],
286
+ "eq_spans": [],
287
+ "section": "Compilation of Responses",
288
+ "sec_num": "3.1"
289
+ },
290
+ {
291
+ "text": "Exhibit 6 contains three charts. The top row of all three charts lists the 15 translations seen by the users by their document identification codes. The first column of all three charts contains a list of users who participated in the detection exercise.",
292
+ "cite_spans": [],
293
+ "ref_spans": [],
294
+ "eq_spans": [],
295
+ "section": "Compilation of Responses",
296
+ "sec_num": "3.1"
297
+ },
298
+ {
299
+ "text": "User responses of \"C,\" \"E,\" \"G&P,\" \"CBD,\" or \"NOTA\" appear under each of the translations' document identification codes by user. The results of the detection exercise were calculated with the measure of recall. Recall was calculated by the number of translated documents related to each of the three categories of Crime, Economics, and Government and Politics the user identified.",
300
+ "cite_spans": [],
301
+ "ref_spans": [],
302
+ "eq_spans": [],
303
+ "section": "Compilation of Responses",
304
+ "sec_num": "3.1"
305
+ },
306
+ {
307
+ "text": "The results of the snap judgment exercise are shown in Exhibit 7. In the snap judgment exercise each user was asked whether a document was coherent enough that it could be used to complete their text-handling task. The bars in Exhibit 7 represent the percentage of affirmatives for the corpus of 15 texts by all users.",
308
+ "cite_spans": [],
309
+ "ref_spans": [],
310
+ "eq_spans": [],
311
+ "section": "Mapping Results onto Tolerance Scale",
312
+ "sec_num": "3.2"
313
+ },
314
+ {
315
+ "text": "The results for the user exercises needed to be computed in a way which allowed their comparison across tasks, but which used the metrics relevant to each task at the same time. We address the computation of each of these in turn.",
316
+ "cite_spans": [],
317
+ "ref_spans": [],
318
+ "eq_spans": [],
319
+ "section": "Mapping Results onto Tolerance Scale",
320
+ "sec_num": "3.2"
321
+ },
322
+ {
323
+ "text": "Gisting. Computing the acceptability cut-off for gisting follows the general pattern, except that the text scores are not recall or precision. Rather, since gisting judgments were elicited with an \"adequacy\" measure, each text for each user has an average of the scores for the decision points in that text.",
324
+ "cite_spans": [],
325
+ "ref_spans": [],
326
+ "eq_spans": [],
327
+ "section": "Mapping Results onto Tolerance Scale",
328
+ "sec_num": "3.2"
329
+ },
330
+ {
331
+ "text": "In turn, the average of these average scores gives the cutoff for acceptability for gisting, namely 2.52 out of a minimum of one and maximum of 5. By this means, 2 texts are identified as acceptable for gisting, indicated in Exhibit 2.",
332
+ "cite_spans": [],
333
+ "ref_spans": [],
334
+ "eq_spans": [],
335
+ "section": "Mapping Results onto Tolerance Scale",
336
+ "sec_num": "3.2"
337
+ },
338
+ {
339
+ "text": "Triage. As shown in Exhibit 3, triage requires the comparison of ordinal rankings, with ordinal rankings from the ground truth set. Here, a uniformity of agreement measure was established, defined as the mean of the standard deviations for each text in each problem statement. Then the mean for each text in the user ranking was compared to the ground truth ranking, plus-or-minus the uniformity measure. A text is acceptable if it matches the ground truth within the uniformity measure. Based on this computation, 7 of 15, or 46.7%, of the texts are acceptable for triage.",
340
+ "cite_spans": [],
341
+ "ref_spans": [],
342
+ "eq_spans": [],
343
+ "section": "Mapping Results onto Tolerance Scale",
344
+ "sec_num": "3.2"
345
+ },
346
+ {
347
+ "text": "Extraction. Extraction was computed using both recall and precision measures. As with filtering and detection, average recall is computed (62%), which is used as the cut-off for acceptability, and identifies 3 texts as acceptable. Similarly, the average precision, 87.7%, creates a cut-off at 4 texts. To show extraction as a single value, the total acceptable in precision and in recall are averaged, equaling 3.5, or 50% of the texts in the 7-text set. These are shown in Exhibit 4.",
348
+ "cite_spans": [],
349
+ "ref_spans": [],
350
+ "eq_spans": [],
351
+ "section": "Mapping Results onto Tolerance Scale",
352
+ "sec_num": "3.2"
353
+ },
354
+ {
355
+ "text": "Filtering. For filtering, user responses are computed on two tables conforming to the .ground truth values for each text (\"Y\" or \"N\", I.e., whether the text was relevant to crime or not). The average recall over all users and all \u2022 texts is 66.7% for Y and 75% for N. These averages create for the Y and N chart the respective cutoff boundaries for \"YES\" (text output is acceptable for filtering) and \"NO\" (it is not). The total number of YES's from the Y and N tables is 8 or 53% of the texts in the corpus acceptable for filtering. These results are illustrated in Exhibit 5.",
356
+ "cite_spans": [],
357
+ "ref_spans": [],
358
+ "eq_spans": [],
359
+ "section": "Mapping Results onto Tolerance Scale",
360
+ "sec_num": "3.2"
361
+ },
362
+ {
363
+ "text": "Detection. As shown in Exhibit 6, there are three tables in detection, corresponding to the three domain areas of Crime, Economics, and Government and Politics. As with filtering, the average recall is computed for each domain over all users and texts, and this average establishes the cut-off boundary of acceptability of text outputs for detection. For the Crime domain, the average is 82.1%, for Economics 94%, and for Government and Politics 50%. The total number of texts thus identified as acceptable is 10, or 67% texts acceptable for detection.",
364
+ "cite_spans": [],
365
+ "ref_spans": [],
366
+ "eq_spans": [],
367
+ "section": "Mapping Results onto Tolerance Scale",
368
+ "sec_num": "3.2"
369
+ },
370
+ {
371
+ "text": "Exhibit 8 shows the results of the task exercises.",
372
+ "cite_spans": [],
373
+ "ref_spans": [],
374
+ "eq_spans": [],
375
+ "section": "Mapping Results onto Tolerance Scale",
376
+ "sec_num": "3.2"
377
+ },
378
+ {
379
+ "text": "At the inception of this project, we established a heuristic scale of task tolerance, based on common understanding of the nature of each of these tasks. This scale -filtering, detection, triage, extraction, and gisting, m order of tolerance -was not a hypothesis per se; nevertheless, it is rather surprising that the results vary from the heuristic significantly. The results showed detection to be the most tolerant task, rather than filtering.",
380
+ "cite_spans": [],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "Exhibit 8 -Task Exercises Results",
384
+ "sec_num": null
385
+ },
386
+ {
387
+ "text": "The presumption had been that the filtering task, which simply requires a \"yes\" if a document is related to a specific topic or \"no\" if it is not, could be performed with higher accuracy than the task of detection that requires classifying each document by subject matter. In fact, when precision measures are factored in for filtering and detection (as they were for extraction), filtering appears to be even less tolerant than extraction. This outcome seems plausible when we consider that detection is often possible even when only small quantities of key words can be found in a document.",
388
+ "cite_spans": [],
389
+ "ref_spans": [],
390
+ "eq_spans": [],
391
+ "section": "Exhibit 8 -Task Exercises Results",
392
+ "sec_num": null
393
+ },
394
+ {
395
+ "text": "Also surprising, the triage task was less tolerant of MT output then expected. It was supposed that the ability to rank relevance to a particular problem could be done with sufficient keywords in otherwise unintelligible text; rather, a greater depth of understanding is necessary to successfully complete this task.",
396
+ "cite_spans": [],
397
+ "ref_spans": [],
398
+ "eq_spans": [],
399
+ "section": "Exhibit 8 -Task Exercises Results",
400
+ "sec_num": null
401
+ },
402
+ {
403
+ "text": "There are at least two evaluation techniques that can use the task tolerance scale to predict the usefulness of an MT system for a particular downstream task.",
404
+ "cite_spans": [],
405
+ "ref_spans": [],
406
+ "eq_spans": [],
407
+ "section": "Future Research",
408
+ "sec_num": "4"
409
+ },
410
+ {
411
+ "text": "The set of exercises used to elicit the task tolerance hierarchy reported here can also be used to determine the position on the scale of a particular system. The system translates texts from the corpus for which ground truth has already been established, and the user exercises are performed on these translations. The result is a set of tasks for which the system's output appears to be suitable. The pre-existing scale can help to resolve ambiguous results, or can be used to make scale-wide inferences from a subset of the exercises: it may be possible to perform just one exercise (e.g., triage) and infer the actual position of the system on the scale by the degree of acceptability above or below the mlmmum acceptability for triage itself.",
412
+ "cite_spans": [],
413
+ "ref_spans": [],
414
+ "eq_spans": [],
415
+ "section": "Future Research",
416
+ "sec_num": "4"
417
+ },
418
+ {
419
+ "text": "A second technique offers more potential for rapid, inexpensive test and re-test. This involves the development of a diagnostic test set , derived from the same source as the proficiency scale itself. For every task in the exercise results, there are \"borderline\" texts, that is, texts acceptable for one task but not for the next less tolerant task. These texts will exhibit translation phenomena (grammatical, lexical, orthographic, formatting, etc.) which are diagnostic of the difference between suitability at one tolerance level and another. The text will also contain phenomena that are not diagnostic at this level but are at a less tolerant level.",
420
+ "cite_spans": [
421
+ {
422
+ "start": 398,
423
+ "end": 452,
424
+ "text": "(grammatical, lexical, orthographic, formatting, etc.)",
425
+ "ref_id": null
426
+ }
427
+ ],
428
+ "ref_spans": [],
429
+ "eq_spans": [],
430
+ "section": "Future Research",
431
+ "sec_num": "4"
432
+ },
433
+ {
434
+ "text": "By characterizing the phenomena that occur in the border texts for each task, it is possible to determine the phenomena diagnostic to each tolerance level.",
435
+ "cite_spans": [],
436
+ "ref_spans": [],
437
+ "eq_spans": [],
438
+ "section": "Future Research",
439
+ "sec_num": "4"
440
+ },
441
+ {
442
+ "text": "A pilot investigation of these translation phenomena White 1998, Doyon et al. 1999) categorized the translation phenomena in terms of pedagogy-based descriptions of the contrasts between Japanese and English (Connor-Linton 1995) . This characterization allows for the representation of several individual problem instances with a single suite of pair-specific, controlled, source language patterns designed to test MT systems for coverage of each phenomenon. These patterns may be tested by any MT system for that language pair, and the results of the test will indicate where that system falls on the proficiency scale by its successful coverage of the diagnostic patterns associated with that tolerance level.",
443
+ "cite_spans": [
444
+ {
445
+ "start": 53,
446
+ "end": 83,
447
+ "text": "White 1998, Doyon et al. 1999)",
448
+ "ref_id": null
449
+ },
450
+ {
451
+ "start": 208,
452
+ "end": 228,
453
+ "text": "(Connor-Linton 1995)",
454
+ "ref_id": "BIBREF5"
455
+ }
456
+ ],
457
+ "ref_spans": [],
458
+ "eq_spans": [],
459
+ "section": "Future Research",
460
+ "sec_num": "4"
461
+ },
462
+ {
463
+ "text": "The purpose of the user exercises is to establish a scale of MT tolerance for the downstream text handling tasks. However, the same method can be used to determine the usefulness of a particular system for any of the tasks by performing these exercises with the system to be tested. It is possible, for example, to isolate the performance of systems in the set used here, though the sample size from each system is too small to draw any conclusions in this case. We hope to perform this exercises with larger samples both to validate these findings and to execute evaluations on candidate MT systems.",
464
+ "cite_spans": [],
465
+ "ref_spans": [],
466
+ "eq_spans": [],
467
+ "section": "Future Research",
468
+ "sec_num": "4"
469
+ },
470
+ {
471
+ "text": "Among other validation steps in the future will be confirmation of the exercise approach from an empirical perspective (e.g., whether to include \"cannot be determined\" as a choice), and a validation of the ground truth in the triage exercise.",
472
+ "cite_spans": [],
473
+ "ref_spans": [],
474
+ "eq_spans": [],
475
+ "section": "Future Research",
476
+ "sec_num": "4"
477
+ },
478
+ {
479
+ "text": "Finally, we continue to refine the application of the methodology to reduce time and increase user acceptance. In particular, we have developed a web-based version of several of the exercises to make the process easier for the user and more automatic for scoring.",
480
+ "cite_spans": [],
481
+ "ref_spans": [],
482
+ "eq_spans": [],
483
+ "section": "Future Research",
484
+ "sec_num": "4"
485
+ },
486
+ {
487
+ "text": "The MT Functional Proficiency Scale project has not only demonstrated that it is possible for poor MT output to be of use for certain text-handling tasks, but has also indicated the different tolerances each such task has for possibly poor MT output.",
488
+ "cite_spans": [],
489
+ "ref_spans": [],
490
+ "eq_spans": [],
491
+ "section": "Conclusion",
492
+ "sec_num": "5"
493
+ },
494
+ {
495
+ "text": "This task-based methodology developed in the MT Functional Proficiency Scale project using Japanese-to-English corpora should prove useful in evaluating other language pair systems. There is also potential for evaluating other text-handling systems, such as summarization, information retrieval, gisting, and information extraction, in the context of the other tasks that might process their output.",
496
+ "cite_spans": [],
497
+ "ref_spans": [],
498
+ "eq_spans": [],
499
+ "section": "Conclusion",
500
+ "sec_num": "5"
501
+ },
502
+ {
503
+ "text": "Task-based evaluations provide a direct way for understanding how text-handlJ:~ng technologies can interact with each other in end-to-end processes. In the case of MT systems, it is possible to predict the effective applicability of MT systems whose output seems far less than perfect.",
504
+ "cite_spans": [],
505
+ "ref_spans": [],
506
+ "eq_spans": [],
507
+ "section": "Conclusion",
508
+ "sec_num": "5"
509
+ }
510
+ ],
511
+ "back_matter": [],
512
+ "bib_entries": {
513
+ "BIBREF0": {
514
+ "ref_id": "b0",
515
+ "title": "Evaluation of MT Systems at Union Bank of Switzerland",
516
+ "authors": [
517
+ {
518
+ "first": "D",
519
+ "middle": [],
520
+ "last": "Albisser",
521
+ "suffix": ""
522
+ }
523
+ ],
524
+ "year": 1993,
525
+ "venue": "Machine Translation 8-1",
526
+ "volume": "",
527
+ "issue": "",
528
+ "pages": "25--28",
529
+ "other_ids": {},
530
+ "num": null,
531
+ "urls": [],
532
+ "raw_text": "Albisser, D. (1993). \"Evaluation of MT Systems at Union Bank of Switzerland.\" Machine Translation 8-1/2: 25-28.",
533
+ "links": null
534
+ },
535
+ "BIBREF1": {
536
+ "ref_id": "b1",
537
+ "title": "Evaluation: an assessment",
538
+ "authors": [
539
+ {
540
+ "first": "A",
541
+ "middle": [],
542
+ "last": "Arnold",
543
+ "suffix": ""
544
+ },
545
+ {
546
+ "first": "L",
547
+ "middle": [],
548
+ "last": "Sadler",
549
+ "suffix": ""
550
+ },
551
+ {
552
+ "first": "R",
553
+ "middle": [],
554
+ "last": "Humphreys",
555
+ "suffix": ""
556
+ }
557
+ ],
558
+ "year": 1993,
559
+ "venue": "Machine Translation 8-1",
560
+ "volume": "2",
561
+ "issue": "",
562
+ "pages": "1--24",
563
+ "other_ids": {},
564
+ "num": null,
565
+ "urls": [],
566
+ "raw_text": "Arnold, A., L. Sadler, and R. Humphreys. (1993). \"Evaluation: an assessment.\" Machine Translation 8-1/2: 1-24.",
567
+ "links": null
568
+ },
569
+ "BIBREF2": {
570
+ "ref_id": "b2",
571
+ "title": "Four Scorers and Seven Years Ago: The Sconng Method for MUC-6",
572
+ "authors": [
573
+ {
574
+ "first": "Nancy",
575
+ "middle": [],
576
+ "last": "Chinchor",
577
+ "suffix": ""
578
+ },
579
+ {
580
+ "first": "Gary",
581
+ "middle": [],
582
+ "last": "Dungca",
583
+ "suffix": ""
584
+ }
585
+ ],
586
+ "year": 1995,
587
+ "venue": "Proceedings of Sixth Message Understanding Conference (MUC-6)",
588
+ "volume": "",
589
+ "issue": "",
590
+ "pages": "",
591
+ "other_ids": {},
592
+ "num": null,
593
+ "urls": [],
594
+ "raw_text": "Chinchor, Nancy, and Gary Dungca. (1995). \"Four Scorers and Seven Years Ago: The Sconng Method for MUC-6.\" Proceedings of Sixth Message Understanding Conference (MUC-6).",
595
+ "links": null
596
+ },
597
+ "BIBREF3": {
598
+ "ref_id": "b3",
599
+ "title": "Good Applications for Crummy Machine Translation",
600
+ "authors": [
601
+ {
602
+ "first": "M",
603
+ "middle": [
604
+ "D"
605
+ ],
606
+ "last": "Columbia",
607
+ "suffix": ""
608
+ },
609
+ {
610
+ "first": "Kenneth",
611
+ "middle": [],
612
+ "last": "Church",
613
+ "suffix": ""
614
+ },
615
+ {
616
+ "first": "Eduard",
617
+ "middle": [],
618
+ "last": "Hovy",
619
+ "suffix": ""
620
+ }
621
+ ],
622
+ "year": 1991,
623
+ "venue": "",
624
+ "volume": "",
625
+ "issue": "",
626
+ "pages": "",
627
+ "other_ids": {},
628
+ "num": null,
629
+ "urls": [],
630
+ "raw_text": "Columbia, MD. Church, Kenneth, and Eduard Hovy. (1991). \"Good Applications for Crummy Machine Translation.\" in J. Neal and S.",
631
+ "links": null
632
+ },
633
+ "BIBREF4": {
634
+ "ref_id": "b4",
635
+ "title": "Natural Language Processing Systems Evaluation Workshop",
636
+ "authors": [
637
+ {
638
+ "first": "",
639
+ "middle": [],
640
+ "last": "Walter",
641
+ "suffix": ""
642
+ }
643
+ ],
644
+ "year": null,
645
+ "venue": "",
646
+ "volume": "",
647
+ "issue": "",
648
+ "pages": "147--157",
649
+ "other_ids": {},
650
+ "num": null,
651
+ "urls": [],
652
+ "raw_text": "Walter (eds.), Natural Language Processing Systems Evaluation Workshop. Rome Laboratory Report #RL-TR-91-362. Pp. 147-157.",
653
+ "links": null
654
+ },
655
+ "BIBREF5": {
656
+ "ref_id": "b5",
657
+ "title": "Cross-cultural comparison of writing standards",
658
+ "authors": [
659
+ {
660
+ "first": "Jeff",
661
+ "middle": [],
662
+ "last": "Connor-Linton",
663
+ "suffix": ""
664
+ }
665
+ ],
666
+ "year": 1995,
667
+ "venue": "",
668
+ "volume": "",
669
+ "issue": "",
670
+ "pages": "",
671
+ "other_ids": {},
672
+ "num": null,
673
+ "urls": [],
674
+ "raw_text": "Connor-Linton, Jeff. (1995). \"Cross-cultural comparison of writing standards:",
675
+ "links": null
676
+ },
677
+ "BIBREF7": {
678
+ "ref_id": "b7",
679
+ "title": "Task-Based Evaluation for Machine Translation",
680
+ "authors": [
681
+ {
682
+ "first": "Jennifer",
683
+ "middle": [],
684
+ "last": "Doyon",
685
+ "suffix": ""
686
+ },
687
+ {
688
+ "first": "Kathryn",
689
+ "middle": [
690
+ "B"
691
+ ],
692
+ "last": "Taylor",
693
+ "suffix": ""
694
+ },
695
+ {
696
+ "first": "John",
697
+ "middle": [
698
+ "S"
699
+ ],
700
+ "last": "White",
701
+ "suffix": ""
702
+ }
703
+ ],
704
+ "year": 1999,
705
+ "venue": "Proceedings of Machine Translation Summit VII '99",
706
+ "volume": "",
707
+ "issue": "",
708
+ "pages": "",
709
+ "other_ids": {},
710
+ "num": null,
711
+ "urls": [],
712
+ "raw_text": "Doyon, Jennifer, Kathryn B. Taylor, and John S. White. (1999). \"Task-Based Evaluation for Machine Translation.\" Proceedings of Machine Translation Summit VII '99. Singapore.",
713
+ "links": null
714
+ },
715
+ "BIBREF8": {
716
+ "ref_id": "b8",
717
+ "title": "JEIDA Methodology and Criteria on Machine Translation Evaluation",
718
+ "authors": [],
719
+ "year": 1992,
720
+ "venue": "Tokyo: JEIDA",
721
+ "volume": "",
722
+ "issue": "",
723
+ "pages": "",
724
+ "other_ids": {},
725
+ "num": null,
726
+ "urls": [],
727
+ "raw_text": "Japanese Electronic Industry Development Association. (1992). \"JEIDA Methodology and Criteria on Machine Translation Evaluation.\" Tokyo: JEIDA.",
728
+ "links": null
729
+ },
730
+ "BIBREF9": {
731
+ "ref_id": "b9",
732
+ "title": "Predicting what MT is Good for: User Judgments and Task Performance",
733
+ "authors": [
734
+ {
735
+ "first": "Kathryn",
736
+ "middle": [
737
+ "B"
738
+ ],
739
+ "last": "Taylor",
740
+ "suffix": ""
741
+ },
742
+ {
743
+ "first": "John",
744
+ "middle": [
745
+ "S"
746
+ ],
747
+ "last": "White",
748
+ "suffix": ""
749
+ }
750
+ ],
751
+ "year": 1998,
752
+ "venue": "Proceedings of Third Conference of the Association for Machine Translation in the Americas, AMTA'98",
753
+ "volume": "",
754
+ "issue": "",
755
+ "pages": "",
756
+ "other_ids": {},
757
+ "num": null,
758
+ "urls": [],
759
+ "raw_text": "Taylor, Kathryn B., and John S. White (1998). \"Predicting what MT is Good for: User Judgments and Task Performance.\" Proceedings of Third Conference of the Association for Machine Translation in the Americas, AMTA'98. Philadelphia, PA.",
760
+ "links": null
761
+ },
762
+ "BIBREF10": {
763
+ "ref_id": "b10",
764
+ "title": "A Task-Oriented Evaluation Metric for Machine Translation",
765
+ "authors": [
766
+ {
767
+ "first": "John",
768
+ "middle": [
769
+ "S"
770
+ ],
771
+ "last": "White",
772
+ "suffix": ""
773
+ },
774
+ {
775
+ "first": "Kathryn",
776
+ "middle": [
777
+ "B"
778
+ ],
779
+ "last": "Taylor",
780
+ "suffix": ""
781
+ }
782
+ ],
783
+ "year": 1998,
784
+ "venue": "Proceedings of Language Resources and Evaluation Conference, LREC-98",
785
+ "volume": "I",
786
+ "issue": "",
787
+ "pages": "21--27",
788
+ "other_ids": {},
789
+ "num": null,
790
+ "urls": [],
791
+ "raw_text": "White, John S., and Kathryn B. Taylor. (1998). \"A Task-Oriented Evaluation Metric for Machine Translation.\" Proceedings of Language Resources and Evaluation Conference, LREC-98, Volume I. 21-27.",
792
+ "links": null
793
+ }
794
+ },
795
+ "ref_entries": {}
796
+ }
797
+ }
Full_text_JSON/prefixW/json/W00/W00-0503.json ADDED
@@ -0,0 +1,415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0503",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:35:09.872303Z"
6
+ },
7
+ "title": "At Your Service: Embedded MT As a Service",
8
+ "authors": [
9
+ {
10
+ "first": "Florence",
11
+ "middle": [
12
+ "M"
13
+ ],
14
+ "last": "Reeder",
15
+ "suffix": "",
16
+ "affiliation": {
17
+ "laboratory": "",
18
+ "institution": "The MITRE Corporation",
19
+ "location": {
20
+ "addrLine": "1820 Dolley Madison Blvd. McLean",
21
+ "postCode": "22102",
22
+ "region": "VA"
23
+ }
24
+ },
25
+ "email": "freeder@mitre.org"
26
+ }
27
+ ],
28
+ "year": "",
29
+ "venue": null,
30
+ "identifiers": {},
31
+ "abstract": "A growing trend in Machine Translation (MT) is to view MT as an embedded part of an overall process instead of an end result itself. For the last four years, we have fielded (primarily) Commercial-Off-The-Shelf (COTS) MT systems in an operational process. MT has been used to facilitate cross-language information retrieval (IR), topic detection and other, wide-scoped scenarios. These uses caused a fundamental shift in our views about MT-everything from user interface to system evaluation to the basic system structures. This paper presents our lessons learned in developing an MT service for a wide range of user needs.",
32
+ "pdf_parse": {
33
+ "paper_id": "W00-0503",
34
+ "_pdf_hash": "",
35
+ "abstract": [
36
+ {
37
+ "text": "A growing trend in Machine Translation (MT) is to view MT as an embedded part of an overall process instead of an end result itself. For the last four years, we have fielded (primarily) Commercial-Off-The-Shelf (COTS) MT systems in an operational process. MT has been used to facilitate cross-language information retrieval (IR), topic detection and other, wide-scoped scenarios. These uses caused a fundamental shift in our views about MT-everything from user interface to system evaluation to the basic system structures. This paper presents our lessons learned in developing an MT service for a wide range of user needs.",
38
+ "cite_spans": [],
39
+ "ref_spans": [],
40
+ "eq_spans": [],
41
+ "section": "Abstract",
42
+ "sec_num": null
43
+ }
44
+ ],
45
+ "body_text": [
46
+ {
47
+ "text": "The foreign language material to be handled by the government is increasingly diverse and problematic. Foreign language processing needs are increasing because of the changing conditions of the world. Traditionally, users could focus on just a few foreign languages and a limited number of sources of foreign language materials. As we begin the 21 ~' century, users of online materials are faced with having to process, utilise and exploit documents that may be in one of many languages or a combination of languages. It is not feasible to expect a given user to know all of the languages related to their topic of research. It is equally unrealistic to expect to have on-demand translators available in every language whenever they are needed. Because of the expanding need, tools are being developed to automate the use of foreign language materials.",
48
+ "cite_spans": [],
49
+ "ref_spans": [],
50
+ "eq_spans": [],
51
+ "section": "Introduction .-,",
52
+ "sec_num": null
53
+ },
54
+ {
55
+ "text": "Unlike previous views of tools, the current vision for machine translation (MT) is as a small part of a larger, mostly automated process. For many users, this does not mean yet another tool with yet another interface, but a nearly invisible companion that incorporates translation and necessary support technologies.",
56
+ "cite_spans": [],
57
+ "ref_spans": [],
58
+ "eq_spans": [],
59
+ "section": "Introduction .-,",
60
+ "sec_num": null
61
+ },
62
+ {
63
+ "text": "One such system, the Army Research Lab (ARL) FALCON system, combines scanning, optical character recognition (OCR), translation and filtering into a single process. Another view of this is the DARPA Translingual Information Detection, Extraction and Summarisation effort (TIDES). TIDES represents the pinnacle of information access and is a real challenge for MT. MT supports the translingual aspects of the effort and can be viewed as an embedded tool which facilitates other technologies. Finally, the integration of MT into the process for intelligence analysis serves as the basis for the CyberTrans project. For this paper, we will discuss the CyberTrans project, the lessons learned and the supporting technologies necessary for the successful integration of MT into other systems.",
64
+ "cite_spans": [],
65
+ "ref_spans": [],
66
+ "eq_spans": [],
67
+ "section": "Introduction .-,",
68
+ "sec_num": null
69
+ },
70
+ {
71
+ "text": "Proposed Architecture",
72
+ "cite_spans": [],
73
+ "ref_spans": [],
74
+ "eq_spans": [],
75
+ "section": "1",
76
+ "sec_num": null
77
+ },
78
+ {
79
+ "text": "The incarnation of CyberTrans grew as a demonstration that MT technology could be useful in the intelligence analysis process. As a result of an MT survey (Benoit et al, 1991) , MT technology was believed to be ready for incorporation into an operational environment. Initially, CyberTrans was designed as a wrapper around Commercial-Off-The-Shelf (COTS) and Government-Off-The-Shelf (GOTS) MT systems in Unix environments. A client-server architecture, implemented in a combination of Lisp and C, allowed for uniform user interfaces to translation engines (Systran, Globalink and Gister). The server software interacted with the translation engines and the client software interacted with the users. The server interacted with client programs through Remote Procedure Call (RPC) passing of translation parameters (such as language, dictionary and output format) and file transfer of translation data. The clients provided were: e-mail, web, FrameMaker and command line. By providing translation through these media, users could translate documents in a familiar interface without having to worry is much more forgiving of low quality input data while automated processing suffers from poor input data.",
80
+ "cite_spans": [
81
+ {
82
+ "start": 155,
83
+ "end": 175,
84
+ "text": "(Benoit et al, 1991)",
85
+ "ref_id": "BIBREF0"
86
+ }
87
+ ],
88
+ "ref_spans": [],
89
+ "eq_spans": [],
90
+ "section": "Original Prototype",
91
+ "sec_num": "1.1"
92
+ },
93
+ {
94
+ "text": "This forced the designers to implement a series of pre-and post-processing tools to be provided in the translation server. Initially, they were included in the functional architecture as depicted in Figure 1 . This addition of language tools caused a necessary redesign of the architecture from a client-server model to an enterprise service model which is charactefised by an open architecture view of loosely coupled modules performing services for",
95
+ "cite_spans": [],
96
+ "ref_spans": [
97
+ {
98
+ "start": 199,
99
+ "end": 207,
100
+ "text": "Figure 1",
101
+ "ref_id": null
102
+ }
103
+ ],
104
+ "eq_spans": [],
105
+ "section": "Original Prototype",
106
+ "sec_num": "1.1"
107
+ },
108
+ {
109
+ "text": "CHent ~~S RPC Communication g ! ..... . w V Identify ~~NorInaliseyHandleNot-~~VReassembleN~ Language / Spelling / Translated Format Code Set A~ Format Words A~ T",
110
+ "cite_spans": [],
111
+ "ref_spans": [],
112
+ "eq_spans": [],
113
+ "section": "Original Prototype",
114
+ "sec_num": "1.1"
115
+ },
116
+ {
117
+ "text": "about differences between translation products. The languages provided in the first prototype were those available to the government from Systran (French, German, Spanish, Itali.an, Portuguese, Russian to English); those purchased from Globalink (French, German, Spanish, Russian to/from English); and those available from the GOTS System Gister (language list is unavailable for publication). At the time of delivery in 1995/1996, this represented a relatively new method for delivenng MT technology to general users.",
118
+ "cite_spans": [],
119
+ "ref_spans": [],
120
+ "eq_spans": [],
121
+ "section": "Figure 1: Original Architectural Flow",
122
+ "sec_num": null
123
+ },
124
+ {
125
+ "text": "Shortly after the fielding of the initial prototype, the need for additional language services to accompany translation became apparent. As will be discussed in Section 2, the data sent to the translation engines pointed out the differences between translation in an interactive environment and translation in an embedded, automated environment. Interactive translation multiple applications. The newer design will be discussed in the next section. At this time, other system architectures were beginning to be introduced into the community such as those provided by ALIS Technologies; Systran and in FALCON. Because this is a specific lessons learned about the CyberTrans experience, it is beyond the scope of this paper to compare this architecture with other architectures.",
126
+ "cite_spans": [],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "Figure 1: Original Architectural Flow",
130
+ "sec_num": null
131
+ },
132
+ {
133
+ "text": "Because of the addition of new tools and technologies into the CyberTrans model, it became necessary to re-engineer the server design. As part of the transition of a prototype system into a production-quality system, the reengineering also addressed issues such as system administration support, robust operation for 24/7 service and metrics. As can sometimes be the case, the prototype started being used in continuous operation, causing a demand for improvement concurrent with ongoing operation. The reengineering was shaped by the fact that the system had expanded for new capabilities (in pre-and post-processing); the fact that the system had to remain operational all of the time; the fact that the system was being used in ways that were unanticipated by COTS/GOTS MT developers; the fact that the system was to be multi-platform (to include PCs) for an expanding list of languages and the fact that the system was beginning to be seen as providing a service similar to other system services (such as e-mail). These factors caused the system to be reengineered in an enterprise services model as an object-oriented design.",
134
+ "cite_spans": [],
135
+ "ref_spans": [],
136
+ "eq_spans": [],
137
+ "section": "Updated Design",
138
+ "sec_num": "1.2"
139
+ },
140
+ {
141
+ "text": "In this architecture, demon processes broker translations -a request for translation is passed to the system by a client program; the translation is planned out as a series of translation-related services; each service is requested from the responsible system object and the resulting translation is then passed back to the client programs. Implemented in a combination of C++, Java and Lisp, the new version represents a service-oriented architecture. Figure 2 shows an updated architecture pictur e . Translation services include Systran (French, German, Italian, Spanish, Portuguese, Russian, Serbo-Croatian, Ukrainian, Chinese, Japanese, Korean into English); Globalink (French, German, Spanish, Russian to/from English) and Gister (language set list unavailable) with plans to incorporate engines for languages such as Arabic. Language processing services include language/code set identification; code set conversion; data normalisation, including diacritic reinsertion and generalised spell checking; format preservation for Hyper-Text Mark-up Language (HTML) documents; nottranslated word preservation and others. The clients remain e-mail, Web and FrameMaker. Platforms include both Unix and PC platforms for clients and with the capability to incorporate PC-based tools as part of the service. Having described the architectures, we turn to lessons learned as a result of having an operational MT capability, running 24\u00a27 for over 6000 translations per month.",
142
+ "cite_spans": [],
143
+ "ref_spans": [
144
+ {
145
+ "start": 453,
146
+ "end": 461,
147
+ "text": "Figure 2",
148
+ "ref_id": null
149
+ }
150
+ ],
151
+ "eq_spans": [],
152
+ "section": "Updated Design",
153
+ "sec_num": "1.2"
154
+ },
155
+ {
156
+ "text": "The biggest surprise we encountered in fielding CyberTrans is related to the expectations of the users. The average user initially approaches MT. with an almost Star Trek-like view -that it is possible for the system to understand and translate perfectly any given document regardless of content, form, format or even language. While this is an unrealistic expectation of this or any system, an overriding goal which emerges is that embedded MT should be as automated as possible. This represents a fundamental shift from the traditional view of MT as an interactive, userdriven process to as a passive, data-driven process. We will now describe four areas where specific technologies need development for the smooth incorporation of MT into a \"real-world\" setting: language and code set identification; data normalisation; format preservation and lexicon development. Finally we will describe software engineering issues and challenges which facilitate the straight-forward embedding of MT into existing processes.",
157
+ "cite_spans": [],
158
+ "ref_spans": [],
159
+ "eq_spans": [],
160
+ "section": "Implementing Embedded MT",
161
+ "sec_num": "2"
162
+ },
163
+ {
164
+ "text": "Knowing the language and encoding~ or code set, of a document is a necessary first step in utilizing on-line text. For automated MT, the identification of the language(s) or code set of a text is necessary for systems to operate efffectively. A Spanish-English translation system will not successfully process an Italian document and will be even less successful in processing a Chinese one. The first requiren:tent, then, which enables automated, embedded processing is the detection of the languagle(s) and code set(s) of a given document.",
165
+ "cite_spans": [],
166
+ "ref_spans": [],
167
+ "eq_spans": [],
168
+ "section": "Language / Code Set Identification",
169
+ "sec_num": "2.1"
170
+ },
171
+ {
172
+ "text": "In preparing the tools which permit the accurate detection of languages and code sets in an operational setting, we found characteristics of the data which carry throughout all of the processing we discuss. The first, and foremost, is that the data is not clean, well-formed text. Frequently documents will have a mix of languages (both human and machine), code sets (including formatting information) and information pieces (such as e-mail headers, ASCII-art, etc.). For example, chat is very idiomatic and has many pre-defined acronyms. Finally, about 10% of translation materials are very short -between one and ten words. All of these factors contribute to the difficulty of preparing a service for language and code set identification as well as other natural language processing (NLP) tools. The implemented algorithm for language/code set identification is a trainable n-graph algorithm and has been discussed in more detail elsewhere (Reeder & Geisler, 1998) . Currently our language and code set identification works for on the order of 30 languages (mostly European) and about 1130 code sets (including many ASCII transliterations) yet these numbers are still insufficient for the data routinely processed by CyberTrans.",
173
+ "cite_spans": [
174
+ {
175
+ "start": 942,
176
+ "end": 966,
177
+ "text": "(Reeder & Geisler, 1998)",
178
+ "ref_id": "BIBREF2"
179
+ }
180
+ ],
181
+ "ref_spans": [],
182
+ "eq_spans": [],
183
+ "section": "Language / Code Set Identification",
184
+ "sec_num": "2.1"
185
+ },
186
+ {
187
+ "text": "The step after language identification is data normalisation and will be discussed as the next result of lessons learned from CyberTrans.",
188
+ "cite_spans": [],
189
+ "ref_spans": [],
190
+ "eq_spans": [],
191
+ "section": "Language / Code Set Identification",
192
+ "sec_num": "2.1"
193
+ },
194
+ {
195
+ "text": "Machine translation works best with clean, wellformed input text. Operationally, this is an ideal, but not reality. In reality, data that is being translated can suffer from many types of errors including misspellings and grammar mistakes, missing diacritics and transliterati:on problems, scanning errors and transmission obstacles. With her evaluation of MT systems, Flanagan (1994) describes reasons for errors in translation. MT systems were examined in light of the outputs of translation and the types of errors that can be generated by the translation engine. These include spelling errors, words not translated, incorrect accenting, incorrect capitalisation as well as grammatical and semantic errors. This study does not look at the kinds of inputs that can cause failure in a translation process. A second paper (Flanagan, 1996) examines the quality of inputs to the translation process, arguing for pre-editing tools such as spelling checkers. Yet, this continues to be an interactive view of the translation process. Another study (Reeder & Loehr, 1998) show at least 40% of translation failures (not translated tokens) are attributable to the types of errors, or non-normalised data, presented here.",
196
+ "cite_spans": [
197
+ {
198
+ "start": 369,
199
+ "end": 384,
200
+ "text": "Flanagan (1994)",
201
+ "ref_id": "BIBREF1"
202
+ },
203
+ {
204
+ "start": 822,
205
+ "end": 838,
206
+ "text": "(Flanagan, 1996)",
207
+ "ref_id": "BIBREF1"
208
+ },
209
+ {
210
+ "start": 1043,
211
+ "end": 1065,
212
+ "text": "(Reeder & Loehr, 1998)",
213
+ "ref_id": "BIBREF3"
214
+ }
215
+ ],
216
+ "ref_spans": [],
217
+ "eq_spans": [],
218
+ "section": "Data Normalisation",
219
+ "sec_num": "2.2"
220
+ },
221
+ {
222
+ "text": "In an embedded process, the system must automatically detect and correct errors. Instead of being random, the errors are regular, especially in generated or automated documents. For instance, a writer of French without a French keyboard will systematically omit diacritics. In this case, the errors in the document are far from random. Along these lines, we have grouped similar error sources together. Operational data can have one or more of these error types: misspellings and grammar mistakes; missing diacritics; mixed language documents; improper capitalisation; transliteration / transcription / code set mismatch; scanning (OCR) errors; web page or e-mail specific standards; conversion errors; network transmission errors; segmentation problems; character omissions. These error types can be categorised by the origination of the problem as in Table 1 . Much current CyberTrans work consists of developing and transitioning tools which can accurately detect and remediate errors, converting documents into a standard (normalised) form. The order in which the normalisation techniques are applied is a subject of ongoing research.",
223
+ "cite_spans": [],
224
+ "ref_spans": [
225
+ {
226
+ "start": 853,
227
+ "end": 860,
228
+ "text": "Table 1",
229
+ "ref_id": "TABREF0"
230
+ }
231
+ ],
232
+ "eq_spans": [],
233
+ "section": "Data Normalisation",
234
+ "sec_num": "2.2"
235
+ },
236
+ {
237
+ "text": "Documents arrive in many formats that have meaning in their structure. For instance, web pages contain HTML indicators plus language. A challenge for MT is that the HTML should not be translated whereas the text must be. The fde name rouge,gif should not be translated to red.gif if the web page is to be reassembled. Consider also, the task of translating a bulleted list. It is desirable to maintain a bulleted list with appropriate syntax. Table headings and labels also should be translated without destroying the format of the table. This, too, is a matter of ongoing research.",
238
+ "cite_spans": [],
239
+ "ref_spans": [
240
+ {
241
+ "start": 443,
242
+ "end": 457,
243
+ "text": "Table headings",
244
+ "ref_id": null
245
+ }
246
+ ],
247
+ "eq_spans": [],
248
+ "section": "Format Preservation",
249
+ "sec_num": "2.3"
250
+ },
251
+ {
252
+ "text": "The highest portion of the cost of providing a machine translation capability reflects the amount of lexicography that must be done -as much as 70% of the cost of a machine translation engine. In addition, the government requires specialised lexical repositories which reflect unique domains such as military, legal, scientific and medical. We must fred ways to update lexicons intelligently, using such sources as dictionaries, working aids, specialised word lists and other information reservoirs to provide a broad coverage of words.",
253
+ "cite_spans": [],
254
+ "ref_spans": [],
255
+ "eq_spans": [],
256
+ "section": "Lexicon Update",
257
+ "sec_num": "2.4"
258
+ },
259
+ {
260
+ "text": "One current approach is to record the list of words which do not translate and automate the handling of these. An issue in this is determining how to provide sufficient context for lexicographers. Additionally, different translation engines encode lexical entries in widely differing ways, meaning that sharing lexicon entries amongst translation capabilities is problematic. We are working on a lexicon service bureau (LSB) designed to facilitate the sharing of lexical materials. One part of this is the automatic extraction of lexical entries from on-line, machine readable dictionaries. Another part is the analysis of not-translated words. A final portion is research into a specialised category of lexical items -named entities. As with other processes in this section, we are addressing this as part of ongoing research -each advance raises the bar for the level of input text that can be handled.",
261
+ "cite_spans": [],
262
+ "ref_spans": [],
263
+ "eq_spans": [],
264
+ "section": "Lexicon Update",
265
+ "sec_num": "2.4"
266
+ },
267
+ {
268
+ "text": "A final lessons learned from the CyberTrans experience relates to the software engineering challenges of putting together diverse technologies from many vendors for multiple purposes. The first of these is the problem of API's from COTS systems and GOTS systems. Behind our initial command line, fde-based interaction lies the fact that translation engines do not routinely provide APIs, presenting an integration challenge. Platform-specific tools also contribute to the integration problem. The second software engineering challenge stemming from this is the amount of time necessary to bring up a translation engine. A good translation engine has a lexicon in the tens of thousands of entries which takes time to load up. Currently, the loading of a translation engine takes as much time as all of the rest of the pre-and post-processing combined. A third challenge is deciding on and enacting a language representation. Although Unicode makes good strides towards the uniform sharing of data, many of the tools needed to convert real data into Unicode need to be improved. Additionally, current implementations of Java and C++ do not have all of the necessary pieces for seamlessly handling of a wide range of languages. Finally, the challenge is in the management and ordering of the translation process. To effectively manage a translation, requires a translation manager which can be a single point of failure.",
269
+ "cite_spans": [],
270
+ "ref_spans": [],
271
+ "eq_spans": [],
272
+ "section": "Software Engineering Challenges",
273
+ "sec_num": "2.5"
274
+ },
275
+ {
276
+ "text": "We have identified the lessons learned from a specific embedding of MT into an overall process. We have identified issues and concerns resulting from this experience. We continue to refine and examine the issues of supporting MT, of making it palatable and viable in multiple applications and frameworks. This system is just one example of embedding MT. Future work must compare this effort to other work in the field.",
277
+ "cite_spans": [],
278
+ "ref_spans": [],
279
+ "eq_spans": [],
280
+ "section": "Conclusion",
281
+ "sec_num": null
282
+ }
283
+ ],
284
+ "back_matter": [],
285
+ "bib_entries": {
286
+ "BIBREF0": {
287
+ "ref_id": "b0",
288
+ "title": "Machine Translation Technology Survey and Evaluation Report",
289
+ "authors": [
290
+ {
291
+ "first": "J",
292
+ "middle": [],
293
+ "last": "Benoit",
294
+ "suffix": ""
295
+ },
296
+ {
297
+ "first": "P",
298
+ "middle": [],
299
+ "last": "Jordan",
300
+ "suffix": ""
301
+ },
302
+ {
303
+ "first": "B",
304
+ "middle": [],
305
+ "last": "Dorr",
306
+ "suffix": ""
307
+ }
308
+ ],
309
+ "year": 1991,
310
+ "venue": "MITRE Techlfical Report",
311
+ "volume": "",
312
+ "issue": "",
313
+ "pages": "",
314
+ "other_ids": {},
315
+ "num": null,
316
+ "urls": [],
317
+ "raw_text": "Benoit, J., Jordan, P., Dorr, B. (1991) Machine Translation Technology Survey and Evaluation Report. MITRE Techlfical Report.",
318
+ "links": null
319
+ },
320
+ "BIBREF1": {
321
+ "ref_id": "b1",
322
+ "title": "Two Years OnJine: Experiences, Challenges and Trends",
323
+ "authors": [
324
+ {
325
+ "first": "M",
326
+ "middle": [],
327
+ "last": "Flanagan",
328
+ "suffix": ""
329
+ }
330
+ ],
331
+ "year": 1994,
332
+ "venue": "Technology Partnerships for Crossing the Language Barrier: Proceedings of the First Conference of the Association for Machine Translation in the Americas",
333
+ "volume": "",
334
+ "issue": "",
335
+ "pages": "192--197",
336
+ "other_ids": {},
337
+ "num": null,
338
+ "urls": [],
339
+ "raw_text": "Flanagan, M. (1994) Error Classification for MT Evaluation. In Technology Partnerships for Crossing the Language Barrier: Proceedings of the First Conference of the Association for Machine Translation in the Americas, Columbia, MD. Flanagan, M. (1996) Two Years OnJine: Experiences, Challenges and Trends. In Expanding MT Horizons: Proceedings of the Second Conference of the Association for Machine Translation in the Americas, (pp. 192-197). Washington, DC: AMTA.",
340
+ "links": null
341
+ },
342
+ "BIBREF2": {
343
+ "ref_id": "b2",
344
+ "title": "Multi-Byte Issues in Encoding / Language Identification",
345
+ "authors": [
346
+ {
347
+ "first": "F",
348
+ "middle": [],
349
+ "last": "Reeder",
350
+ "suffix": ""
351
+ },
352
+ {
353
+ "first": "J",
354
+ "middle": [],
355
+ "last": "Geisler",
356
+ "suffix": ""
357
+ }
358
+ ],
359
+ "year": 1998,
360
+ "venue": "Proceedings of the Embedded MT Workshop, AMTA-98",
361
+ "volume": "",
362
+ "issue": "",
363
+ "pages": "",
364
+ "other_ids": {},
365
+ "num": null,
366
+ "urls": [],
367
+ "raw_text": "Reeder, F. & Geisler, J. (1998) Multi-Byte Issues in Encoding / Language Identification. In Proceedings of the Embedded MT Workshop, AMTA-98. Langhome, PA.",
368
+ "links": null
369
+ },
370
+ "BIBREF3": {
371
+ "ref_id": "b3",
372
+ "title": "Finding the Riight Words: An Analysis of Not-Translated Words in Machine Translation",
373
+ "authors": [
374
+ {
375
+ "first": "F",
376
+ "middle": [],
377
+ "last": "Reeder",
378
+ "suffix": ""
379
+ },
380
+ {
381
+ "first": "D",
382
+ "middle": [],
383
+ "last": "Loehr",
384
+ "suffix": ""
385
+ }
386
+ ],
387
+ "year": 1998,
388
+ "venue": "Proceedings of the 3rdconference of the Association for Machine Translation in the Americas, AMTA-98",
389
+ "volume": "",
390
+ "issue": "",
391
+ "pages": "",
392
+ "other_ids": {},
393
+ "num": null,
394
+ "urls": [],
395
+ "raw_text": "Reeder, F. & Loehr, D. (1998) Finding the Riight Words: An Analysis of Not-Translated Words in Machine Translation. In Proceedings of the 3rdconference of the Association for Machine Translation in the Americas, AMTA-98. Langhome, PA.",
396
+ "links": null
397
+ }
398
+ },
399
+ "ref_entries": {
400
+ "FIGREF0": {
401
+ "uris": null,
402
+ "num": null,
403
+ "text": "Figure 2: Current Architectural Flow",
404
+ "type_str": "figure"
405
+ },
406
+ "TABREF0": {
407
+ "num": null,
408
+ "text": "",
409
+ "html": null,
410
+ "type_str": "table",
411
+ "content": "<table><tr><td>Segmentation</td></tr><tr><td>Character omissions</td></tr><tr><td>Mixed languages</td></tr><tr><td>Input Source</td></tr><tr><td>Misspellings</td></tr><tr><td>Grammar mistakes</td></tr><tr><td>Missing Diacritics</td></tr><tr><td>Transliterations</td></tr><tr><td>Capitalisation</td></tr><tr><td>Production Source</td></tr><tr><td>Scanning / OCR</td></tr><tr><td>Electronic representation</td></tr><tr><td>Conversion errors</td></tr><tr><td>Acquisition Source</td></tr><tr><td>Network transmission</td></tr></table>"
412
+ }
413
+ }
414
+ }
415
+ }
Full_text_JSON/prefixW/json/W00/W00-0504.json ADDED
@@ -0,0 +1,1338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0504",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:32:38.863384Z"
6
+ },
7
+ "title": "Mandarin-English Information (MEI): Investigating Translingual Speech Retrieval",
8
+ "authors": [
9
+ {
10
+ "first": "Helen",
11
+ "middle": [],
12
+ "last": "Meng",
13
+ "suffix": "",
14
+ "affiliation": {
15
+ "laboratory": "3University of Maryland and 4Academia Sinica",
16
+ "institution": "2Johns Hopkins University",
17
+ "location": {
18
+ "country": "Taiwan"
19
+ }
20
+ },
21
+ "email": "hmmeng@se.cuhk.edu.hk"
22
+ },
23
+ {
24
+ "first": "Sanjeev",
25
+ "middle": [],
26
+ "last": "Khudanpur",
27
+ "suffix": "",
28
+ "affiliation": {
29
+ "laboratory": "3University of Maryland and 4Academia Sinica",
30
+ "institution": "2Johns Hopkins University",
31
+ "location": {
32
+ "country": "Taiwan"
33
+ }
34
+ },
35
+ "email": "sanjeev@clsp.jhu.edu"
36
+ },
37
+ {
38
+ "first": "Gina",
39
+ "middle": [],
40
+ "last": "Levow",
41
+ "suffix": "",
42
+ "affiliation": {
43
+ "laboratory": "3University of Maryland and 4Academia Sinica",
44
+ "institution": "2Johns Hopkins University",
45
+ "location": {
46
+ "country": "Taiwan"
47
+ }
48
+ },
49
+ "email": ""
50
+ },
51
+ {
52
+ "first": "Douglas",
53
+ "middle": [
54
+ "W"
55
+ ],
56
+ "last": "Oard",
57
+ "suffix": "",
58
+ "affiliation": {
59
+ "laboratory": "3University of Maryland and 4Academia Sinica",
60
+ "institution": "2Johns Hopkins University",
61
+ "location": {
62
+ "country": "Taiwan"
63
+ }
64
+ },
65
+ "email": "oard@@glue.umd.edu"
66
+ },
67
+ {
68
+ "first": "Hsin-Min",
69
+ "middle": [],
70
+ "last": "Wang'",
71
+ "suffix": "",
72
+ "affiliation": {
73
+ "laboratory": "3University of Maryland and 4Academia Sinica",
74
+ "institution": "2Johns Hopkins University",
75
+ "location": {
76
+ "country": "Taiwan"
77
+ }
78
+ },
79
+ "email": ""
80
+ }
81
+ ],
82
+ "year": "",
83
+ "venue": null,
84
+ "identifiers": {},
85
+ "abstract": "We describe a system which supports English text queries searching for Mandarin Chinese spoken documents. This is one of the first attempts to tightly couple speech recognition with machine translation technologies for cross-media and cross-language retrieval. The Mandarin Chinese news audio are indexed with word and subword units by speech recognition. Translation of these multiscale units can effect cross-language information retrieval. The integrated technologies will be evaluated based on the performance of translingnal speech retrieval.",
86
+ "pdf_parse": {
87
+ "paper_id": "W00-0504",
88
+ "_pdf_hash": "",
89
+ "abstract": [
90
+ {
91
+ "text": "We describe a system which supports English text queries searching for Mandarin Chinese spoken documents. This is one of the first attempts to tightly couple speech recognition with machine translation technologies for cross-media and cross-language retrieval. The Mandarin Chinese news audio are indexed with word and subword units by speech recognition. Translation of these multiscale units can effect cross-language information retrieval. The integrated technologies will be evaluated based on the performance of translingnal speech retrieval.",
92
+ "cite_spans": [],
93
+ "ref_spans": [],
94
+ "eq_spans": [],
95
+ "section": "Abstract",
96
+ "sec_num": null
97
+ }
98
+ ],
99
+ "body_text": [
100
+ {
101
+ "text": "Massive quantities of audio and multimedia programs are becoming available. For example, in mid-February 2000, www.real.com listed 1432 radio stations, 381",
102
+ "cite_spans": [],
103
+ "ref_spans": [],
104
+ "eq_spans": [],
105
+ "section": "Introduction",
106
+ "sec_num": "1."
107
+ },
108
+ {
109
+ "text": "Internet-only broadcasters, and 86 television stations with Internet-accessible content, with 529 broadcasting in languages other than English. Monolingual speech retrieval is now practical, as evidenced by services such as SpeechBot (speechbot.research.compaq.com), and it is clear that there is a potential demand for translingual speech retrieval if effective techniques can be developed. The Mandarin-English Information (MEI) project represents one of the first efforts in that direction.",
110
+ "cite_spans": [],
111
+ "ref_spans": [],
112
+ "eq_spans": [],
113
+ "section": "Introduction",
114
+ "sec_num": "1."
115
+ },
116
+ {
117
+ "text": "MEI is one of the four projects selected for the Johns Hopkins University (JHU) Summer Workshop 2000.1 Our research focus is on the integration of speech recognition and embedded translation technologies in the context of translingual speech retrieval.",
118
+ "cite_spans": [],
119
+ "ref_spans": [],
120
+ "eq_spans": [],
121
+ "section": "Introduction",
122
+ "sec_num": "1."
123
+ },
124
+ {
125
+ "text": "Possible applications of this work include audio and video browsing, spoken document retrieval, automated routing of information, and automatically alerting the user when special events occur.",
126
+ "cite_spans": [],
127
+ "ref_spans": [],
128
+ "eq_spans": [],
129
+ "section": "Introduction",
130
+ "sec_num": "1."
131
+ },
132
+ {
133
+ "text": "At the time of this writing, most of the MEI team members have been identified. This paper provides an update beyond our first proposal [Meng et al., 2000] . We present some ongoing work of our current team members, as well as our ideas on an evolving plan for the upcoming JHU Summer Workshop 2000. We believe the input from the research community will benefit us greatly in formulating ourfinal plan.",
134
+ "cite_spans": [
135
+ {
136
+ "start": 136,
137
+ "end": 155,
138
+ "text": "[Meng et al., 2000]",
139
+ "ref_id": "BIBREF11"
140
+ }
141
+ ],
142
+ "ref_spans": [],
143
+ "eq_spans": [],
144
+ "section": "Introduction",
145
+ "sec_num": "1."
146
+ },
147
+ {
148
+ "text": "The earliest work on large-vocabulary crosslanguage information retrieval from free-text (i,e., without manual topic indexing) was reported in 1990 [Landauer and Littman, 1990] , and the topic has received increasing attention over the last five years [Oard and Diekema, 1998 ]. Work on large-vocabulary retrieval from recorded speech is more recent, with some initial work reported in 1995 using subword indexing [Wechsler and Schauble, 1995] , followed by the first TREC 2 Spoken Document Retrieval (SDR) I http://www.clsp,jhu.edu/ws2000/ 2 Text REtrieval Conference, http://trec.nist.gov evaluation [Garofolo et al., 2000] . The Topic Detection and Tracking (TDT) evaluations, which started in 1998, fall within our definition of speech retrieval for this purpose, differing from other evaluations principally in the nature of the criteria that human assessors use when assessing the relevance of a news stow to an information need. In TDT, stories are assessed for relevance to an event, while in TREC stories are assessed for relevance to an explicitly stated information need that is often subject-rather than event-oriented.",
149
+ "cite_spans": [
150
+ {
151
+ "start": 148,
152
+ "end": 176,
153
+ "text": "[Landauer and Littman, 1990]",
154
+ "ref_id": "BIBREF7"
155
+ },
156
+ {
157
+ "start": 252,
158
+ "end": 275,
159
+ "text": "[Oard and Diekema, 1998",
160
+ "ref_id": "BIBREF13"
161
+ },
162
+ {
163
+ "start": 414,
164
+ "end": 443,
165
+ "text": "[Wechsler and Schauble, 1995]",
166
+ "ref_id": null
167
+ },
168
+ {
169
+ "start": 602,
170
+ "end": 625,
171
+ "text": "[Garofolo et al., 2000]",
172
+ "ref_id": "BIBREF7"
173
+ }
174
+ ],
175
+ "ref_spans": [],
176
+ "eq_spans": [],
177
+ "section": "Translingual Information Retrieval",
178
+ "sec_num": "2.1"
179
+ },
180
+ {
181
+ "text": "The TDT-3 evaluation (http://morph.ldc.upenn.edu/Projects/TDT3/) marked the first case of translingual speech retrieval - the task of finding information in a collection of recorded speech based on evidence of the information need that might be expressed (at least partially) in a different language. Translingual speech retrieval thus merges two lines of research that have developed separately until now. In the TDT-3 topic tracking evaluation, recognizer transcripts containing recognition errors were available, and it appears that every team made use of them. This provides a valuable point of reference for investigation of techniques that more tightly couple speech recognition with translingual retrieval. We plan to explore one way of doing this in the Mandarin-English Information (MEI) project.",
182
+ "cite_spans": [],
183
+ "ref_spans": [],
184
+ "eq_spans": [],
185
+ "section": "Translingual Information Retrieval",
186
+ "sec_num": "2.1"
187
+ },
188
+ {
189
+ "text": "In order to retrieve Mandarin audio documents, we should consider a number of linguistic characteristics of the Chinese language:",
190
+ "cite_spans": [],
191
+ "ref_spans": [],
192
+ "eq_spans": [],
193
+ "section": "The Chinese Language",
194
+ "sec_num": "2.2"
195
+ },
196
+ {
197
+ "text": "The Chinese language has many dialects. Different dialects are characterized by their differences in phonetics, vocabulary and syntax. Mandarin, also known as Putonghua (\"the common language\"), is the most widely used dialect. Another major dialect is Cantonese, predominant in Hong Kong, Macau, South China and many overseas Chinese communities.",
198
+ "cite_spans": [],
199
+ "ref_spans": [],
200
+ "eq_spans": [],
201
+ "section": "The Chinese Language",
202
+ "sec_num": "2.2"
203
+ },
204
+ {
205
+ "text": "Chinese is a syllable-based language, where each syllable carries a lexical tone. Mandarin has about 400 base syllables and four lexical tones, plus a \"light\" tone for reduced syllables. There are about 1,200 distinct tonal syllables for Mandarin. Certain syllable-tone combinations are non-existent in the language. The acoustic correlates of the lexical tone include the syllable's fundamental frequency (pitch contour) and duration. However, these acoustic features are also highly dependent on prosodic variations of spoken utterances.",
206
+ "cite_spans": [],
207
+ "ref_spans": [],
208
+ "eq_spans": [],
209
+ "section": "The Chinese Language",
210
+ "sec_num": "2.2"
211
+ },
212
+ {
213
+ "text": "The structure of Mandarin (base) syllables is (CG)V(X), where (CG) is the syllable onset (C is the initial consonant and G is the optional medial glide), V is the nuclear vowel, and X is the coda (which may be a glide, alveolar nasal or velar nasal). Syllable onsets and codas are optional.",
214
+ "cite_spans": [],
215
+ "ref_spans": [],
216
+ "eq_spans": [],
217
+ "section": "The Chinese Language",
218
+ "sec_num": "2.2"
219
+ },
220
+ {
221
+ "text": "Generally C is known as the syllable initial, and the rest (GVX) as the syllable final. 4 Mandarin has approximately 21 initials and 39 finals. 5",
222
+ "cite_spans": [],
223
+ "ref_spans": [],
224
+ "eq_spans": [],
225
+ "section": "The Chinese Language",
226
+ "sec_num": "2.2"
227
+ },
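The initial/final decomposition just described is easy to make concrete. The sketch below is purely illustrative (it is not part of the MEI system); the 21-initial inventory is the standard pinyin set, and the function name is ours.

```python
# Illustrative sketch: split a toneless pinyin syllable into initial (C) and final (GVX).
# Two-letter initials must be tried before their one-letter prefixes.
INITIALS = [
    "zh", "ch", "sh",
    "b", "p", "m", "f", "d", "t", "n", "l",
    "g", "k", "h", "j", "q", "x", "r", "z", "c", "s",
]

def split_syllable(syllable: str) -> tuple[str, str]:
    """Return (initial, final); the initial is empty for zero-initial syllables."""
    for initial in INITIALS:
        if syllable.startswith(initial):
            return initial, syllable[len(initial):]
    return "", syllable  # e.g. "an", "er", "ai" have no initial

if __name__ == "__main__":
    for s in ["zhong", "guo", "xing", "ai", "er", "lan"]:
        print(s, "->", split_syllable(s))
```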
228
+ {
229
+ "text": "In its written form, Chinese is a sequence of characters. A word may contain one or more characters. Each character is pronounced as a tonal syllable. The character-syllable mapping is degenerate. On one hand, a given character may have multiple syllable pronunciations - for example, the character 行 may be pronounced as /hang2/, /hang4/, or /xing2/. On the other hand, a given tonal syllable may correspond to multiple characters. Consider the two-syllable pronunciation /fu4 shu4/, which corresponds to a two-character word. Possible homophones include 富庶 (meaning \"rich\"), 負數 (\"negative number\"), 複數 (\"complex number\" or \"plural\"), and 複述 (\"repeat\"). 7",
230
+ "cite_spans": [],
231
+ "ref_spans": [],
232
+ "eq_spans": [],
233
+ "section": "The Chinese Language",
234
+ "sec_num": "2.2"
235
+ },
236
+ {
237
+ "text": "Aside from homographs and homophones, another source of ambiguity in the Chinese language is the definition of a Chinese word. The word has no delimiters, and the distinction between a word and a phrase is often vague. The lexical structure of the Chinese word is very different from that of English. Inflectional forms are minimal, while morphology and word derivations abide by a different set of rules. A word may inherit the syntax and semantics of (some of) its compositional characters, for example, 紅 means red (a noun or an adjective), 色 means color (a noun), and 紅色 together means \"the color red\" (a noun) or simply \"red\" (an adjective). Alternatively, a word may take on totally different characteristics of its own, e.g. 東 means east (a noun or an adjective), 西 means west (a noun or an adjective), and 東西 together means thing (a noun). Yet another case is where the compositional characters of a word do not form independent lexical entries in isolation, e.g. a two-character verb meaning \"fancy\" whose characters do not occur individually. Possible ways of deriving new words from characters are legion. The problem of identifying the word string in a character sequence is known as the segmentation / tokenization problem. Consider the syllable string:",
238
+ "cite_spans": [],
239
+ "ref_spans": [],
240
+ "eq_spans": [],
241
+ "section": "The Chinese Language",
242
+ "sec_num": "2.2"
243
+ },
244
+ {
245
+ "text": "/zhe4 yi1 wan3 hui4 ru2 chang2 ju3 xing2/",
246
+ "cite_spans": [],
247
+ "ref_spans": [],
248
+ "eq_spans": [],
249
+ "section": "The Chinese Language",
250
+ "sec_num": "2.2"
251
+ },
252
+ {
253
+ "text": "The corresponding character string has three possible segmentations -all are correct, but each involves a distinct set of words:",
254
+ "cite_spans": [],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "The Chinese Language",
258
+ "sec_num": "2.2"
259
+ },
260
+ {
261
+ "text": "(Meaning: It will take place tonight as usual.) (Meaning: The evening banquet will take place as usual.) (Meaning: If this evening banquet takes place frequently...)",
262
+ "cite_spans": [],
263
+ "ref_spans": [],
264
+ "eq_spans": [],
265
+ "section": "The Chinese Language",
266
+ "sec_num": "2.2"
267
+ },
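The segmentation ambiguity in this example can be reproduced with a toy greedy longest-match segmenter. This sketch is ours, with a deliberately tiny, made-up lexicon of pinyin "words"; real tokenizers are considerably more sophisticated.

```python
# Toy greedy longest-match segmentation over a syllable sequence.
def greedy_segment(syllables, lexicon, max_len=4):
    words, i = [], 0
    while i < len(syllables):
        for n in range(min(max_len, len(syllables) - i), 0, -1):
            candidate = tuple(syllables[i:i + n])
            if n == 1 or candidate in lexicon:   # single syllables always fall through
                words.append(" ".join(candidate))
                i += n
                break
    return words

if __name__ == "__main__":
    lexicon = {("wan3", "hui4"), ("ru2", "chang2"), ("ju3", "xing2")}
    syllables = "zhe4 yi1 wan3 hui4 ru2 chang2 ju3 xing2".split()
    print(greedy_segment(syllables, lexicon))
    # ['zhe4', 'yi1', 'wan3 hui4', 'ru2 chang2', 'ju3 xing2'] -- one of the readings above
```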
268
+ {
269
+ "text": "The above considerations lead to a number of techniques we plan to use for our task. We concentrate on three equally critical problems related to our theme of translingual speech retrieval: (i) indexing Mandarin Chinese audio with word and subword units, (ii) translating variable-size units for cross-language information retrieval, and (iii) devising effective retrieval strategies for English text queries and Mandarin Chinese news audio.",
270
+ "cite_spans": [],
271
+ "ref_spans": [],
272
+ "eq_spans": [],
273
+ "section": "The Chinese Language",
274
+ "sec_num": "2.2"
275
+ },
276
+ {
277
+ "text": "A popular approach to spoken document retrieval is to apply Large-Vocabulary s Examples drawn from [Meng and Ip, 1999] .",
278
+ "cite_spans": [
279
+ {
280
+ "start": 99,
281
+ "end": 118,
282
+ "text": "[Meng and Ip, 1999]",
283
+ "ref_id": "BIBREF10"
284
+ }
285
+ ],
286
+ "ref_spans": [],
287
+ "eq_spans": [],
288
+ "section": "Multiscale Audio Indexing",
289
+ "sec_num": "3."
290
+ },
291
+ {
292
+ "text": "Continuous Speech Recognition (LVCSR) 9 for audio indexing, followed by text retrieval techniques. Mandarin Chinese presents a challenge for word-level indexing by LVCSR, because of the ambiguity in tokenizing a sentence into words (as mentioned earlier). Furthermore, LVCSR with a static vocabulary is hampered by the out-of-vocabulary (OOV) problem, especially when searching sources with topical coverage as diverse as that found in broadcast news.",
293
+ "cite_spans": [],
294
+ "ref_spans": [],
295
+ "eq_spans": [],
296
+ "section": "Multiscale Audio Indexing",
297
+ "sec_num": "3."
298
+ },
299
+ {
300
+ "text": "By virtue of the monosyllabic nature of the Chinese language and its dialects, the syllable inventory can provide a complete phonological coverage for spoken documents, and circumvent the OOV problem in news audio indexing, offering the potential for greater recall in subsequent retrieval. The approach thus supports searches for previously unknown query terms in the indexed audio.",
301
+ "cite_spans": [],
302
+ "ref_spans": [],
303
+ "eq_spans": [],
304
+ "section": "Multiscale Audio Indexing",
305
+ "sec_num": "3."
306
+ },
307
+ {
308
+ "text": "The pros and cons of subword indexing for an English spoken document retrieval task was studied in [Ng, 2000] . Ng pointed out that the exclusion of lexical knowledge when subword indexing is performed in isolation may adversely impact discrimination power for retrieval, but that some of that impact can be mitigated by modeling sequential constraints among subword units. We plan to investigate the efficacy of using both word and subword units for Mandarin audio indexing [Meng et al., 2000] . Although Ng found that such an approach produced little gain over words alone for English, the structure of Mandarin Chinese may produce more useful subword features.",
309
+ "cite_spans": [
310
+ {
311
+ "start": 99,
312
+ "end": 109,
313
+ "text": "[Ng, 2000]",
314
+ "ref_id": "BIBREF12"
315
+ },
316
+ {
317
+ "start": 475,
318
+ "end": 494,
319
+ "text": "[Meng et al., 2000]",
320
+ "ref_id": "BIBREF11"
321
+ }
322
+ ],
323
+ "ref_spans": [],
324
+ "eq_spans": [],
325
+ "section": "Multiscale Audio Indexing",
326
+ "sec_num": "3."
327
+ },
328
+ {
329
+ "text": "We have thus far used overlapping syllable Ngrams for spoken document retrieval for two Chinese dialects -Mandarin and Cantonese. Results on a known-item retrieval task with over 1,800 error-free news transcripts [Meng et al., 1999] indicate that constraints from overlapping bigrams can yield significant improvements in retrieval performance over syllable unigrams, producing retrieval performance competitive with that obtained using automatically tokenized Chinese words.",
330
+ "cite_spans": [
331
+ {
332
+ "start": 213,
333
+ "end": 232,
334
+ "text": "[Meng et al., 1999]",
335
+ "ref_id": "BIBREF10"
336
+ }
337
+ ],
338
+ "ref_spans": [],
339
+ "eq_spans": [],
340
+ "section": "Modeling Syllable Sequence Constraints",
341
+ "sec_num": "3.1"
342
+ },
343
+ {
344
+ "text": "The study in [Chen, Wang and Lee, 2000] also used syllable pairs with skipped syllables in between. This is because many Chinese abbreviations are derived from skipping characters, e.g. 國家科學委員會 \"National Science Council\" can be abbreviated as 國科會 (including only the first, third and the last characters). Moreover, synonyms often differ by one or two characters, e.g. both 中華文化 and 中國文化 mean \"Chinese culture\". Inclusion of these \"skipped syllable pairs\" also contributed to retrieval performance.",
345
+ "cite_spans": [
346
+ {
347
+ "start": 13,
348
+ "end": 39,
349
+ "text": "[Chen, Wang and Lee, 2000]",
350
+ "ref_id": "BIBREF3"
351
+ }
352
+ ],
353
+ "ref_spans": [],
354
+ "eq_spans": [],
355
+ "section": "Modeling Syllable Sequence Constraints",
356
+ "sec_num": "3.1"
357
+ },
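A minimal sketch of the indexing units discussed in this subsection, written by us for illustration only: syllable unigrams, overlapping bigrams, and the "skipped" syllable pairs of Chen, Wang and Lee (2000). The term-joining conventions are arbitrary.

```python
# Generate index terms from a recognized syllable sequence.
def syllable_terms(syllables, use_bigrams=True, use_skipped_pairs=True):
    terms = list(syllables)                                         # unigrams
    if use_bigrams:
        terms += [a + "_" + b for a, b in zip(syllables, syllables[1:])]
    if use_skipped_pairs:                                           # pairs one syllable apart
        terms += [a + ".." + b for a, b in zip(syllables, syllables[2:])]
    return terms

if __name__ == "__main__":
    transcript = "zhe4 yi1 wan3 hui4 ru2 chang2 ju3 xing2".split()
    print(syllable_terms(transcript))
```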
358
+ {
359
+ "text": "When modeling sequential syllable constraints, lexical constraints on recognized words may be helpful. We thus plan to explore the potential for integrated sequential modelling of both words and syllables [Meng et al., 2000] .",
360
+ "cite_spans": [
361
+ {
362
+ "start": 205,
363
+ "end": 224,
364
+ "text": "[Meng et al., 2000]",
365
+ "ref_id": null
366
+ }
367
+ ],
368
+ "ref_spans": [],
369
+ "eq_spans": [],
370
+ "section": "Modeling Syllable Sequence Constraints",
371
+ "sec_num": "3.1"
372
+ },
373
+ {
374
+ "text": "Figures 1 and 2 illustrate two translingual retrieval strategies. In query translation, English text queries are transformed into Mandarin and then used to retrieve Mandarin documents. For document translation, Mandarin documents are translated into English before they are indexed and then matched with English queries.",
375
+ "cite_spans": [],
376
+ "ref_spans": [],
377
+ "eq_spans": [],
378
+ "section": "Multiseale Embedded Translation",
379
+ "sec_num": "4."
380
+ },
381
+ {
382
+ "text": "McCarley has reported improved effectiveness from techniques that couple the two strategies [McCarley, 1999] , but time constraints may limit us to exploring only the query translation strategy during the six-week Workshop.",
383
+ "cite_spans": [
384
+ {
385
+ "start": 92,
386
+ "end": 108,
387
+ "text": "[McCarley, 1999]",
388
+ "ref_id": "BIBREF9"
389
+ }
390
+ ],
391
+ "ref_spans": [],
392
+ "eq_spans": [],
393
+ "section": "Multiseale Embedded Translation",
394
+ "sec_num": "4."
395
+ },
396
+ {
397
+ "text": "While we make use of sub-word transcription to smooth out-of-vocabulary (OOV) problems in speech recognition as described above, and to alleviate the OOV problem for translation as we discuss in the next section, accurate translation generally relies on the additional information available at the word and phrase levels.",
398
+ "cite_spans": [],
399
+ "ref_spans": [],
400
+ "eq_spans": [],
401
+ "section": "4,1 Word Translation",
402
+ "sec_num": null
403
+ },
404
+ {
405
+ "text": "Since the \"bag of words\" information retrieval techniques do not incorporate any meaningful degree of language understanding to assess similarity between queries and documents, a word-for-word (or, more generally, term-for-term) embedded translation approach can achieve a useful level of effectiveness for many translingual retrieval applications [Oard and Diekema, 1998 ].",
406
+ "cite_spans": [
407
+ {
408
+ "start": 348,
409
+ "end": 371,
410
+ "text": "[Oard and Diekema, 1998",
411
+ "ref_id": "BIBREF13"
412
+ }
413
+ ],
414
+ "ref_spans": [],
415
+ "eq_spans": [],
416
+ "section": "4,1 Word Translation",
417
+ "sec_num": null
418
+ },
419
+ {
420
+ "text": "We have developed such a technique for the TDT-3 topic tracking evaluation [Levow and Oard, 2000] . For that work we extracted an enriched bilingual Mandarin-English term list by combining two term lists: (i) A list assembled by the Linguistic Data Consortium from freely available on-line resources; and (ii) entries from the CETA file (sometimes referred to as \"Optilex\").",
421
+ "cite_spans": [
422
+ {
423
+ "start": 75,
424
+ "end": 97,
425
+ "text": "[Levow and Oard, 2000]",
426
+ "ref_id": "BIBREF8"
427
+ }
428
+ ],
429
+ "ref_spans": [],
430
+ "eq_spans": [],
431
+ "section": "4,1 Word Translation",
432
+ "sec_num": null
433
+ },
434
+ {
435
+ "text": "This is a Chinese to English translation resource that was manually compiled by a team of linguists from more than 250 text sources, including special and general-purpose print dictionaries, and other text sources such as newspapers. The CETA file contains over 250,000 entries, but for our lexical work we extracted a subset of those entries drawn from contemporary general-purpose sources. We also excluded definitions such as \"particle indicating a yes/no question.\" Our resulting Chinese to English merged bilingual term list contains translations for almost 200,000 Chinese terms, with an average of almost two translation alternatives per term. We have also used the same resources to construct an initial English to Chinese bilingual term list that we plan to refine before the Workshop. Three significant challenges faced by term-to-term translation systems are term selection in the source language, the source language coverage of the bilingual term list, and translation selection in the target language when more than one alternative translation is known. Word segmentation is a natural by-product of large vocabulary Mandarin speech recognition, and white space provides word boundaries for the English queries. We thus plan to choose words as our basic term set, perhaps augmenting this with the multiword expressions found in the bilingual term list.",
436
+ "cite_spans": [],
437
+ "ref_spans": [],
438
+ "eq_spans": [],
439
+ "section": "4,1 Word Translation",
440
+ "sec_num": null
441
+ },
442
+ {
443
+ "text": "Achieving adequate source language coverage is challenging in news retrieval applications of the type modelled by TDT, because proper names and technical terms that may not be present in general-purpose lexical resources often provide important retrieval cues. Parallel (translation equivalent) corpora have proven to be a useful source of translation equivalent terms, but obtaining appropriate domain-specific parallel corpora in electronic form may not be practical in some applications. We therefore plan to investigate the use of comparable corpora to learn translation equivalents, based on techniques in [Fung, 1998 ]. Subword translation, described below, provides a complementary way of handling terms for which translation equivalents cannot be reliably extracted from the available comparable corpora.",
444
+ "cite_spans": [
445
+ {
446
+ "start": 611,
447
+ "end": 622,
448
+ "text": "[Fung, 1998",
449
+ "ref_id": "BIBREF6"
450
+ }
451
+ ],
452
+ "ref_spans": [],
453
+ "eq_spans": [],
454
+ "section": "4,1 Word Translation",
455
+ "sec_num": null
456
+ },
457
+ {
458
+ "text": "One way of dealing with multiple translations is to weight the alternative translations using a statistical translation model, trained on parallel or comparable corpora, to estimate the translation probability conditioned on the source language term.",
459
+ "cite_spans": [],
460
+ "ref_spans": [],
461
+ "eq_spans": [],
462
+ "section": "4,1 Word Translation",
463
+ "sec_num": null
464
+ },
465
+ {
466
+ "text": "When such resources are not sufficiently informative, it is generally possible to back off to an unconditioned preference statistic based on usage frequency of each possible translation in a representative monolingual corpus in the target language. In retrospective retrieval applications the collection being searched can be used for this purpose. We have applied simple versions of this approach with good results [Levow and Oard, 2000] .",
467
+ "cite_spans": [
468
+ {
469
+ "start": 416,
470
+ "end": 438,
471
+ "text": "[Levow and Oard, 2000]",
472
+ "ref_id": "BIBREF8"
473
+ }
474
+ ],
475
+ "ref_spans": [],
476
+ "eq_spans": [],
477
+ "section": "4,1 Word Translation",
478
+ "sec_num": null
479
+ },
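A minimal sketch, under our own assumptions, of the weighting scheme just described: use probabilities from a (hypothetical) statistical translation model when available, and back off to relative frequency in a target-language corpus otherwise.

```python
from collections import Counter

def weight_translations(source_term, candidates, trans_prob, target_corpus_counts):
    """Return {candidate: weight}, summing to 1, for one source-language term."""
    weights = {c: trans_prob.get((source_term, c), 0.0) for c in candidates}
    if sum(weights.values()) == 0.0:
        # Back off: unconditioned preference from target-language usage frequency (add-one).
        weights = {c: target_corpus_counts.get(c, 0) + 1.0 for c in candidates}
    total = sum(weights.values())
    return {c: w / total for c, w in weights.items()}

if __name__ == "__main__":
    corpus_counts = Counter({"飛彈": 120, "導彈": 45})   # made-up counts
    print(weight_translations("missile", ["飛彈", "導彈"], {}, corpus_counts))
```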
480
+ {
481
+ "text": "We have recently observed that a simpler technique introduced by [Pirkola, 1998 ] can produce excellent results. The key idea is to use the structure of the lexicon, in which several target language terms can represent a single source language term, to induce structure in the translated query that the retrieval system can automatically exploit. In essence, the translated query becomes a bag of bags of terms, where each smaller bag corresponds to the set of possible translations for one source-language term. We plan to implement this structured query translation approach using the Inquery [Callan, 1992] \"synonym\" operator in the same manner as [Pirkola, 1998] , and to explore the potential to extend the technique to accommodate alternative recognition hypotheses and subword units as well.",
482
+ "cite_spans": [
483
+ {
484
+ "start": 65,
485
+ "end": 79,
486
+ "text": "[Pirkola, 1998",
487
+ "ref_id": "BIBREF13"
488
+ },
489
+ {
490
+ "start": 595,
491
+ "end": 609,
492
+ "text": "[Callan, 1992]",
493
+ "ref_id": "BIBREF1"
494
+ },
495
+ {
496
+ "start": 651,
497
+ "end": 666,
498
+ "text": "[Pirkola, 1998]",
499
+ "ref_id": "BIBREF13"
500
+ }
501
+ ],
502
+ "ref_spans": [],
503
+ "eq_spans": [],
504
+ "section": "4,1 Word Translation",
505
+ "sec_num": null
506
+ },
507
+ {
508
+ "text": "Since Mandarin spoken documents can be indexed with both words and subwords, the translation (or \"phonetic transliteration\") of subword units is of particular interest. We plan to make use of cross-language phonetic mappings derived from English and Mandarin pronunciation rules for this purpose. This should be especially useful for handling named entities in the queries, e.g. names of people, places and organizations, etc., which are generally important for retrieval, but may not be easily translated. Chinese translations of English proper nouns may involve semantic as well as phonetic mappings. For example, \"Northern Ireland\" is translated as 北愛爾蘭, where the first character 北 means 'north', and the remaining characters 愛爾蘭 are pronounced as /ai4-er3-lan2/. Hence the translation is both semantic and phonetic. When Chinese translations strive to attain phonetic similarity, the mapping may be inconsistent. For example, consider the translation of \"Kosovo\": sampling Chinese newspapers in China, Taiwan and Hong Kong produces five different character renderings, pronounced /ke1-suo3-wo4/, /ke1-suo3-fo2/, /ke1-suo3-fu1/, /ke1-suo3-fu2/, and /ke1-suo3-fo2/.",
509
+ "cite_spans": [],
510
+ "ref_spans": [],
511
+ "eq_spans": [],
512
+ "section": "Subword Translation",
513
+ "sec_num": "4.2"
514
+ },
515
+ {
516
+ "text": "As can be seen, there is no systematic mapping to the Chinese character sequences, but the translated Chinese pronunciations bear some resemblance to the English pronunciation (/k ow s ax v ow/). In order to support retrieval under these circumstances, the approach should involve approximate matches between the English pronunciation and the Chinese pronunciation. The matching algorithm should also accommodate phonological variations. Pronunciation dictionaries, or pronunciation generation tools for both English words and Chinese words / characters will be useful for the matching algorithm. We can probably leverage off of ideas in the development of universal speech recognizers [Cohen et al., 1997] .",
517
+ "cite_spans": [
518
+ {
519
+ "start": 686,
520
+ "end": 706,
521
+ "text": "[Cohen et al., 1997]",
522
+ "ref_id": "BIBREF5"
523
+ }
524
+ ],
525
+ "ref_spans": [],
526
+ "eq_spans": [],
527
+ "section": "Subword Translation",
528
+ "sec_num": "4.2"
529
+ },
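One simple way to realize the approximate matching suggested above is edit distance over phone sequences. The sketch below is ours; the phone inventories and the crude romanization in the example are assumptions, not the project's mapping tables.

```python
def edit_distance(a, b):
    """Standard Levenshtein distance over two sequences."""
    d = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(len(a) + 1):
        d[i][0] = i
    for j in range(len(b) + 1):
        d[0][j] = j
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
    return d[len(a)][len(b)]

def phonetic_match_score(english_phones, mandarin_phones):
    """Normalized similarity in [0, 1]; 1.0 means identical phone strings."""
    dist = edit_distance(english_phones, mandarin_phones)
    return 1.0 - dist / max(len(english_phones), len(mandarin_phones))

if __name__ == "__main__":
    kosovo = "k ow s ax v ow".split()
    candidate = "k e s uo f u".split()   # crude romanization of one Chinese rendering
    print(round(phonetic_match_score(kosovo, candidate), 2))
```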
530
+ {
531
+ "text": "We intend to use both words and subwords for retrieval. Loose coupling would involve separate retrieval runs using words and subwords, producing two ranked lists, followed by list merging using techniques such as those explored by [Voorhees, 1995] .",
532
+ "cite_spans": [
533
+ {
534
+ "start": 231,
535
+ "end": 247,
536
+ "text": "[Voorhees, 1995]",
537
+ "ref_id": "BIBREF14"
538
+ }
539
+ ],
540
+ "ref_spans": [],
541
+ "eq_spans": [],
542
+ "section": "Coupling Words and Subwords",
543
+ "sec_num": "5.1"
544
+ },
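The "loose coupling" option can be sketched as a simple weighted score fusion of the two ranked lists; this is our own minimal illustration, and the mixing weights are placeholders.

```python
def merge_ranked_lists(word_run, syllable_run, w_word=0.5, w_syll=0.5):
    """Each run maps document id -> retrieval score; return a merged ranking."""
    merged = {}
    for doc, score in word_run.items():
        merged[doc] = merged.get(doc, 0.0) + w_word * score
    for doc, score in syllable_run.items():
        merged[doc] = merged.get(doc, 0.0) + w_syll * score
    return sorted(merged.items(), key=lambda item: item[1], reverse=True)

if __name__ == "__main__":
    word_scores = {"doc1": 0.9, "doc2": 0.4}
    syllable_scores = {"doc2": 0.8, "doc3": 0.5}
    print(merge_ranked_lists(word_scores, syllable_scores))
```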
545
+ {
546
+ "text": "Tight coupling, by contrast, would require creation of a unified index containing both word and subword units, resulting in a single ranked list. We hope to explore both techniques during the Workshop.",
547
+ "cite_spans": [],
548
+ "ref_spans": [],
549
+ "eq_spans": [],
550
+ "section": "Coupling Words and Subwords",
551
+ "sec_num": "5.1"
552
+ },
553
+ {
554
+ "text": "It should be noted that speech recognition exacerbates uncertainty when indexing audio, and that translation or transliteration exacerbates uncertainty when translating queries and/or documents. To achieve robustness for retrieval, we have tried three techniques that we have found useful: (i) Syllable lattices were used in and [Chien et al., 2000] for monolingual Chinese retrieval experiments. The lattices were pruned to constrain the search space, but were able to achieve robust retrieval based on imperfect recognized transcripts. (ii) Query expansion, in which syllable transcription were expanded to include possibly confusable syllable sequences based on a syllable confusion matrix derived from recognition errors, was used in [Meng et al., 1999] . (iii) We have expanded the document representation using terms extracted from similar documents in a comparable collection [Levow and Oard, 2000] , and similar techniques are known to work well in the case of query translation (Ballesteros and Croft, 1997) . We hope to add to this set: of techniques by exploring the potential for query expansion based on cross-language phonetic mapping.",
555
+ "cite_spans": [
556
+ {
557
+ "start": 329,
558
+ "end": 349,
559
+ "text": "[Chien et al., 2000]",
560
+ "ref_id": "BIBREF4"
561
+ },
562
+ {
563
+ "start": 738,
564
+ "end": 757,
565
+ "text": "[Meng et al., 1999]",
566
+ "ref_id": "BIBREF10"
567
+ },
568
+ {
569
+ "start": 883,
570
+ "end": 905,
571
+ "text": "[Levow and Oard, 2000]",
572
+ "ref_id": "BIBREF8"
573
+ },
574
+ {
575
+ "start": 987,
576
+ "end": 1016,
577
+ "text": "(Ballesteros and Croft, 1997)",
578
+ "ref_id": "BIBREF0"
579
+ }
580
+ ],
581
+ "ref_spans": [],
582
+ "eq_spans": [],
583
+ "section": "Imperfect Indexing and Translation",
584
+ "sec_num": "5.2"
585
+ },
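Technique (ii), query expansion from a syllable confusion matrix, can be sketched as below. The confusion probabilities are made up for illustration; in practice they would be estimated from recognition errors as described above.

```python
def expand_query(syllables, confusion, threshold=0.1):
    """Expand each query syllable with syllables it is frequently confused with."""
    expanded = []
    for s in syllables:
        variants = [s] + [t for t, p in confusion.get(s, {}).items() if p >= threshold]
        expanded.append(variants)
    return expanded   # one bag of alternative syllables per query position

if __name__ == "__main__":
    confusion = {"fu2": {"fo2": 0.3, "hu2": 0.05}}
    print(expand_query(["ke1", "suo3", "fu2"], confusion))
    # [['ke1'], ['suo3'], ['fu2', 'fo2']]
```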
586
+ {
587
+ "text": "We plan to use the TDT-2 collection for development testing and the TDT-3 collection for evaluation. Both collections provide documents from two English newswire sources, six English broadcast news audio sources, two Mandarin Chinese newswire sources, and one Mandarin broadcast news source (Voice of America).",
588
+ "cite_spans": [],
589
+ "ref_spans": [],
590
+ "eq_spans": [],
591
+ "section": "Using the TDT-3 Collection",
592
+ "sec_num": "6."
593
+ },
594
+ {
595
+ "text": "Manually established story boundaries are available for all audio collections, and we plan to exploit that information to simplify our experiment design. The TDT-2 collection includes complete relevance assessments for 20 topics, and the TDT-3 collection provides the same for 60 additional topics, 56 of which have at least one relevant audio story. For each topic, at least four English stories and four Chinese stories are known.",
596
+ "cite_spans": [],
597
+ "ref_spans": [],
598
+ "eq_spans": [],
599
+ "section": "Using the TDT-3 Collection",
600
+ "sec_num": "6."
601
+ },
602
+ {
603
+ "text": "We plan to automatically derive text queries based on one or more English stories that are presented as exemplars, and to use those queries to search the Mandarin audio collection. Manually constructed queries will provide a contrastive condition. Unlike the TDT \"topic tracking\" task in which stories must be declared relevant or not relevant in the order of their arrival, we plan to perform retrospective retrieval experiments in which all documents are known when the query is issued. By relaxing the temporal ordering of the TDT topic tracking task, we can meaningfully search for Mandarin Chinese stories that may have arrived before the exemplar story or stories. We thus plan to report ranked retrieval measures of effectiveness such as average precision in addition to the detection statistics (miss and false alarm) typically reported in TDT.",
604
+ "cite_spans": [],
605
+ "ref_spans": [],
606
+ "eq_spans": [],
607
+ "section": "Using the TDT-3 Collection",
608
+ "sec_num": "6."
609
+ },
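For reference, the ranked retrieval measure mentioned here can be computed as follows; this is a standard (non-interpolated) average precision sketch written by us, not project code.

```python
def average_precision(ranked_ids, relevant):
    """Average precision for one query: mean precision at each relevant document's rank."""
    hits, precisions = 0, []
    for rank, doc in enumerate(ranked_ids, start=1):
        if doc in relevant:
            hits += 1
            precisions.append(hits / rank)
    return sum(precisions) / len(relevant) if relevant else 0.0

if __name__ == "__main__":
    print(average_precision(["d3", "d1", "d7", "d2"], {"d1", "d2"}))  # 0.5
```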
610
+ {
611
+ "text": "This paper presents our current ideas and evolving plan for the MEI project, to take place at the Johns Hopkins University Summer Workshop 2000. Translingual speech retrieval is a long-term research direction, and our team looks forward to jointly taking an initial step to tackle the problem. The authors welcome all comments and suggestions, as we strive to better define the problem in preparation for the six-week Workshop.",
612
+ "cite_spans": [],
613
+ "ref_spans": [],
614
+ "eq_spans": [],
615
+ "section": "Summary",
616
+ "sec_num": "7."
617
+ },
618
+ {
619
+ "text": "4 http://morph.ldc.upenn.edu/Projects/Chinese/intro.html 5 The corresponding linguistic characteristics of Cantonese are very similar. 6 These are Mandarin pinyin; the number encodes the tone of the syllable. 7 Example drawn from [Leung, 1999].",
620
+ "cite_spans": [],
621
+ "ref_spans": [],
622
+ "eq_spans": [],
623
+ "section": "",
624
+ "sec_num": null
625
+ },
626
+ {
627
+ "text": "The lexicon size of a typical large-vocabulary continuous speech recognizer can range from 10,000 to 100,000 word forms.",
628
+ "cite_spans": [],
629
+ "ref_spans": [],
630
+ "eq_spans": [],
631
+ "section": "",
632
+ "sec_num": null
633
+ }
634
+ ],
635
+ "back_matter": [
636
+ {
637
+ "text": "The authors wish to thank Patrick Schone, Erika Grams, Fred Jelinek, Charles Wayne, Kenney Ng, John Garofolo, and ",
638
+ "cite_spans": [],
639
+ "ref_spans": [],
640
+ "eq_spans": [],
641
+ "section": "Acknowledgments",
642
+ "sec_num": null
643
+ },
644
+ {
645
+ "text": " Figure 2 . Document translation strategy.",
646
+ "cite_spans": [],
647
+ "ref_spans": [
648
+ {
649
+ "start": 1,
650
+ "end": 9,
651
+ "text": "Figure 2",
652
+ "ref_id": null
653
+ }
654
+ ],
655
+ "eq_spans": [],
656
+ "section": "annex",
657
+ "sec_num": null
658
+ }
659
+ ],
660
+ "bib_entries": {
661
+ "BIBREF0": {
662
+ "ref_id": "b0",
663
+ "title": "Phrasal Translation and Query Expansion Techniques for Cross-Language Information Retrieval",
664
+ "authors": [
665
+ {
666
+ "first": "W",
667
+ "middle": [
668
+ "B"
669
+ ],
670
+ "last": "Ballesteros",
671
+ "suffix": ""
672
+ },
673
+ {
674
+ "first": "",
675
+ "middle": [],
676
+ "last": "Croft",
677
+ "suffix": ""
678
+ }
679
+ ],
680
+ "year": 1997,
681
+ "venue": "Proceedings of ACM SIGIR",
682
+ "volume": "",
683
+ "issue": "",
684
+ "pages": "",
685
+ "other_ids": {},
686
+ "num": null,
687
+ "urls": [],
688
+ "raw_text": "Ballesteros and W. B. Croft, \"Phrasal Translation and Query Expansion Techniques for Cross-Language Information Retrieval,\" Proceedings of ACM SIGIR, 1997.",
689
+ "links": null
690
+ },
691
+ "BIBREF1": {
692
+ "ref_id": "b1",
693
+ "title": "The INQUERY Retrieval System",
694
+ "authors": [
695
+ {
696
+ "first": "J",
697
+ "middle": [
698
+ "P"
699
+ ],
700
+ "last": "Callan",
701
+ "suffix": ""
702
+ },
703
+ {
704
+ "first": "W",
705
+ "middle": [
706
+ "B"
707
+ ],
708
+ "last": "Croft",
709
+ "suffix": ""
710
+ },
711
+ {
712
+ "first": "S",
713
+ "middle": [
714
+ "M"
715
+ ],
716
+ "last": "Harding",
717
+ "suffix": ""
718
+ }
719
+ ],
720
+ "year": 1992,
721
+ "venue": "Proceedings of the 3rd International Conference on Database and Expert Systems Applications",
722
+ "volume": "",
723
+ "issue": "",
724
+ "pages": "",
725
+ "other_ids": {},
726
+ "num": null,
727
+ "urls": [],
728
+ "raw_text": "Callan, J. P., W. B. Croft, and S. M. Harding, \"The INQUERY Retrieval System,\" Proceedings of the 3rd International Conference on Database and Expert Systems Applications, 1992.",
729
+ "links": null
730
+ },
731
+ "BIBREF2": {
732
+ "ref_id": "b2",
733
+ "title": "Translingual Information Retrieval: A Comparative Evaluation",
734
+ "authors": [
735
+ {
736
+ "first": "J",
737
+ "middle": [],
738
+ "last": "Carbonnell",
739
+ "suffix": ""
740
+ },
741
+ {
742
+ "first": "Y",
743
+ "middle": [],
744
+ "last": "Yang",
745
+ "suffix": ""
746
+ },
747
+ {
748
+ "first": "R",
749
+ "middle": [],
750
+ "last": "Frederking",
751
+ "suffix": ""
752
+ },
753
+ {
754
+ "first": "R",
755
+ "middle": [
756
+ "D"
757
+ ],
758
+ "last": "Brown",
759
+ "suffix": ""
760
+ }
761
+ ],
762
+ "year": 1997,
763
+ "venue": "Proceedings of IJCAI",
764
+ "volume": "",
765
+ "issue": "",
766
+ "pages": "",
767
+ "other_ids": {},
768
+ "num": null,
769
+ "urls": [],
770
+ "raw_text": "Carbonnell, J., Y. Yang, R. Frederking and R.D. Brown, \"Translingual Information Retrieval: A Comparative Evaluation,\" Proceedings of IJCAI, 1997.",
771
+ "links": null
772
+ },
773
+ "BIBREF3": {
774
+ "ref_id": "b3",
775
+ "title": "Retrieval of Broadcast News Speech in Mandarin Chinese Collected in Taiwan using Syllable-Level Statistical Characteristics",
776
+ "authors": [
777
+ {
778
+ "first": "B",
779
+ "middle": [],
780
+ "last": "Chen",
781
+ "suffix": ""
782
+ },
783
+ {
784
+ "first": "H",
785
+ "middle": [
786
+ "M"
787
+ ],
788
+ "last": "Wang",
789
+ "suffix": ""
790
+ },
791
+ {
792
+ "first": "L",
793
+ "middle": [
794
+ "S"
795
+ ],
796
+ "last": "Lee",
797
+ "suffix": ""
798
+ }
799
+ ],
800
+ "year": 2000,
801
+ "venue": "Proceedings of ICASSP",
802
+ "volume": "",
803
+ "issue": "",
804
+ "pages": "",
805
+ "other_ids": {},
806
+ "num": null,
807
+ "urls": [],
808
+ "raw_text": "Chen, B., H.M. Wang, and L.S. Lee, \"Retrieval of Broadcast News Speech in Mandarin Chinese Collected in Taiwan using Syllable-Level Statistical Characteristics,\" Proceedings of ICASSP, 2000.",
809
+ "links": null
810
+ },
811
+ "BIBREF4": {
812
+ "ref_id": "b4",
813
+ "title": "A Spoken-Access Approach for Chinese Text and Speech Information Retrieval",
814
+ "authors": [
815
+ {
816
+ "first": "L",
817
+ "middle": [
818
+ "F"
819
+ ],
820
+ "last": "Chien",
821
+ "suffix": ""
822
+ },
823
+ {
824
+ "first": "H",
825
+ "middle": [
826
+ "M"
827
+ ],
828
+ "last": "Wang",
829
+ "suffix": ""
830
+ },
831
+ {
832
+ "first": "B",
833
+ "middle": [
834
+ "R"
835
+ ],
836
+ "last": "Bai",
837
+ "suffix": ""
838
+ },
839
+ {
840
+ "first": "S",
841
+ "middle": [
842
+ "C"
843
+ ],
844
+ "last": "Lin",
845
+ "suffix": ""
846
+ }
847
+ ],
848
+ "year": 1999,
849
+ "venue": "Journal of the American Society for Information Science",
850
+ "volume": "51",
851
+ "issue": "4",
852
+ "pages": "313--323",
853
+ "other_ids": {},
854
+ "num": null,
855
+ "urls": [],
856
+ "raw_text": "Chien, L. F., H. M. Wang, B. R. Bai, and S. C. Lin, \"A Spoken-Access Approach for Chinese Text and Speech Information Retrieval,\" Journal of the American Society for Information Science, 51 (4), pp. 313-323, 2000. Choy, C. Y., \"Acoustic Units for Mandarin Chinese Speech Recognition,\" M.Phil. Thesis, The Chinese University of Hong Kong, Hong Kong SAR, China, 1999.",
857
+ "links": null
858
+ },
859
+ "BIBREF5": {
860
+ "ref_id": "b5",
861
+ "title": "Towards a Universal Speech Recognizer for Multiple Languages",
862
+ "authors": [
863
+ {
864
+ "first": "P",
865
+ "middle": [],
866
+ "last": "Cohen",
867
+ "suffix": ""
868
+ },
869
+ {
870
+ "first": "S",
871
+ "middle": [],
872
+ "last": "Dharanipragada",
873
+ "suffix": ""
874
+ },
875
+ {
876
+ "first": "J",
877
+ "middle": [],
878
+ "last": "Gros",
879
+ "suffix": ""
880
+ },
881
+ {
882
+ "first": "M",
883
+ "middle": [],
884
+ "last": "Mondowski",
885
+ "suffix": ""
886
+ },
887
+ {
888
+ "first": "C",
889
+ "middle": [],
890
+ "last": "Neti",
891
+ "suffix": ""
892
+ },
893
+ {
894
+ "first": "S",
895
+ "middle": [],
896
+ "last": "Roukos",
897
+ "suffix": ""
898
+ },
899
+ {
900
+ "first": "T",
901
+ "middle": [],
902
+ "last": "Ward",
903
+ "suffix": ""
904
+ }
905
+ ],
906
+ "year": 1997,
907
+ "venue": "Proceedings of ASRU",
908
+ "volume": "",
909
+ "issue": "",
910
+ "pages": "",
911
+ "other_ids": {},
912
+ "num": null,
913
+ "urls": [],
914
+ "raw_text": "Cohen, P., S. Dharanipragada, J. Gros, M. Mondowski, C. Neti, S. Roukos and T. Ward, \"Towards a Universal Speech Recognizer for Multiple Languages,\" Proceedings of ASRU, 1997.",
915
+ "links": null
916
+ },
917
+ "BIBREF6": {
918
+ "ref_id": "b6",
919
+ "title": "A Statistical View on Bilingual Lexicon Extraction: From parallel corpora to non-parallel corpora",
920
+ "authors": [
921
+ {
922
+ "first": "P",
923
+ "middle": [],
924
+ "last": "Fung",
925
+ "suffix": ""
926
+ }
927
+ ],
928
+ "year": 1998,
929
+ "venue": "Proceedings of AMTA",
930
+ "volume": "",
931
+ "issue": "",
932
+ "pages": "",
933
+ "other_ids": {},
934
+ "num": null,
935
+ "urls": [],
936
+ "raw_text": "Fung, P., \"A Statistical View on Bilingual Lexicon Extraction: From parallel corpora to non-parallel corpora,\" Proceedings of AMTA, 1998.",
937
+ "links": null
938
+ },
939
+ "BIBREF7": {
940
+ "ref_id": "b7",
941
+ "title": "Fully Automatic Cross-Language Document Retrieval Using Latent Semantic Indexing",
942
+ "authors": [
943
+ {
944
+ "first": "J",
945
+ "middle": [
946
+ "S"
947
+ ],
948
+ "last": "Garofolo",
949
+ "suffix": ""
950
+ },
951
+ {
952
+ "first": "G",
953
+ "middle": [
954
+ "P"
955
+ ],
956
+ "last": "Auzanne",
957
+ "suffix": ""
958
+ },
959
+ {
960
+ "first": "E",
961
+ "middle": [
962
+ "M"
963
+ ],
964
+ "last": "Voorhees",
965
+ "suffix": ""
966
+ },
967
+ {
968
+ "first": "K",
969
+ "middle": [],
970
+ "last": "Knight",
971
+ "suffix": ""
972
+ },
973
+ {
974
+ "first": "J",
975
+ "middle": [],
976
+ "last": "Landauer",
977
+ "suffix": ""
978
+ },
979
+ {
980
+ "first": "T",
981
+ "middle": [
982
+ "K"
983
+ ],
984
+ "last": "",
985
+ "suffix": ""
986
+ },
987
+ {
988
+ "first": "M",
989
+ "middle": [
990
+ "L"
991
+ ],
992
+ "last": "Littman",
993
+ "suffix": ""
994
+ },
995
+ {
996
+ "first": "R",
997
+ "middle": [],
998
+ "last": "Leung",
999
+ "suffix": ""
1000
+ }
1001
+ ],
1002
+ "year": 1990,
1003
+ "venue": "Proceedings of the Recherche d'informations Assistre par Ordinateur: Content-Based Multimedia Information Access Conference",
1004
+ "volume": "",
1005
+ "issue": "",
1006
+ "pages": "",
1007
+ "other_ids": {},
1008
+ "num": null,
1009
+ "urls": [],
1010
+ "raw_text": "Garofolo, J.S., Auzanne, G.P., Voorhees, E.M., \"The TREC Spoken Document Retrieval Track: A Success Story,\" Proceedings of the Recherche d'informations Assistre par Ordinateur: Content- Based Multimedia Information Access Conference, April 12-14, 2000,to be published. Knight, K. and J. Graehl, \"Machine Transliteration,\" Proceedings of ACL, 1997. Landauer, T. K. and M.L. Littman, \"Fully Automatic Cross-Language Document Retrieval Using Latent Semantic Indexing,\" Proceedings of the 6 th Annual Conference of the UW Centre for the New Oxford English Dictionary, 1990. Leung, R., \"Lexical Access for Large Vocabulary Chinese Speech Recognition,\" M. Phil. Thesis, The Chinese University of Hong Kong, Hong Kong SAR, China 1999.",
1011
+ "links": null
1012
+ },
1013
+ "BIBREF8": {
1014
+ "ref_id": "b8",
1015
+ "title": "Translingual Topic Tracking with PRISE",
1016
+ "authors": [
1017
+ {
1018
+ "first": "G",
1019
+ "middle": [],
1020
+ "last": "Levow",
1021
+ "suffix": ""
1022
+ },
1023
+ {
1024
+ "first": "D",
1025
+ "middle": [
1026
+ "W"
1027
+ ],
1028
+ "last": "Oard",
1029
+ "suffix": ""
1030
+ }
1031
+ ],
1032
+ "year": 2000,
1033
+ "venue": "Working notes of the DARPA TDT-3 Workshop",
1034
+ "volume": "",
1035
+ "issue": "",
1036
+ "pages": "",
1037
+ "other_ids": {},
1038
+ "num": null,
1039
+ "urls": [],
1040
+ "raw_text": "Levow, G. and D.W. Oard, \"Translingual Topic Tracking with PRISE,\" Working notes of the DARPA TDT-3 Workshop, 2000.",
1041
+ "links": null
1042
+ },
1043
+ "BIBREF9": {
1044
+ "ref_id": "b9",
1045
+ "title": "Speech Recognition on Mandarin Call Home: A Large-Vocabulary, Conversational, and Telephone Speech Corpus",
1046
+ "authors": [
1047
+ {
1048
+ "first": "C",
1049
+ "middle": [
1050
+ "H"
1051
+ ],
1052
+ "last": "Lin",
1053
+ "suffix": ""
1054
+ },
1055
+ {
1056
+ "first": "L",
1057
+ "middle": [
1058
+ "S"
1059
+ ],
1060
+ "last": "Lee",
1061
+ "suffix": ""
1062
+ },
1063
+ {
1064
+ "first": "P",
1065
+ "middle": [
1066
+ "Y"
1067
+ ],
1068
+ "last": "Ting ; Liu~",
1069
+ "suffix": ""
1070
+ },
1071
+ {
1072
+ "first": "F",
1073
+ "middle": [
1074
+ "H"
1075
+ ],
1076
+ "last": "",
1077
+ "suffix": ""
1078
+ },
1079
+ {
1080
+ "first": "M",
1081
+ "middle": [],
1082
+ "last": "Picheny",
1083
+ "suffix": ""
1084
+ },
1085
+ {
1086
+ "first": "P",
1087
+ "middle": [],
1088
+ "last": "Srinivasa",
1089
+ "suffix": ""
1090
+ },
1091
+ {
1092
+ "first": "M",
1093
+ "middle": [],
1094
+ "last": "Monkowski",
1095
+ "suffix": ""
1096
+ },
1097
+ {
1098
+ "first": "J",
1099
+ "middle": [],
1100
+ "last": "Chen ; Mccarley",
1101
+ "suffix": ""
1102
+ },
1103
+ {
1104
+ "first": "S",
1105
+ "middle": [],
1106
+ "last": "",
1107
+ "suffix": ""
1108
+ }
1109
+ ],
1110
+ "year": 1993,
1111
+ "venue": "Proceedings of ICASSP",
1112
+ "volume": "",
1113
+ "issue": "",
1114
+ "pages": "",
1115
+ "other_ids": {},
1116
+ "num": null,
1117
+ "urls": [],
1118
+ "raw_text": "Lin, C. H., L. S. Lee, and P. Y. Ting, \"A New Framework for Recognition of Mandarin Syllables with Tones using Sub-Syllabic Units,\" Proceedings of ICASSP, 1993. Liu~ F. H., M. Picheny, P. Srinivasa, M. Monkowski and J. Chen, \"Speech Recognition on Mandarin Call Home: A Large-Vocabulary, Conversational, and Telephone Speech Corpus,\" Proceedings of ICASSP, 1996. McCarley, S., \"Should we Translate the Documents or the Queries in Cross-Language Information Retrieval,\" Proceedings of ACL, 1999.",
1119
+ "links": null
1120
+ },
1121
+ "BIBREF10": {
1122
+ "ref_id": "b10",
1123
+ "title": "A Study on the Use of Syllables for Chinese Spoken Document Retrieval",
1124
+ "authors": [
1125
+ {
1126
+ "first": "H",
1127
+ "middle": [],
1128
+ "last": "Meng",
1129
+ "suffix": ""
1130
+ },
1131
+ {
1132
+ "first": "C",
1133
+ "middle": [
1134
+ "W"
1135
+ ],
1136
+ "last": "Ip ; Meng",
1137
+ "suffix": ""
1138
+ },
1139
+ {
1140
+ "first": "H",
1141
+ "middle": [],
1142
+ "last": "",
1143
+ "suffix": ""
1144
+ },
1145
+ {
1146
+ "first": "W",
1147
+ "middle": [
1148
+ "K"
1149
+ ],
1150
+ "last": "Lo",
1151
+ "suffix": ""
1152
+ },
1153
+ {
1154
+ "first": "Y",
1155
+ "middle": [
1156
+ "C"
1157
+ ],
1158
+ "last": "Li",
1159
+ "suffix": ""
1160
+ },
1161
+ {
1162
+ "first": "P",
1163
+ "middle": [
1164
+ "C"
1165
+ ],
1166
+ "last": "Ching",
1167
+ "suffix": ""
1168
+ }
1169
+ ],
1170
+ "year": 1999,
1171
+ "venue": "Proceedings of the Research On Computational Lingustics (ROCLING) Conference",
1172
+ "volume": "",
1173
+ "issue": "",
1174
+ "pages": "",
1175
+ "other_ids": {},
1176
+ "num": null,
1177
+ "urls": [],
1178
+ "raw_text": "Meng, H. and C. W. Ip, \"An Analytical Study of Transformational Tagging of Chinese Text,\" Proceedings of the Research On Computational Lingustics (ROCLING) Conference, 1999. Meng, H., W. K. Lo, Y. C. Li and P. C. Ching, \"A Study on the Use of Syllables for Chinese Spoken Document Retrieval,\" Technical Report SEEM1999-11, The Chinese University of Hong Kong, 1999.",
1179
+ "links": null
1180
+ },
1181
+ "BIBREF11": {
1182
+ "ref_id": "b11",
1183
+ "title": "Mandarin-English Information (MEI),\" Working notes of the DARPA TDT-3 Workshop",
1184
+ "authors": [
1185
+ {
1186
+ "first": "H",
1187
+ "middle": [],
1188
+ "last": "Meng",
1189
+ "suffix": ""
1190
+ },
1191
+ {
1192
+ "first": "S",
1193
+ "middle": [],
1194
+ "last": "Khudanpur",
1195
+ "suffix": ""
1196
+ },
1197
+ {
1198
+ "first": "D",
1199
+ "middle": [
1200
+ "W"
1201
+ ],
1202
+ "last": "Oard",
1203
+ "suffix": ""
1204
+ },
1205
+ {
1206
+ "first": "H",
1207
+ "middle": [
1208
+ "M"
1209
+ ],
1210
+ "last": "Wang",
1211
+ "suffix": ""
1212
+ }
1213
+ ],
1214
+ "year": 2000,
1215
+ "venue": "",
1216
+ "volume": "",
1217
+ "issue": "",
1218
+ "pages": "",
1219
+ "other_ids": {},
1220
+ "num": null,
1221
+ "urls": [],
1222
+ "raw_text": "Meng, H., Khudanpur, S., Oard, D. W. and Wang, H. M., \"Mandarin-English Information (MEI),\" Working notes of the DARPA TDT-3 Workshop, 2000.",
1223
+ "links": null
1224
+ },
1225
+ "BIBREF12": {
1226
+ "ref_id": "b12",
1227
+ "title": "Subword-based Approaches for Spoken Document Retrieval",
1228
+ "authors": [
1229
+ {
1230
+ "first": "K",
1231
+ "middle": [],
1232
+ "last": "Ng",
1233
+ "suffix": ""
1234
+ }
1235
+ ],
1236
+ "year": 2000,
1237
+ "venue": "",
1238
+ "volume": "",
1239
+ "issue": "",
1240
+ "pages": "",
1241
+ "other_ids": {},
1242
+ "num": null,
1243
+ "urls": [],
1244
+ "raw_text": "Ng, K., \"Subword-based Approaches for Spoken Document Retrieval,\" Ph.D. Thesis, MIT, February 2000.",
1245
+ "links": null
1246
+ },
1247
+ "BIBREF13": {
1248
+ "ref_id": "b13",
1249
+ "title": "The effects of query structure and dictionary setups in dictionary-based crosslanguage information retrieval",
1250
+ "authors": [
1251
+ {
1252
+ "first": "D",
1253
+ "middle": [
1254
+ "W"
1255
+ ],
1256
+ "last": "Oard",
1257
+ "suffix": ""
1258
+ },
1259
+ {
1260
+ "first": "A",
1261
+ "middle": [
1262
+ "R"
1263
+ ],
1264
+ "last": "Diekema ; Pirkola",
1265
+ "suffix": ""
1266
+ },
1267
+ {
1268
+ "first": "A",
1269
+ "middle": [],
1270
+ "last": "",
1271
+ "suffix": ""
1272
+ }
1273
+ ],
1274
+ "year": 1998,
1275
+ "venue": "Proceedings of ACM SIGIR",
1276
+ "volume": "33",
1277
+ "issue": "",
1278
+ "pages": "",
1279
+ "other_ids": {},
1280
+ "num": null,
1281
+ "urls": [],
1282
+ "raw_text": "Oard, D. W. and A.R. Diekema, \"Cross- Language Information Retrieval,\" Annual Review of Information Science and Technology, vol.33, 1998. Pirkola, A., \"The effects of query structure and dictionary setups in dictionary-based cross- language information retrieval,\" Proceedings of ACM SIGIR, 1998.",
1283
+ "links": null
1284
+ },
1285
+ "BIBREF14": {
1286
+ "ref_id": "b14",
1287
+ "title": "Experiments in Multilingual Information Retrieval using the SPIDER System",
1288
+ "authors": [
1289
+ {
1290
+ "first": "P",
1291
+ "middle": [],
1292
+ "last": "Sheridan",
1293
+ "suffix": ""
1294
+ },
1295
+ {
1296
+ "first": "J",
1297
+ "middle": [
1298
+ "P"
1299
+ ],
1300
+ "last": "Ballerini",
1301
+ "suffix": ""
1302
+ },
1303
+ {
1304
+ "first": "E",
1305
+ "middle": [],
1306
+ "last": "Voorhees",
1307
+ "suffix": ""
1308
+ },
1309
+ {
1310
+ "first": "H",
1311
+ "middle": [
1312
+ "M. ; P"
1313
+ ],
1314
+ "last": "Wang",
1315
+ "suffix": ""
1316
+ },
1317
+ {
1318
+ "first": "",
1319
+ "middle": [],
1320
+ "last": "Schaiible",
1321
+ "suffix": ""
1322
+ }
1323
+ ],
1324
+ "year": 1995,
1325
+ "venue": "Proceedings of the Fourth International Workshop on Information Retrieval in Asian Languages",
1326
+ "volume": "",
1327
+ "issue": "",
1328
+ "pages": "",
1329
+ "other_ids": {},
1330
+ "num": null,
1331
+ "urls": [],
1332
+ "raw_text": "Sheridan P. and J. P. Ballerini, \"Experiments in Multilingual Information Retrieval using the SPIDER System,\" Proceedings of ACM SIGIR, 1996. Voorhees, E., \"Learning Collection Fusion Strategies,\" Proceedings of SIGIR, 1995. Wang, H. M., \"Retrieval of Mandarin Spoken Documents Based on Syllable Lattice Matching,\" Proceedings of the Fourth International Workshop on Information Retrieval in Asian Languages, 1999. Wechsler, M. and P. Schaiible, \"Speech Retrieval Based on Automatic Indexing,\" Proceedings of MIRO-1995.",
1333
+ "links": null
1334
+ }
1335
+ },
1336
+ "ref_entries": {}
1337
+ }
1338
+ }
Full_text_JSON/prefixW/json/W00/W00-0505.json ADDED
@@ -0,0 +1,973 @@
1
+ {
2
+ "paper_id": "W00-0505",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:35:08.891680Z"
6
+ },
7
+ "title": "Towards Translingual Information Access using Portable Information Extraction",
8
+ "authors": [
9
+ {
10
+ "first": "Michael",
11
+ "middle": [],
12
+ "last": "White",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ },
17
+ {
18
+ "first": "Claire",
19
+ "middle": [],
20
+ "last": "Cardie",
21
+ "suffix": "",
22
+ "affiliation": {},
23
+ "email": "cardie@cs@edu"
24
+ },
25
+ {
26
+ "first": "Chung-Hye",
27
+ "middle": [],
28
+ "last": "Han",
29
+ "suffix": "",
30
+ "affiliation": {},
31
+ "email": "chunghye@babel"
32
+ },
33
+ {
34
+ "first": "Nari",
35
+ "middle": [],
36
+ "last": "Kim",
37
+ "suffix": "",
38
+ "affiliation": {},
39
+ "email": ""
40
+ },
41
+ {
42
+ "first": "Benoit",
43
+ "middle": [],
44
+ "last": "Lavoie",
45
+ "suffix": "",
46
+ "affiliation": {},
47
+ "email": ""
48
+ },
49
+ {
50
+ "first": "Martha",
51
+ "middle": [],
52
+ "last": "Palmer",
53
+ "suffix": "",
54
+ "affiliation": {},
55
+ "email": ""
56
+ },
57
+ {
58
+ "first": "Owen",
59
+ "middle": [],
60
+ "last": "Rainbow",
61
+ "suffix": "",
62
+ "affiliation": {},
63
+ "email": ""
64
+ },
65
+ {
66
+ "first": "Juntae",
67
+ "middle": [],
68
+ "last": "Yoon",
69
+ "suffix": "",
70
+ "affiliation": {},
71
+ "email": "jtyoon@linc.cis.upenn.edu"
72
+ }
73
+ ],
74
+ "year": "",
75
+ "venue": null,
76
+ "identifiers": {},
77
+ "abstract": "We report on a small study undertaken to demonstrate the feasibility of combining portable information extraction with MT in order to support translingual information access. After describing the proposed system's usage scenario and system design, we describe our investigation of transferring information extraction techniques developed for English to Korean. We conclude with a brief discussion of related MT issues we plan to investigate in future work.",
78
+ "pdf_parse": {
79
+ "paper_id": "W00-0505",
80
+ "_pdf_hash": "",
81
+ "abstract": [
82
+ {
83
+ "text": "We report on a small study undertaken to demonstrate the feasibility of combining portable information extraction with MT in order to support translingual information access. After describing the proposed system's usage scenario and system design, we describe our investigation of transferring information extraction techniques developed for English to Korean. We conclude with a brief discussion of related MT issues we plan to investigate in future work.",
84
+ "cite_spans": [],
85
+ "ref_spans": [],
86
+ "eq_spans": [],
87
+ "section": "Abstract",
88
+ "sec_num": null
89
+ }
90
+ ],
91
+ "body_text": [
92
+ {
93
+ "text": "In this paper, we report on a small study undertaken to demonstrate the feasibility of combining portable information extraction with MT in order to support translingual information access. The goal of our proposed system is to better enable analysts to perform information filtering tasks on foreign language documents. This effort was funded by a SBIR Phase I award from the U.S. Army Research Lab, and will be pursued further under the DARPA TIDES initiative.",
94
+ "cite_spans": [],
95
+ "ref_spans": [],
96
+ "eq_spans": [],
97
+ "section": "Introduction",
98
+ "sec_num": "1"
99
+ },
100
+ {
101
+ "text": "Information extraction (IE) systems are designed to extract specific types of information from natural language texts. In order to achieve acceptable accuracy, IE systems need to be tuned for a given topic domain. Since this domain tuning can be labor intensive, recent IE research has focused on developing learning algorithms for training IE system components (cf. Cardie, 1997 , for a survey). To date, however, little work has been done on IE systems for languages other than English (though cf. MUC-5, 1994, and MUC-7, 1998 , for Japanese IE systems); and, to our knowledge, none of the available techniques for the core task of learning information extraction patterns have been extended or evaluated for multilingual information extraction (though again cf. MUC-7, 1998 , where the use of learning techniques for the IE subtasks of named entity recognition and coreference resolution are described).",
102
+ "cite_spans": [
103
+ {
104
+ "start": 367,
105
+ "end": 379,
106
+ "text": "Cardie, 1997",
107
+ "ref_id": "BIBREF0"
108
+ },
109
+ {
110
+ "start": 500,
111
+ "end": 516,
112
+ "text": "MUC-5, 1994, and",
113
+ "ref_id": "BIBREF4"
114
+ },
115
+ {
116
+ "start": 517,
117
+ "end": 528,
118
+ "text": "MUC-7, 1998",
119
+ "ref_id": null
120
+ },
121
+ {
122
+ "start": 765,
123
+ "end": 776,
124
+ "text": "MUC-7, 1998",
125
+ "ref_id": null
126
+ }
127
+ ],
128
+ "ref_spans": [],
129
+ "eq_spans": [],
130
+ "section": "Introduction",
131
+ "sec_num": "1"
132
+ },
133
+ {
134
+ "text": "Given this situation, the primary objective of our study was to demonstrate the feasibility of using portable--i.e., easily trainable--IE technology on Korean documents, focusing on techniques for learning information extraction patterns. Secondary objectives of the study were to elaborate the analyst scenario and system design.",
135
+ "cite_spans": [],
136
+ "ref_spans": [],
137
+ "eq_spans": [],
138
+ "section": "Introduction",
139
+ "sec_num": "1"
140
+ },
141
+ {
142
+ "text": "Analyst Scenario Figure 1 illustrates how an intelligence analyst might use the proposed system:",
143
+ "cite_spans": [],
144
+ "ref_spans": [
145
+ {
146
+ "start": 17,
147
+ "end": 25,
148
+ "text": "Figure 1",
149
+ "ref_id": null
150
+ }
151
+ ],
152
+ "eq_spans": [],
153
+ "section": "2",
154
+ "sec_num": null
155
+ },
156
+ {
157
+ "text": "\u2022 The analyst selects one or more Korean documents in which to search for information (this step not shown). The analyst selects one or more scenario templates to activate in the query. Each scenario template corresponds to a specific type of event. Available scenario templates might include troop movements, acts of violence, meetings and negotiations, protests, etc. In Figure 1 , the selected event is of type meeting (understood broadly).",
158
+ "cite_spans": [],
159
+ "ref_spans": [
160
+ {
161
+ "start": 373,
162
+ "end": 381,
163
+ "text": "Figure 1",
164
+ "ref_id": null
165
+ }
166
+ ],
167
+ "eq_spans": [],
168
+ "section": "2",
169
+ "sec_num": null
170
+ },
171
+ {
172
+ "text": "The analyst fills in the available slots of the selected scenario template in order to restrict the search to the information considered to be relevant. In Figure 1 , the values specified in the scenario template indicate that the information to find is about meetings having as location South Korea and as issue North Korea and missiles.",
173
+ "cite_spans": [],
174
+ "ref_spans": [
175
+ {
176
+ "start": 156,
177
+ "end": 164,
178
+ "text": "Figure 1",
179
+ "ref_id": null
180
+ }
181
+ ],
182
+ "eq_spans": [],
183
+ "section": "2",
184
+ "sec_num": null
185
+ },
186
+ {
187
+ "text": "specifies what information s/he wants to be reported when information matching the query is found. In Figure 1 , the selected boxes under the Report column indicate that all information found satisfying the query should be reported except for the meeting participants. 1 \u2022 Once the analyst submits the query for evaluation, the system searches the input documents for information matching the query. As a result, a hypertext document is generated describing the information matching the query as well as the source of this information. Note that the query contains English keywords that are automatically translated into Korean prior to matching.",
188
+ "cite_spans": [],
189
+ "ref_spans": [
190
+ {
191
+ "start": 102,
192
+ "end": 110,
193
+ "text": "Figure 1",
194
+ "ref_id": null
195
+ }
196
+ ],
197
+ "eq_spans": [],
198
+ "section": "The analyst also",
199
+ "sec_num": null
200
+ },
201
+ {
202
+ "text": "The extracted information is presented in English after being translated from Korean. In Figure 1 , the generated hypertext response indicates two documents in the input set that matched the query totally or in part. Each summary in the response includes just the translations of the extracted information that the analyst requested to be reported.",
203
+ "cite_spans": [],
204
+ "ref_spans": [
205
+ {
206
+ "start": 89,
207
+ "end": 97,
208
+ "text": "Figure 1",
209
+ "ref_id": null
210
+ }
211
+ ],
212
+ "eq_spans": [],
213
+ "section": "The analyst also",
214
+ "sec_num": null
215
+ },
216
+ {
217
+ "text": "\u2022 For each document extract matching the analyst query, the analyst can obtain a complete machine translation of the Korean document where the match was found, and where the matched information is highlighted.",
218
+ "cite_spans": [],
219
+ "ref_spans": [],
220
+ "eq_spans": [],
221
+ "section": "The analyst also",
222
+ "sec_num": null
223
+ },
224
+ {
225
+ "text": "Working with a human translator, the analyst can also verify the accuracy of the reported information by accessing the documents in their original language. Figure 2 shows the high-level design of the system. It consists of the following components:",
226
+ "cite_spans": [],
227
+ "ref_spans": [
228
+ {
229
+ "start": 157,
230
+ "end": 165,
231
+ "text": "Figure 2",
232
+ "ref_id": null
233
+ }
234
+ ],
235
+ "eq_spans": [],
236
+ "section": "The analyst also",
237
+ "sec_num": null
238
+ },
239
+ {
240
+            "text": "\u2022 The User Interface. The browser-based interface is for entering queries and displaying the resulting presentations. (White and Caldwell, 1998) and the RealPro syntactic realizer (Lavoie and Rambow, 1997) .",
241
+ "cite_spans": [
242
+ {
243
+ "start": 118,
244
+ "end": 144,
245
+ "text": "(White and Caldwell, 1998)",
246
+ "ref_id": "BIBREF12"
247
+ },
248
+ {
249
+ "start": 180,
250
+ "end": 206,
251
+                    "text": "(Lavoie and Rambow, 1997)",
252
+ "ref_id": null
253
+ }
254
+ ],
255
+ "ref_spans": [],
256
+ "eq_spans": [],
257
+ "section": "System Design",
258
+ "sec_num": "3"
259
+ },
260
+ {
261
+            "text": "In our feasibility study, the majority of the effort went towards developing the PIE component, described in the next section. This component was implemented in a general way, i.e. in a way that we would expect to work beyond the specific training/test corpus described below. In contrast, we only implemented initial versions of the User Interface, Ranker and Presentation Generator components, in order to demonstrate the system concept; that is, these initial versions were only intended to work with our training/test corpus, and will require considerable further development prior to reaching operational status. For the MT component, we used an early version of the lexical transfer-based system currently under development in an ongoing SBIR Phase II project (cf. Nasr et al., 1997; Palmer et al., 1998; Lavoie et al., 2000) , though with a limited lexicon specifically for translating the slot fillers in our training/test corpus. ",
262
+ "cite_spans": [
263
+ {
264
+ "start": 771,
265
+ "end": 789,
266
+ "text": "Nasr et al., 1997;",
267
+ "ref_id": "BIBREF8"
268
+ },
269
+ {
270
+ "start": 790,
271
+ "end": 810,
272
+ "text": "Palmer et al., 1998;",
273
+ "ref_id": "BIBREF9"
274
+ },
275
+ {
276
+ "start": 811,
277
+ "end": 831,
278
+ "text": "Lavoie et al., 2000)",
279
+ "ref_id": "BIBREF2"
280
+ }
281
+ ],
282
+ "ref_spans": [],
283
+ "eq_spans": [],
284
+ "section": "System Design",
285
+ "sec_num": "3"
286
+ },
287
+ {
288
+            "text": "For our Phase I feasibility demonstration, we chose a minimal scenario template for meeting and negotiation events consisting of one or more participant slots plus optional date and location slots. 2 We then gathered a small corpus of thirty articles by searching for articles containing \"North Korea\" and one or more of about 15 keywords. The first two sentences (with a few exceptions) were then annotated with the slots to be extracted, leading to a total of 51 sentences containing 47 scenario templates and 89 total 2 In the end, we did not use the 'issue' slot shown in Figure 1 , as it contained more complex fillers than those that typically have been handled in IE systems.",
289
+ "cite_spans": [],
290
+ "ref_spans": [
291
+ {
292
+ "start": 576,
293
+ "end": 584,
294
+ "text": "Figure 1",
295
+ "ref_id": null
296
+ }
297
+ ],
298
+ "eq_spans": [],
299
+            "section": "Scenario Template and Training/Test Corpus",
300
+ "sec_num": "4.1"
301
+ },
302
+ {
303
+ "text": "correct slots. Note that in a couple of cases more than one template was given for a single long sentence. When compared to the MUC scenario template task, our extraction task was considerably simpler, for the following reasons: * The answer keys only contained information that could be found within a single sentence, i.e. the answer keys did not require merging information across sentences. \u2022 The answer keys did not require anaphoric references to be resolved, and we did not deal with conjuncts separately. \u2022 We did not attempt to normalize dates or remove appositives from NPs.",
304
+ "cite_spans": [],
305
+ "ref_spans": [],
306
+ "eq_spans": [],
307
+            "section": "Scenario Template and Training/Test Corpus",
308
+ "sec_num": "4.1"
309
+ },
310
+ {
311
+ "text": "For our feasibility study, we chose to follow the AutoSlog (Lehnert et al., 1992; Riloff, 1993) approach to extraction pattern acquisition. In this approach, extraction patterns are acquired Figure 3 via a one-shot general-to-specific learning algorithm designed specifically for the information extraction task. 3 The learning algorithm is straightforward and depends only on the existence of a (partial) parser and a small set of general linguistic patterns that direct the creation of specific patterns. As a training corpus, it requires a set of texts with noun phrases annotated with the slot type to be extracted.",
312
+ "cite_spans": [
313
+ {
314
+ "start": 59,
315
+ "end": 81,
316
+ "text": "(Lehnert et al., 1992;",
317
+ "ref_id": "BIBREF3"
318
+ },
319
+ {
320
+ "start": 82,
321
+ "end": 95,
322
+ "text": "Riloff, 1993)",
323
+ "ref_id": "BIBREF10"
324
+ }
325
+ ],
326
+ "ref_spans": [
327
+ {
328
+ "start": 191,
329
+ "end": 199,
330
+ "text": "Figure 3",
331
+ "ref_id": null
332
+ }
333
+ ],
334
+ "eq_spans": [],
335
+ "section": "Extraction Pattern Learning",
336
+ "sec_num": "4.2"
337
+ },
338
+ {
339
+ "text": "To adapt the AutoSlog approach to Korean, we first devised Korean equivalents of the English patterns, two of which are shown in Figure 3 . It turned out that for our corpus, we could collapse some of these patterns, though some new ones were also needed. In the end we used just nine generic patterns.",
340
+ "cite_spans": [],
341
+ "ref_spans": [
342
+ {
343
+ "start": 129,
344
+ "end": 137,
345
+ "text": "Figure 3",
346
+ "ref_id": null
347
+ }
348
+ ],
349
+ "eq_spans": [],
350
+ "section": "Extraction Pattern Learning",
351
+ "sec_num": "4.2"
352
+ },
353
+ {
354
+ "text": "Important issues that arose in adapting the approach were (1) greater flexibility in word order and heavier reliance on morphological cues in Korean, and (2) the predominance of light verbs (verbs with little semantic content of their own) and aspectual verbs in the chosen domain. We discuss these issues in the next two sections.",
355
+ "cite_spans": [],
356
+ "ref_spans": [],
357
+ "eq_spans": [],
358
+ "section": "Extraction Pattern Learning",
359
+ "sec_num": "4.2"
360
+ },
361
+ {
362
+            "text": "We used Yoon's hybrid statistical Korean parser (Yoon et al., 1997 (Yoon et al., , 1999 Yoon, 1999) to process the input sentences prior to extraction. The parser incorporates a POS tagger and morphological analyzer and yields a dependency representation as its output. The use of a dependency representation enabled us to handle the greater flexibility in word order in Korean.",
363
+ "cite_spans": [
364
+ {
365
+ "start": 48,
366
+ "end": 66,
367
+ "text": "(Yoon et al., 1997",
368
+ "ref_id": "BIBREF15"
369
+ },
370
+ {
371
+ "start": 67,
372
+ "end": 87,
373
+ "text": "(Yoon et al., , 1999",
374
+ "ref_id": "BIBREF14"
375
+ },
376
+ {
377
+ "start": 88,
378
+ "end": 99,
379
+ "text": "Yoon, 1999)",
380
+ "ref_id": "BIBREF13"
381
+ }
382
+ ],
383
+ "ref_spans": [],
384
+ "eq_spans": [],
385
+ "section": "Korean Parser",
386
+ "sec_num": "4.3"
387
+ },
388
+ {
389
+ "text": "To facilitate pattern matching, we wrote a simple program to convert the parser's output to XML form. During the XML conversion, two simple heuristics were applied, one to recover implicit subjects, and another to correct a recurring misanalysis of noun compounds.",
390
+ "cite_spans": [],
391
+ "ref_spans": [],
392
+ "eq_spans": [],
393
+ "section": "Korean Parser",
394
+ "sec_num": "4.3"
395
+ },
396
+ {
397
+ "text": "In the newswire corpus we looked at, meeting events were rarely described with the verb 'mannata' ('to meet').",
398
+ "cite_spans": [],
399
+ "ref_spans": [],
400
+ "eq_spans": [],
401
+ "section": "Trigger Word Filtering and Generalization",
402
+ "sec_num": "4.4"
403
+ },
404
+ {
405
+            "text": "Instead, they were usually described with a noun that stands for 'meeting' and a light or aspectual verb, for example, 'hoyuy-lul kacta' ('to have a meeting') or 'hoyuy-lul machita' ('to finish a meeting'). In order to acquire extraction patterns that made appropriate use of such collocations, we decided to go beyond the AutoSlog approach and explicitly group trigger words (such as 'hoyuy') into classes, and to likewise group any collocations, such as those involving light verbs or aspectual verbs. To find collocations for the trigger words, we reviewed a Korean lexical cooccurrence base which was constructed from a corpus of 40 million words (Yoon et al., 1997) . We then used the resulting specification to filter the learned patterns to just those containing the trigger words or trigger word collocations, as well as to generalize the patterns to the word class level. Because the number of trigger words is small, this specification can be done quickly, and soon pays off in terms of time saved in manually filtering the learned patterns.",
406
+ "cite_spans": [
407
+ {
408
+ "start": 650,
409
+ "end": 669,
410
+ "text": "(Yoon et al., 1997)",
411
+ "ref_id": "BIBREF15"
412
+ }
413
+ ],
414
+ "ref_spans": [],
415
+ "eq_spans": [],
416
+ "section": "Trigger Word Filtering and Generalization",
417
+ "sec_num": "4.4"
418
+ },
419
+ {
420
+ "text": "In testing our approach, we obtained overall results of 79% recall and 67% precision in a hold-one-out cross validation test. In a cross validation test, one repeatedly divides a corpus into different training and test sets, averaging the results; in the hold-one-out version, the system is tested on a held-out example after being trained on the rest. In the IE setting, the recall measure is the number of correct slots found divided by the total number of correct slots, while the precision measure is the number of correct slots found divided by the total number of slots found.",
421
+ "cite_spans": [],
422
+ "ref_spans": [],
423
+ "eq_spans": [],
424
+ "section": "Results",
425
+ "sec_num": "4.5"
426
+ },
427
+ {
428
+ "text": "While direct comparisons with the MUC conference results cannot be made for the reasons we gave above, we nevertheless consider these results quite promising, as these scores exceed the best scores reported at MUC-6 on the scenario template task. 5 A breakdown by slot is shown in Table 1 . We may note that precision is low for date and location slots because we used a simplistic sentence-level merge, rather than dependencies.",
429
+ "cite_spans": [],
430
+ "ref_spans": [
431
+ {
432
+ "start": 281,
433
+ "end": 288,
434
+ "text": "Table 1",
435
+ "ref_id": "TABREF4"
436
+ }
437
+ ],
438
+ "eq_spans": [],
439
+ "section": "Results",
440
+ "sec_num": "4.5"
441
+ },
442
+ {
443
+ "text": "To measure the impact of our approach to generalization, we may compare the results in Table 1 with those shown in Table 2 , where generalization is not used. As can be seen, the generalization step adds substantially to overall recall.",
444
+ "cite_spans": [],
445
+ "ref_spans": [
446
+ {
447
+ "start": 87,
448
+ "end": 122,
449
+ "text": "Table 1 with those shown in Table 2",
450
+ "ref_id": "TABREF4"
451
+ }
452
+ ],
453
+ "eq_spans": [],
454
+ "section": "Results",
455
+ "sec_num": "4.5"
456
+ },
457
+ {
458
+            "text": "To illustrate the effect of generalization, consider the pattern to extract the subject NP of the light verb 'kac (hold)' when paired with an object NP headed by the noun 'hyepsang (negotiation)'. Since this pattern only occurs once in our corpus, the slot is not successfully extracted in the cross-validation test without generalization. However, since this example does fall under the more generalized pattern of extracting the subject NP of a verb in the light verb class when paired with an object NP headed by a noun in the 'hoytam-hyepsang' class, the slot is successfully extracted in the cross-validation test using the generalized patterns. Cases like these are the source of the 18% boost in recall of participant slots, from 57% to 75%.",
459
+ "cite_spans": [],
460
+ "ref_spans": [],
461
+ "eq_spans": [],
462
+ "section": "Results",
463
+ "sec_num": "4.5"
464
+ },
465
+ {
466
+ "text": "Our feasibility study has focused our attention on several questions concerning the interaction of IE and MT, which we hope to pursue under the DARPA TIDES initiative. One question is the extent to which slot filler translation is more practicable than general-purpose MT; one would expect to achieve much higher quality on slot fillers, as they are typically relatively brief noun phrases, and instantiation of a slot implies a degree of semantic classification. On the other hand, one might find that higher quality is required in order to take translated phrases out of their original context. Another question is how to automate the construction of bilingual lexicons. An important issue here will be how to combine information from different sources, given that automatically acquired lexical information is apt to be less reliable, though domain-specific.",
467
+ "cite_spans": [],
468
+ "ref_spans": [],
469
+ "eq_spans": [],
470
+ "section": "Discussion",
471
+ "sec_num": "5"
472
+ },
473
+ {
474
+            "text": "For TIDES, we plan to use more sophisticated learning algorithms, as well as active learning techniques, such as those described in Thompson et al. (1999).",
475
+ "cite_spans": [],
476
+ "ref_spans": [],
477
+ "eq_spans": [],
478
+ "section": "",
479
+ "sec_num": null
480
+ },
481
+ {
482
+            "text": "Overall dependency precision is reported to be 89.4% (Yoon, 1999).",
483
+ "cite_spans": [],
484
+ "ref_spans": [],
485
+ "eq_spans": [],
486
+ "section": "",
487
+ "sec_num": null
488
+ },
489
+ {
490
+            "text": "http://www.nist.gov/itl/div894/894.02/related_projects/tipster/muc.htm",
491
+ "cite_spans": [],
492
+ "ref_spans": [],
493
+ "eq_spans": [],
494
+ "section": "",
495
+ "sec_num": null
496
+ }
497
+ ],
498
+ "back_matter": [
499
+ {
500
+ "text": "Our thanks go to Richard Kittredge and Tanya Korelsky for helpful comments and advice. This work was supported by ARL contract DAAD 17-99-C-0005.",
501
+ "cite_spans": [],
502
+ "ref_spans": [],
503
+ "eq_spans": [],
504
+ "section": "Acknowledgements",
505
+ "sec_num": null
506
+ }
507
+ ],
508
+ "bib_entries": {
509
+ "BIBREF0": {
510
+ "ref_id": "b0",
511
+ "title": "Empirical Methods in Information Extraction",
512
+ "authors": [
513
+ {
514
+ "first": "C",
515
+ "middle": [],
516
+ "last": "Cardie",
517
+ "suffix": ""
518
+ }
519
+ ],
520
+ "year": 1997,
521
+ "venue": "AI Magazine",
522
+ "volume": "18",
523
+ "issue": "4",
524
+ "pages": "65--79",
525
+ "other_ids": {},
526
+ "num": null,
527
+ "urls": [],
528
+ "raw_text": "Cardie, C. (1997). Empirical Methods in Information Extraction. AI Magazine 18(4):65-79.",
529
+ "links": null
530
+ },
531
+ "BIBREF1": {
532
+ "ref_id": "b1",
533
+ "title": "RealPro --A fast, portable sentence realizer",
534
+ "authors": [
535
+ {
536
+ "first": "B",
537
+ "middle": [],
538
+ "last": "Lavoie",
539
+ "suffix": ""
540
+ },
541
+ {
542
+ "first": "O",
543
+ "middle": [],
544
+ "last": "Rambow",
545
+ "suffix": ""
546
+ }
547
+ ],
548
+ "year": 1997,
549
+ "venue": "Proceedings of the Conference on Applied Natural Language Processing (ANLP'97)",
550
+ "volume": "",
551
+ "issue": "",
552
+ "pages": "",
553
+ "other_ids": {},
554
+ "num": null,
555
+ "urls": [],
556
+ "raw_text": "Lavoie, B. and Rambow, O. (1997). RealPro --A fast, portable sentence realizer. In Proceedings of the Conference on Applied Natural Language Processing (ANLP'97), Washington, DC.",
557
+ "links": null
558
+ },
559
+ "BIBREF2": {
560
+ "ref_id": "b2",
561
+ "title": "A Framework for MT and Multilingual NLG Systems Based on Uniform Lexico-Structural Processing",
562
+ "authors": [
563
+ {
564
+ "first": "B",
565
+ "middle": [],
566
+ "last": "Lavoie",
567
+ "suffix": ""
568
+ },
569
+ {
570
+ "first": "T",
571
+ "middle": [],
572
+ "last": "Korelsky",
573
+ "suffix": ""
574
+ },
575
+ {
576
+ "first": "O",
577
+ "middle": [],
578
+ "last": "Rambow",
579
+ "suffix": ""
580
+ }
581
+ ],
582
+ "year": 2000,
583
+ "venue": "Proceedings of the Sixth Conference on Applied Natural Language Processing",
584
+ "volume": "",
585
+ "issue": "",
586
+ "pages": "",
587
+ "other_ids": {},
588
+ "num": null,
589
+ "urls": [],
590
+ "raw_text": "Lavoie, B., Korelsky, T., and Rambow, O. (2000). A Framework for MT and Multilingual NLG Systems Based on Uniform Lexico-Structural Processing. To appear in Proceedings of the Sixth Conference on Applied Natural Language Processing (ANLP- 2000), Seattle, WA.",
591
+ "links": null
592
+ },
593
+ "BIBREF3": {
594
+ "ref_id": "b3",
595
+ "title": "University of Massachusetts: Description of the CIRCUS system as used in MUC-4",
596
+ "authors": [
597
+ {
598
+ "first": "W",
599
+ "middle": [],
600
+ "last": "Lehnert",
601
+ "suffix": ""
602
+ },
603
+ {
604
+ "first": "C",
605
+ "middle": [],
606
+ "last": "Cardie",
607
+ "suffix": ""
608
+ },
609
+ {
610
+ "first": "D",
611
+ "middle": [],
612
+ "last": "Fisher",
613
+ "suffix": ""
614
+ },
615
+ {
616
+ "first": "J",
617
+ "middle": [],
618
+ "last": "Mccarthy",
619
+ "suffix": ""
620
+ },
621
+ {
622
+ "first": "E",
623
+ "middle": [],
624
+ "last": "Riloff",
625
+ "suffix": ""
626
+ },
627
+ {
628
+ "first": "S",
629
+ "middle": [],
630
+ "last": "Soderland",
631
+ "suffix": ""
632
+ }
633
+ ],
634
+ "year": 1992,
635
+ "venue": "Proceedings of the Fourth Message Understanding Conference (MUC-4)",
636
+ "volume": "",
637
+ "issue": "",
638
+ "pages": "282--288",
639
+ "other_ids": {},
640
+ "num": null,
641
+ "urls": [],
642
+ "raw_text": "Lehnert, W., Cardie, C., Fisher, D., McCarthy, J., Riloff, E., and Soderland, S. (1992). University of Massachusetts: Description of the CIRCUS system as used in MUC-4. In Proceedings of the Fourth Message Understanding Conference (MUC-4), pages 282-288, San Mateo, CA. Morgan Kaufmann.",
643
+ "links": null
644
+ },
645
+ "BIBREF4": {
646
+ "ref_id": "b4",
647
+ "title": "Proceedings of the Fifth Message Understanding Conference (MUC-5)",
648
+ "authors": [
649
+ {
650
+ "first": "",
651
+ "middle": [],
652
+ "last": "Muc-5",
653
+ "suffix": ""
654
+ }
655
+ ],
656
+ "year": 1994,
657
+ "venue": "",
658
+ "volume": "",
659
+ "issue": "",
660
+ "pages": "",
661
+ "other_ids": {},
662
+ "num": null,
663
+ "urls": [],
664
+ "raw_text": "MUC-5 (1994). Proceedings of the Fifth Message Understanding Conference (MUC-5).",
665
+ "links": null
666
+ },
667
+ "BIBREF6": {
668
+ "ref_id": "b6",
669
+ "title": "Proceedings of the Seventh Message Understanding Conference (MUC-7)",
670
+ "authors": [],
671
+ "year": 1998,
672
+ "venue": "",
673
+ "volume": "",
674
+ "issue": "",
675
+ "pages": "",
676
+ "other_ids": {},
677
+ "num": null,
678
+ "urls": [],
679
+ "raw_text": "MUC-7 (1998). Proceedings of the Seventh Message Understanding Conference (MUC-7).",
680
+ "links": null
681
+ },
682
+ "BIBREF8": {
683
+ "ref_id": "b8",
684
+ "title": "Enriching lexical transfer with crosslinguistic semantic features",
685
+ "authors": [
686
+ {
687
+ "first": "A",
688
+ "middle": [],
689
+ "last": "Nasr",
690
+ "suffix": ""
691
+ },
692
+ {
693
+ "first": "O",
694
+ "middle": [],
695
+ "last": "Rambow",
696
+ "suffix": ""
697
+ },
698
+ {
699
+ "first": "M",
700
+ "middle": [],
701
+ "last": "Palmer",
702
+ "suffix": ""
703
+ },
704
+ {
705
+ "first": "J",
706
+ "middle": [],
707
+ "last": "Rosenzweig",
708
+ "suffix": ""
709
+ }
710
+ ],
711
+ "year": 1997,
712
+            "venue": "Proceedings of the Interlingua Workshop at the MT Summit",
713
+ "volume": "",
714
+ "issue": "",
715
+ "pages": "",
716
+ "other_ids": {},
717
+ "num": null,
718
+ "urls": [],
719
+ "raw_text": "Nasr, A., Rambow, O., Palmer, M., and Rosenzweig, J. (1997). Enriching lexical transfer with cross- linguistic semantic features. In Proceedings of the lnterlingua Workshop at the MT Summit, San Diego, CA.",
720
+ "links": null
721
+ },
722
+ "BIBREF9": {
723
+ "ref_id": "b9",
724
+ "title": "Rapid prototyping of domain-specific machine translation systems",
725
+ "authors": [
726
+ {
727
+ "first": "M",
728
+ "middle": [],
729
+ "last": "Palmer",
730
+ "suffix": ""
731
+ },
732
+ {
733
+ "first": "O",
734
+ "middle": [],
735
+ "last": "Rambow",
736
+ "suffix": ""
737
+ },
738
+ {
739
+ "first": "A",
740
+ "middle": [],
741
+ "last": "Nasr",
742
+ "suffix": ""
743
+ }
744
+ ],
745
+ "year": 1998,
746
+ "venue": "Machine Translation and the Information Soup -Proceedings of the Third Conference of the Association for Machine Translation in the Americas AMTA'98",
747
+ "volume": "1529",
748
+ "issue": "",
749
+ "pages": "",
750
+ "other_ids": {},
751
+ "num": null,
752
+ "urls": [],
753
+ "raw_text": "Palmer, M., Rambow, O., and Nasr, A. (1998). Rapid prototyping of domain-specific machine translation systems. In Machine Translation and the Information Soup -Proceedings of the Third Conference of the Association for Machine Translation in the Americas AMTA'98, Springer Verlag (Lecture Notes in Artificial Intelligence No. 1529), Berlin.",
754
+ "links": null
755
+ },
756
+ "BIBREF10": {
757
+ "ref_id": "b10",
758
+            "title": "Automatically constructing a dictionary for information extraction tasks",
759
+ "authors": [
760
+ {
761
+ "first": "E",
762
+ "middle": [],
763
+ "last": "Riloff",
764
+ "suffix": ""
765
+ }
766
+ ],
767
+ "year": 1993,
768
+ "venue": "Proceedings of the Eleventh National Conference on Artificial Intelligence",
769
+ "volume": "",
770
+ "issue": "",
771
+ "pages": "811--816",
772
+ "other_ids": {},
773
+ "num": null,
774
+ "urls": [],
775
+ "raw_text": "Riloff, E. (1993). Automatically constructing a dictionary for information exlxaction tasks. In Proceedings of the Eleventh National Conference on Artificial Intelligence, pages 811-816, Washington, DC. AAAI Press / MIT Press.",
776
+ "links": null
777
+ },
778
+ "BIBREF11": {
779
+ "ref_id": "b11",
780
+ "title": "Active learning for natural language parsing and information extraction",
781
+ "authors": [
782
+ {
783
+ "first": "C",
784
+ "middle": [
785
+ "A"
786
+ ],
787
+ "last": "Thompson",
788
+ "suffix": ""
789
+ },
790
+ {
791
+ "first": "M",
792
+ "middle": [
793
+ "E"
794
+ ],
795
+ "last": "Califf",
796
+ "suffix": ""
797
+ },
798
+ {
799
+ "first": "R",
800
+ "middle": [
801
+ "J"
802
+ ],
803
+ "last": "Mooney",
804
+ "suffix": ""
805
+ }
806
+ ],
807
+ "year": 1999,
808
+ "venue": "Proceedings of the Sixteenth International Machine Learning Conference",
809
+ "volume": "",
810
+ "issue": "",
811
+ "pages": "1--99",
812
+ "other_ids": {},
813
+ "num": null,
814
+ "urls": [],
815
+ "raw_text": "Thompson, C. A., Califf, M. E., and Mooney, R. J. (1999). Active learning for natural language parsing and information extraction. In Proceedings of the Sixteenth International Machine Learning Conference (1CML-99), Bled, Slovenia.",
816
+ "links": null
817
+ },
818
+ "BIBREF12": {
819
+ "ref_id": "b12",
820
+ "title": "EXEMPLARS: A practical, extensible framework for dynamic text generation",
821
+ "authors": [
822
+ {
823
+ "first": "M",
824
+ "middle": [],
825
+ "last": "White",
826
+ "suffix": ""
827
+ },
828
+ {
829
+ "first": "T",
830
+ "middle": [],
831
+ "last": "Caldwell",
832
+ "suffix": ""
833
+ }
834
+ ],
835
+ "year": 1998,
836
+ "venue": "Proceedings of the 8th International Workshop on Natural Language Generation",
837
+ "volume": "",
838
+ "issue": "",
839
+ "pages": "",
840
+ "other_ids": {},
841
+ "num": null,
842
+ "urls": [],
843
+ "raw_text": "White, M. and Caldwell, T. (1998). EXEMPLARS: A practical, extensible framework for dynamic text generation. In Proceedings of the 8th International Workshop on Natural Language Generation, Niagara-on-the-Lake, Ontario.",
844
+ "links": null
845
+ },
846
+ "BIBREF13": {
847
+ "ref_id": "b13",
848
+ "title": "Efficient dependency parsing based on three types of chunking and lexical association",
849
+ "authors": [
850
+ {
851
+ "first": "J",
852
+ "middle": [],
853
+ "last": "Yoon",
854
+ "suffix": ""
855
+ }
856
+ ],
857
+ "year": 1999,
858
+ "venue": "",
859
+ "volume": "",
860
+ "issue": "",
861
+ "pages": "",
862
+ "other_ids": {},
863
+ "num": null,
864
+ "urls": [],
865
+ "raw_text": "Yoon, J. (1999). Efficient dependency parsing based on three types of chunking and lexical association. Submitted.",
866
+ "links": null
867
+ },
868
+ "BIBREF14": {
869
+ "ref_id": "b14",
870
+ "title": "Three types of chunking in Korean and dependency analysis based on lexical association",
871
+ "authors": [
872
+ {
873
+ "first": "J",
874
+ "middle": [],
875
+ "last": "Yoon",
876
+ "suffix": ""
877
+ },
878
+ {
879
+ "first": "K.-S",
880
+ "middle": [],
881
+ "last": "Choi",
882
+ "suffix": ""
883
+ },
884
+ {
885
+ "first": "M",
886
+ "middle": [],
887
+ "last": "Song",
888
+ "suffix": ""
889
+ }
890
+ ],
891
+ "year": 1999,
892
+            "venue": "Proceedings of ICCPOL",
893
+ "volume": "",
894
+ "issue": "",
895
+ "pages": "",
896
+ "other_ids": {},
897
+ "num": null,
898
+ "urls": [],
899
+ "raw_text": "Yoon, J., Choi, K.-S., and Song, M. (1999). Three types of chunking in Korean and dependency analysis based on lexical association. In Proceedings of lCCPOL.",
900
+ "links": null
901
+ },
902
+ "BIBREF15": {
903
+ "ref_id": "b15",
904
+ "title": "New parsing method using global association table",
905
+ "authors": [
906
+ {
907
+ "first": "J",
908
+ "middle": [],
909
+ "last": "Yoon",
910
+ "suffix": ""
911
+ },
912
+ {
913
+ "first": "S",
914
+ "middle": [],
915
+ "last": "Kim",
916
+ "suffix": ""
917
+ },
918
+ {
919
+ "first": "M",
920
+ "middle": [],
921
+ "last": "Song",
922
+ "suffix": ""
923
+ }
924
+ ],
925
+ "year": 1997,
926
+ "venue": "Proceedings of the 5th International Workshop on Parsing Technology",
927
+ "volume": "",
928
+ "issue": "",
929
+ "pages": "",
930
+ "other_ids": {},
931
+ "num": null,
932
+ "urls": [],
933
+ "raw_text": "Yoon, J., Kim, S., and Song, M. (1997). New parsing method using global association table. In Proceedings of the 5th International Workshop on Parsing Technology.",
934
+ "links": null
935
+ }
936
+ },
937
+ "ref_entries": {
938
+ "FIGREF0": {
939
+ "num": null,
940
+            "text": "Figure 1",
941
+ "uris": null,
942
+ "type_str": "figure"
943
+ },
944
+ "FIGREF1": {
945
+ "num": null,
946
+            "text": "Parsed Document",
947
+ "uris": null,
948
+ "type_str": "figure"
949
+ },
950
+ "TABREF0": {
951
+ "num": null,
952
+ "html": null,
953
+ "content": "<table><tr><td/><td>Ouery</td></tr><tr><td/><td/><td>Find</td><td>Report</td></tr><tr><td/><td colspan=\"2\">Event: Iseun:i~'North Korea\" AND \"missiles\" i</td></tr><tr><td/><td/><td>I</td></tr><tr><td/><td colspan=\"2\">Response to Ouery</td></tr><tr><td/><td colspan=\"2\">The reports Indicate 2 meetings held In South Korea on the</td></tr><tr><td/><td colspan=\"2\">issues of North Korea anti missiles:</td></tr><tr><td/><td>Sources</td><td>Translated Extracts</td></tr><tr><td/><td>Joon,ap~l</td><td>\u00c3</td></tr><tr><td/><td>.... ,!nerF, orea j I Noes / --t~Q I</td><td>,4 ~tln, g ~# ,#~=1# o,1 ApfJl ~ sLYout tP~ I10t /ine\u00a2 ~t~wn Saoul end Tokyo for the ela~gen\u00a2\u00a5 d~/tu~tlons ~uc/t eg AlottlI Kofgm'~</td></tr><tr><td>Translation</td><td colspan=\"2\">of Korean Source Report</td></tr><tr><td>[Joongang Dally]</td><td/></tr><tr><td colspan=\"3\">Korean. Japanese Hinisters Discuss NK Policy</td></tr><tr><td colspan=\"3\">The tmo ministers ~9rsed that any further launching of a</td></tr><tr><td colspan=\"3\">missile by North Korean would undermine the security of</td></tr><tr><td colspan=\"3\">~Northeast Asia and the Korea, the United States and Japan</td></tr><tr><td colspan=\"3\">should take Joint steps against the North Korean missile</td></tr><tr><td>threat.</td><td/></tr><tr><td colspan=\"3\">}-long requested that Koeura cork to normalize Japan's</td></tr><tr><td colspan=\"3\">relations with North Korea. rather than cutting channels</td></tr><tr><td colspan=\"2\">of dialogue bet#men the two countries.</td></tr><tr><td colspan=\"3\">Koeura said that if North Korea continues Its missile</td></tr><tr><td colspan=\"3\">testing, the Japanese government will definitely stop</td></tr><tr><td colspan=\"2\">making contributions to KEDO.</td></tr><tr><td colspan=\"3\">The tee ministers also tentatively agreed that J~anese</td></tr><tr><td colspan=\"3\">primo minister Kslzo Obuchl should make a state visit to</td></tr><tr><td colspan=\"2\">Korea on or around Nerch 20.</td></tr></table>",
954
+            "text": "Meeting -- Source: ... Date: ... Location: South Korea ... Participant: ...",
955
+ "type_str": "table"
956
+ },
957
+ "TABREF1": {
958
+ "num": null,
959
+ "html": null,
960
+ "content": "<table><tr><td colspan=\"2\">Extraction Pattem Library --</td><td>which</td></tr><tr><td colspan=\"3\">contains the set of extraction patterns</td></tr><tr><td colspan=\"3\">learned in the lab, one set per scenario</td></tr><tr><td colspan=\"3\">template --to extract specific types of</td></tr><tr><td colspan=\"3\">information from the input Korean</td></tr><tr><td>documents, once parsed.</td><td/></tr><tr><td colspan=\"3\">\u2022 The Ranker component. This component</td></tr><tr><td colspan=\"3\">ranks the extracted information returned by</td></tr><tr><td colspan=\"3\">the PIE component according to how well it</td></tr><tr><td colspan=\"3\">matches the keyword restrictions in the</td></tr><tr><td colspan=\"3\">query. The MT component's English-to-</td></tr><tr><td colspan=\"3\">Korean Transfer Lexicon is used to map the</td></tr><tr><td colspan=\"3\">English keywords to corresponding Korean</td></tr><tr><td colspan=\"3\">ones. When the match falls below a user-</td></tr><tr><td>\u2022 configurable threshold,</td><td colspan=\"2\">the extracted</td></tr><tr><td>information is filtered out.</td><td/></tr><tr><td colspan=\"3\">\u2022 The MT component. The MT component</td></tr><tr><td colspan=\"3\">(cf. Lavoie et al., 2000) translates the</td></tr><tr><td colspan=\"3\">extracted Korean phrases or sentences into</td></tr><tr><td>corresponding English ones.</td><td/></tr></table>",
961
+ "text": "\u2022 The Portable Information Extractor (PIE) component. The PIE component uses the While in this example the exclusion of participant information in the resulting report is rather artificial, in general a scenario template may contain many different types of information, not all of which are likely to interest an analyst at once.",
962
+ "type_str": "table"
963
+ },
964
+ "TABREF4": {
965
+ "num": null,
966
+ "html": null,
967
+ "content": "<table><tr><td>Slots</td><td colspan=\"2\">Recall Precision</td></tr><tr><td>All</td><td>79%</td><td>67%</td></tr><tr><td>Participant</td><td>75%</td><td>84%</td></tr><tr><td>Date/Location</td><td>86%</td><td>54%</td></tr><tr><td>Table2: Slots</td><td colspan=\"2\">Recall Precision</td></tr><tr><td>All</td><td>61%</td><td>64%</td></tr><tr><td>Participant</td><td>57%</td><td>81%</td></tr><tr><td>Date/Location</td><td>67%</td><td>52%</td></tr></table>",
968
+ "text": "",
969
+ "type_str": "table"
970
+ }
971
+ }
972
+ }
973
+ }
Full_text_JSON/prefixW/json/W00/W00-0506.json ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "paper_id": "W00-0506",
3
+ "header": {
4
+ "generated_with": "S2ORC 1.0.0",
5
+ "date_generated": "2023-01-19T05:33:30.174097Z"
6
+ },
7
+ "title": "Pre-processing Closed Captions for Machine Translation",
8
+ "authors": [
9
+ {
10
+ "first": "Davide",
11
+ "middle": [],
12
+ "last": "Turcato",
13
+ "suffix": "",
14
+ "affiliation": {},
15
+ "email": ""
16
+ },
17
+ {
18
+ "first": "Fred",
19
+ "middle": [],
20
+ "last": "Popowich",
21
+ "suffix": "",
22
+ "affiliation": {},
23
+ "email": ""
24
+ },
25
+ {
26
+ "first": "Paul",
27
+ "middle": [],
28
+ "last": "Mcfetridge",
29
+ "suffix": "",
30
+ "affiliation": {},
31
+ "email": ""
32
+ },
33
+ {
34
+ "first": "Devlan",
35
+ "middle": [],
36
+ "last": "Nicholson",
37
+ "suffix": "",
38
+ "affiliation": {},
39
+ "email": ""
40
+ },
41
+ {
42
+ "first": "Janine",
43
+ "middle": [],
44
+ "last": "Toole",
45
+ "suffix": "",
46
+ "affiliation": {},
47
+ "email": ""
48
+ }
49
+ ],
50
+ "year": "",
51
+ "venue": null,
52
+ "identifiers": {},
53
+ "abstract": "We describe an approach to Machine Translation of transcribed speech, as found in closed captions. We discuss how the colloquial nature and input format peculiarities of closed captions are dealt with in a pre-processing pipeline that prepares the input for effective processing by a core MT system. In particular, we describe components for proper name recognition and input segmentation. We evaluate the contribution of such modules to the system performance. The described methods have been implemented on an MT system for translating English closed captions to Spanish and Portuguese.",
54
+ "pdf_parse": {
55
+ "paper_id": "W00-0506",
56
+ "_pdf_hash": "",
57
+ "abstract": [
58
+ {
59
+ "text": "We describe an approach to Machine Translation of transcribed speech, as found in closed captions. We discuss how the colloquial nature and input format peculiarities of closed captions are dealt with in a pre-processing pipeline that prepares the input for effective processing by a core MT system. In particular, we describe components for proper name recognition and input segmentation. We evaluate the contribution of such modules to the system performance. The described methods have been implemented on an MT system for translating English closed captions to Spanish and Portuguese.",
60
+ "cite_spans": [],
61
+ "ref_spans": [],
62
+ "eq_spans": [],
63
+ "section": "Abstract",
64
+ "sec_num": null
65
+ }
66
+ ],
67
+ "body_text": [
68
+ {
69
+ "text": "1 Introduction Machine Translation (MT) technology can be embedded in a device to perform real time translation of closed captions included in TV signals. While speed is one factor associated with the construction of such a device, another factor is the language type and format. The challenges posed by closed captions to MT can be attributed to three distinct characteristics:",
70
+ "cite_spans": [],
71
+ "ref_spans": [],
72
+ "eq_spans": [],
73
+ "section": "",
74
+ "sec_num": null
75
+ },
76
+ {
77
+ "text": "Firstly, closed captions are transcribed speech. Although closed captions are not a completely faithful transcription of TV programs, they render spoken language and therefore the language used is typically colloquial (Nyberg and Mitamura, 1997) . They contain many of the phenomena which characterize spoken language: interjections, repetitions, stuttering, ellipsis, interruptions, hesitations. Linguistically and stylistically they differ from written language: sentences are shorter and poorly structured, and contain idiomatic expressions, ungrammaticality, etc. The associated difficulties stem from the inherently colloquial nature of closed captions, and, to different degrees, of all forms of transcribed speech (Hindle, 1983) .",
78
+ "cite_spans": [
79
+ {
80
+ "start": 218,
81
+ "end": 245,
82
+ "text": "(Nyberg and Mitamura, 1997)",
83
+ "ref_id": "BIBREF5"
84
+ },
85
+ {
86
+ "start": 721,
87
+ "end": 735,
88
+ "text": "(Hindle, 1983)",
89
+ "ref_id": "BIBREF4"
90
+ }
91
+ ],
92
+ "ref_spans": [],
93
+ "eq_spans": [],
94
+ "section": "",
95
+ "sec_num": null
96
+ },
97
+ {
98
+ "text": "Such difficulties require a different approach than is taken for written documents.",
99
+ "cite_spans": [],
100
+ "ref_spans": [],
101
+ "eq_spans": [],
102
+ "section": "",
103
+ "sec_num": null
104
+ },
105
+ {
106
+ "text": "Secondly, closed captions come in a specific format, which poses problems for their optimal processing. Closed-captioners may often split a single utterance between two screens, if the character limit for a screen has been exceeded. The split is based on consideration about string length, rather than linguistic considerations, hence it can happen at non-constituent boundaries (see Table 1 ), thus making the real time processing of the separate segments problematic. Another problem is that captions have no upper/lower case distinction. This poses challenges for proper name recognition since names cannot be identified by an initial capital. Additionally, we cannot rely on the initial uppercase letter to identify a sentence initial word. This problematic aspect sets the domain of closed captions apart from most text-to-text MT domains, making it more akin, in this respect, to speech translation systems. Although, from a technical point of view, such input format characteristics could be amended, most likely they are not under a developer's control, hence they have to be presumed.",
107
+ "cite_spans": [],
108
+ "ref_spans": [
109
+ {
110
+ "start": 384,
111
+ "end": 391,
112
+ "text": "Table 1",
113
+ "ref_id": "TABREF0"
114
+ }
115
+ ],
116
+ "eq_spans": [],
117
+ "section": "",
118
+ "sec_num": null
119
+ },
120
+ {
121
+                "text": "Thirdly, closed captions are used under operational constraints. Users have no control over the speed of the image or caption flow, so they must comprehend the caption in the limited time that the caption appears on the screen. Accordingly, the translation of closed captions is a \"time-constrained\" application, where the user has limited time to comprehend the system output. Hence, an MT system should produce translations comprehensible within the limited time available to the viewer.",
122
+ "cite_spans": [],
123
+ "ref_spans": [],
124
+ "eq_spans": [],
125
+ "section": "",
126
+ "sec_num": null
127
+ },
128
+ {
129
+                "text": "In this paper we focus on the first two factors, as the third has been discussed in (Toole et al., 1998) . We discuss how such domain-dependent, problematic factors are dealt with in a pre-processing pipeline that prepares the input for processing by a core MT system. The described methods have been implemented for an MT system that translates English closed captions to Spanish and Portuguese. All the examples here refer to the Spanish module. [Example caption text: good evening, i'm jim lehrer. on the \"newshour\" tonight, four members of congress debate the u.n. deal with iraq; paul solman tells the troubled story of indonesia's currency; mark shields and paul gigot analyze the political week; and elizabeth farnsworth explains how the universe is getting larger.]",
130
+ "cite_spans": [
131
+ {
132
+ "start": 84,
133
+ "end": 104,
134
+ "text": "(Toole et al., 1998)",
135
+ "ref_id": "BIBREF7"
136
+ }
137
+ ],
138
+ "ref_spans": [],
139
+ "eq_spans": [],
140
+ "section": "",
141
+ "sec_num": null
142
+ },
143
+ {
144
+ "text": "2 Pre-processing design Input pre-processing is essential in an embedded real time system, in order to simplify the core processing and make it both time-and memoryeffective. In addition to this, we followed the guideline of separating domain-dependent processes and resources from general purpose ones. On the one hand, grammars and lexicons are costly resources. It would be desirable for them to be domain-independent and portable across different domains, as well as declarative and bidirectional. On the other hand, a domain with distinctive characteristics requires some specific treatment, if a system aims at robustness. We decided to have a domain independent core MT system, locating the domain dependent processing in a pipeline of low-level components, easy to implement, aiming at fast and robust processing and using limited linguistic knowledge.",
145
+ "cite_spans": [],
146
+ "ref_spans": [],
147
+ "eq_spans": [],
148
+ "section": "",
149
+ "sec_num": null
150
+ },
151
+ {
152
+                "text": "We use declarative and bidirectional grammars and lexicons. The lexicalist approach is indeed suitable to the closed caption domain, e.g. in terms of its capability of handling loosely structured or incomplete sentences. Also, the linguistic resources are geared towards this domain in terms of grammatical and lexical coverage. However, our system architecture and formalism make them equally usable in any other domain and translation direction, as the linguistic knowledge therein contained is valid in any domain. For the architecture we refer the reader to (Popowich et al., 1997) . In the rest of this paper we focus on the pre-processing module and how it deals with the issues discussed in the introduction.",
153
+ "cite_spans": [
154
+ {
155
+ "start": 561,
156
+ "end": 584,
157
+ "text": "(Popowich et al., 1997)",
158
+ "ref_id": "BIBREF6"
159
+ }
160
+ ],
161
+ "ref_spans": [],
162
+ "eq_spans": [],
163
+ "section": "",
164
+ "sec_num": null
165
+ },
166
+ {
167
+ "text": "The task of the pre-processing pipeline is to make the input amenable to a linguisticallyprincipled, domain independent treatment. This task is accomplished in two ways:",
168
+ "cite_spans": [],
169
+ "ref_spans": [],
170
+ "eq_spans": [],
171
+ "section": "",
172
+ "sec_num": null
173
+ },
174
+ {
175
+ "text": "1. By normalizing the input, i.e. removing noise, reducing the input to standard typographical conventions, and also restructuring and simplifying it, whenever this can be done in a reliable, meaning-preserving way.",
176
+ "cite_spans": [],
177
+ "ref_spans": [],
178
+ "eq_spans": [],
179
+ "section": "",
180
+ "sec_num": null
181
+ },
182
+ {
183
+ "text": "2. By annotating the input with linguistic information, whenever this can be reliably done with a shallow linguistic analysis, to reduce input ambiguity and make a full linguistic analysis more manageable.",
184
+ "cite_spans": [],
185
+ "ref_spans": [],
186
+ "eq_spans": [],
187
+ "section": "",
188
+ "sec_num": null
189
+ },
190
+ {
191
+ "text": "(1) shows the system architecture, with a particular emphasis on the preprocessing pipeline. The next section describes the pipeline up to tagging.",
192
+ "cite_spans": [],
193
+ "ref_spans": [],
194
+ "eq_spans": [],
195
+ "section": "Figure",
196
+ "sec_num": null
197
+ },
198
+ {
199
+ "text": "Proper name recognition and segmentation, which deal more specifically with the problems described in the introduction, are discussed in further sections.",
200
+ "cite_spans": [],
201
+ "ref_spans": [],
202
+ "eq_spans": [],
203
+ "section": "Figure",
204
+ "sec_num": null
205
+ },
206
+ {
207
+ "text": "The label normalization groups three components, which clean up and tokenize the input.",
208
+ "cite_spans": [],
209
+ "ref_spans": [],
210
+ "eq_spans": [],
211
+ "section": "Normalization and tagging",
212
+ "sec_num": "3"
213
+ },
214
+ {
215
+                "text": "The text-level normalization module performs operations at the string level, such as removing extraneous text and punctuation (e.g. curly brackets, used to mark off sound effects), or removing periods from abbreviations. E.g.: (2) \"I\" \"went\" \"to\" \"high\" \"school\" \"in\" \"the\" \"usa\" \" \" => \"I\" \"went\" \"to\" \"high school\" \"in\" \"the\" \"usa\" \" \"",
216
+ "cite_spans": [],
217
+ "ref_spans": [],
218
+ "eq_spans": [],
219
+ "section": "Normalization and tagging",
220
+ "sec_num": "3"
221
+ },
222
+ {
223
+                "text": "(3) \"W-wh-what's\" \"that\" \"?\" => \"what\"/stutter \"is\" \"that\" \"?\"",
224
+ "cite_spans": [],
225
+ "ref_spans": [],
226
+ "eq_spans": [],
227
+ "section": "Normalization and tagging",
228
+ "sec_num": "3"
229
+ },
230
+ {
231
+ "text": "Note that annotations associated with tokens are carried along the entire translation process, so as to be used in producing the output (e.g. stutters are re-inserted in the output).",
232
+ "cite_spans": [],
233
+ "ref_spans": [],
234
+ "eq_spans": [],
235
+ "section": "Normalization and tagging",
236
+ "sec_num": "3"
237
+ },
238
+ {
239
+ "text": "The tagger assigns parts of speech to tokens.",
240
+ "cite_spans": [],
241
+ "ref_spans": [],
242
+ "eq_spans": [],
243
+ "section": "Normalization and tagging",
244
+ "sec_num": "3"
245
+ },
246
+ {
247
+ "text": "Part of speech information is used by the subsequent pre-processing modules, and also in parsing, to prioritize the most likely lexical assignments of ambiguous items.",
248
+ "cite_spans": [],
249
+ "ref_spans": [],
250
+ "eq_spans": [],
251
+ "section": "Normalization and tagging",
252
+ "sec_num": "3"
253
+ },
254
+ {
255
+ "text": "Proper names are ubiquitous in closed captions (see Table 1 ). Their recognition is important for effective comprehension of closed captions, particularly in consideration of two facts: (i) users have little time to mentally rectify a mistranslation; (ii) a name can occur repeatedly in a program (e.g. a movie), with an annoying effect if it is systematically mistranslated (e.g. a golf tournament where the golfer named",
256
+ "cite_spans": [],
257
+ "ref_spans": [
258
+ {
259
+ "start": 52,
260
+ "end": 59,
261
+ "text": "Table 1",
262
+ "ref_id": "TABREF0"
263
+ }
264
+ ],
265
+ "eq_spans": [],
266
+ "section": "Proper name recognition",
267
+ "sec_num": "4"
268
+ },
269
+ {
270
+ "text": "Tiger Woods is systematically referred to as los bosques del tigre, lit. 'the woods of the tiger').",
271
+ "cite_spans": [],
272
+ "ref_spans": [],
273
+ "eq_spans": [],
274
+ "section": "Proper name recognition",
275
+ "sec_num": "4"
276
+ },
277
+ {
278
+                "text": "Name recognition is made harder in the closed caption domain by the fact that no capitalization information is given, thus making unusable all methods that rely on capitalization as the main way to identify candidates (Wolinski et al., 1995) (Wacholder et al., 1997) . For instance, an expression like 'mark shields', as occurs in Table (1), is problematic in the absence of capitalization, as both 'mark' and 'shields' are three-way ambiguous (proper name, common noun and verb). Note that this identical problem may be encountered if an MT system is embedded in a speech-to-speech translation as well. This situation forced us to explore different ways of identifying proper names. The goal of our recognizer is to identify proper names in a tagged line and annotate them accordingly, in order to override any other possible lexical assignment in the following modules. The recognizer also overrides previous tokenization, by possibly compounding two or more tokens into a single one, which will be treated as such thereafter. Besides part of speech, the only other information used by the recognizer is the lexical status of words, i.e. their ambiguity class (i.e. the range of possible syntactic categories it can be assigned) or their status as an unknown word (i.e. a word that is not in the lexicon). The recognizer scans an input line from left to right, and tries to match each item against a sequence of patterns. Each pattern expresses constraints (in terms of word, part of speech tag and lexical status) on the item under inspection and its left and right contexts. Any number of items can be inspected to the left and right of the current item. Such patterns also make use of regular expression operators (conjunction, disjunction, negation, Kleene star). For instance (a simplified version of) a pattern might look like the following:",
279
+ "cite_spans": [
280
+ {
281
+ "start": 218,
282
+ "end": 241,
283
+ "text": "(Wolinski et al., 1995)",
284
+ "ref_id": "BIBREF9"
285
+ },
286
+ {
287
+ "start": 242,
288
+ "end": 266,
289
+ "text": "(Wacholder et al., 1997)",
290
+ "ref_id": "BIBREF8"
291
+ }
292
+ ],
293
+ "ref_spans": [],
294
+ "eq_spans": [],
295
+ "section": "Proper name recognition",
296
+ "sec_num": "4"
297
+ },
298
+ {
299
+                "text": "(4) [the/DET (NOUN|ADJ)*] X' [~NOUN]",
300
+ "cite_spans": [],
301
+ "ref_spans": [],
302
+ "eq_spans": [],
303
+ "section": "Proper name recognition",
304
+ "sec_num": "4"
305
+ },
306
+ {
307
+ "text": "where we adopt the convention of representing words by lowercase strings, part of speech tags by uppercase strings and variables by primed Xs. The left and right context are enclosed in square brackets, respectively to the left and right of the current item. They can also contain special markers for the beginning and end of a line, and for the left or right boundary of the proper name being identified. This way tokenization can be overridden and separate tokens joined into a single name. Constraints on the lexical status of items are expressed as predicates associated with pattern elements, e.g.:",
308
+ "cite_spans": [],
309
+ "ref_spans": [],
310
+ "eq_spans": [],
311
+ "section": "Proper name recognition",
312
+ "sec_num": "4"
313
+ },
314
+ {
315
+ "text": "(5) proper_and_common (X') A pattern like the one above (4-5) would match a lexically ambiguous proper/common noun preceded by a determiner (with any number of nouns or adjectives in between), and not followed by a noun (e.g. 'the bill is...'). Besides identifying proper names, some patterns may establish that a given item is not a name (as in the case above). A return value is associated with each pattern, specifying whether the current match is or is not a proper name. Once a successful match occurs, no further patterns are tried. Patterns are ordered from more to less specific. At the bottom of the pattern sequence are the simplest patterns, e.g.:",
316
+ "cite_spans": [],
317
+ "ref_spans": [],
318
+ "eq_spans": [],
319
+ "section": "Proper name recognition",
320
+ "sec_num": "4"
321
+ },
322
+ {
323
+ "text": "(6) ( [] X' [] ), proper_and_common(X') yes",
324
+ "cite_spans": [],
325
+ "ref_spans": [],
326
+ "eq_spans": [],
327
+ "section": "Proper name recognition",
328
+ "sec_num": "4"
329
+ },
330
+ {
331
+ "text": "which is the default assignment for words like 'bill' if no other pattern matched. However (6) is overridden by more specific patterns like:",
332
+ "cite_spans": [],
333
+ "ref_spans": [],
334
+ "eq_spans": [],
335
+ "section": "Proper name recognition",
336
+ "sec_num": "4"
337
+ },
338
+ {
339
+                "text": "(7) ( [X''] X' [] ), proper_and_common(X'), common(X'') no (8) ( [X''] X' [] ), proper_and_common(X'), proper(X'') yes",
340
+ "cite_spans": [],
341
+ "ref_spans": [],
342
+ "eq_spans": [],
343
+ "section": "Proper name recognition",
344
+ "sec_num": "4"
345
+ },
346
+ {
347
+                "text": "The former pattern covers cases like 'telecommunications bill', preventing 'bill' from being interpreted as a proper name; the latter covers cases like 'damian bill', where 'bill' is more likely to be a name.",
348
+ "cite_spans": [],
349
+ "ref_spans": [],
350
+ "eq_spans": [],
351
+ "section": "Proper name recognition",
352
+ "sec_num": "4"
353
+ },
354
+ {
355
+ "text": "In general, the recognizer tries to disambiguate lexically ambiguous nouns or to assign a category to unknown words on the basis of the available context. However, in principle any word could be turned into a proper name. For instance, verbs or adjectives can be turned into proper names, when the context contains strong cues, like a title. Increasingly larger contexts provide evidence for more informed guesses, which override guesses based on narrower contexts. Consider the following examples that show how a word or expression is treated differently depending on the available context. Recognized names are in italics. The lexically ambiguous bill, interpreted as a proper name in isolation, becomes a common noun if preceded by a determiner. However, the interpretation reverts to proper name if another noun follows. Likewise the unknown word clinton is (incorrectly) interpreted as a common noun in (11), as it is the last item of a noun phrase introduced by a determiner, but it becomes a proper name if another noun follows.",
356
+ "cite_spans": [],
357
+ "ref_spans": [],
358
+ "eq_spans": [],
359
+ "section": "Proper name recognition",
360
+ "sec_num": "4"
361
+ },
362
+ {
363
+ "text": "We also use a name memory, which patterns have access to. As proper names are found in an input stream, they are added to the name memory. A previous occurrence of a proper name is used as evidence in making decisions about further occurrences. The idea is to cache names occurred in an 'easy' context (e.g. a name preceded by a title, which provides strong evidence for its status as a proper name), to use them later to make decisions in 'difficult' contexts, where the internal evidence would not be sufficient to support a proper name interpretation.",
364
+ "cite_spans": [],
365
+ "ref_spans": [],
366
+ "eq_spans": [],
367
+ "section": "Proper name recognition",
368
+ "sec_num": "4"
369
+ },
370
+ {
371
+                "text": "Hence, what typically happens is that the same name in the same context is interpreted differently at different times, if previously the name has occurred in an 'easy' context and has been memorized. E.g.: The name memory was designed to suit the peculiarity of closed captions. Typically, in this domain proper names have a low dispersion. They are concentrated in sections of an input stream (e.g. the name of the main characters in a movie), then disappear for long sections (e.g. after the movie is over). Therefore, a name memory needs to be reset to reflect such changes. However, it is problematic to decide when to reset the name memory. Even if it was possible to detect when a new program starts, one should take into account the possible scenario of an MT system embedded in a consumer product, in which case the user might unpredictably change channel at any time. In order to keep a name memory aligned with the current program, without any detection of program changes, we structured the name memory as a relatively short queue (first in, first out). Every time a new item is added to the end of the queue, the first item is removed and all the other items are shifted. Moreover, we do not check whether a name is already in the memory. Every time a suitable item is found, we add it to the memory, regardless of whether it is already there. Hence, the same item could be present twice or more in the memory at any given time. The result of this arrangement is that a name only remains in the memory for a relatively short time. It can only remain for a longer time if it keeps reappearing frequently in the input stream (as typically happens), otherwise it is removed shortly after it stopped appearing. In this way, the name memory is kept aligned with the current program, with only a short transition period, during which names no longer pertinent are still present in the memory, before getting replaced by pertinent ones.",
372
+ "cite_spans": [],
373
+ "ref_spans": [],
374
+ "eq_spans": [],
375
+ "section": "Proper name recognition",
376
+ "sec_num": "4"
377
+ },
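A minimal sketch of the fixed-length, first-in-first-out name memory described above; the capacity, class name and lower-casing are assumptions rather than details given in the text.

```python
from collections import deque

MEMORY_SIZE = 20  # hypothetical capacity; the paper only says "relatively short"

class NameMemory:
    def __init__(self, size=MEMORY_SIZE):
        # deque(maxlen=...) silently drops the oldest item when a new one is
        # appended, matching the first-in, first-out behaviour described above.
        self._queue = deque(maxlen=size)

    def add(self, name):
        # Names are added unconditionally, even if already present, so a name
        # survives only as long as it keeps reappearing in the input stream.
        self._queue.append(name.lower())

    def __contains__(self, name):
        return name.lower() in self._queue
```

A name seen in an 'easy' context (say, after a title) would be stored with add(); a later occurrence in a 'difficult' context can then be checked with the in operator before settling on a common-noun reading.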
378
+ {
379
+ "text": "The recognizer currently contains 63 patterns. We tested the recognizer on a sample of 1000 lines (5 randomly chosen continuous fragments of 200 lines each). The results, shown in table (2), illustrate a recall of 72.7% and a precision of 95.0%. These results reflect our cautious approach to name recognition. Since the core MT system has its own means of identifying some proper names (either in the lexicon or via default assignments to unknown words) we aimed at recognizing names in pre-processing only when this could be done reliably. Note also that 6 out of the 8 false positives were isolated interjections that would be better left untranslated (e.g. pffoo, el smacko), or closed captioner's typos (e.g. yo4swear).",
380
+ "cite_spans": [],
381
+ "ref_spans": [],
382
+ "eq_spans": [],
383
+ "section": "Proper name recognition",
384
+ "sec_num": "4"
385
+ },
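For reference, the quoted figures follow the usual definitions of precision and recall; the counts sketched below are back-of-the-envelope inferences from those figures, not numbers reported in the paper.

```python
# Standard definitions, shown only to make the quoted figures explicit.
def precision(tp, fp):
    return tp / (tp + fp)

def recall(tp, fn):
    return tp / (tp + fn)

# With 8 false positives and precision ~0.95, true positives ~= 152
# (152 / (152 + 8) = 0.95); recall ~0.727 then implies roughly 209 names
# in the 1000-line sample (152 / 209 ~= 0.727). These are our inferences.
```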
386
+ {
387
+ "text": "Segmentation breaks a line into one or more segments, which are passed separately to subsequent modules (Ejerhed, 1996) (Beeferman et al., 1997) . In translation, segmentation is applied to split a line into a sequence of translationally self-contained units (Lavie et al., 1996) . In our system, the translation units we identify are syntactic units, motivated by crosslinguistic considerations. Each unit is a constituent that dan be translated independently. Its translation is insensitive to the context in which the unit occurs, and the order of the units is preserved by translation.",
388
+ "cite_spans": [
389
+ {
390
+ "start": 104,
391
+ "end": 119,
392
+ "text": "(Ejerhed, 1996)",
393
+ "ref_id": "BIBREF2"
394
+ },
395
+ {
396
+ "start": 120,
397
+ "end": 144,
398
+ "text": "(Beeferman et al., 1997)",
399
+ "ref_id": "BIBREF0"
400
+ },
401
+ {
402
+ "start": 259,
403
+ "end": 279,
404
+ "text": "(Lavie et al., 1996)",
405
+ "ref_id": null
406
+ }
407
+ ],
408
+ "ref_spans": [],
409
+ "eq_spans": [],
410
+ "section": "Segmentation",
411
+ "sec_num": "5"
412
+ },
413
+ {
414
+ "text": "One motivation for segmenting is that processing is faster: syntactic ambiguity is reduced, and backtracking from a module to a previous one does not involve re-processing an entire line, but only the segment that failed. A second motivation is robustness: a failure in one segment does not involve a failure in the entire line, and error-recovery can be limited only to a segment. Further motivations are provided by the colloquial nature of closed captions. A line often contains fragments with a loose syntactic relation to each other and to the main clause: vocatives, false starts, tag questions, etc. These are most easily translated as individual segments. Parenthetical expressions are often also found in the middle of a main clause, thus making complete parses problematic. However, the solution involves a heavier intervention than just segmenting. Dealing with parentheticals requires restructuring a line, and reducing it to a 'normal' form which ideally always has parenthetical expressions at one end of a sentence (under the empirical assumption that the overall meaning is not affected). We will see how this kind of problem is handled in segmentation. A third motivation is given by the format of closed captions, with input lines split across non-constituent boundaries. One solution would be delaying translation until a sentence boundary is found, and restructuring the stored lines in a linguistically principled way. However, the requirements of real time translation (either because of real time captioning at the source, or because the MT system is embedded in a consumer product), together with the requirement that translations be aligned with the source text and, above all, with the images, makes this solution problematic. The solution we are left with, if we want lines to be broken along constituent boundaries, is to further segment a sentence, even at the cost of sometimes separating elements that should go together for an optimal translation. We also argued elsewhere (Toole et al., 1998) that in a time-constrained application the output grammaticality is of paramount importance, even at the cost of a complete meaning equivalence with the source. For this reason, we also simplify likely problematic input, when a simplification is possible without affecting the core meaning.",
415
+ "cite_spans": [
416
+ {
417
+ "start": 2005,
418
+ "end": 2025,
419
+ "text": "(Toole et al., 1998)",
420
+ "ref_id": "BIBREF7"
421
+ }
422
+ ],
423
+ "ref_spans": [],
424
+ "eq_spans": [],
425
+ "section": "Segmentation",
426
+ "sec_num": "5"
427
+ },
428
+ {
429
+ "text": "To sum up, the task at hand is broader than just segmentation: re-ordering of constituents and removal of words are also required, to syntactically 'normalize' the input. As with name recognition, we aim at using efficient and easy to implement techniques, relying on limited linguistic information. The segmenter works by matching input lines against a set of templates represented by pushdown transducers. Each transducer is specified in a fairly standard way (Gazdar and Mellish, 1989, 82) , by defining an initial state, a final state, and a set of transitions of the following form:",
430
+ "cite_spans": [
431
+ {
432
+ "start": 462,
433
+ "end": 492,
434
+ "text": "(Gazdar and Mellish, 1989, 82)",
435
+ "ref_id": null
436
+ }
437
+ ],
438
+ "ref_spans": [],
439
+ "eq_spans": [],
440
+ "section": "Segmentation",
441
+ "sec_num": "5"
442
+ },
443
+ {
444
+ "text": "(14) (State I, State2, Label, Transducer>",
445
+ "cite_spans": [],
446
+ "ref_spans": [],
447
+ "eq_spans": [],
448
+ "section": "Segmentation",
449
+ "sec_num": "5"
450
+ },
451
+ {
452
+ "text": "Such a transition specifies that Transducer can move from Statel to State2 when the input specified by Label is found. Label can be either a pair (InputSymbol, OutputSymbol) or the name of another transducer, which needs to be entirely traversed for the transition from Statel to State2 to take place. An input symbol is a <Word, Tag> pair. An output symbol is an integer ranging from 0 to 3, specifying to which of two output segments an input symbol is assigned (0 = neither segment, 3 = both segments, 1 and 2 to be interpreted in the obvious way). The output codes are then used to perform the actual split of a line. A successful match splits a line into two segments at most. However, on a successful split, the resulting segments are recursively fed to the segmenter, until no match is found. Therefore, there is no limit to the number of segments obtained from an input line. The segmenter currently contains 37 top-level transducers, i.e. segmenting patterns. Not all of them are used at the same time. The implementation of patterns is straightforward and the segmenter can be easily adapted to different domains, by implementing specific patterns and excluding others. For instance, a very simple patterns split a line at every comma, a slightly more sophisticated one, splits a line at every comma, unless tagged as a coordination; other patterns split a final adverb, interjection, prepositional phrase, etc.",
453
+ "cite_spans": [],
454
+ "ref_spans": [],
455
+ "eq_spans": [],
456
+ "section": "Segmentation",
457
+ "sec_num": "5"
458
+ },
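To make the formalism concrete, here is one way such patterns could be represented and run. It is deliberately simplified: transition labels are single (symbol, output-code) pairs, and the ability of a label to name another transducer (the pushdown part) is omitted, so this is an illustrative assumption rather than the system's actual machinery.

```python
# Each transition is (state1, state2, (symbol, out_code)); "*" matches any token.
# A flat, backtracking matcher; recursive (pushdown) labels are left out.
def run(transitions, start, final, tokens):
    """tokens: list of (word, tag) pairs. Returns one output code per token
    on success, or None if the pattern does not match the line."""
    def step(state, i, codes):
        if i == len(tokens):
            return codes if state == final else None
        word, tag = tokens[i]
        for s1, s2, (symbol, code) in transitions:
            if s1 == state and symbol in ("*", word, tag):
                result = step(s2, i + 1, codes + [code])
                if result is not None:
                    return result
        return None
    return step(start, 0, [])

# A toy "split at a comma" pattern: everything before the comma goes to
# segment 1, the comma itself is dropped, the rest goes to segment 2.
SPLIT_AT_COMMA = [
    (0, 0, ("*", 1)),   # material before the comma
    (0, 1, (",", 0)),   # the comma is assigned to neither segment
    (1, 1, ("*", 2)),   # material after the comma
]
```

Running SPLIT_AT_COMMA over a tagged line yields one output code per token; the sketch after example (16) below shows how such codes are turned into actual segments.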
459
+ {
460
+ "text": "Note that a segment can be a discontinuous part of a line, as the same output code can be assigned to non-contiguous elements. This feature is used, e.g., in restructuring a sentence, as when a parenthetical expression is encountered. Thefollowing example shows an input sentence, an assignment, and a resulting segmentation.",
461
+ "cite_spans": [],
462
+ "ref_spans": [],
463
+ "eq_spans": [],
464
+ "section": "Segmentation",
465
+ "sec_num": "5"
466
+ },
467
+ {
468
+ "text": "(15) this, however, is a political science course.",
469
+ "cite_spans": [],
470
+ "ref_spans": [],
471
+ "eq_spans": [],
472
+ "section": "Segmentation",
473
+ "sec_num": "5"
474
+ },
475
+ {
476
+ "text": "(16) this/2 ,/0 however/l ,/i is/2 a/2 political/2 science/2 course/2. We sometimes use the segmenter's ability to simplify the input, e.g. with adverbs like just,",
477
+ "cite_spans": [],
478
+ "ref_spans": [],
479
+ "eq_spans": [],
480
+ "section": "Segmentation",
481
+ "sec_num": "5"
482
+ },
483
+ {
484
+ "text": "which are polysemous and difficult to translate, but seldom contribute to the core meaning of a sentence.",
485
+ "cite_spans": [],
486
+ "ref_spans": [],
487
+ "eq_spans": [],
488
+ "section": "Segmentation",
489
+ "sec_num": "5"
490
+ },
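The assignment in (16) above can be turned into segments with a few lines of code. The sketch below only illustrates how the 0-3 codes are interpreted; it is not taken from the system itself.

```python
# Apply the output codes described in section 5 to a coded line such as (16).
# 0 = neither segment, 1 = first segment only, 2 = second segment only,
# 3 = both segments.
def apply_codes(coded_tokens):
    seg1, seg2 = [], []
    for word, code in coded_tokens:
        if code in (1, 3):
            seg1.append(word)
        if code in (2, 3):
            seg2.append(word)
    # Non-empty segments would each be fed back to the segmenter recursively.
    return [s for s in (seg1, seg2) if s]

# Example (15)/(16): the parenthetical "however" is split off, and the rest of
# the clause is reassembled as one contiguous segment.
line = [("this", 2), (",", 0), ("however", 1), (",", 1),
        ("is", 2), ("a", 2), ("political", 2), ("science", 2), ("course", 2)]
# apply_codes(line)
# -> [["however", ","], ["this", "is", "a", "political", "science", "course"]]
```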
491
+ {
492
+ "text": "We ran a test to evaluate how the recognizer and segmenter affected the quality of translations. We selected a sample of 200 lines of closed captioning, comprising four continuous sections of 50 lines each. The sample was run through the MT system twice, once with the recognizer and segmenter activated and once without. The results were evaluated by two native Spanish speakers. We adopted a very simple evaluation measure, asking the subjects to tell whether one translation was better than the other. The translations differed for 32 input lines out of 200 (16%). Table ( 3) shows the evaluation results, with input lines as the unit of measurement. The third column shows the intersection of the two evaluations, i.e. the evaluations on which the two subjects agreed. The three rows show how often the translation was better (i) with pre-processing, (ii) without pre-processing, or (iii) no difference could be appreciated. The results show a discrepancy in the evaluations. One evaluator also pointed out that it is hard to make sense of transcribed closed captions, without the audio-visual context. These two facts seem to point out that an appropriate evaluation should be done in the operational context in which closed captions are normally used. Still, the intersection of the subjects' evaluations shows that pre-processing improves the output quality. In three of the four cases where the two evaluators agreed that pre-processing yielded a worse result, the worse performance was due to an incorrect name recognition oi\" segmentation. However, in two of the three cases, the original problem was an incorrect tagging.",
493
+ "cite_spans": [],
494
+ "ref_spans": [
495
+ {
496
+ "start": 568,
497
+ "end": 575,
498
+ "text": "Table (",
499
+ "ref_id": null
500
+ }
501
+ ],
502
+ "eq_spans": [],
503
+ "section": "Performance",
504
+ "sec_num": "6"
505
+ },
506
+ {
507
+ "text": "Note that even when the name recognizer and segmenter are off, the system can identify some names, and recover from translation failures by piecing together translations of fragments. Therefore, what was being tested was not so much name recognition and segmenting per se, but the idea of having separate modules for such tasks in the system front end.",
508
+ "cite_spans": [],
509
+ "ref_spans": [],
510
+ "eq_spans": [],
511
+ "section": "Performance",
512
+ "sec_num": "6"
513
+ },
514
+ {
515
+ "text": "Finally, the test did not take into account speed, as we set higher time thresholds than an embedded application would require. Since segmentation reduces processing time, it is also expected to reduce the impact of tighter time thresholds, all other things being equal.",
516
+ "cite_spans": [],
517
+ "ref_spans": [],
518
+ "eq_spans": [],
519
+ "section": "Performance",
520
+ "sec_num": "6"
521
+ },
522
+ {
523
+ "text": "We are planning to conduct an operational evaluation of the system. The goal is to evaluate the system output in its proper visual context, and compare the results with parallel results for human translated closed captions. Different groups of participants will watch a video With either human-or machine-translated subtitles, and complete a questionnaire based on the subtitles in the video. The questionnaire will contain a set of questions to elicit the subject's assessment on the translation quality, and a set of questions to assess the subject's level of comprehension of the program.",
524
+ "cite_spans": [],
525
+ "ref_spans": [],
526
+ "eq_spans": [],
527
+ "section": "Performance",
528
+ "sec_num": "6"
529
+ },
530
+ {
531
+ "text": "It is apparent that the peculiarity of closed captions, both in terms of transcribed speech characteristic and constraints due to the input format, require an ad hoc treatment, considerably different from the approaches suitable for written documents. Yet the knowledge about a language (or the bilingual knowledge about a language-pair) is largely invariant across different applications domains and should therefore be portable from one application domain to another. The architecture we have proposed strives to combine the need for domain independent linguistic resources and linguistically principled methods with the need for robust MT systems tuned to real world, noisy and idiosyncratic input, as encountered when embedding MT in real woi:ld devices.",
532
+ "cite_spans": [],
533
+ "ref_spans": [],
534
+ "eq_spans": [],
535
+ "section": "Conclusion",
536
+ "sec_num": "7"
537
+ },
538
+ {
539
+ "text": "In terms of adequacy, a standard evaluation and a comparison among different MT systems frtom different domains is hard, as the adequacy of a system depends on its application (Church and Hovy, 1993) . This is even truer with-closed captions, where the use of translation output is heavily influenced by operational constraints (time constraints, the presence of images, sound, etc.). In some cases such constraints may place a heavier burden on a system (e.g. the time constraint), in some other cases Judge 1 Judge 2 Both agreed Better with pre~processing Better without pre-processing No difference 21 16 15 4 12 4 7 4 3 Table 3 : Evaluation results.",
540
+ "cite_spans": [
541
+ {
542
+ "start": 176,
543
+ "end": 199,
544
+ "text": "(Church and Hovy, 1993)",
545
+ "ref_id": "BIBREF1"
546
+ }
547
+ ],
548
+ "ref_spans": [
549
+ {
550
+ "start": 503,
551
+ "end": 646,
552
+ "text": "Judge 1 Judge 2 Both agreed Better with pre~processing Better without pre-processing No difference 21 16 15 4 12 4 7 4 3 Table 3",
553
+ "ref_id": "TABREF0"
554
+ }
555
+ ],
556
+ "eq_spans": [],
557
+ "section": "Conclusion",
558
+ "sec_num": "7"
559
+ },
560
+ {
561
+ "text": "they can make an imperfect translation acceptable (e.g. the presence of images and sounds). We did not attempt an assessment in absolute terms, which we believe should take into account the operational environment and involve real-world users. More modestly, we aimed at showing that our pre-processing techniques provide an improvement in performance.",
562
+ "cite_spans": [],
563
+ "ref_spans": [],
564
+ "eq_spans": [],
565
+ "section": "Conclusion",
566
+ "sec_num": "7"
567
+ },
568
+ {
569
+ "text": "Our work on closed captions also shows that the challenges coming from this domain, even in terms on low-level issues of input format, can lead to interesting developments of new linguistic techniques. We believe that our solutions to specific problems (namely, proper name recognition and segmentation) in the closed caption domain bear relevance to a wider context, and offer techniques that can be usefully employed in a wider range of applications.",
570
+ "cite_spans": [],
571
+ "ref_spans": [],
572
+ "eq_spans": [],
573
+ "section": "Conclusion",
574
+ "sec_num": "7"
575
+ }
576
+ ],
577
+ "back_matter": [],
578
+ "bib_entries": {
579
+ "BIBREF0": {
580
+ "ref_id": "b0",
581
+ "title": "Text segmentation using exponential models",
582
+ "authors": [
583
+ {
584
+ "first": "Doug",
585
+ "middle": [],
586
+ "last": "Beeferman",
587
+ "suffix": ""
588
+ },
589
+ {
590
+ "first": "Adam",
591
+ "middle": [],
592
+ "last": "Berger",
593
+ "suffix": ""
594
+ },
595
+ {
596
+ "first": "John",
597
+ "middle": [],
598
+ "last": "Lafferty",
599
+ "suffix": ""
600
+ }
601
+ ],
602
+ "year": 1997,
603
+ "venue": "Proceedings of the Second Conference on Empirical Methods in Natural Language Processing",
604
+ "volume": "",
605
+ "issue": "",
606
+ "pages": "",
607
+ "other_ids": {},
608
+ "num": null,
609
+ "urls": [],
610
+ "raw_text": "Doug Beeferman, Adam Berger, and John Laf- ferty. 1997. Text segmentation using expo- nential models. In Proceedings of the Second Conference on Empirical Methods in Natu- ral Language Processing (EMNLP-2), Prov- idence, USA.",
611
+ "links": null
612
+ },
613
+ "BIBREF1": {
614
+ "ref_id": "b1",
615
+ "title": "Good applications for crummy machine translation",
616
+ "authors": [
617
+ {
618
+ "first": "W",
619
+ "middle": [],
620
+ "last": "Kenneth",
621
+ "suffix": ""
622
+ },
623
+ {
624
+ "first": "Eduard",
625
+ "middle": [
626
+ "H"
627
+ ],
628
+ "last": "Church",
629
+ "suffix": ""
630
+ },
631
+ {
632
+ "first": "",
633
+ "middle": [],
634
+ "last": "Hovy",
635
+ "suffix": ""
636
+ }
637
+ ],
638
+ "year": 1993,
639
+ "venue": "Machine Translation",
640
+ "volume": "8",
641
+ "issue": "",
642
+ "pages": "239--258",
643
+ "other_ids": {},
644
+ "num": null,
645
+ "urls": [],
646
+ "raw_text": "Kenneth W. Church and Eduard H. Hovy. 1993. Good applications for crummy machine translation. Machine Translation, 8:239-258.",
647
+ "links": null
648
+ },
649
+ "BIBREF2": {
650
+ "ref_id": "b2",
651
+ "title": "Finite state segmentation of discourse into clauses",
652
+ "authors": [
653
+ {
654
+ "first": "Eva",
655
+ "middle": [],
656
+ "last": "Ejerhed",
657
+ "suffix": ""
658
+ }
659
+ ],
660
+ "year": 1996,
661
+ "venue": "Proceedings of the ECAI-96 Workshop Extended Finite State Models of Language",
662
+ "volume": "",
663
+ "issue": "",
664
+ "pages": "",
665
+ "other_ids": {},
666
+ "num": null,
667
+ "urls": [],
668
+ "raw_text": "Eva Ejerhed. 1996. Finite state segmentation of discourse into clauses. In A. Kornai, ed- itor, Proceedings of the ECAI-96 Workshop Extended Finite State Models of Language, Budapest,Hungary.",
669
+ "links": null
670
+ },
671
+ "BIBREF3": {
672
+ "ref_id": "b3",
673
+ "title": "Natural Language Processing in PRO-LOG: an Introduction to Computational Linguistics",
674
+ "authors": [
675
+ {
676
+ "first": "Gerald",
677
+ "middle": [],
678
+ "last": "Gazdar",
679
+ "suffix": ""
680
+ },
681
+ {
682
+ "first": "Christopher",
683
+ "middle": [
684
+ "S"
685
+ ],
686
+ "last": "Mellish",
687
+ "suffix": ""
688
+ }
689
+ ],
690
+ "year": 1989,
691
+ "venue": "",
692
+ "volume": "",
693
+ "issue": "",
694
+ "pages": "",
695
+ "other_ids": {},
696
+ "num": null,
697
+ "urls": [],
698
+ "raw_text": "Gerald Gazdar and Christopher S. Mellish. 1989. Natural Language Processing in PRO- LOG: an Introduction to Computational Lin- guistics. Addison-Wesley Publishing Com- pany, Wokingham, England.",
699
+ "links": null
700
+ },
701
+ "BIBREF4": {
702
+ "ref_id": "b4",
703
+ "title": "Input segmentation of spontaneous speech in janus: a speechto-speech translation system",
704
+ "authors": [
705
+ {
706
+ "first": "Donald",
707
+ "middle": [],
708
+ "last": "Hindle",
709
+ "suffix": ""
710
+ }
711
+ ],
712
+ "year": 1983,
713
+ "venue": "Proceedings of the 21st Annual Meeting of the Association for Computational Linguistics (ACL-83)",
714
+ "volume": "",
715
+ "issue": "",
716
+ "pages": "123--128",
717
+ "other_ids": {},
718
+ "num": null,
719
+ "urls": [],
720
+ "raw_text": "Donald Hindle. 1983. Deterministic parsing of syntactic non-fluencies. In Proceedings of the 21st Annual Meeting of the Association for Computational Linguistics (ACL-83), pages 123-128, Cambridge, Massachusetts, USA. Alon Lavie, Donna Gates, Noah Coccaro, and Lori Levin. 1996. Input segmentation of spontaneous speech in janus: a speech- to-speech translation system. In Proceed- ings of ECAI-96 Workshop on Dialogue Pro- cessing in Spoken Language Systems, Bu- dapest,Hungary.",
721
+ "links": null
722
+ },
723
+ "BIBREF5": {
724
+ "ref_id": "b5",
725
+ "title": "A real-time MT system for translating broadcast captions",
726
+ "authors": [
727
+ {
728
+ "first": "Eric",
729
+ "middle": [],
730
+ "last": "Nyberg",
731
+ "suffix": ""
732
+ },
733
+ {
734
+ "first": "Teruko",
735
+ "middle": [],
736
+ "last": "Mitamura",
737
+ "suffix": ""
738
+ }
739
+ ],
740
+ "year": 1997,
741
+ "venue": "Proceedings of the Sixth Machine Translation Summit",
742
+ "volume": "",
743
+ "issue": "",
744
+ "pages": "51--57",
745
+ "other_ids": {},
746
+ "num": null,
747
+ "urls": [],
748
+ "raw_text": "Eric Nyberg and Teruko Mitamura. 1997. A real-time MT system for translating broad- cast captions. In Proceedings of the Sixth Ma- chine Translation Summit, pages 51-57, San Diego, California, USA.",
749
+ "links": null
750
+ },
751
+ "BIBREF6": {
752
+ "ref_id": "b6",
753
+ "title": "A lexicalist approach to the translation of colloquial text",
754
+ "authors": [
755
+ {
756
+ "first": "Fred",
757
+ "middle": [],
758
+ "last": "Popowich",
759
+ "suffix": ""
760
+ },
761
+ {
762
+ "first": "Davide",
763
+ "middle": [],
764
+ "last": "Turcato",
765
+ "suffix": ""
766
+ },
767
+ {
768
+ "first": "Olivier",
769
+ "middle": [],
770
+ "last": "Laurens",
771
+ "suffix": ""
772
+ },
773
+ {
774
+ "first": "Paul",
775
+ "middle": [],
776
+ "last": "Mcfetridge",
777
+ "suffix": ""
778
+ },
779
+ {
780
+ "first": "J",
781
+ "middle": [
782
+ "Devlan"
783
+ ],
784
+ "last": "Nicholson",
785
+ "suffix": ""
786
+ },
787
+ {
788
+ "first": "Patrick",
789
+ "middle": [],
790
+ "last": "Mcgivern",
791
+ "suffix": ""
792
+ },
793
+ {
794
+ "first": "Maricela",
795
+ "middle": [],
796
+ "last": "Corzo-Pena",
797
+ "suffix": ""
798
+ },
799
+ {
800
+ "first": "Lisa",
801
+ "middle": [],
802
+ "last": "Pidruchney",
803
+ "suffix": ""
804
+ },
805
+ {
806
+ "first": "Scott",
807
+ "middle": [],
808
+ "last": "Macdonald",
809
+ "suffix": ""
810
+ }
811
+ ],
812
+ "year": 1997,
813
+ "venue": "Proceedings of the 7th International Conference on Theoretical and Methodological Issues in Machine Translation",
814
+ "volume": "",
815
+ "issue": "",
816
+ "pages": "76--86",
817
+ "other_ids": {},
818
+ "num": null,
819
+ "urls": [],
820
+ "raw_text": "Fred Popowich, Davide Turcato, Olivier Lau- rens, Paul McFetridge, J. Devlan Nicholson, Patrick McGivern, Maricela Corzo-Pena, Lisa Pidruchney, and Scott MacDonald. 1997. A lexicalist approach to the translation of collo- quial text. In Proceedings of the 7th Interna- tional Conference on Theoretical and Method- ological Issues in Machine Translation, pages 76-86, Santa Fe, New Mexico, USA.",
821
+ "links": null
822
+ },
823
+ "BIBREF7": {
824
+ "ref_id": "b7",
825
+ "title": "Timeconstrained Machine Translation",
826
+ "authors": [
827
+ {
828
+ "first": "Janine",
829
+ "middle": [],
830
+ "last": "Toole",
831
+ "suffix": ""
832
+ },
833
+ {
834
+ "first": "Davide",
835
+ "middle": [],
836
+ "last": "Turcato",
837
+ "suffix": ""
838
+ },
839
+ {
840
+ "first": "Fred",
841
+ "middle": [],
842
+ "last": "Popowich",
843
+ "suffix": ""
844
+ },
845
+ {
846
+ "first": "Dan",
847
+ "middle": [],
848
+ "last": "Fass",
849
+ "suffix": ""
850
+ },
851
+ {
852
+ "first": "Paul",
853
+ "middle": [],
854
+ "last": "Mcfetridge",
855
+ "suffix": ""
856
+ }
857
+ ],
858
+ "year": 1998,
859
+ "venue": "Proceedings of the Third Conference of the Association for Machine Translation in the Americas (AMTA-98)",
860
+ "volume": "",
861
+ "issue": "",
862
+ "pages": "103--112",
863
+ "other_ids": {},
864
+ "num": null,
865
+ "urls": [],
866
+ "raw_text": "Janine Toole, Davide Turcato, Fred Popowich, Dan Fass, and Paul McFetridge. 1998. Time- constrained Machine Translation. In Proceed- ings of the Third Conference of the Associa- tion for Machine Translation in the Ameri- cas (AMTA-98), pages 103-112, Langhorne, Pennsylvania, USA.",
867
+ "links": null
868
+ },
869
+ "BIBREF8": {
870
+ "ref_id": "b8",
871
+ "title": "Disambiguation of proper names in texts",
872
+ "authors": [
873
+ {
874
+ "first": "Nina",
875
+ "middle": [],
876
+ "last": "Wacholder",
877
+ "suffix": ""
878
+ },
879
+ {
880
+ "first": "Yael",
881
+ "middle": [],
882
+ "last": "Ravin",
883
+ "suffix": ""
884
+ },
885
+ {
886
+ "first": "Misook",
887
+ "middle": [],
888
+ "last": "Choi",
889
+ "suffix": ""
890
+ }
891
+ ],
892
+ "year": 1997,
893
+ "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing (ANLP-97)",
894
+ "volume": "",
895
+ "issue": "",
896
+ "pages": "202--208",
897
+ "other_ids": {},
898
+ "num": null,
899
+ "urls": [],
900
+ "raw_text": "Nina Wacholder, Yael Ravin, and Misook Choi. \u2022 1997. Disambiguation of proper names in texts. In Proceedings of the Fifth Confer- ence on Applied Natural Language Processing (ANLP-97), pages 202-208, Washington, DC, USA. Association for Computational Linguis- tics.",
901
+ "links": null
902
+ },
903
+ "BIBREF9": {
904
+ "ref_id": "b9",
905
+ "title": "Automatic processing of proper names in texts",
906
+ "authors": [
907
+ {
908
+ "first": "Francis",
909
+ "middle": [],
910
+ "last": "Wolinski",
911
+ "suffix": ""
912
+ },
913
+ {
914
+ "first": "Frantz",
915
+ "middle": [],
916
+ "last": "Vichot",
917
+ "suffix": ""
918
+ },
919
+ {
920
+ "first": "Bruno",
921
+ "middle": [],
922
+ "last": "Dillet",
923
+ "suffix": ""
924
+ }
925
+ ],
926
+ "year": 1995,
927
+ "venue": "Proceedings of the 7th Conference of the European Chapter of the Asscociation for Computational Linguistics (EACL-95)",
928
+ "volume": "",
929
+ "issue": "",
930
+ "pages": "23--30",
931
+ "other_ids": {},
932
+ "num": null,
933
+ "urls": [],
934
+ "raw_text": "Francis Wolinski, Frantz Vichot, and Bruno Dil- let. 1995. Automatic processing of proper names in texts. In Proceedings of the 7th Conference of the European Chapter of the Asscociation for Computational Linguistics (EACL-95), pages 23-30, Dublin, Ireland.",
935
+ "links": null
936
+ }
937
+ },
938
+ "ref_entries": {
939
+ "FIGREF0": {
940
+ "uris": null,
941
+ "type_str": "figure",
942
+ "num": null,
943
+ "text": "to high school in the u.s.\"\"I went to high school in the usa.\"The tokenizer breaks a line into words. The token-level normalization recognizes and annotates tokens belonging to special categories System architecture.(times, numbers, etc.), expands contractions, recognizes, normalizes and annotates stutters (e.g. b-b-b-bright), identifies compound words and converts number words into digits. E.g.:"
944
+ },
945
+ "FIGREF1": {
946
+ "uris": null,
947
+ "type_str": "figure",
948
+ "num": null,
949
+ "text": "the bill is ... (11) the bill clinton is ... (12) the bill clinton administration is"
950
+ },
951
+ "FIGREF2": {
952
+ "uris": null,
953
+ "type_str": "figure",
954
+ "num": null,
955
+ "text": "professional life of tiger woods has been an open book."
956
+ },
957
+ "TABREF0": {
958
+ "type_str": "table",
959
+ "num": null,
960
+ "content": "<table/>",
961
+ "html": null,
962
+ "text": "Closed caption script fragment."
963
+ },
964
+ "TABREF2": {
965
+ "type_str": "table",
966
+ "num": null,
967
+ "content": "<table/>",
968
+ "html": null,
969
+ "text": "Name recognition evaluation results."
970
+ }
971
+ }
972
+ }
973
+ }