File size: 61,409 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
{
    "paper_id": "I08-1016",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:40:17.712906Z"
    },
    "title": "Entity-driven Rewrite for Multi-document Summarization",
    "authors": [
        {
            "first": "Ani",
            "middle": [],
            "last": "Nenkova",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Pennsylvania",
                "location": {}
            },
            "email": "nenkova@seas.upenn.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "In this paper we explore the benefits from and shortcomings of entity-driven noun phrase rewriting for multi-document summarization of news. The approach leads to 20% to 50% different content in the summary in comparison to an extractive summary produced using the same underlying approach, showing the promise the technique has to offer. In addition, summaries produced using entity-driven rewrite have higher linguistic quality than a comparison non-extractive system. Some improvement is also seen in content selection over extractive summarization as measured by pyramid method evaluation.",
    "pdf_parse": {
        "paper_id": "I08-1016",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "In this paper we explore the benefits from and shortcomings of entity-driven noun phrase rewriting for multi-document summarization of news. The approach leads to 20% to 50% different content in the summary in comparison to an extractive summary produced using the same underlying approach, showing the promise the technique has to offer. In addition, summaries produced using entity-driven rewrite have higher linguistic quality than a comparison non-extractive system. Some improvement is also seen in content selection over extractive summarization as measured by pyramid method evaluation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Two of the key components of effective summarizations are the ability to identify important points in the text and to adequately reword the original text in order to convey these points. Automatic text summarization approaches have offered reasonably well-performing approximations for identifiying important sentences (Lin and Hovy, 2002; Schiffman et al., 2002; Erkan and Radev, 2004; Mihalcea and Tarau, 2004; Daum\u00e9 III and Marcu, 2006) but, not surprisingly, text (re)generation has been a major challange despite some work on sub-sentential modification (Jing and McKeown, 2000; Knight and Marcu, 2000 ; Barzilay and McKeown, 2005 ). An additional drawback of extractive approaches is that estimates for the importance of larger text units such as sentences depend on the length of the sentence (Nenkova et al., 2006) .",
                "cite_spans": [
                    {
                        "start": 319,
                        "end": 339,
                        "text": "(Lin and Hovy, 2002;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 340,
                        "end": 363,
                        "text": "Schiffman et al., 2002;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 364,
                        "end": 386,
                        "text": "Erkan and Radev, 2004;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 387,
                        "end": 412,
                        "text": "Mihalcea and Tarau, 2004;",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 413,
                        "end": 439,
                        "text": "Daum\u00e9 III and Marcu, 2006)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 559,
                        "end": 583,
                        "text": "(Jing and McKeown, 2000;",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 584,
                        "end": 606,
                        "text": "Knight and Marcu, 2000",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 609,
                        "end": 635,
                        "text": "Barzilay and McKeown, 2005",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 800,
                        "end": 822,
                        "text": "(Nenkova et al., 2006)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Sentence simplification or compaction algorithms are driven mainly by grammaticality considerations. Whether approaches for estimating importance can be applied to units smaller than sentences and used in text rewrite in the summary production is a question that remains unanswered. The option to operate on smaller units, which can be mixed and matched from the input to give novel combinations in the summary, offers several possible advantages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Improve content Sometimes sentences in the input can contain both information that is very appropriate to include in a summary and information that should not appear in a summary. Being able to remove unnecessary parts can free up space for better content. Similarly, a sentence might be good overall, but could be further improved if more details about an entity or event are added in. Overall, a summarizer capable of operating on subsentential units would in principle be better at content selection.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Improve readability Linguistic quality evaluation of automatic summaries in the Document Understanding Conference reveals that summarizers perform rather poorly on several readability aspects, including referential clarity. The gap between human and automatic performance is much larger for linguistic quality aspects than for content selection. In more than half of the automatic summaries there were entities for which it was not clear what/who they were and how they were related to the story. The ability to add in descriptions for entities in the summaries could improve the referential clarity of summaries and can be achieved through text rewrite of subsentential units.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "IP issues Another very practical reason to be interested in altering the original wording of sentences in summaries in a news browsing system involves intellectual property issues. Newspapers are not willing to allow verbatim usage of long passages of their articles on commercial websites. Being able to change the original wording can thus allow companies to include longer than one sentence summaries, which would increase user satisfaction .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "These considerations serve as direct motivation for exploring how a simple but effective summarizer framework can accommodate noun phrase rewrite in multi-document summarization of news. The idea is for each sentence in a summary to automatically examine the noun phrases in it and decide if a different noun phrase is more informative and should be included in the sentence in place of the original. Consider the following example:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Sentence 1 The arrest caused an international controversy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The arrest in London of former Chilean dictator Augusto Pinochet caused an international controversy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence 2",
                "sec_num": null
            },
            {
                "text": "Now, consider the situation where we need to express in a summary that the arrest was controversial and this is the first sentence in the summary, and sentence 1 is available in the input (\"The arrest caused an international controversy\"), as well as an unrelated sentence such as \"The arrest in London of former Chilean dictator Augusto Pinochet was widely discussed in the British press\". NP rewrite can allow us to form the rewritten sentence 2, which would be a much more informative first sentence for the summary: \"The arrest in London of former Chilean dictator Augusto Pinochet caused an international controversy\". Similarly, if sentence 2 is available in the input and it is selected in the summary after a sentence that expresses the fact that the arrest took place, it will be more appropriate to rewrite sentence 2 into sentence 1 for inclusion in the summary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence 2",
                "sec_num": null
            },
            {
                "text": "This example shows the potential power of noun phrase rewrite. It also suggests that context will play a role in the rewrite process, since different noun phrase realizations will be most appropriate depending on what has been said in the summary up to the point at which rewrite takes place.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence 2",
                "sec_num": null
            },
            {
                "text": "Frequency and frequency-related measures of importance have been traditionally used in text summarization as indicators of importance (Luhn, 1958; Lin and Hovy, 2000; Conroy et al., 2006) . Notably, a greedy frequency-driven approach leads to very good results in content selection (Nenkova et al., 2006) . In this approach sentence importance is measured as a function of the frequency in the input of the content words in that sentence. The most important sentence is selected, the weight of words in it are adjusted, and sentence weights are recomputed for the new weights beofre selecting the next sentence. This conceptually simple summarization approach can readily be extended to include NP rewrite and allow us to examine the effect of rewrite capabilities on overall content selection and readability. The specific algorithm for frequency-driven summarization and rewrite is as follows:",
                "cite_spans": [
                    {
                        "start": 134,
                        "end": 146,
                        "text": "(Luhn, 1958;",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 147,
                        "end": 166,
                        "text": "Lin and Hovy, 2000;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 167,
                        "end": 187,
                        "text": "Conroy et al., 2006)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 282,
                        "end": 304,
                        "text": "(Nenkova et al., 2006)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 1 Estimate the importance of each content word w i based on its frequency in the input",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "n i , p(w i ) = n i N .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 2 For each sentence S j in the input, estimate its importance based on the words in the sentence w i \u2208 S j : the weight of the sentence is equal to the average weight of content words appearing in it.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "W eight(S j ) = w i \u2208S j p(w i ) |w i \u2208S j |",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 3 Select the sentence with the highest weight.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 4 For each maximum noun phrase N P k in the selected sentence 4.1 For each coreferring noun phrase N P i , such that N P i \u2261 N P k from all input documents, compute a weight W eight(N P i ) = F RW (w r \u2208 N P i ). 4.2 Select the noun phrase with the highest weight and insert it in the sentence in place of the original NP. In case of ties, select the shorter noun phrase.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 5 For each content word in the rewritten sentence, update its weight by setting it to 0.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 6 If the desired summary length has not been reached, go to step 2.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Step 4 is the NP rewriting step. The function F RW is the rewrite composition function that assigns weights to noun phrases based on the importance of words that appear in the noun phrase. The two options that we explore here are F RW \u2261 Avr and F RW \u2261 Sum; the weight of an NP equals the average weight or sum of weights of content words in the NP respectively. The two selections lead to different behavior in rewrite. F RW \u2261 Avr will generally prefer the shorter noun phrases, typically consisting of just the noun phrase head and it will overall tend to reduce the selected sentence. F RW \u2261 Sum will behave quite differently: it will insert relevant information that has not been conveyed by the summary so far (add a longer noun phrase) and will reduce the NP if the words in it already appear in the summary. This means that F RW \u2261 Sum will have the behavior close to what we expect for entity-centric rewrite: inluding more descriptive information at the first mention of the entity, and using shorter references at subsequent mentions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Maximum noun phrases are the unit on which NP rewrite operates. They are defined in a dependency parse tree as the subtree that has as a root a noun such that there is no other noun on the path between it and the root of the tree. For example , there are two maximum NPs, with heads \"police\" and \"Augusto Pinochet\" in the sentence \"British police arrested former Chilean dictator Augusto Pinochet\". The noun phrase \"former chilean dictator\" is not a maximum NP, since there is a noun (augusto pinochet) on the path in the dependency tree between the noun \"dictator\" and the root of the tree. By definition a maximum NP includes all nominal and adjectival premodifiers of the head, as well as postmodifiers such as prepositional phrases, appositions, and relative clauses. This means that maximum NPs can be rather complex, covering a wide range of production rules in a context-free grammar.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "The dependency tree definition of maximum noun phrase makes it easy to see why these are a good unit for subsentential rewrite: the subtree that has the head of the NP as a root contains only modifiers of the head, and by rewriting the noun phrase, the amount of information expressed about the head entity can be varied.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "In our implementation, a context free grammar probabilistic parser (Charniak, 2000) was used to parse the input. The maximum noun phrases were identified by finding sequences of <np>...</np> tags in the parse such that the number of opening and closing tags is equal. Each NP identified by such tag spans was considered as a candidate for rewrite.",
                "cite_spans": [
                    {
                        "start": 67,
                        "end": 83,
                        "text": "(Charniak, 2000)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "Coreference classes A coreference class CR m is the class of all maximum noun phrases in the input that refer to the same entity E m . The general problem of coreference resolution is hard, and is even more complicated for the multi-document summarization case, in which cross-document resolution needs to be performed. Here we make a simplifying assumption, stating that all noun phrases that have the same noun as a head belong to the same coreference class. While we expected that this assumption would lead to some wrong decisions, we also suspected that in most common summarization scenarios, even if there are more than one entities expressed with the same noun, only one of them would be the main focus for the news story and will appear more often across input sentences. References to such main entities will be likely to be picked in a sentence for inclusion in the summary by chance more often than other competeing entities. We thus used the head noun equivalance to form the classes. A post-evaluation inspection of the summaries confirmed that our assumption was correct and there were only a small number of errors in the rewritten summaries that were due to coreference errors, which were greatly outnumbered by parsing errors for example. In a future evaluation, we will evaluate the rewrite module assuming perfect coreference and parsing, in order to see the impact of the core NP-rewrite approach itself.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-rewrite enhanced frequency summarizer",
                "sec_num": "2"
            },
            {
                "text": "The NP rewrite summarization algorithm was applied to the 50 test sets for generic multi-document summarization from the 2004 Document Understanding Conference. Two examples of its operation with F RW \u2261 Avr are shown below. Original.1 While the British government defended the arrest, it took no stand on extradition of Pinochet to Spain. NP-Rewite.1 While the British government defended the arrest in London of former Chilean dictator Augusto Pinochet, it took no stand on extradition of Pinochet to Spain. Original.2 Duisenberg has said growth in the euro area countries next year will be about 2.5 percent, lower than the 3 percent predicted earlier.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP rewrite evaluation",
                "sec_num": "3"
            },
            {
                "text": "European Central Bank, has said growth in the euro area will be about 2.5 percent, lower than just 1 percent in the euro-zone unemployment predicted earlier.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-Rewrite.2 Wim Duisenberg, the head of the new",
                "sec_num": null
            },
            {
                "text": "We can see that in both cases, the NP rewrite pasted into the sentence important additional information. But in the second example we also see an error that was caused by the simplifying assumption for the creation of the coreference classes according to which the percentage of unemployment and growth have been put in the same class.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-Rewrite.2 Wim Duisenberg, the head of the new",
                "sec_num": null
            },
            {
                "text": "In order to estimate how much the summary is changed because of the use of the NP rewrite, we computed the unigram overlap between the original extractive summary and the NP-rewrite summary. As expected, F F W \u2261 Sum leads to bigger changes and on average the rewritten summaries contained only 54% of the unigrams from the extractive summaries; for F RW \u2261 Avr, there was a smaller change between the extractive and the rewritten summary, with 79% of the unigrams being the same between the two summaries.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NP-Rewrite.2 Wim Duisenberg, the head of the new",
                "sec_num": null
            },
            {
                "text": "Noun phrase rewrite has the potential to improve the referential clarity of summaries, by inserting in the sentences more information about entities when such is available. It is of interest to see how the rewrite version of the summarizer would compare to the extractive version, as well as how its linguistic quality compares to that of other summarizers that participated in DUC. Four summarizers were evaluated: peer 117, which was a system that used generation techniques to produce the summary and was the only real non-extractive summarizer participant at DUC 2004 (Vanderwende et al., 2004) ; the extractive frequency summarizer, and the two versions of the rewrite algorithm (Sum and Avr). The evaluated rewritten summaries had potential errors coming from different sources, such as coreference resolution, parsing errors, sentence splitting errors, as well as errors coming directly from rewrite, in which an unsuitable NP is chosen to be included in the summary. Improvements in parsing for example could lead to better overall rewrite results, but we evaluated the output as is, in order to see what is the performance that can be expected in a realistic setting for fully automatic rewrite. The evaluation was done by five native English speakers, using the five DUC linguistic quality questions on grammaticality (Q 1 ), repetition (Q 2 ), referential clarity (Q 3 ), focus (Q 4 ) and coherence (Q 5 ). Five evaluators were used so that possible idiosyncratic preference of a single evaluator could be avoided. Each evaluator evaluated all five summaries for each test set, presented in a random order. The results are shown in table 3.1. Each summary was evaluated for each of the properties on a scale from 1 to 5, with 5 being very good with respect to the quality and 1, very bad.",
                "cite_spans": [
                    {
                        "start": 572,
                        "end": 598,
                        "text": "(Vanderwende et al., 2004)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic quality evaluation",
                "sec_num": "3.1"
            },
            {
                "text": "SYSTEM Q 1 Q 2 Q 3 Q 4 Q 5 SUM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic quality evaluation",
                "sec_num": "3.1"
            },
            {
                "text": "Comparing NP rewrite to extraction Here we would be interested in comparing the extractive frequency summarizer (SUM Id ), and the two version of systems that rewrite noun phrases: SUM Avr (which changes about 20% of the text) and SUM Sum (which changes about 50% of the text). The general trend that we see for all five dimensions of linguistic quality is that the more the text is automatically altered, the worse the linguistic quality of the summary gets. In particular, the grammaticality of the summaries drops significantly for the rewrite systems. The increase of repetition is also significant between SUM Id and SUM Sum . Error analysis showed that sometimes increased repetition occurred in the process of rewrite for the following reason: the context weight update for words is done only after each noun phrase in the sentence has been rewritten. Occasionally, this led to a situation in which a noun phrase was augmented with information that was expressed later in the original sentence. The referential clarity of rewritten summaries also drops significantly, which is a rather disappointing result, since one of the motivations for doing noun phrase rewrite was the desire to improve referential clarity by adding information where such is necessary. One of the problems here is that it is almost impossible for human evaluators to ignore grammatical errors when judging referential clarity. Grammatical errors decrease the overall readability and a summary that is given a lower grammaticality score tends to also receive lower referential clarity score. This fact of quality perception is a real challenge for summarizeration systems that move towards abstraction and alter the original wording of sentences since certainly automatic approaches are likely to introduce ingrammaticalities.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic quality evaluation",
                "sec_num": "3.1"
            },
            {
                "text": "Comparing SUM Sum and peer 117 We now turn to the comparison of between SUM Sum and the generation based system 117. This system is unique among the DUC 2004 systems, and the only one that year that experimented with generation techniques for summarization. System 117 is verbdriven: it analizes the input in terms of predicateargument triples and identifies the most important triples. These are then verbalized by a generation system originally developed as a realization component in a machine translation engine. As a result, peer 117 possibly made even more changes to the original text then the NP-rewrite system. The results of the comparison are consistent with the observation that the more changes are made to the original sentences, the more the readability of summaries decreases. SUM Sum is significantly better than peer 117 on all five readability aspects, with notable difference in the grammaticality and referential quality, for which SUM Sum outperforms peer 117 by a full point. This indicates that NPs are a good candidate granularity for sentence changes and it can lead to substantial altering of the text while preserving significantly better overall readability.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Linguistic quality evaluation",
                "sec_num": "3.1"
            },
            {
                "text": "We now examine the question of how the content in the summaries changed due to the NP-rewrite, since improving content selection was the other motivation for exploring rewrite. In particular, we are interested in the change in content selection between SUM Sum and SUM Id (the extractive version of the summarizer). We use SUM Sum for the comparison because it led to bigger changes in the summary text compared to the purely extractive version. We used the pyramid evaluation method: four human summaries for each input were manually analyzed to identify shared content units. The weight of each content unit is equal to the number of model summaries that express it. The pyramid score of an automatic summary is equal to the weight of the content units expressed in the summary divided by the weight of an ideally informative summary of the same length (the content unit identification is again done manually by an annotator).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "Of the 50 test sets, there were 22 sets in which the NP-rewritten version had lower pyramid scores than the extractive version of the summary, 23 sets in which the rewritten summaries had better scores, and 5 sets in which the rewritten and extractive summaries had exactly the same scores. So we see that in half of the cases the NP-rewrite actually improved the content of the summary. The summarizer version that uses NP-rewrite has overall better content selection performance than the purely extractive system. The original pyramid score increased from 0.4039 to 0.4169 for the version with rewrite. This improvement is not significant, but shows a trend in the expected direction of improvement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "The lack of significance in the improvement is due to large variation in performance: when np rewrite worked as expected, content selection improved. But on occasions when errors occurred, both readability and content selection were noticeably compromised. Here is an example of summaries for the same input in which the NP-rewritten version had better content. After each summary, we list the content units from the pyramid content analysis that were expressed in the summary. The weight of each content unit is given in brackets before the label of the unit and content units that differ between the extractive and rewritten version are displayed in italic. The rewritten version conveys high weight content units that do not appear in the extractive version, with weights 4 (maximum weight here) and 3 respectively.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "Extractive summary Italy's Communist Refounding Party rejected Prime Minister Prodi's proposed 1999 budget. By one vote, Premier Romano Prodi's center-left coalition lost a confidence vote in the Chamber of Deputies Friday, and he went to the presidential palace to rsign. Three days after the collapse of Premier Romano Prodi's center-left government, Italy's president began calling in political leaders Monday to try to reach a consensus on a new government. Prodi has said he would call a confidence vote if he lost the Communists' support.\" I have always acted with coherence,\" Prodi said before a morning meeting with President Oscar Luigi.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "(4) Prodi lost a confidence vote (4) The Refounding Party is Italy's Communist Party (4) The Refounding Party rejected the government's budget",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "(3) The dispute is over the 1999 budget (2) Prodi's coalition was center-left coalition (2) The confidence vote was lost by only 1 vote (1) Prodi is the Italian Prime Minister (1) Prodi wants a confidence vote from Parliament NP-rewrite version Communist Refounding, a fringe group of hard-line leftists who broke with the minstream Communists after they overhauled the party following the collapse of Communism in Eastern Europe rejected Prime Minister Prodi's proposed 1999 budget. By only one vote, the center-left prime minister of Italy, Romano Prodi, lost The vote in the lower chamber of Parliament 313 against the confidence motion brought by the government to 312 in favor in Parliament Friday and was toppled from power. President Oscar Luigi Scalfaro, who asked him to stay on as caretaker premier while the head of state decides whether to call elections. Below is another example, showing the worse deterioration of the rewritten summary compared to the extractive one, both in terms of grammaticality and content. Here, the problem with repetition during rewrite arises: the same person is mentioned twice in the sentence and at both places the same overly long description is selected during rewrie, rendering the sentence practically unreadable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content selection evaluation",
                "sec_num": "3.2"
            },
            {
                "text": "McKinney lured Shepard from the bar by saying they too were gay and one of their girlfriends said Shepard had embarrassed one of the men by making a pass at him. 1,000 people mourned Matthew Shepherd, the gay University of Wyoming student who was severely beaten and left to die tied to a fence. With passersby spontaneously joining the protest group, two women held another sign that read,\" No Hate Crimes in Wyoming.\" Two candlelight vigils were held Sunday night. Russell Anderson, 21, and Aaron McKinney, 21, were charged with attempted murder.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(2) There were candlelight vigils in support for the victim",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(1) Russell Henderson and Aaron McKinney are the names of the people responsible for the death NP-rewrite version Police said Henderson and McKinney lured the The slight, soft-spoken 21year-old Shepard, a freshman at the University of Wyoming, who became an overnight symbol of antigay violence after he was found dangling from the fence by a passerby from a bar by saying they too were gay and one of their girlfriends said the The slight, soft-spoken 21-year-old Shepard, a freshman at the University of Wyoming, who became an overnight symbol of anti-gay violence after he was found dangling from the fence by a passerby had embarrassed one of the new ads in that supposedly hate-free crusade.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(4) The victim was a student at the University of Wyoming",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(3)The nearly lifeless body was tied to a fence (1) A passerby found the victim",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(1) Russell Henderson and Aaron McKinney are the names of the people responsible for the death",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "(1) The victim was 22-year old",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "Even from this unsuccessful attempt for rewrite we can see how changes of the original text can be desirable, since some of the newly introduced information is in fact suitable for the summary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extractive summary Police said Henderson and",
                "sec_num": null
            },
            {
                "text": "We have demonstrated that an entity-driven approach to rewrite in multi-document summarization can lead to considerably different summary, in terms of content, compared to the extractive version of the same system. Indeed, the difference leads to some improvement measurable in terms of pyramid method evaluation. The approach also significantly outperforms in linguistic quality a non-extractive event-centric system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "4"
            },
            {
                "text": "Results also show that in terms of linguistic quality, extractive systems will be curently superior to systems that alter the original wording from the input. Sadly, extractive and abstractive systems are evaluated together and compared against each other, putting pressure on system developers and preventing them from fully exploring the strengths of generation techniques. It seems that if researchers in the field are to explore non-extractive methods, they would need to compare their systems separately from extractive systems, at least in the beginning exploration stages. The development of nonextractive approaches in absolutely necessary if automatic summarization were to achieve levels of performance close to human, given the highly abstractive form of summaries written by people.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "4"
            },
            {
                "text": "Results also indicate that both extractive and nonextractive systems perform rather poorly in terms of the focus and coherence of the summaries that they produce, identifying macro content planning as an important area for summarization.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "4"
            },
            {
                "text": "(4) The victim was a student at the University of Wyoming (4) The victim was brutally beaten (4) The victim was openly gay (3) The crime was widely denounced (3) The nearly lifeless body was tied to a fence (3) The victim died (3) The victim was left to die (2) The men were arrested on charges of kidnapping and attempted first degree murder",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Sentence fusion for multidocument news summarization",
                "authors": [
                    {
                        "first": "Regina",
                        "middle": [],
                        "last": "Barzilay",
                        "suffix": ""
                    },
                    {
                        "first": "Kathleen",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Computational Linguistics",
                "volume": "",
                "issue": "3",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Regina Barzilay and Kathleen McKeown. 2005. Sen- tence fusion for multidocument news summarization. Computational Linguistics, 31(3).",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "A maximum-entropy-inspired parser",
                "authors": [
                    {
                        "first": "Eugene",
                        "middle": [],
                        "last": "Charniak",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "NAACL-2000",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eugene Charniak. 2000. A maximum-entropy-inspired parser. In NAACL-2000.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Topic-focused multi-document summarization using an approximate oracle score",
                "authors": [
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Conroy",
                        "suffix": ""
                    },
                    {
                        "first": "Judith",
                        "middle": [],
                        "last": "Schlesinger",
                        "suffix": ""
                    },
                    {
                        "first": "Dianne O'",
                        "middle": [],
                        "last": "Leary",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of ACL, companion volume",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "John Conroy, Judith Schlesinger, and Dianne O'Leary. 2006. Topic-focused multi-document summarization using an approximate oracle score. In Proceedings of ACL, companion volume.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Bayesian queryfocused summarization",
                "authors": [
                    {
                        "first": "Hal",
                        "middle": [],
                        "last": "Daum\u00e9",
                        "suffix": ""
                    },
                    {
                        "first": "Iii",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the Conference of the Association for Computational Linguistics (ACL)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hal Daum\u00e9 III and Daniel Marcu. 2006. Bayesian query- focused summarization. In Proceedings of the Confer- ence of the Association for Computational Linguistics (ACL), Sydney, Australia.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Lexrank: Graph-based centrality as salience in text summarization",
                "authors": [
                    {
                        "first": "Gunes",
                        "middle": [],
                        "last": "Erkan",
                        "suffix": ""
                    },
                    {
                        "first": "Dragomir",
                        "middle": [],
                        "last": "Radev",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Journal of Artificial Intelligence Research",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gunes Erkan and Dragomir Radev. 2004. Lexrank: Graph-based centrality as salience in text summa- rization. Journal of Artificial Intelligence Research (JAIR).",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Cut and paste based text summarization",
                "authors": [
                    {
                        "first": "Hongyan",
                        "middle": [],
                        "last": "Jing",
                        "suffix": ""
                    },
                    {
                        "first": "Kathleen",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 1st Conference of the North American Chapter of the Association for Computational Linguistics (NAACL'00)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hongyan Jing and Kathleen McKeown. 2000. Cut and paste based text summarization. In Proceedings of the 1st Conference of the North American Chap- ter of the Association for Computational Linguistics (NAACL'00).",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Statistics-based summarization -step one: Sentence compression",
                "authors": [
                    {
                        "first": "Kevin",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Marcu",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceeding of The American Association for Artificial Intelligence Conference (AAAI-2000)",
                "volume": "",
                "issue": "",
                "pages": "703--710",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kevin Knight and Daniel Marcu. 2000. Statistics-based summarization -step one: Sentence compression. In Proceeding of The American Association for Artificial Intelligence Conference (AAAI-2000), pages 703-710.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "The automated acquisition of topic signatures for text summarization",
                "authors": [
                    {
                        "first": "Chin-Yew",
                        "middle": [],
                        "last": "Lin",
                        "suffix": ""
                    },
                    {
                        "first": "Eduard",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 18th conference on Computational linguistics",
                "volume": "",
                "issue": "",
                "pages": "495--501",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chin-Yew Lin and Eduard Hovy. 2000. The automated acquisition of topic signatures for text summarization. In Proceedings of the 18th conference on Computa- tional linguistics, pages 495-501.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Automated multi-document summarization in neats",
                "authors": [
                    {
                        "first": "Chin-Yew",
                        "middle": [],
                        "last": "Lin",
                        "suffix": ""
                    },
                    {
                        "first": "Eduard",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the Human Language Technology Conference (HLT2002 )",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chin-Yew Lin and Eduard Hovy. 2002. Automated multi-document summarization in neats. In Proceed- ings of the Human Language Technology Conference (HLT2002 ).",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "The automatic creation of literature abstracts",
                "authors": [
                    {
                        "first": "H",
                        "middle": [
                            "P"
                        ],
                        "last": "Luhn",
                        "suffix": ""
                    }
                ],
                "year": 1958,
                "venue": "IBM Journal of Research and Development",
                "volume": "2",
                "issue": "2",
                "pages": "159--165",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. P. Luhn. 1958. The automatic creation of literature abstracts. IBM Journal of Research and Development, 2(2):159-165.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Do summaries help? a task-based evaluation of multi-document summarization",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Passonneau",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Elson",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Nenkova",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hirschberg",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "SIGIR",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. McKeown, R. Passonneau, D. Elson, A. Nenkova, and J. Hirschberg. 2005. Do summaries help? a task-based evaluation of multi-document summariza- tion. In SIGIR.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Textrank: Bringing order into texts",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Mihalcea",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Tarau",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of EMNLP 2004",
                "volume": "",
                "issue": "",
                "pages": "404--411",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "R. Mihalcea and P. Tarau. 2004. Textrank: Bringing or- der into texts. In Proceedings of EMNLP 2004, pages 404-411.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "A compositional context sensitive multidocument summarizer: exploring the factors that influence summarization",
                "authors": [
                    {
                        "first": "Ani",
                        "middle": [],
                        "last": "Nenkova",
                        "suffix": ""
                    },
                    {
                        "first": "Lucy",
                        "middle": [],
                        "last": "Vanderwende",
                        "suffix": ""
                    },
                    {
                        "first": "Kathleen",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of SIGIR",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ani Nenkova, Lucy Vanderwende, and Kathleen McKe- own. 2006. A compositional context sensitive multi- document summarizer: exploring the factors that influ- ence summarization. In Proceedings of SIGIR.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Experiments in multidocument summarization",
                "authors": [
                    {
                        "first": "Barry",
                        "middle": [],
                        "last": "Schiffman",
                        "suffix": ""
                    },
                    {
                        "first": "Ani",
                        "middle": [],
                        "last": "Nenkova",
                        "suffix": ""
                    },
                    {
                        "first": "Kathleen",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the Human Language Technology Conference",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Barry Schiffman, Ani Nenkova, and Kathleen McKeown. 2002. Experiments in multidocument summarization. In Proceedings of the Human Language Technology Conference.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Event-centric summary generation",
                "authors": [
                    {
                        "first": "Lucy",
                        "middle": [],
                        "last": "Vanderwende",
                        "suffix": ""
                    },
                    {
                        "first": "Michele",
                        "middle": [],
                        "last": "Banko",
                        "suffix": ""
                    },
                    {
                        "first": "Arul",
                        "middle": [],
                        "last": "Menezes",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the Document Understanding Conference (DUC'04)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lucy Vanderwende, Michele Banko, and Arul Menezes. 2004. Event-centric summary generation. In Pro- ceedings of the Document Understanding Conference (DUC'04).",
                "links": null
            }
        },
        "ref_entries": {}
    }
}