{
    "paper_id": "I08-1006",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:42:04.509980Z"
    },
    "title": "Story Link Detection based on Dynamic Information Extending",
    "authors": [
        {
            "first": "Xiaoyan",
            "middle": [],
            "last": "Zhang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National University of Defense Technology No",
                "location": {
                    "addrLine": "137, Yanwachi Street",
                    "postCode": "410073",
                    "settlement": "Changsha",
                    "region": "Hunan",
                    "country": "P.R.China"
                }
            },
            "email": "zhangxiaoyan@nudt.edu.cn"
        },
        {
            "first": "Ting",
            "middle": [],
            "last": "Wang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National University of Defense Technology No",
                "location": {
                    "addrLine": "137, Yanwachi Street",
                    "postCode": "410073",
                    "settlement": "Changsha",
                    "region": "Hunan",
                    "country": "P.R.China"
                }
            },
            "email": "tingwang@nudt.edu.cn"
        },
        {
            "first": "Huowang",
            "middle": [],
            "last": "Chen",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "National University of Defense Technology No",
                "location": {
                    "addrLine": "137, Yanwachi Street",
                    "postCode": "410073",
                    "settlement": "Changsha",
                    "region": "Hunan",
                    "country": "P.R.China"
                }
            },
            "email": "hwchen@nudt.edu.cn"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Topic Detection and Tracking refers to automatic techniques for locating topically related materials in streams of data. As the core technology of it, story link detection is to determine whether two stories are about the same topic. To overcome the limitation of the story length and the topic dynamic evolution problem in data streams, this paper presents a method of applying dynamic information extending to improve the performance of link detection. The proposed method uses previous latest related story to extend current processing story, generates new dynamic models for computing the similarity between the current two stories. The work is evaluated on the TDT4 Chinese corpus, and the experimental results indicate that story link detection using this method can make much better performance on all evaluation metrics.",
    "pdf_parse": {
        "paper_id": "I08-1006",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Topic Detection and Tracking refers to automatic techniques for locating topically related materials in streams of data. As the core technology of it, story link detection is to determine whether two stories are about the same topic. To overcome the limitation of the story length and the topic dynamic evolution problem in data streams, this paper presents a method of applying dynamic information extending to improve the performance of link detection. The proposed method uses previous latest related story to extend current processing story, generates new dynamic models for computing the similarity between the current two stories. The work is evaluated on the TDT4 Chinese corpus, and the experimental results indicate that story link detection using this method can make much better performance on all evaluation metrics.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Topic Detection and Tracking (TDT) refers to a variety of automatic techniques for discovering and threading together topically related material in streams of data such as newswire or broadcast news. Such automatic discovering and threading could be quite valuable in many applications where people need timely and efficient access to large quantities of information. Supported by such technology, users could be alerted with new events and new information about known events. By examining one or two stories, users define the topic described in them. Then with TDT technologies they could go to a large archive, find all the stories about this topic, and learn how it evolved.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Story link detection, as the core technology defined in TDT, is a task of determining whether two stories are about the same topic, or topically linked. In TDT, a topic is defined as \"something that happens at some specific time and place\" . Link detection is considered as the basis of other event-based TDT tasks, such as topic tracking, topic detection, and first story detection. Since story link detection focuses on the streams of news stories, it has its specific characteristic compared with the traditional Information Retrieval (IR) or Text Classification task: new topics usually come forth frequently during the procedure of the task, but nothing about them is known in advance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The paper is organized as follows: Section 2 describes the procedure of story link detection; Section 3 introduces the related work in story link detection; Section 4 explains a baseline method which will be compared with the proposed dynamic method in Section 5; the experiment results and analysis are given in Section 6; finally, Section 7 concludes the paper.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(s i1 , s i2 ), s i1 \u2208 S j , s i2 \u2208 S k , 1 \u2264 i \u2264 m, 1 \u2264 j \u2264 k \u2264 n.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The system is required to make decisions on all story pairs to judge if they describe a same topic.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We formalize the procedure for processing a pair of stories as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "For a story pair P i = (s i1 , s i2 ):",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "1. Get background corpus B i of P i . According to the supposed application situation and the custom that people usually look ahead when they browse something, in TDT research the system is usually allowed to look ahead N (usually 10) source files when deciding whether the current pair is linked. So",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "B i = {S 1 , S 2 , S 3 , . . . , S l }",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": ", where l = k + 10 , s i2 \u2208 S k and (k + 10) \u2264 n n , s i2 \u2208 S k and (k + 10) > n .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
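            {
                "text": "A minimal Python sketch of the look-ahead bound defined above (an illustration added for clarity, not code from the original system; background_size is a hypothetical name):\n\ndef background_size(k, n, lookahead=10):\n    # k: index of the source file containing s_i2; n: number of source files.\n    # The system may look ahead at most `lookahead` source files, so the\n    # background B_i covers source files S_1 .. S_l with l = min(k + 10, n).\n    return min(k + lookahead, n)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },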
            {
                "text": "(M i1 , M i2 ) for two stories in P i . M = {(f s , w s ) | s \u2265 1},",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Produce the representation models",
                "sec_num": "2."
            },
            {
                "text": "where f s is a feature extracted from a story and w s is the weight of the feature in the story. They are computed with some parameters counted from current story and the background.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Produce the representation models",
                "sec_num": "2."
            },
            {
                "text": "3. Choose a similarity function F and computing the similarity between two models. If t is a predefined threshold and F (M i1 , M i2 ) \u2265 t, then stories in P i are topically linked.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Produce the representation models",
                "sec_num": "2."
            },
            {
                "text": "A number of works has been developed on story link detection. It can be classified into two categories: vector-based methods and probabilistic-based methods. The vector space model is widely used in IR and Text Classification research. Cosine similarity between document vectors with tf * idf term weighting (Connell et al., 2004) (Chen et al., 2004) (Allan et al., 2003) is also one of the best technologies for link detection. We have examined a number of similarity measures in story link detection, including cosine, Hellinger and Tanimoto, and found that cosine similarity produced outstanding results. Furthermore, (Allan et al., 2000) also confirms this conclusion among cosine, weighted sum, language modeling and Kullback-Leibler divergence in its story link detection research.",
                "cite_spans": [
                    {
                        "start": 308,
                        "end": 330,
                        "text": "(Connell et al., 2004)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 331,
                        "end": 350,
                        "text": "(Chen et al., 2004)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 351,
                        "end": 371,
                        "text": "(Allan et al., 2003)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 621,
                        "end": 641,
                        "text": "(Allan et al., 2000)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "3"
            },
            {
                "text": "Probabilistic-based method has been proven to be very effective in several IR applications. One of its attractive features is that it is firmly rooted in the theory of probability, thereby allowing the researcher to explore more sophisticated models guided by the theoretical framework. (Nallapati and Allan, 2002) (Lavrenko et al., 2002) (Nallapati, 2003) all apply probability models (language model or relevance model) for story link detection. And the experiment results indicate that the performances are comparable with those using traditional vector space models, if not better.",
                "cite_spans": [
                    {
                        "start": 287,
                        "end": 314,
                        "text": "(Nallapati and Allan, 2002)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 315,
                        "end": 338,
                        "text": "(Lavrenko et al., 2002)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 339,
                        "end": 356,
                        "text": "(Nallapati, 2003)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "3"
            },
            {
                "text": "On the basis of vector-based methods, this paper represents a method of dynamic information extending to improve the performance of story link detection. It makes use of the previous latest topically related story to extend the vector model of current being processed story. New dynamic models are generated for computing the similarity between two stories in current pair. This method resolves the problems of information shortage in stories and topic dynamic evolution in streams of data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "3"
            },
            {
                "text": "Before introducing the proposed method, we first describe a method which is implemented with vector model and cosine similarity function. This straight and classic method is used as a baseline to be compared with the proposed method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "3"
            },
            {
                "text": "The related work in story link detection shows that vector representation model with cosine function can be used to build the state-of-the-art story link detection systems. Many research organizations take this as their baseline system (Connell et al., 2004) (Yang et al., 2002) . In this paper, we make a similar choice.",
                "cite_spans": [
                    {
                        "start": 236,
                        "end": 258,
                        "text": "(Connell et al., 2004)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 259,
                        "end": 278,
                        "text": "(Yang et al., 2002)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baseline Story Link Detection",
                "sec_num": "4"
            },
            {
                "text": "The baseline method represents each story as a vector in term space, where the coordinates represent the weights of the term features in the story. Each vector terms (or feature) is a single word plus its tag which is produced by a segmenter and part of speech tagger for Chinese. So if two tokens with same spelling are tagged with different tags, they will be taken as different terms (or features). It is notable that in it is independent between processing any two comparisons the baseline method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Baseline Story Link Detection",
                "sec_num": "4"
            },
            {
                "text": "A preprocessing has been performed for TDT Chinese corpus. For each story we tokenize the text, tag the generated tokens, remove stop words, and then get a candidate set of terms for its vector model. After that, the term-frequency for each token in the story and the length of the story will also be acquired. In the baseline and dynamic methods, both training and test data are preprocessed in this way.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preprocessing",
                "sec_num": "4.1"
            },
            {
                "text": "The segmenter and tagger used here is ICTCLAS 1 . The stop word list is composed of 507 terms. Although the term feature in the vector representation is the word plus its corresponding tag, we will ignore the tag information when filtering stop words, because almost all the words in the list should be filtered out whichever part of speech is used to tag them.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preprocessing",
                "sec_num": "4.1"
            },
            {
                "text": "One important issue in the vector model is weighting the individual terms (features) that occur in the vector. Most IR systems employed the traditional tf * idf weighting, which also provide the base for the baseline and dynamic methods in this paper. Furthermore, this paper adopts a dynamic way to compute the tf * idf weighting:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature Weighting",
                "sec_num": "4.2"
            },
            {
                "text": "w i (f i , d) = tf (f i , d) * idf (f i ) tf = t/(t + 0.5 + 1.5dl/dl avg ) idf = log((N + 0.5)/df )/log(N + 1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature Weighting",
                "sec_num": "4.2"
            },
            {
                "text": "where t is the term frequency in a story, dl is the length of a story, dl avg is the average length of stories in the background corpus, N is the number of stories in the corpus, df is the number of the stories containing the term in the corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature Weighting",
                "sec_num": "4.2"
            },
            {
                "text": "The tf shows how much a term represents the story, while the idf reflects the distinctive ability of distinguishing current story from others. The dynamic attribute of the tf * idf weighting lies in the dynamic computation of dl avg , N and df . The background corpus used for statistics is incremental. As more story pairs are processed, more source files could be seen, and the background is expanding as well. Whenever the size of the background has changed, the values of dl avg , N and df will update accordingly. We call this as incremental tf * idf weighting. A story might have different term vectors in different story pairs.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature Weighting",
                "sec_num": "4.2"
            },
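            {
                "text": "A minimal Python sketch of the incremental tf*idf weighting above (an illustration, not the authors' implementation; tfidf_weight is a hypothetical name). The background statistics dl_avg, N and df are assumed to be recomputed whenever the background corpus grows:\n\nimport math\n\ndef tfidf_weight(t, dl, dl_avg, N, df):\n    # t: term frequency in the story; dl: story length;\n    # dl_avg: average story length in the background corpus;\n    # N: number of background stories; df: stories containing the term.\n    tf = t / (t + 0.5 + 1.5 * dl / dl_avg)\n    idf = math.log((N + 0.5) / df) / math.log(N + 1)\n    return tf * idf",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Feature Weighting",
                "sec_num": "4.2"
            },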
            {
                "text": "Another important issue in the vector model is determining the right function to measure the similarity between two vectors. We have firstly tried three functions: cosine, Hellinger and Tanimoto, among which cosine function performs best for its substantial advantages and the most stable performance. So we consider the cosine function in baseline method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarity Function",
                "sec_num": "4.3"
            },
            {
                "text": "Cosine similarity, as a classic measure and consistent with the vector representation, is simply an inner product of two vectors where each vector is normalized to the unit length. It represents cosine of the angle between two vector models",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarity Function",
                "sec_num": "4.3"
            },
            {
                "text": "M 1 = {(f 1i , w 1i ), i \u2265 1} and M 2 = {(f 2i , w 2i ), i \u2265 1}. cos(M 1 , M 2 ) = (\u03a3(w 1i \u00d7 w 2i ))/ (\u03a3w 2 1i )(\u03a3w 2 2i )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarity Function",
                "sec_num": "4.3"
            },
            {
                "text": "Cosine similarity tends to perform best at full dimensionality, as in the case of comparing two stories. Performance degrades as one of the vectors becomes shorter. Because of the built-in length normalization, cosine similarity is less dependent on specific term weighting.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarity Function",
                "sec_num": "4.3"
            },
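            {
                "text": "A minimal Python sketch of the cosine measure over sparse term vectors, consistent with the formula above (an illustration, not the authors' code; cosine_sim is a hypothetical name):\n\nimport math\n\ndef cosine_sim(m1, m2):\n    # m1, m2: dicts mapping a term feature to its tf*idf weight.\n    dot = sum(w * m2[f] for f, w in m1.items() if f in m2)\n    norm1 = math.sqrt(sum(w * w for w in m1.values()))\n    norm2 = math.sqrt(sum(w * w for w in m2.values()))\n    if norm1 == 0.0 or norm2 == 0.0:\n        return 0.0\n    return dot / (norm1 * norm2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarity Function",
                "sec_num": "4.3"
            },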
            {
                "text": "Investigation on the TDT corpus shows that news stories are usually short, which makes that their representation models are too sparse to reflect topics described in them. A possible method of solving this problem is to extend stories with other related information. The information can be synonym in a dictionary, related documents in external corpora, etc. However, extending with synonym is mainly adding repetitious information, which can not define the topics more clearly. On the other hand, topicbased research should be real-sensitive. The corpora in the same period as the test corpora are not easy to gather, and the number of related documents in previous period is very few. So it is also not feasible to extend the stories with related documents in other corpora. We believe that it is more reasonable that the best extending information may be the story corpus itself. Following the TDT evaluation requirement, we will not use entire corpus at a time. Instead, when we process current pair of stories, we utilize all the stories before the current pair in the story corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation",
                "sec_num": "5.1"
            },
            {
                "text": "In addition, topics described by stories usually evolve along with time. A topic usually begins with a seminal event. After that, it will focus mainly on the consequence of the event or other directly related events as the time goes. When the focus in later stories has changed, the words used in them may change remarkably. Keeping topic descriptions unchanged from the beginning to the end is obviously improper. So topic representation models should also be updated as the topic emphases in stories has changed. Formerly we have planed to use related information to extend a story to make up the information shortage in stories. Considering more about topic evolution, we extend a story with its latest related story. In addition, up to now almost all research in story link detection takes the hypothesis that whether two stories in one pair are topically linked is independent of that in another pair. But we realize that if two stories in a pair describe a same topic, one story can be taken as related information to extend another story in later pairs. Compared with extending with more than one story, extending only with its latest related story can keep representation of the topic as fresh as possible, and avoid extending too much similar information at the same time, which makes the length of the extended vector too long. Since the vector will be renormalized, a too big length means evidently decreasing the weight of an individual feature which will instead cause a lower cosine similarity. This idea has also been confirmed by the experiment showing that the performance extending with one latest related story is superior to that extending with more than one related story, as described in section 6.3. The experiment results also show that this method of dynamic information extending apparently improves the performance of story link detection.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation",
                "sec_num": "5.1"
            },
            {
                "text": "The proposed dynamic method is actually the baseline method plus dynamic information extending. The preprocessing, feature weighting and similarity computation in dynamic method are similar as those in baseline method. However, the vector representation for a story here is dynamic. This method needs a training corpus to get the extending threshold deciding whether a story should be used to extend another story in a pair. We split the sequence of time-ordered story pairs into two parts: the former is for training and the later is for testing. The following is the processing steps:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "1. Preprocess to create a set of terms for representing each story as a term vector, which is same as baseline method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "2. Run baseline system on the training corpora and find an optimum topically link threshold.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "We take this threshold as extending threshold.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "The topically link threshold used for making link decision in dynamic method is another predefined one.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "3. Along with the ordered story pairs in the test corpora, repeat a) and b):",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "(a) When processing a pair of stories P i = (s i1 , s i2 ), if s i1 or s i2 has an extending story, then update the corresponding vector model with its related story to a new dynamic one. The generation procedure of dynamic vector will be described in next subsection. (b) Computing the cosine similarity between the two dynamic term vectors. If it exceeds the extending threshold, then s i1 and s i2 are the latest related stories for each other. If one story already has an extending story, replace the old one with the new one. So a story always has no more than one extending story at any time. If the similarity exceeds topically link threshold, s i1 and s i2 are topically linked.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
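            {
                "text": "The per-pair logic of steps (a) and (b) can be sketched in Python as follows (an illustration of the description above, assuming the cosine_sim sketch from Section 4.3 and a helper extend_story that applies the dynamic-vector generation of the next subsection; all names are hypothetical):\n\ndef process_pair(s1, s2, extending, t_extend, t_link, extend_story):\n    # extending: dict mapping a story id to its single latest extending story.\n    m1 = extend_story(s1, extending.get(s1.id))\n    m2 = extend_story(s2, extending.get(s2.id))\n    sim = cosine_sim(m1, m2)\n    if sim >= t_extend:\n        # Each story becomes the other's latest related story,\n        # replacing any older extending story.\n        extending[s1.id] = s2\n        extending[s2.id] = s1\n    return sim >= t_link  # True if the pair is judged topically linked",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },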
            {
                "text": "From the above description, it is obvious that dynamic method needs two thresholds, one for making extending decision and the other for making link decision. Since in this paper we will focus on the optimum performance of systems, the first threshold is more important. But topically link threshold is also necessary to be properly defined to approach a better performance. In the baseline method, term vectors are dynamic because of the incremental tf * idf weighting. However, dynamic information extending is another more important reason in the dynamic method. Whenever a story has an extending story, its vector representation will update to include the extending information. Having the extending method, the representation model can have more information to describe the topic in a story and make the topic evolve along with time. The dynamic method can define topic description clearer and get a more accurate similarity between stories.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method Description",
                "sec_num": "5.2"
            },
            {
                "text": "In the dynamic method, we have tried two ways for the generation of dynamic vector models: increment model and average model. Supposing we use vector model",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dynamic Vector Model",
                "sec_num": "5.3"
            },
            {
                "text": "M 1 = {(f 1i , w 1i ), i \u2265 1} of story s 1 to ex- tend vector model M 2 = {(f 2i , w 2i ), i \u2265 1} of story s 2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dynamic Vector Model",
                "sec_num": "5.3"
            },
            {
                "text": ", M 2 will change to representing the latest evolving topic described in current story after extending.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dynamic Vector Model",
                "sec_num": "5.3"
            },
            {
                "text": "f 1i in M 1 , if",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Increment Model: For each term",
                "sec_num": "1."
            },
            {
                "text": "it also occurs as f 2j in M 2 , then w 2j will not change, otherwise (f 1i , w 1i ) will be added into M 2 . This dynamic vector model only takes interest in the new information that occurs only in M 1 . For features both occurred in M 1 and M 2 , the dynamic model will respect to their original weights.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Increment Model: For each term",
                "sec_num": "1."
            },
            {
                "text": "2. Average Model: For each term f 1i in M 1 , if it also occurs as f 2j in M 2 , then w 2j = 0.5 * (w 1i + w 2j ), otherwise (f 1i , w 1i ) will be added into M 2 . This dynamic model will take account of all information in M 1 . So the difference between those two dynamic models is the weight recalculation method of the feature occurred in both M 1 and M 2 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Increment Model: For each term",
                "sec_num": "1."
            },
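            {
                "text": "Both update rules can be written as one merge function (a sketch of the two rules above, not the authors' code; merge_models is a hypothetical name):\n\ndef merge_models(m1, m2, average=False):\n    # Extend m2 (the current story's vector) with m1 (its latest related story).\n    out = dict(m2)\n    for f, w1 in m1.items():\n        if f in out:\n            if average:\n                # Average Model: mean of the two weights.\n                out[f] = 0.5 * (w1 + out[f])\n            # Increment Model: keep the weight already in m2.\n        else:\n            # Features occurring only in m1 are added under both models.\n            out[f] = w1\n    return out",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dynamic Vector Model",
                "sec_num": "5.3"
            },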
            {
                "text": "Both the above two dynamic models can take account of information extending and topic evolution. Increment Model is closer to topic description since it is more dependent on latest term weights, while Average Model makes more reference to the centroid concept. The experiment results show that dynamic method with Average Model is a little superior to that with Increment Model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Increment Model: For each term",
                "sec_num": "1."
            },
            {
                "text": "To evaluate the proposed method, we use the Chinese subset of TDT4 corpus (LDC, 2003) developed by the Linguistic Data Consortium (LDC) for TDT research. This subset contains 27145 stories all in Chinese from October 2000 through January 2001, which are gathered from news, broadcast or TV shows.",
                "cite_spans": [
                    {
                        "start": 74,
                        "end": 85,
                        "text": "(LDC, 2003)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment Data",
                "sec_num": "6.1"
            },
            {
                "text": "LDC totally labeled 40 topics on TDT4 for 2003 evaluation. There are totally 12334 stories pairs from 1151 source files in the experiment data. The answers for these pairs are based on 28 topics of these topics, generated from the LDC 2003 annotation documents. The first 2334 pairs are used for training and finding extending threshold of dynamic method. The rest 10000 pairs are testing data used for comparing performances of baseline and the dynamic methods.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiment Data",
                "sec_num": "6.1"
            },
            {
                "text": "The work is measured by the TDT evaluation software, which could be referred to (Hoogma, 2005) for detail. Here is a brief description. The goal of link detection is to minimize the cost due to errors caused by the system. The TDT tasks are evaluated by computing a \"detection cost\":",
                "cite_spans": [
                    {
                        "start": 80,
                        "end": 94,
                        "text": "(Hoogma, 2005)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },
            {
                "text": "C det = C miss \u2022P miss \u2022P target +C f a \u2022P f a \u2022P non\u2212target",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },
            {
                "text": "where C miss is the cost of a miss, P miss is the estimated probability of a miss, P target is the prior probability under which a pair of stories are linked, C f a is the cost of a false alarm, P f a is the estimated probability of a false alarm, and P non\u2212target is the prior probability under which a pair of stories are not linked. A miss occurs when a linked story pair is not identified as being linked by the system. A false alarm occurs when the pair of stories that are not linked are identified as being linked by the system. A target is a pair of linked stories; conversely a nontarget is a pair of stories that are not linked. For the link detection task these parameters are set as follows: C miss is 1, P target is 0.02, and C f a is 0.1. The cost for each topic is equally weighted (usually the cost of topic-weighted is the mainly evaluation parameter) and normalized so that for a given system, the normalized value (C det ) norm can be no less than one without extracting information from the source data:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },
            {
                "text": "(C det ) norm = C det min(C miss P target , C f a P non\u2212target ) (C det ) overall = \u03a3 i (C i det )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },
            {
                "text": "norm /#topics where the sum is over topics i. A detection curve (DET curve) is computed by sweeping a threshold over the range of scores, and the minimum cost over the DET curve is identified as the minimum detection cost or min DET. The topic-weighted DET cost is dependent on both a good minimum cost and a good method for selecting an operating point, which is usually implemented by selecting a threshold. A system with a very low min DET cost can have a much larger topic-weighted DET score. Therefore, we focus on the minimum DET cost for the experiments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },
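            {
                "text": "A minimal Python sketch of the cost computation above (an illustration, not the official TDT scorer; the function names are hypothetical), using the stated parameters C_miss = 1, C_fa = 0.1 and P_target = 0.02:\n\ndef normalized_cost(p_miss, p_fa, c_miss=1.0, c_fa=0.1, p_target=0.02):\n    p_non_target = 1.0 - p_target\n    c_det = c_miss * p_miss * p_target + c_fa * p_fa * p_non_target\n    return c_det / min(c_miss * p_target, c_fa * p_non_target)\n\ndef overall_cost(per_topic_costs):\n    # Topic-weighted: each topic's normalized cost counts equally.\n    return sum(per_topic_costs) / len(per_topic_costs)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Measures",
                "sec_num": "6.2"
            },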
            {
                "text": "In this paper, we have tried three methods for story link detection: the baseline method described in Section 4 and two dynamic methods with different dynamic vectors introduced in Section 5. The following In the table, Clink min is the minimum (C det ) overall , DET Graph Minimum Detection Cost (topic-weighted), Clink norm is the normalized minimum (C det ) overall , the dynamic 1 is the dynamic method which uses Increment Model and the dynamic 2 is the dynamic method which uses Average Model. We can see that the proposed two dynamic methods are both much better than baseline method on all four metrics. The Clink N orm of dynamic 1 and 2 are improved individually by 27.2% and 27.8% as compared to that of baseline method. The difference between two dynamic methods is due to different in the P miss . However, it is too little to compare the two dynamic systems. We also make additional experiments in which a story is extended with all of its previous related stories. The minimum (Cdet)overall is 0.0614 for the system using Increment Model, and 0.0608 for the system using Average Model. Although the performances are also much superior to baseline, it is still a little poorer than that with only one latest related story, which confirm the ideal described in section 5.1. Figure 1, 2 and 3 show the detail evaluation information for individual topic on Minimum Norm Detection Cost, P miss and P f a . From Figure 1 we know these two dynamic methods have improved the performance on almost all the topic, except topic 12, 26 and 32. Note that detection cost is a function of P miss and P f a . Figure 2 shows that both two dynamic methods reduce the false alarm rates on all evaluation topics. In Figure 3 there are 20 topics on which the miss rates remain zero or unchange. The dynamic methods reduce the miss rates on 5 topics. However, dynamic methods get relatively poorer results on topic 12, 26 and 32 . Altogether dynamic methods can notably improve system performance on evaluation metrics of both individual and weighted topic, especially the false alarm rate, but on some topics, it gets poorer results.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1287,
                        "end": 1304,
                        "text": "Figure 1, 2 and 3",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 1421,
                        "end": 1429,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 1608,
                        "end": 1616,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 1711,
                        "end": 1719,
                        "text": "Figure 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experiment Results",
                "sec_num": "6.3"
            },
            {
                "text": "Further investigation shows that topic 12, 26 and 32 are about Presidential election in Ivory Coast on October 25, 2000, Airplane Crash in Chiang Kai Shek International Airport in Taiwan on October 31, 2000, and APEC Conference on November 12-15, 2000 at Brunei. After analyzing those story pairs with error link decision, we can split them into two sets. One is that two stories in a pair are general linked but not TDT specific topically linked. Here general linked means that there are many common words in two stories, but the events described in them happened in different times or different places. For example, Airplane Crash is a general topic, while Airplane Crash in certain location at specification time is a TDT topic. The other is that two stories in a pair are TDT topically linked while they describe the topic from different perspectives. In this condition they will have few common words. These may be due to that the information extracted from stories is still not accurate enough to represent them. It also may be because of the 1  3  5  7  9  1  1  1  3  1  5  1  7  1  9  2  1  2  3  2  5  2  7  2  9  3  1  3  3  3  5  3  7  3  9  T  o  p  i  c  I 1  3  5  7  9  1  1  1  3  1  5  1  7  1  9  2  1  2  3  2  5  2  7  2  9  3  1  3  3  3  5  3  7  3 Figure 3: P miss for individual topic deficiency of vector model itself. Furthermore, we know that the extending story is chosen by cosine similarity, which results that the extending story and the extended story are usually topically linked from the same perspectives, seldom from different perspectives. Therefore the method of information extending may sometimes turn the above first problem worse and have no impact on the second problem. So mining more useful information or making more use of other useful resources to solve these problems will be the next work. In addition, how to represent this information with a proper model and seeking better or more proper representation models for TDT stories are also important issues. In a word, the method of information extending has been verified efficient in story link detection and can provide a reference to improve the performance of some other similar systems whose data must be processed serially, and it is also hopeful to combined with other improvement technologies.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1049,
                        "end": 1170,
                        "text": "1  3  5  7  9  1  1  1  3  1  5  1  7  1  9  2  1  2  3  2  5  2  7  2  9  3  1  3  3  3  5  3  7  3  9  T  o  p  i  c  I",
                        "ref_id": null
                    },
                    {
                        "start": 1171,
                        "end": 1271,
                        "text": "1  3  5  7  9  1  1  1  3  1  5  1  7  1  9  2  1  2  3  2  5  2  7  2  9  3  1  3  3  3  5  3  7  3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Experiment Results",
                "sec_num": "6.3"
            },
            {
                "text": "Story link detection is a key technique in TDT research. Though many approaches have been tried, there are still some characters ignored. After analyzing the characters and deficiency in TDT stories and story link detection, this paper presents a method of dynamic information extending to improve the system performance by focus on two problems: information deficiency and topic evolution. The experiment results indicate that this method can effectively improve the performance on both miss and false alarm rates, especially the later one. However, we should realize that there are still some problems to solve in story link detection. How to compare general topically linked stories and how to compare stories describing a TDT topic from different angles will be very vital to improve system performance. The next work will focus on mining more and deeper useful information in TDT stories and exploiting more proper models to represent them.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            },
            {
                "text": "Problem DefinitionIn the task definition of story link detection(NIST, 2003), a link detection system is given a sequence of time-ordered news source files S = S 1 , S 2 , S 3 , . . . , S n where each S i includes a set of stories, and a sequence of time-ordered story pairs P = P 1 , P 2 , P 3 , . . . , P m where P i =",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://sewm.pku.edu.cn/QA/reference/ICTCLAS/FreeICT-CLAS/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "This research is supported by the National Natural Science Foundation of China (60403050), Program for New Century Excellent Talents in University (NCET-06-0926) and the National Grand ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgement",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Detections, bounds, and timelines: Umass and tdt-3",
                "authors": [
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    },
                    {
                        "first": "Victor",
                        "middle": [],
                        "last": "Lavrenko",
                        "suffix": ""
                    },
                    {
                        "first": "Daniella",
                        "middle": [],
                        "last": "Malin",
                        "suffix": ""
                    },
                    {
                        "first": "Russell",
                        "middle": [],
                        "last": "Swan",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of Topic Detection and Tracking (TDT-3)",
                "volume": "",
                "issue": "",
                "pages": "167--174",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James Allan, Victor Lavrenko, Daniella Malin, and Rus- sell Swan. 2000. Detections, bounds, and timelines: Umass and tdt-3. In Proceedings of Topic Detection and Tracking (TDT-3), pages 167-174.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Umass tdt 2003 research summary. In proceedings of TDT workshop",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Bolivar",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Connell",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Cronen-Townsend",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Feng",
                        "suffix": ""
                    },
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Feng",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Kumaran",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Larkey",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Lavrenko",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Raghavan",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Allan, A. Bolivar, M. Connell, S. Cronen-Townsend, A Feng, F. Feng, G. Kumaran, L. Larkey, V. Lavrenko, and H. Raghavan. 2003. Umass tdt 2003 research summary. In proceedings of TDT workshop.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Topic Detection and Tracking: Event-based Information Organization",
                "authors": [],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James Allan, editor. 2002. Topic Detection and Track- ing: Event-based Information Organization. Kluwer Academic Publishers, Norvell, Massachusetts.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Multiple similarity measures and source-pair information in story link detection",
                "authors": [
                    {
                        "first": "Francine",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "Ayman",
                        "middle": [],
                        "last": "Farahat",
                        "suffix": ""
                    },
                    {
                        "first": "Thorsten",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "HLT-NAACL",
                "volume": "",
                "issue": "",
                "pages": "313--320",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Francine Chen, Ayman Farahat, and Thorsten Brants. 2004. Multiple similarity measures and source-pair information in story link detection. In HLT-NAACL, pages 313-320.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Umass at tdt",
                "authors": [
                    {
                        "first": "Margaret",
                        "middle": [],
                        "last": "Connell",
                        "suffix": ""
                    },
                    {
                        "first": "Ao",
                        "middle": [],
                        "last": "Feng",
                        "suffix": ""
                    },
                    {
                        "first": "Giridhar",
                        "middle": [],
                        "last": "Kumaran",
                        "suffix": ""
                    },
                    {
                        "first": "Hema",
                        "middle": [],
                        "last": "Raghavan",
                        "suffix": ""
                    },
                    {
                        "first": "Chirag",
                        "middle": [],
                        "last": "Shah",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "TDT2004 Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Margaret Connell, Ao Feng, Giridhar Kumaran, Hema Raghavan, Chirag Shah, and James Allan. 2004. Umass at tdt 2004. In TDT2004 Workshop.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "The modules and methods of topic detection and tracking",
                "authors": [
                    {
                        "first": "Niek",
                        "middle": [],
                        "last": "Hoogma",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "2nd Twente Student Conference on IT",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Niek Hoogma. 2005. The modules and methods of topic detection and tracking. In 2nd Twente Student Confer- ence on IT.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Relevance models for topic detection and tracking",
                "authors": [
                    {
                        "first": "Victor",
                        "middle": [],
                        "last": "Lavrenko",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    },
                    {
                        "first": "Edward",
                        "middle": [],
                        "last": "Deguzman",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Laflamme",
                        "suffix": ""
                    },
                    {
                        "first": "Veera",
                        "middle": [],
                        "last": "Pollard",
                        "suffix": ""
                    },
                    {
                        "first": "Stephen",
                        "middle": [],
                        "last": "Thomas",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of Human Language Technology Conference (HLT)",
                "volume": "",
                "issue": "",
                "pages": "104--110",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Victor Lavrenko, James Allan, Edward DeGuzman, Daniel LaFlamme, Veera Pollard, and Stephen Thomas. 2002. Relevance models for topic detec- tion and tracking. In Proceedings of Human Language Technology Conference (HLT), pages 104-110.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Topic detection and tracking -phase 4",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Ldc",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Linguistic Data Consortium",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "LDC. 2003. Topic detection and tracking -phase 4. Technical report, Linguistic Data Consortium.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Capturing term dependencies using a language model based on sentence trees",
                "authors": [
                    {
                        "first": "Ramesh",
                        "middle": [],
                        "last": "Nallapati",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the eleventh international conference on Information and knowledge management",
                "volume": "",
                "issue": "",
                "pages": "383--390",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ramesh Nallapati and James Allan. 2002. Capturing term dependencies using a language model based on sentence trees. In Proceedings of the eleventh interna- tional conference on Information and knowledge man- agement, pages 383-390. ACM Press.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Semantic language models for topic detection and tracking",
                "authors": [
                    {
                        "first": "Ramesh",
                        "middle": [],
                        "last": "Nallapati",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "HLT-NAACL",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ramesh Nallapati. 2003. Semantic language models for topic detection and tracking. In HLT-NAACL.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "The 2003 topic detection and tracking task definition and evaluation plan",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Nist",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "National Institute of Standards and Technology(NIST)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "NIST. 2003. The 2003 topic detection and tracking task definition and evaluation plan. Technical report, Na- tional Institute of Standards and Technology(NIST).",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Topic-conditioned novelty detection",
                "authors": [
                    {
                        "first": "Yiming",
                        "middle": [],
                        "last": "Yang",
                        "suffix": ""
                    },
                    {
                        "first": "Jian",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "Jaime",
                        "middle": [],
                        "last": "Carbonell",
                        "suffix": ""
                    },
                    {
                        "first": "Chun",
                        "middle": [],
                        "last": "Jin",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining",
                "volume": "",
                "issue": "",
                "pages": "688--693",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yiming Yang, Jian Zhang, Jaime Carbonell, and Chun Jin. 2002. Topic-conditioned novelty detection. In Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining, pages 688-693. ACM Press.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "num": null,
                "uris": null,
                "type_str": "figure",
                "text": "Normalized Minimum Detection Cost for individual topic"
            },
            "FIGREF1": {
                "num": null,
                "uris": null,
                "type_str": "figure",
                "text": "Figure 2: P f a for individual topic"
            },
            "TABREF0": {
                "type_str": "table",
                "content": "<table><tr><td/><td colspan=\"3\">their evaluation results.</td></tr><tr><td>metrics</td><td colspan=\"3\">baseline dynamic 1 dynamic 2</td></tr><tr><td>P miss</td><td>0.0514</td><td>0.0348</td><td>0.0345</td></tr><tr><td>P f a</td><td>0.0067</td><td>0.0050</td><td>0.0050</td></tr><tr><td>Clink min</td><td>0.0017</td><td>0.0012</td><td>0.0012</td></tr><tr><td colspan=\"2\">Clink norm 0.0840</td><td>0.0591</td><td>0.0588</td></tr><tr><td colspan=\"4\">Table 1: Experiment Results of Baseline System and</td></tr><tr><td colspan=\"2\">Dynamic Systems</td><td/><td/></tr></table>",
                "html": null,
                "text": "",
                "num": null
            }
        }
    }
}