File size: 67,945 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
{
    "paper_id": "I08-1040",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:41:55.438634Z"
    },
    "title": "Unsupervised Classification of Sentiment and Objectivity in Chinese Text",
    "authors": [
        {
            "first": "Taras",
            "middle": [],
            "last": "Zagibalov",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sussex",
                "location": {
                    "postCode": "BN1 9QH",
                    "settlement": "Brighton",
                    "country": "UK"
                }
            },
            "email": "t.zagibalov@sussex.ac.uk"
        },
        {
            "first": "John",
            "middle": [],
            "last": "Carroll",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sussex",
                "location": {
                    "postCode": "BN1 9QH",
                    "settlement": "Brighton",
                    "country": "UK"
                }
            },
            "email": "j.a.carroll@sussex.ac.uk"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We address the problem of sentiment and objectivity classification of product reviews in Chinese. Our approach is distinctive in that it treats both positive / negative sentiment and subjectivity / objectivity not as distinct classes but rather as a continuum; we argue that this is desirable from the perspective of would-be customers who read the reviews. We use novel unsupervised techniques, including a one-word 'seed' vocabulary and iterative retraining for sentiment processing, and a criterion of 'sentiment density' for determining the extent to which a document is opinionated. The classifier achieves up to 87% F-measure for sentiment polarity detection.",
    "pdf_parse": {
        "paper_id": "I08-1040",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We address the problem of sentiment and objectivity classification of product reviews in Chinese. Our approach is distinctive in that it treats both positive / negative sentiment and subjectivity / objectivity not as distinct classes but rather as a continuum; we argue that this is desirable from the perspective of would-be customers who read the reviews. We use novel unsupervised techniques, including a one-word 'seed' vocabulary and iterative retraining for sentiment processing, and a criterion of 'sentiment density' for determining the extent to which a document is opinionated. The classifier achieves up to 87% F-measure for sentiment polarity detection.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Automatic classification of sentiment has been a focus of a number of recent research efforts (e.g. (Turney, 2002; Pang et al., 2002; Dave at al., 2003) . An important potential application of such work is in business intelligence: brands and company image are valuable property, so organizations want to know how they are viewed by the media (what the 'spin' is on news stories, and editorials), business analysts (as expressed in stock market reports), customers (for example on product review sites) and their own employees. Another important application is to help people find out others' views about products they have purchased (e.g. consumer electronics), services and entertainment (e.g. movies), stocks and shares (from investor bulletin boards), and so on. In the work reported in this paper we focus on product reviews, with the intended users of the processing being would-be customers.",
                "cite_spans": [
                    {
                        "start": 100,
                        "end": 114,
                        "text": "(Turney, 2002;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 115,
                        "end": 133,
                        "text": "Pang et al., 2002;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 134,
                        "end": 152,
                        "text": "Dave at al., 2003)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our approach is based on the insight that positive and negative sentiments are extreme points in a continuum of sentiment, and that intermediate points in this continuum are of potential interest. For instance, in one scenario, someone might want to get an idea of the types of things people are saying about a particular product through reading a sample of reviews covering the spectrum from highly positive, through balanced, to highly negative. (We call a review balanced if it is an opinionated text with an undecided or weak sentiment direction). In another scenario, a would-be customer might only be interested in reading balanced reviews, since they often present more reasoned arguments with fewer unsupported claims. Such a person might therefore want to avoid reviews such as Example (1) -written by a Chinese purchaser of a mobile phone (our English gloss).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The software is bad, some sent SMS are never received by the addressee; compatibility is also bad, on some mobile phones the received messages are in a scrambled encoding! And sometimes the phone 'dies'! Photos are horrible! It doesn't have a cyclic or pro-grammable alarm-clock, you have to set it every time, how cumbersome! The back cover does not fit! The original software has many holes!",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In a third scenario, someone might decide they would like only to read opinionated, weakly negative reviews such as Example (2), since these often contain good argumentation while still identifying the most salient bad aspects of a product.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The response time of this mobile is very long, MMS should be less than 30kb only to be downloaded, also it doesn't support MP3 ring tones, (while) the built-in tunes are not good, and from time to time it 'dies', but when I was buying it I really liked it: very original, very nicely matching red and white colours, it has its individuality, also it's not expensive, but when used it always causes trouble, makes one's head ache",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The review contains both positive and negative sentiment covering different aspects of the product, and the fact that it contains a balance of views means that it is likely to be useful for a would-be customer. Moving beyond review classification, more advanced tasks such as automatic summarization of reviews (e.g. Feiguina & LaPalme, 2007) might also benefit from techniques which could distinguish more shades of sentiment than just a binary positive / negative distinction.",
                "cite_spans": [
                    {
                        "start": 317,
                        "end": 342,
                        "text": "Feiguina & LaPalme, 2007)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "A second dimension, orthogonal to positive / negative, is opinionated / unopinionated (or equivalently subjective / objective). When shopping for a product, one might be interested in the physical characteristics of the product or what features the product has, rather than opinions about how well these features work or about how well the product as a whole functions. Thus, if one is looking for a review that contains more factual information than opinion, one might be interested in reviews like Example (3).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(3) (My) overall feeling about this mobile is not bad, it features: 5 alarm-clocks that switch the phone on (off), phone book for 800 items (500 people), lunar and solar calendars, fast switching between time and date modes, WAP networking, organizer, notebook and so on.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "This review is mostly neutral (unopinionated), but contains information that could be useful to a would-be customer which might not be in a product specification document, e.g. fast switching between different operating modes. Similarly, wouldbe customers might be interested in retrieving completely unopinionated documents such as technical descriptions and user manuals. Again, as with sentiment classification, we argue that opinionated and unopinionated texts are not easily distinguishable separate sets, but form a continuum. In this continuum, intermediate points are of interest as well as the extremes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "A major obstacle for automatic classification of sentiment and objectivity is lack of training data, which limits the applicability of approaches based on supervised machine learning. With the rapid growth in textual data and the emergence of new domains of knowledge it is virtually impossible to maintain corpora of tagged data that cover all -or even most -areas of interest. The cost of manual tagging also adds to the problem. Reusing the same corpus for training classifiers for new domains is also not effective: several studies report decreased accuracy in cross-domain classification (Engstr\u00f6m, 2004; Aue & Gamon, 2005) a similar problem has also been observed in classification of documents created over different time periods (Read, 2005) .",
                "cite_spans": [
                    {
                        "start": 593,
                        "end": 609,
                        "text": "(Engstr\u00f6m, 2004;",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 610,
                        "end": 628,
                        "text": "Aue & Gamon, 2005)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 737,
                        "end": 749,
                        "text": "(Read, 2005)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper we describe an unsupervised classification technique which is able to build its own sentiment vocabulary starting from a very small seed vocabulary, using iterative retraining to enlarge the vocabulary. In order to avoid problems of domain dependence, the vocabulary is built using text from the same source as the text which is to be classified. In this paper we work with Chinese, but using a very small seed vocabulary may mean that this approach would in principle need very little linguistic adjustment to be applied to a different language. Written Chinese has some specific features, one of which is the absence of explicitly marked word boundaries, which makes word-based processing problematic. In keeping with our unsupervised, knowledge-poor approach, we do not use any preliminary word segmentation tools or higher level grammatical analysis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The paper is structured as follows. Section 2 reviews related work in sentiment classification and more generally in unsupervised training of classifiers. Section 3 describes our datasets, and Section 4 the techniques we use for unsupervised classification and iterative retraining. Sections 5 and 6 describe a number of experiments into how well the approaches work, and Section 7 concludes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Most previous work on the problem of categorizing opinionated texts has focused on the binary classification of positive and negative sentiment (Turney, 2002; Pang et al., 2002; Dave at al., 2003) . However, Pang & Lee (2005) describe an approach closer to ours in which they determine an author's evaluation with respect to a multi-point scale, similar to the 'five-star' sentiment scale widely used on review sites. However, authors of reviews are inconsistent in assigning fine-grained ratings and quite often star systems are not consistent between critics. This makes their approach very author-dependent. The main differences are that Pang and Lee use discrete classes (although more than two), not a continuum as in our approach, and use supervised machine learning rather than unsupervised techniques. A similar approach was adopted by Hagedorn et al. (2007) , applied to news stories: they defined five classes encoding sentiment intensity and trained their classifier on a manually tagged training corpus. They note that world knowledge is necessary for accurate classification in such open-ended domains.",
                "cite_spans": [
                    {
                        "start": 144,
                        "end": 158,
                        "text": "(Turney, 2002;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 159,
                        "end": 177,
                        "text": "Pang et al., 2002;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 178,
                        "end": 196,
                        "text": "Dave at al., 2003)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 208,
                        "end": 225,
                        "text": "Pang & Lee (2005)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 844,
                        "end": 866,
                        "text": "Hagedorn et al. (2007)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "2.1"
            },
            {
                "text": "There has also been previous work on determining whether a given text is factual or expresses opinion (Yu & Hatzivassiloglu, 2003; Pang & Lee, 2004) ; again this work uses a binary distinction, and supervised rather than unsupervised approaches.",
                "cite_spans": [
                    {
                        "start": 102,
                        "end": 130,
                        "text": "(Yu & Hatzivassiloglu, 2003;",
                        "ref_id": null
                    },
                    {
                        "start": 131,
                        "end": 148,
                        "text": "Pang & Lee, 2004)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "2.1"
            },
            {
                "text": "Recent work on classification of terms with respect to opinion (Esuli & Sebastiani, 2006 ) uses a three-category system to characterize the opinionrelated properties of word meanings, assigning numerical scores to Positive, Negative and Objective categories. The visualization of these scores somewhat resembles our graphs in Section 5, although we use two orthogonal scales rather than three categories; we are also concerned with classification of documents rather than terms.",
                "cite_spans": [
                    {
                        "start": 63,
                        "end": 88,
                        "text": "(Esuli & Sebastiani, 2006",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "2.1"
            },
            {
                "text": "Abney 2002compares two major kinds of unsupervised approach to classification (co-training and the Yarowsky algorithm). As we do not use multiple classifiers our approach is quite far from cotraining. But it is close to the paradigm described by Yarowsky (1995) and Turney (2002) as it also employs self-training based on a relatively small seed data set which is incrementally enlarged with unlabelled samples. But our approach does not use point-wise mutual information. Instead we use relative frequencies of newly found features in a training subcorpus produced by the previous iteration of the classifier. We also use the smallest possible seed vocabulary, containing just a single word; however there are no restrictions regarding the maximum number of items in the seed vocabulary.",
                "cite_spans": [
                    {
                        "start": 246,
                        "end": 261,
                        "text": "Yarowsky (1995)",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 266,
                        "end": 279,
                        "text": "Turney (2002)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Unsupervised Classification",
                "sec_num": "2.2"
            },
            {
                "text": "Our approach starts out with a seed vocabulary consisting of a single word, \u597d (good). This word is tagged as a positive vocabulary item; initially there are no negative items. The choice of word was arbitrary, and other words with strongly positive or negative meaning would also be plausible seeds. Indeed, \u597d might not be the best possible seed, as it is relatively ambiguous: in some contexts it means to like or acts as the adverbial very, and is often used as part of other words (although usually contributing a positive meaning). But since it is one of the most frequent units in the Chinese language, it is likely to occur in a relatively large number of reviews, which is important for the rapid growth of the vocabulary list.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Seed Vocabulary",
                "sec_num": "3.1"
            },
            {
                "text": "Our test corpus is derived from product reviews harvested from the website IT168 1 . All the reviews were tagged by their authors as either positive or negative overall. Most reviews consist of two or three distinct parts: positive opinions, negative opinions, and comments ('other') -although some reviews have only one part. We removed duplicate reviews automatically using approximate matching, giving a corpus of 29531 reviews of which 23122 are positive (78%) and 6409 are negative (22%). The total number of different products in the corpus is 10631, the number of product categories is 255, and most of the reviewed products are either software products or consumer electronics. Unfortunately, it appears that some users misused the sentiment tagging facility on the website so quite a lot of reviews have incorrect tags. However, the parts of the reviews are much more reliably identified as being positive or negative so we used these as the items of the test corpus. In the experiments described in this paper we used 2317 reviews of mobile phones of which 1158 are negative and 1159 are positive. Thus random choice would have approximately 50% accuracy if all items were tagged either as negative or positive 2 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Test Corpus",
                "sec_num": "3.2"
            },
            {
                "text": "As discussed in Section 1, we do not carry out any word segmentation or grammatical processing of input documents. We use a very broad notion of words (or phrases) in the Chinese language. The basic units of processing are 'lexical items', each of which is a sequence of one or more Chinese characters excluding punctuation marks (which may actually form part of a word, a whole word or a sequence of words), and`zones', each of which is a sequence of characters delimited by punctuation marks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "Each zone is classified as either positive or negative based on whether positive or negative vocabulary items predominate. In more detail, a simple maximum match algorithm is used to find all lexical items (character sequences) in the zone that are in the vocabulary list. As there are two parts of the vocabulary (positive and negative), we correspondingly calculate two scores using Equation 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "S_i = \\frac{L_d}{L_{phrase}} \\, S_d \\, N_d \\qquad (1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "where Ld is the length in characters of a matching lexical item, Lphrase is the length of the current zone in characters, Sd is the current sentiment score of the matching lexical item (initially 1.0), and Nd is a negation check coefficient. The negation check is a regular expression which determines if the lexical item is preceded by a negation within its enclosing zone. If a negation is found then Nd is set to -1. The check looks for six frequently occurring negations:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "EQUATION",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [
                    {
                        "start": 0,
                        "end": 8,
                        "text": "EQUATION",
                        "ref_id": "EQREF",
                        "raw_str": "(bu), (buhui), (meiyou), (baituo),",
                        "eq_num": "(mianqu)"
                    }
                ],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": ", and (bimian). The sentiment score of a zone is the sum of sentiment scores of all the items found in it. In fact there are two competing sentiment scores for every zone: one positive (the sum of all scores of items found in the positive part of the vocabulary list) and one negative (the sum of the scores for the items in the negative part). The sentiment direction of a zone is determined from the maximum of the absolute values of the two competing scores for the zone.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "This procedure is applied to all zones in a document, classifying each zone as positive, negative, or neither (in cases where there are no positive or negative vocabulary items in the zone). To determine the sentiment direction of the whole document, the classifier computes the difference between the number of positive and negative zones. If the result is greater than zero the document is classified as positive, and vice versa. If the result is zero the document is balanced or neutral for sentiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentiment Classification",
                "sec_num": "4.1"
            },
            {
                "text": "The task of iterative retraining is to enlarge the initial seed vocabulary (consisting of a single word as discussed in Section 3.1) into a comprehensive vocabulary list of sentiment-bearing lexical items. In each iteration, the current version of the classifier is run on the product review corpus to classify each document, resulting in a training subcorpus of positive and a negative documents. The subcorpus is used to adjust the scores of existing positive and negative vocabulary items and to find new items to be included in the vocabulary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "Each lexical item that occurs at least twice in the corpus is a candidate for inclusion in the vocabulary list. After candidate items are found, the system calculates their relative frequencies in both the positive and negative parts of the current training subcorpus. The system also checks for negation while counting occurrences: if a lexical item is preceded by a negation, its count is reduced by one. This results in negative counts (and thus negative relative frequencies and scores) for those items that are usually used with negation; for example, (the quality is far too bad) is in the positive part of the vocabulary with a score of -1.70. This means that the item was found in reviews classified by the system as positive but it was preceded by a negation. If during classification this item is found in a document it will reduce the positive score for that document (as it is in the positive part of the vocabulary), unless the item is preceded by a negation. In this situation the score will be reversed (multiplied by -1), and the positive score will be increased -see Equation 1above.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "For all candidate items we compare their relative frequencies in the positive and negative documents in the subcorpus using Equation 2.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "difference= | F p \u2212 F n | \ue09e F p \ue083 F n \ue09f/2 (2)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "If difference < 1, then the frequencies are similar and the item does not have enough distinguishing power, so it is not included in the vocabulary. Otherwise the the sentiment score of the item is (re-) calculated -according to Equation (3) for positive items, and analogously for negative items.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "F p F p \ue083F n (3)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "Finally, the adjusted vocabulary list with the new scores is ready for the next iteration.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Iterative Retraining",
                "sec_num": "4.2"
            },
            {
                "text": "Given a sentiment classification for each zone in a document, we compute sentiment density as the proportion of opinionated zones with respect to the total number of zones in the document. Sentiment density measures the proportion of opinionated text in a document, and thus the degree to which the document as a whole is opinionated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Objectivity Classification",
                "sec_num": "4.3"
            },
            {
                "text": "It should be noted that neither sentiment score nor sentiment density are absolute values, but are relative and only valid for comparing one document with other. Thus, a sentiment density of 0.5 does not mean that the review is half opinionated, half not. It means that the review is less opinionated than a review with density 0.9.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Objectivity Classification",
                "sec_num": "4.3"
            },
            {
                "text": "We ran the system on the product review corpus (Section 3.2) for 20 iterations. The results for bina-ry sentiment classification are shown in Table 1 . We see increasing F-measure up to iteration 18, after which both precision and recall start to descrease; we therefore use the version of the classifier as it stood after iteration 18 4 . These figures are only indicative of the classification accuracy of the system. Accuracy might be lower for unseen text, although since our approach is unsupervised we could in principle perform further retraining iterations on any sample of new text to tune the vocabulary list to it.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 142,
                        "end": 149,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "We also computed a (strong) baseline, using as the vocabulary list the NTU Sentiment Dictionary (Ku et al., 2006) 5 which is intended to contain only sentiment-related words and phrases. We assigned each positive and negative vocabulary item a score of 1 or -1 respectively. This setup achieved 87.77 precision and 77.09 recall on the product review corpus.",
                "cite_spans": [
                    {
                        "start": 96,
                        "end": 113,
                        "text": "(Ku et al., 2006)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "In Section 1 we argued that sentiment and objectivity should both be considered as continuums, not binary distinctions. Section 4.1 describes how our approach compares the number of positive and negative zones for a document and treats the difference as a measure of the 'positivity' or 'negativity' of a review. The document in Example (2), with 12 zones, is assigned a score of -1 (the least negative score possible): the review contains some positive sentiment but the overall sentiment direction of the review is negative. In contrast, Example (1) is identified as a highly negative review, as would be expected, with a score of -8, from 11 zones. Similarly, with regard to objectivity, the sentiment density of the text in Example (3) is 0.53, which reflects its more factual character compared to Example (1), which has a score of 0.91. We can represent sentiment and objectivity on the following scales:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experiments",
                "sec_num": "5"
            },
            {
                "text": "Balanced Positive",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Negative",
                "sec_num": null
            },
            {
                "text": "The scales are orthogonal, so we can combine them into a single coordinate system:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Unopinionated Neutral Opinionated",
                "sec_num": null
            },
            {
                "text": "We would expect most product reviews to be placed towards the top of the the coordinate system (i.e. opinionated), and stretch from left to right. Figure 1 plots the results of sentiment and objectivity classification of the test corpus in this two dimensional coordinate system, where X represents sentiment (with scores scaled with respect to the number of zones so that -100 is the most negative possible and +100 the most positive), and Y represents sentiment density (0 being unopinionated and 1 being highly opinionated).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 147,
                        "end": 155,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Opinionated Negative Positive",
                "sec_num": null
            },
            {
                "text": "Most of the reviews are located in the upper part of the coordinate system, indicating that they have been classified as opinionated, with either positive or negative sentiment direction. Looking at the overall shape of the plot, more opinionated documents tend to have more explicit sentiment direction, while less opinionated texts stay closer to the balanced / neutral region (around X = 0). ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Opinionated Negative Positive",
                "sec_num": null
            },
            {
                "text": "As can be seen in Figure 1 , the classifier managed to map the reviews onto the coordinate system. However, there are very few points in the neutral region, that is, on the same X = 0 line as balanced but with low sentiment density. By inspection, we know that there are neutral reviews in our data set. We therefore conducted a further experiment to investigate what the problem might be. We took Wikipedia 6 articles written in Chinese on mobile telephony and related issues, as well as several articles about the technology, the market and the history of mobile telecommunications, and split them into small parts (about a paragraph long, to make their size close to the size of the reviews) resulting in a corpus of 115 documents, which we assume to be mostly unopinionated. We processed these documents with the trained classifier and found that they were mapped almost exactly where balanced documents should be (see Figure 2 ). Most of these documents have weak sentiment direction (X = -5 to +10), but are classified as relatively opinionated (Y > 0.5). The former is to be expected, whereas the latter is not. When investigating the possible reasons for this behavior we noticed that the classifier found not only feature descriptions (like nice touch) or expressions which describe attitude ( (one) like(s)), but also product features (for example, MMS or TV) to be opinionated. This is because the presence of some advanced features such as MMS in mobile phones is often regarded as a positive by 6 www.wikipedia.org authors of reviews. In addition, the classifier found words that were used in reviews to describe situations connected with a product and its features: for example, (service) was often used in descriptions of quite unpleasant situations when a user had to turn to a manufacturer's post-sales service for repair or replacement of a malfunctioning phone, and (user) was often used to describe what one can do with some advanced features. 
Thus the classifier was able to capture some product-specific as well as market-specific sentiment markers; however, it was not able to distinguish the context these generally objective words were used in. This resulted in relatively high sentiment density of neutral texts which contained these words but used them in other types of context.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 18,
                        "end": 26,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 923,
                        "end": 931,
                        "text": "Figure 2",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "6"
            },
            {
                "text": "To verify this hypothesis we applied the same processing to our corpus derived from Wikipedia articles, but using as the vocabulary list the NTU Sentiment Dictionary. The results (Figure 3) show that most of the neutral texts are now mapped to the lower part of the opinionation scale (Y < 0.5), as expected. Therefore, to successfully distinguish between balanced reviews and neutral documents a classifier should be able to detect when product features are used as sentiment markers and when they are not.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 179,
                        "end": 189,
                        "text": "(Figure 3)",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "6"
            },
            {
                "text": "We have described an approach to classification of documents with respect to sentiment polarity and objectivity, representing both as a continuum, and mapping classified documents onto a coordinate system that also represents the difference between balanced and neutral text. We have presented a novel, unsupervised, iterative retraining procedure for deriving the classifier, starting from the most minimal size seed vocabulary, in conjunction with a simple negation check. We have verified that the approach produces reasonable results. The approach is extremely minimal in terms of language processing technology, giving it good possibilities for porting to different genres, domains and languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "We also found that the accuracy of the method depends a lot on the seed word chosen. If the word has a relatively low frequency or does not have a definite sentiment-related meaning, the results may be very poor. For example, an antonymous word to (good) in Chinese is (bad), but the latter is not a frequent word: the Chinese prefer to say (not good). When this word was used as the seed word, accuracy was little more than 15%. Although the first iteration produced high precision (82%), the size of the extracted subcorpus was only 24 items, resulting in the system being unable to produce a good classifier for the following iterations. Every new iteration produced an even poorer result as each new extracted corpus was of lower accuracy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "On the other hand, it seems that a seed list consisting of several low-frequency one-character words can compensate each other and produce better results by capturing a larger part of the corpus (thus increasing recall). Nevertheless a single word may also produce results even better than those for multiword seed lists. For example, the two-character word (comfortable) as seed reached 91% accuracy with 90% recall. We can conclude that our method relies on the quality of the seed word. We therefore need to investigate ways of choosing 'lucky' seeds and avoiding 'unlucky' ones.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "Future work should also focus on improving classification accuracy: adding a little languagespecific knowledge to be able to detect some word boundaries should help; we also plan to experiment with more sophisticated methods of sentiment score calculation. In addition, the notion of 'zone' needs refining and language-specific adjustments (for example, a 'reversed comma' should not be considered to be a zone boundary marker, since this punctuation mark is generally used for the enumeration of related objects).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "More experiments are also necessary to determine how the approach works across domains, and further investigation into methods for distinguishing between balanced and neutral text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "Finally, we need to produce a new corpus that would enable us to evaluate the performance of a pre-trained version of the classifier that did not have any prior access to the documents it was classifying: we need the reviews to be tagged not in a binary way as they are now, but in a way that reflects the two continuums we use (sentiment and objectivity).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions and Future Work",
                "sec_num": "7"
            },
            {
                "text": "http://product.it168.com",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "This corpus is publicly available at http://www.informatics. sussex.ac.uk/users/tz21/it168test.zip 3 In the first iteration, when we have only one item in the vocabulary, negative zones are found by means of the negation check (so not + good = negative item).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The size of the sentiment vocabulary after iteration 18 was 22530 (13462 positive and 9068 negative).5 Ku et al. automatically generated the dictionary by enlarging",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The first author is supported by the Ford Foundation International Fellowships Program.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Bootstrapping",
                "authors": [
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Abney",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "360--367",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Abney, Steven (2002) Bootstrapping. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, Philadelphia, PA. 360-367.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Customizing sentiment classifiers to new domains: a case study",
                "authors": [
                    {
                        "first": "Anthony & Michael",
                        "middle": [],
                        "last": "Aue",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of RANLP-2005",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Aue, Anthony & Michael Gamon (2005) Customizing sentiment classifiers to new domains: a case study. In Proceedings of RANLP-2005.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Mining the peanut gallery: opinion extraction and semantic classification of product reviews",
                "authors": [
                    {
                        "first": "Kushal",
                        "middle": [],
                        "last": "Dave",
                        "suffix": ""
                    },
                    {
                        "first": "Steve",
                        "middle": [],
                        "last": "Lawrence",
                        "suffix": ""
                    },
                    {
                        "first": "&",
                        "middle": [],
                        "last": "David",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Pennock",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the Twelfth International World Wide Web Conference",
                "volume": "",
                "issue": "",
                "pages": "519--528",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dave, Kushal, Steve Lawrence & David M. Pennock (2003) Mining the peanut gallery: opinion extraction and semantic classification of product reviews. In Proceedings of the Twelfth International World Wide Web Conference. 519-528.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Topic dependence in sentiment classification. Unpublished MPhil dissertation",
                "authors": [
                    {
                        "first": "Charlotte",
                        "middle": [],
                        "last": "Engstr\u00f6m",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Engstr\u00f6m, Charlotte (2004) Topic dependence in senti- ment classification. Unpublished MPhil dissertation, University of Cambridge.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "SENTI-WORDNET: a publicly available lexical resource for opinion mining",
                "authors": [
                    {
                        "first": "Andrea & Fabrizio",
                        "middle": [],
                        "last": "Esuli",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Sebastiani",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of LREC-06, the 5th Conference on Language Resources and Evaluation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Esuli, Andrea & Fabrizio Sebastiani (2006) SENTI- WORDNET: a publicly available lexical resource for opinion mining. In Proceedings of LREC-06, the 5th Conference on Language Resources and Evaluation, Genoa, Italy.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Jordi Atserias (2007) World knowledge in broad-coverage information filtering",
                "authors": [
                    {
                        "first": "Bennett",
                        "middle": [],
                        "last": "Hagedorn",
                        "suffix": ""
                    },
                    {
                        "first": "Massimiliano",
                        "middle": [],
                        "last": "Ciaramita",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "Proceedings of the 30th ACM SIGIR Conference on Research and Development in Information Retrieval",
                "volume": "",
                "issue": "",
                "pages": "801--802",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hagedorn, Bennett, Massimiliano Ciaramita & Jordi At- serias (2007) World knowledge in broad-coverage in- formation filtering. In Proceedings of the 30th ACM SIGIR Conference on Research and Development in Information Retrieval. 801-802.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Opinion extraction, summarization and tracking in news and blog corpora",
                "authors": [
                    {
                        "first": "Lun-Wei",
                        "middle": [],
                        "last": "Ku",
                        "suffix": ""
                    },
                    {
                        "first": "Yu-Ting",
                        "middle": [],
                        "last": "Liang",
                        "suffix": ""
                    },
                    {
                        "first": "& Hsin-Hsi",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the AAAI-2006 Spring Symposium on Computational Approaches to Analyzing Weblogs, AAAI Technical Report",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ku, Lun-Wei, Yu-Ting Liang & Hsin-Hsi Chen (2006) Opinion extraction, summarization and tracking in news and blog corpora. In Proceedings of the AAAI- 2006 Spring Symposium on Computational Ap- proaches to Analyzing Weblogs, AAAI Technical Re- port.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Query-based summarization of customer reviews",
                "authors": [
                    {
                        "first": "Olga & Guy",
                        "middle": [],
                        "last": "Feiguina",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Lapalme",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 20th Canadian Conference on Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "452--463",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Feiguina, Olga & Guy Lapalme (2007) Query-based summarization of customer reviews. In Proceedings of the 20th Canadian Conference on Artificial Intelli- gence, Montreal, Canada. 452-463.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Thumbs up? Sentiment classification using machine learning techniques",
                "authors": [
                    {
                        "first": "Bo",
                        "middle": [],
                        "last": "Pang",
                        "suffix": ""
                    },
                    {
                        "first": "Lillian",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "Shivakumar",
                        "middle": [],
                        "last": "Vaithyanathan",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "79--86",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pang, Bo, Lillian Lee & Shivakumar Vaithyanathan (2002) Thumbs up? Sentiment classification using machine learning techniques. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing, Philadelphia, PA. 79-86.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "A sentimental education: sentiment analysis using subjectivity summarization based on minimum cuts",
                "authors": [
                    {
                        "first": "Bo",
                        "middle": [],
                        "last": "Pang",
                        "suffix": ""
                    },
                    {
                        "first": "Lillian",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of the 42nd",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pang, Bo & Lillian Lee (2004) A sentimental education: sentiment analysis using subjectivity summarization based on minimum cuts. In Proceedings of the 42nd",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Annual Meeting of the Association for Computational Linguistics",
                "authors": [],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "271--278",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Annual Meeting of the Association for Computation- al Linguistics, Barcelona, Spain. 271-278.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Seeing stars: exploiting class relationships for sentiment categorization with respect to rating scales",
                "authors": [
                    {
                        "first": "Bo",
                        "middle": [],
                        "last": "Pang",
                        "suffix": ""
                    },
                    {
                        "first": "Lillian",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the 43rd",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pang, Bo & Lillian Lee (2005) Seeing stars: exploiting class relationships for sentiment categorization with respect to rating scales. In Proceedings of the 43rd",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Annual Meeting of the Association for Computational Linguistics",
                "authors": [],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "115--124",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Annual Meeting of the Association for Computation- al Linguistics, Ann Arbor, MI. 115-124.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Using emoticons to reduce dependency in machine learning techniques for sentiment classification",
                "authors": [
                    {
                        "first": "Jonathon",
                        "middle": [],
                        "last": "Read",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the Student Research Workshop at ACL-05",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Read, Jonathon (2005) Using emoticons to reduce de- pendency in machine learning techniques for senti- ment classification. In Proceedings of the Student Research Workshop at ACL-05, Ann Arbor, MI.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Thumbs up or thumbs down? Semantic orientation applied to unsupervised classification of reviews",
                "authors": [
                    {
                        "first": "Peter",
                        "middle": [
                            "D"
                        ],
                        "last": "Turney",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "417--424",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Turney, Peter D. (2002) Thumbs up or thumbs down? Semantic orientation applied to unsupervised classifi- cation of reviews. In Proceedings of the 40th Annual Meeting of the Association for Computational Lin- guistics, Philadelphia, PA. 417-424.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Unsupervised word sense disambiguation rivaling supervised methods",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Yarowsky",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "189--196",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yarowsky, David (1995) Unsupervised word sense dis- ambiguation rivaling supervised methods. In Pro- ceedings of the 33rd Annual Meeting of the Associa- tion for Computational Linguistics, Cambridge, MA. 189-196.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Towards answering opinion questions: separating facts from opinions and identifying the polarity of opinion sentences",
                "authors": [
                    {
                        "first": "Hong",
                        "middle": [],
                        "last": "Yu",
                        "suffix": ""
                    },
                    {
                        "first": "Vasileios",
                        "middle": [],
                        "last": "Hatzivassiloglou",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "129--136",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yu, Hong & Vasileios Hatzivassiloglou (2003) Towards answering opinion questions: separating facts from opinions and identifying the polarity of opinion sen- tences. In Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing, Sapporo, Japan. 129-136.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Reviews classified according to sentiment (X axis) and degree of opinionation (Y axis).",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF1": {
                "uris": null,
                "text": "Classification of a sample of articles from Wikipedia.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF2": {
                "uris": null,
                "text": "Classification of a sample of articles from Wikipedia, using the NTU Sentiment Dictionary as the vocabulary list.",
                "num": null,
                "type_str": "figure"
            },
            "TABREF0": {
                "text": "Results for binary sentiment classification during iterative retraining.",
                "content": "<table><tr><td>an initial manually created seed vocabulary by consulting two</td></tr><tr><td>thesauri, including tong2yi4ci2ci2lin2 and the Academia Sini-</td></tr><tr><td>ca Bilingual Ontological WordNet 3.</td></tr></table>",
                "num": null,
                "type_str": "table",
                "html": null
            }
        }
    }
}