File size: 67,829 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
{
    "paper_id": "2020",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T02:11:45.129863Z"
    },
    "title": "Enhancing the Identification of Cyberbullying through Participant Roles",
    "authors": [
        {
            "first": "Gathika",
            "middle": [],
            "last": "Ratnayaka",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Moratuwa",
                "location": {
                    "settlement": "Katubedda",
                    "country": "Sri Lanka"
                }
            },
            "email": ""
        },
        {
            "first": "Thushari",
            "middle": [],
            "last": "Atapattu",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "The University of Adelaide",
                "location": {
                    "settlement": "Adelaide",
                    "country": "Australia"
                }
            },
            "email": "thushari.atapattu@adelaide.edu.au"
        },
        {
            "first": "Mahen",
            "middle": [],
            "last": "Herath",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Moratuwa",
                "location": {
                    "settlement": "Katubedda",
                    "country": "Sri Lanka"
                }
            },
            "email": ""
        },
        {
            "first": "Georgia",
            "middle": [],
            "last": "Zhang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "The University of Adelaide",
                "location": {
                    "settlement": "Adelaide",
                    "country": "Australia"
                }
            },
            "email": ""
        },
        {
            "first": "Katrina",
            "middle": [],
            "last": "Falkner",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "The University of Adelaide",
                "location": {
                    "settlement": "Adelaide",
                    "country": "Australia"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Cyberbullying is a prevalent social problem that inflicts detrimental consequences to the health and safety of victims such as psychological distress, antisocial behaviour, and suicide. The automation of cyberbullying detection is a recent but widely researched problem, with current research having a strong focus on a binary classification of bullying versus non-bullying. This paper proposes a novel approach to enhancing cyberbullying detection through role modeling. We utilise a dataset from ASKfm to perform multi-class classification to detect participant roles (e.g. victim, harasser). Our preliminary results demonstrate promising performance including 0.83 and 0.76 of F1-score for cyberbullying and role classification respectively, outperforming baselines.",
    "pdf_parse": {
        "paper_id": "2020",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Cyberbullying is a prevalent social problem that inflicts detrimental consequences to the health and safety of victims such as psychological distress, antisocial behaviour, and suicide. The automation of cyberbullying detection is a recent but widely researched problem, with current research having a strong focus on a binary classification of bullying versus non-bullying. This paper proposes a novel approach to enhancing cyberbullying detection through role modeling. We utilise a dataset from ASKfm to perform multi-class classification to detect participant roles (e.g. victim, harasser). Our preliminary results demonstrate promising performance including 0.83 and 0.76 of F1-score for cyberbullying and role classification respectively, outperforming baselines.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The surge of Internet and social media has led to the unprecedented social crisis of cyberbullying, particularly among adolescents. It can lead to various damaging consequences on the health and safety of victims, such as feelings of isolation, depression, and suicide. Cyberbullying is the repetitive use of aggressive language among peers, with the intention to harm others through digital media (Rosa et al., 2019) . Despite the illegality of harassing others, most social media platforms are susceptible to cyberbullying due to the openness and anonymisation of platforms. Research conducted by Patchin and Hinduja (2019) indicates that cyberbullying victimisation rates have approximately doubled between the years 2007 and 2019. Adolescents, minorities (e.g. refugees, LGBTQI) and women are among common targets of cyberbullying. The sheer amount of cyberbullying-related incidents vastly exceeds the capacity of manual detection and demands the need to develop technology to effectively and automatically detect this. The development of automated models to detect cyberbullying is a widely researched problem in recent years, with current research focusing on classifying posts as bullying or non-bullying (Rosa et al., 2019; Al-garadi et al., 2016; Salawu et al., 2020) . One of the fundamental gaps in current research is that all texts from all users are treated equally without differentiating who has authored bullying and who has been targeted.These models provide a temporary solution by filtering offensive contents. Bullies often find novel ways to bypass technology such as incorporating implicit and subtle forms of language (e.g. sarcasm) and pseudo profiles. Identifying the roles of authors and targets introduces a novel approach to enable more information-rich models and to foster precise detection. A small number of recent studies focus on cyberbullying-related 'participant roles' (e.g. \nbully, victim, bystander) (see Figure 1 ) (Van Hee et al., 2018; Xu et al., 2012; Jacobs et al., 2020) .",
                "cite_spans": [
                    {
                        "start": 398,
                        "end": 417,
                        "text": "(Rosa et al., 2019)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 599,
                        "end": 625,
                        "text": "Patchin and Hinduja (2019)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 1213,
                        "end": 1232,
                        "text": "(Rosa et al., 2019;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 1233,
                        "end": 1256,
                        "text": "Al-garadi et al., 2016;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 1257,
                        "end": 1277,
                        "text": "Salawu et al., 2020)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 1956,
                        "end": 1978,
                        "text": "(Van Hee et al., 2018;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 1979,
                        "end": 1995,
                        "text": "Xu et al., 2012;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 1996,
                        "end": 2016,
                        "text": "Jacobs et al., 2020)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1945,
                        "end": 1953,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Motivated by this idea, our work focuses on two tasks, 1) detecting cyberbullying as a binary classification problem, and 2) detecting participant roles as a multi-class classification problem. We build upon previous role identification research and the AMiCA dataset proposed by Van Hee et al. (2018).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In addition to modeling bullying and non-bullying content as a binary classification task (Rosa et al., 2019; Al-garadi et al., 2016; Salawu et al., 2020) , several research studies focus on participant role identification (Salawu et al., 2020; Van Hee et al., 2018; Xu et al., 2012) within the cyberbullying context. Xu et al. (2012) defined 8 roles -bully, victim, bystander, assistant, defender, reporter, accuser and reinforcer, based on the theoretical framework of Salmivalli (2010) . The majority of previous studies addressing role identification incorporate user-(e.g., age, gender, location) and social networkbased features (e.g., number of followers, network centrality). Although these features have demonstrated a tendency to increase classification performance (Huang et al., 2014; Singh et al., 2016) , relying on user and network features is logistically challenging in real-world application due to the creation of pseudo profiles and ethical restrictions imposed by platforms. Alternatively, lexical and semantic features (e.g., subjectivity lexicons, character n-grams, topic models, profanity word lists, and named entities) of participants' posts are considered in few research studies (Van Hee et al., 2018; Xu et al., 2012) .",
                "cite_spans": [
                    {
                        "start": 90,
                        "end": 109,
                        "text": "(Rosa et al., 2019;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 110,
                        "end": 133,
                        "text": "Al-garadi et al., 2016;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 134,
                        "end": 154,
                        "text": "Salawu et al., 2020)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 223,
                        "end": 244,
                        "text": "(Salawu et al., 2020;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 245,
                        "end": 266,
                        "text": "Van Hee et al., 2018;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 267,
                        "end": 283,
                        "text": "Xu et al., 2012)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 318,
                        "end": 334,
                        "text": "Xu et al. (2012)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 471,
                        "end": 488,
                        "text": "Salmivalli (2010)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 776,
                        "end": 796,
                        "text": "(Huang et al., 2014;",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 797,
                        "end": 816,
                        "text": "Singh et al., 2016)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 1208,
                        "end": 1230,
                        "text": "(Van Hee et al., 2018;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 1231,
                        "end": 1247,
                        "text": "Xu et al., 2012)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Works",
                "sec_num": "2"
            },
            {
                "text": "Our research aims to automatically identify cyberbullying and participant roles based on supervised learning mechanisms that utilize pretrained language models and advanced contextual embedding techniques. Therefore, such mechanisms will mitigate the need for rule-based approaches and will also minimize the requirement for creating task-specific feature extraction mechanisms.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Works",
                "sec_num": "2"
            },
            {
                "text": "This study focuses on two tasks 1) detecting cyberbullying as a binary classification problem, and 2) detecting cyberbullying-related participant roles as a multi-class classification problem.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Model Description",
                "sec_num": "3"
            },
            {
                "text": "Instead of building new models, we extend an ensemble model originally designed by the authors (Herath et al., 2020) for SemEval-2020 Task on offensive language identification (Zampieri et al., 2020) , to classify posts in the current dataset. The reused ensemble model (Herath et al., 2020) was built using three single classifiers, each based on DistilBERT (Sanh et al., 2019) , a lighter, faster version of BERT (Devlin et al., 2018) . Each of the single classifiers A, B, and C was trained on a Twitter dataset containing Tweets annotated as offensive ('OFF') or non-offensive('NOT') posts. Models A and B were trained on imbalanced sets of Twitter data where the majority class instance was OFF and NOT respectively. Model C was trained using a balanced subset of Tweets which were assigned opposing class labels by the models A and B.",
                "cite_spans": [
                    {
                        "start": 95,
                        "end": 116,
                        "text": "(Herath et al., 2020)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 176,
                        "end": 199,
                        "text": "(Zampieri et al., 2020)",
                        "ref_id": null
                    },
                    {
                        "start": 270,
                        "end": 291,
                        "text": "(Herath et al., 2020)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 359,
                        "end": 378,
                        "text": "(Sanh et al., 2019)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 415,
                        "end": 436,
                        "text": "(Devlin et al., 2018)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cyberbullying classification",
                "sec_num": "3.1"
            },
            {
                "text": "Each classifier was trained using a learning rate of 5e-5 and a batch size of 32 for 2 epochs. A voting scheme was then used to combine the single models and build an ensemble model. If the biased classifiers A and B agreed upon a label for a given data instance, we assigned it that particular label. If the predictions from the biased classifiers were different, we assigned the data instance the prediction from the model C. This ensemble model achieved 0.906 of F1 score on the evaluation dataset of OffensEval challenge (Zampieri et al., 2020) .",
                "cite_spans": [
                    {
                        "start": 525,
                        "end": 548,
                        "text": "(Zampieri et al., 2020)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cyberbullying classification",
                "sec_num": "3.1"
            },
            {
                "text": "According to a theoretical framework developed by Salmivalli (2010) and the annotation guide by Van Hee et al. 2015, 'bystander assistant' also engages in bullying while helping or encouraging the 'harasser'. Similarly, 'bystander defender' helps the 'victims' to defend themselves from the harassment. Therefore, we consider 'bystander assistant' as a role which contributes to bullying. Accordingly, we categorise the posts of harassers and bystander assistants in AMiCA dataset into a category called 'bullying' and victim and bystander defender's posts into a category called 'defending'. Then, we divide the posts in each category into the roles as shown in Figure 3 . The final ensemble model contains 3 sub models as follows, Each of these models have the same model architecture, that consists of a pre-trained BERT embedding layer, hidden neural layer and a softmax output layer ( Figure 2 ). In order to extract BERT embeddings, 'bert-based uncased' model (Devlin et al., 2018) was used. As discussed in section 5, each ",
                "cite_spans": [
                    {
                        "start": 50,
                        "end": 67,
                        "text": "Salmivalli (2010)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 966,
                        "end": 987,
                        "text": "(Devlin et al., 2018)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 663,
                        "end": 671,
                        "text": "Figure 3",
                        "ref_id": null
                    },
                    {
                        "start": 890,
                        "end": 898,
                        "text": "Figure 2",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Role classification",
                "sec_num": "3.2"
            },
            {
                "text": "Our research is guided by two tasks, which focus on evaluating the performance of models that could classify whether a given post is, 1. cyberbullying-related or not, and 2. if cyberbullying-related, predicting the role of the user who authored that post.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Methods",
                "sec_num": "4"
            },
            {
                "text": "AMiCA dataset contains data collected from the social networking site ASKfm 1 by Van Hee et al.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "in April and October, 2013. ASKfm is very popular among adolescents and has increasingly been used for cyberbullying (Kao et al., 2019) . We used the English dataset, where posts are annotated and presented in chronological order within their original conversation (see Figure 1 ). AMiCA dataset is annotated by linguists using BRAT 2 , a web-based tool for text annotation, and considers the following four roles.",
                "cite_spans": [
                    {
                        "start": 117,
                        "end": 135,
                        "text": "(Kao et al., 2019)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 270,
                        "end": 278,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Harasser: person who initiates the harassment",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Victim: person who is harassed",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Bystander defender: person who helps the victim and discourages the harasser from continuing his actions",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Bystander assistant: person who does not initiate, but takes part in the actions of the harasser. Figure 4 shows the annotation mechanism where '2 Har' refers that the author's role is 'harasser' while the harmfulness score is 2.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 100,
                        "end": 108,
                        "text": "Figure 4",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "At post-level, the harmfulness of a post is scaled from 0 (no harm) to 2 (severely harmful). We merge harmfulness scores 1 and 2 together (e.g. 1 victim, 2 victim as 'victim') to increase training examples for each cyberbullying role. The cyberbullying class contained 5,380 instances (Harasser -3,576, Victim -1,356, Bystander assistant -24, Bystander defender -424). AMiCA dataset also provides annotations of cyberbullying-related textual categories such as threat, insult, curse. This study does not focus on those annotations during our model development.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "Van Hee et al. (2018) have used 10% of the data as the hold-out test set. However, their hold-out is not publicly available. Therefore, in this study, we perform 10-fold cross validation while having 10% of the dataset as the test set in each fold. In order to maintain a similar data distribution ratio among the classes and to make sure that the test set of one fold is mutually exclusive with the test sets of other folds, we use the 'StratifiedKFold' method in scikit-learn.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset",
                "sec_num": "4.1"
            },
            {
                "text": "In order to minimise the noise of ASKfm posts, we performed some pre-processing steps such as replacing slang words and abbreviations 3 and decoding emoticons 4 in addition to standard data preprocessing steps (e.g. removal of punctuations) while fine-tuning BERT (Devlin et al., 2018) .",
                "cite_spans": [
                    {
                        "start": 264,
                        "end": 285,
                        "text": "(Devlin et al., 2018)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data preprocessing and balancing",
                "sec_num": "4.2"
            },
            {
                "text": "Before feeding the posts into the models, we performed more preprocessing steps such as converting to lower case, tokenisation using the BERT tokenizer, and special token additions (adding [CLS] and [SEP] tokens to appropriate positions to perform BERT based sequence classification).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data preprocessing and balancing",
                "sec_num": "4.2"
            },
            {
                "text": "Evaluation metric. To evaluate our models and compare the performance with baselines, we use metrics similar to Van Hee et al. 2018: 1) F1-score: The harmonic mean of precision and recall and 2) Error rate: 1-recall of the class.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "5"
            },
            {
                "text": "Baseline. We use the best system of Van Hee et al. (2018) as our baseline to compare our models. This baseline used feature combinations such as subjectivity lexicons, character n-grams, term lists, and topic models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussion",
                "sec_num": "5"
            },
            {
                "text": "As discussed in section 3.1, our cyberbullying classification experiments extended an ensemble model (refer as 'OffensEval ensemble' hereafter) based on DistilBERT developed by authors for SemEval 2020 challenge (Herath et al., 2020) . To test the performance of OffensEval ensemble on ASKfm dataset, we constructed three test datasets. Each test dataset consisted of 10,872 non-bullying posts randomly sampled from the non-cyberbullying class and all the 5,380 posts belonging to the cyberbullying class. The class distribution in test datasets was selected such that it would be compatible with Van Hee et al. (2018). The averaged performance using three test sets is presented in 2018by a margin of 0.2 (F1 score). Since present results were obtained by evaluating a prebuilt model for a separate task, in our future works, we expect to improve our performance through fine-tuning our previous model on AMiCA dataset. Further, the presence of obscene slang words in non-cyberbullying posts could have led to some of the false positives. A sample of examples in this category is provided in section 5.2. The presence of very short posts with 'chat-related slang words (e.g., Fgt, No to the woah hoe)' the model has not seen during the training could have led to some of the false negatives. Table 2 demonstrates the 10-fold cross-validation results of our role classification models. As discussed in section 3.2, we created the BERT-based 'outer model' to classify posts into two classesbullying and defending. At the initial experiments, we obtained low recall for 'defending' class mainly due to the class imbalance in the dataset. To overcome this drawback, we have carried out experiments with different techniques such as weighted random sampling and weighted cross-entropy loss (as cost function). Based on the results of our experiments, weighted random sampling was used when training the outer model as it has shown considerable improvement in performance. 
Weighted random sampling is a sampling technique that attempts to maintain an approximately equal distribution of data instances among classes in a batch while training.",
                "cite_spans": [
                    {
                        "start": 212,
                        "end": 233,
                        "text": "(Herath et al., 2020)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1293,
                        "end": 1300,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of cyberbullying classification",
                "sec_num": "5.1"
            },
            {
                "text": "Our BERT-based 'defending model' demonstrated promising performance including 0.93 of weighted F1 score and 0.96 (victim class) and 0.86 (bystander defender class) of F1 score (Table 2) . Our BERT-based 'bullying model' was not successful in classifying bystander assistants. We have experimented with several strategies to improve the performance of bystander assistant detection such as choosing different training samples, limiting the number of instances taken from 'Harasser' class (100, 500) when training the 'Bullying' model, using weighted random sampling to under sample the harasser class while oversampling the bystander assistant class in order to keep the distribution among two classes at a ratio near to 1:1. However, these strategies failed to enable the 'Bullying' model or the overall ensemble model to detect bystander assistant class properly. Based on these experiments, we assume that the issue of the bystander assistant being classified as a harasser may not be due to class imbalance, but rather to the fact that examples in both classes have overlapping language (see sample posts of 'bystander assistant' below).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 176,
                        "end": 185,
                        "text": "(Table 2)",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of role classification",
                "sec_num": "5.2"
            },
            {
                "text": "While training each of the three models (Outer, Bullying, Defending), a batch size of 8 was used with a maximum sequence length of 256 characters. Cross entropy loss was used as the cost function and stochastic gradient descent with a learning rate of 2 \u00d7 10 \u22125 was used as the optimizer.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of role classification",
                "sec_num": "5.2"
            },
            {
                "text": "As shown in the Table 2 , our BERT-based 'ensemble model' has achieved 'good' performance (weighted F1-score is 0.76) except in the classes victim and bystander assistant. According to the confusion matrix of the ensemble model, most misclassified instances are related to victims being classified as harassers. An error analysis of misclassified posts revealed that bullying language widely overlaps with victims when victims use swear words to respond to the harasser. These posts increase the difficulty for models to detect victims and require efforts in future research to develop effective models that can handle aggressive victims. A sample of posts where victims have aggressively responded to harassers is shown below. \"[..] whoever is saying that sh*t that its me needs to cut your sh*t out you need to shut the f*** up [..]\" \"and you're living proof that abortion should be legal\"",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 16,
                        "end": 23,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation of role classification",
                "sec_num": "5.2"
            },
            {
                "text": "The comparison of our role classification model with the baselines is restricted since Van Hee et al. (2018) do not report cross-validation results 5 . However, if the 'error rates' are compared using our 10-fold cross-validation results with their hold-out results, our model outperforms the baseline by 0.26 and 0.11 of 'error rate' in harasser and victim classes respectively. Both the models were not able to detect bystander assistant successfully (i.e. error 5 https://doi.org/10.1371/journal.pone.0203794.t009 rate is 1). The baseline outperforms us by 0.01 (error rate) in the bystander defender class. Van Hee et al. (2018) reported that error rates were often lowest for the profanity baseline, confirming that it performs well in terms of recall; however, precision is also an important metric to be considered. In our future work, we intend to further improve recall of each role class while maintaining good precision.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation of role classification",
                "sec_num": "5.2"
            },
            {
                "text": "This paper proposes an approach to classify cyberbullying and associated roles (e.g., harasser, victim) as a novel contribution to enhance automated cyberbullying detection. Cyberbullying is a growing social problem that inflicts detrimental impacts on online users. The identification of roles is a valuable contribution to future research as it can prompt closer monitoring of bullies and implicitly help victims through potential prevention. Currently, our approaches to identifying cyberbullying related roles focus only on individual posts on a forum. In our future work, we aim to expand this further by considering an entire discussion and the discourse relationships between the posts within the considered discussion. This will enable us to get a better understanding of the roles played by different users in a discussion. Moreover, we intend to integrate cyberbullying and role classification as a single model and optimise performance further to provide an effective solution to the cyberbullying problem.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "6"
            },
            {
                "text": "https://brat.nlplab.org/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "https://floatcode.wordpress.com/2015/11/28/internetslang-dataset/ 4 https://github.com/carpedm20/emoji",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "Authors would like to acknowledge the researchers on the AMiCA project for sharing the dataset.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Cybercrime detection in online communications: The experimental case of cyberbullying detection in the twitter network",
                "authors": [
                    {
                        "first": "Mohammed Ali Al-Garadi",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    },
                    {
                        "first": "Kasturi",
                        "middle": [],
                        "last": "Dewi Varathan",
                        "suffix": ""
                    },
                    {
                        "first": "Sri Devi",
                        "middle": [],
                        "last": "Ravana",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Computers in Human Behavior",
                "volume": "63",
                "issue": "",
                "pages": "433--443",
                "other_ids": {
                    "DOI": [
                        "10.1016/j.chb.2016.05.051"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Mohammed Ali Al-garadi, Kasturi Dewi Varathan, and Sri Devi Ravana. 2016. Cybercrime detection in on- line communications: The experimental case of cy- berbullying detection in the twitter network. Com- puters in Human Behavior, 63:433 -443.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Bert: Pre-training of deep bidirectional transformers for language understanding",
                "authors": [
                    {
                        "first": "Jacob",
                        "middle": [],
                        "last": "Devlin",
                        "suffix": ""
                    },
                    {
                        "first": "Ming-Wei",
                        "middle": [],
                        "last": "Chang",
                        "suffix": ""
                    },
                    {
                        "first": "Kenton",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "Kristina",
                        "middle": [],
                        "last": "Toutanova",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Ade-laideCyC at SemEval-2020 Task 12: Ensemble of Classifiers for Offensive Language Detection in Social Media",
                "authors": [
                    {
                        "first": "Mahen",
                        "middle": [],
                        "last": "Herath",
                        "suffix": ""
                    },
                    {
                        "first": "Thushari",
                        "middle": [],
                        "last": "Atapattu",
                        "suffix": ""
                    },
                    {
                        "first": "Hoang",
                        "middle": [],
                        "last": "Dung",
                        "suffix": ""
                    },
                    {
                        "first": "Christoph",
                        "middle": [],
                        "last": "Treude",
                        "suffix": ""
                    },
                    {
                        "first": "Katrina",
                        "middle": [],
                        "last": "Falkner",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of SemEval",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mahen Herath, Thushari Atapattu, Hoang Dung, Christoph Treude, and Katrina Falkner. 2020. Ade- laideCyC at SemEval-2020 Task 12: Ensemble of Classifiers for Offensive Language Detection in So- cial Media. In Proceedings of SemEval.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Cyber bullying detection using social and textual analysis",
                "authors": [
                    {
                        "first": "Qianjia",
                        "middle": [],
                        "last": "Huang",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Vivek Kumar",
                        "suffix": ""
                    },
                    {
                        "first": "Pradeep",
                        "middle": [],
                        "last": "Singh",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Kumar Atrey",
                        "suffix": ""
                    }
                ],
                "year": 2014,
                "venue": "Proceedings of the 3rd International Workshop on Socially-Aware Multimedia",
                "volume": "",
                "issue": "",
                "pages": "3--6",
                "other_ids": {
                    "DOI": [
                        "10.1145/2661126.2661133"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Qianjia Huang, Vivek Kumar Singh, and Pradeep Ku- mar Atrey. 2014. Cyber bullying detection using so- cial and textual analysis. In Proceedings of the 3rd International Workshop on Socially-Aware Multime- dia, page 3-6. Association for Computing Machin- ery.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Automatic classification of participant roles in cyberbullying: Can we detect victims, bullies, and bystanders in social media text? Natural Language Engineering",
                "authors": [
                    {
                        "first": "Gilles",
                        "middle": [],
                        "last": "Jacobs",
                        "suffix": ""
                    },
                    {
                        "first": "Cynthia",
                        "middle": [],
                        "last": "Van Hee",
                        "suffix": ""
                    },
                    {
                        "first": "V\u00e9ronique",
                        "middle": [],
                        "last": "Hoste",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gilles Jacobs, Cynthia Van Hee, and V\u00e9ronique Hoste. 2020. Automatic classification of participant roles in cyberbullying: Can we detect victims, bullies, and bystanders in social media text? Natural Language Engineering. In-print.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Understanding cyberbullying on instagram and ask.fm via social role detection",
                "authors": [
                    {
                        "first": "Hsien-Te",
                        "middle": [],
                        "last": "Kao",
                        "suffix": ""
                    },
                    {
                        "first": "Shen",
                        "middle": [],
                        "last": "Yan",
                        "suffix": ""
                    },
                    {
                        "first": "Di",
                        "middle": [],
                        "last": "Huang",
                        "suffix": ""
                    },
                    {
                        "first": "Nathan",
                        "middle": [],
                        "last": "Bartley",
                        "suffix": ""
                    },
                    {
                        "first": "Homa",
                        "middle": [],
                        "last": "Hosseinmardi",
                        "suffix": ""
                    },
                    {
                        "first": "Emilio",
                        "middle": [],
                        "last": "Ferrara",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Companion Proceedings of The 2019 World Wide Web Conference",
                "volume": "",
                "issue": "",
                "pages": "183--188",
                "other_ids": {
                    "DOI": [
                        "10.1145/3308560.3316505"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Hsien-Te Kao, Shen Yan, Di Huang, Nathan Bartley, Homa Hosseinmardi, and Emilio Ferrara. 2019. Un- derstanding cyberbullying on instagram and ask.fm via social role detection. In Companion Proceed- ings of The 2019 World Wide Web Conference, page 183-188. Association for Computing Machinery.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Lifetime cyberbullying victimization rates",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Patchin",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Hinduja",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Patchin and S. Hinduja. 2019. Lifetime cyberbully- ing victimization rates.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Automatic cyberbullying detection: A systematic review",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Rosa",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Pereira",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Ribeiro",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [
                            "C"
                        ],
                        "last": "Ferreira",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "P"
                        ],
                        "last": "Carvalho",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Oliveira",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Coheur",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Paulino",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [
                            "M"
                        ],
                        "last": "Veiga Sim\u00e3o",
                        "suffix": ""
                    },
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Trancoso",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Computers in Human Behavior",
                "volume": "93",
                "issue": "",
                "pages": "333--345",
                "other_ids": {
                    "DOI": [
                        "10.1016/j.chb.2018.12.021"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "H. Rosa, N. Pereira, R. Ribeiro, P. C. Ferreira, J. P. Carvalho, S. Oliveira, L. Coheur, P. Paulino, A. M. Veiga Sim\u00e3o, and I. Trancoso. 2019. Automatic cy- berbullying detection: A systematic review. Com- puters in Human Behavior, 93:333 -345.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Approaches to automated detection of cyberbullying: A survey",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Salawu",
                        "suffix": ""
                    },
                    {
                        "first": "Y",
                        "middle": [],
                        "last": "He",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Lumsden",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "IEEE Transactions on Affective Computing",
                "volume": "11",
                "issue": "01",
                "pages": "3--24",
                "other_ids": {
                    "DOI": [
                        "10.1109/TAFFC.2017.2761757"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "S. Salawu, Y. He, and J. Lumsden. 2020. Approaches to automated detection of cyberbullying: A sur- vey. IEEE Transactions on Affective Computing, 11(01):3-24.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Bullying and the peer group: A review",
                "authors": [
                    {
                        "first": "Christina",
                        "middle": [],
                        "last": "Salmivalli",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Special Issue on Group Processes and Aggression",
                "volume": "15",
                "issue": "",
                "pages": "112--120",
                "other_ids": {
                    "DOI": [
                        "10.1016/j.avb.2009.08.007"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Christina Salmivalli. 2010. Bullying and the peer group: A review. Aggression and Violent Behavior, 15(2):112 -120. Special Issue on Group Processes and Aggression.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter",
                "authors": [
                    {
                        "first": "Victor",
                        "middle": [],
                        "last": "Sanh",
                        "suffix": ""
                    },
                    {
                        "first": "Lysandre",
                        "middle": [],
                        "last": "Debut",
                        "suffix": ""
                    },
                    {
                        "first": "Julien",
                        "middle": [],
                        "last": "Chaumond",
                        "suffix": ""
                    },
                    {
                        "first": "Thomas",
                        "middle": [],
                        "last": "Wolf",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Cyberbullying detection using probabilistic socio-textual information fusion",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Vivek",
                        "suffix": ""
                    },
                    {
                        "first": "Qianjia",
                        "middle": [],
                        "last": "Singh",
                        "suffix": ""
                    },
                    {
                        "first": "Pradeep",
                        "middle": [
                            "K"
                        ],
                        "last": "Huang",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Atrey",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Proceedings of the 2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining",
                "volume": "",
                "issue": "",
                "pages": "884--887",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vivek K. Singh, Qianjia Huang, and Pradeep K. Atrey. 2016. Cyberbullying detection using probabilistic socio-textual information fusion. In Proceedings of the 2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining, page 884-887.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Automatic detection of cyberbullying in social media text",
                "authors": [
                    {
                        "first": "Cynthia",
                        "middle": [],
                        "last": "Van Hee",
                        "suffix": ""
                    },
                    {
                        "first": "Gilles",
                        "middle": [],
                        "last": "Jacobs",
                        "suffix": ""
                    },
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Emmery",
                        "suffix": ""
                    },
                    {
                        "first": "Bart",
                        "middle": [],
                        "last": "Desmet",
                        "suffix": ""
                    },
                    {
                        "first": "Els",
                        "middle": [],
                        "last": "Lefever",
                        "suffix": ""
                    },
                    {
                        "first": "Ben",
                        "middle": [],
                        "last": "Verhoeven",
                        "suffix": ""
                    },
                    {
                        "first": "Guy",
                        "middle": [
                            "De"
                        ],
                        "last": "Pauw",
                        "suffix": ""
                    },
                    {
                        "first": "Walter",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    },
                    {
                        "first": "V\u00e9ronique",
                        "middle": [],
                        "last": "Hoste",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "PLOS ONE",
                "volume": "13",
                "issue": "10",
                "pages": "1--22",
                "other_ids": {
                    "DOI": [
                        "10.1371/journal.pone.0203794"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Cynthia Van Hee, Gilles Jacobs, Chris Emmery, Bart Desmet, Els Lefever, Ben Verhoeven, Guy De Pauw, Walter Daelemans, and V\u00e9ronique Hoste. 2018. Au- tomatic detection of cyberbullying in social media text. PLOS ONE, 13(10):1-22.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Guidelines for the Fine-Grained Analysis of Cyberbullying",
                "authors": [
                    {
                        "first": "Cynthia",
                        "middle": [],
                        "last": "Van Hee",
                        "suffix": ""
                    },
                    {
                        "first": "Ben",
                        "middle": [],
                        "last": "Verhoeven",
                        "suffix": ""
                    },
                    {
                        "first": "Els",
                        "middle": [],
                        "last": "Lefever",
                        "suffix": ""
                    },
                    {
                        "first": "Guy",
                        "middle": [
                            "De"
                        ],
                        "last": "Pauw",
                        "suffix": ""
                    },
                    {
                        "first": "Walter",
                        "middle": [],
                        "last": "Daelemans",
                        "suffix": ""
                    },
                    {
                        "first": "V\u00e9ronique",
                        "middle": [],
                        "last": "Hoste",
                        "suffix": ""
                    }
                ],
                "year": 2015,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and V\u00e9ronique Hoste. 2015. Guidelines for the Fine-Grained Analysis of Cyberbullying, version 1.0. LT3,. Technical report, Language and Translation Technology Team Ghent University.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Learning from bullying traces in social media",
                "authors": [
                    {
                        "first": "Jun-Ming",
                        "middle": [],
                        "last": "Xu",
                        "suffix": ""
                    },
                    {
                        "first": "Kwang-Sung",
                        "middle": [],
                        "last": "Jun",
                        "suffix": ""
                    },
                    {
                        "first": "Xiaojin",
                        "middle": [],
                        "last": "Zhu",
                        "suffix": ""
                    },
                    {
                        "first": "Amy",
                        "middle": [],
                        "last": "Bellmore",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "656--666",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jun-Ming Xu, Kwang-Sung Jun, Xiaojin Zhu, and Amy Bellmore. 2012. Learning from bullying traces in social media. In Proceedings of the 2012 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, page 656-666. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media (Offen-sEval 2020)",
                "authors": [
                    {
                        "first": "Marcos",
                        "middle": [],
                        "last": "Zampieri",
                        "suffix": ""
                    },
                    {
                        "first": "Preslav",
                        "middle": [],
                        "last": "Nakov",
                        "suffix": ""
                    },
                    {
                        "first": "Sara",
                        "middle": [],
                        "last": "Rosenthal",
                        "suffix": ""
                    },
                    {
                        "first": "Pepa",
                        "middle": [],
                        "last": "Atanasova",
                        "suffix": ""
                    },
                    {
                        "first": "Georgi",
                        "middle": [],
                        "last": "Karadzhov",
                        "suffix": ""
                    },
                    {
                        "first": "Hamdy",
                        "middle": [],
                        "last": "Mubarak",
                        "suffix": ""
                    },
                    {
                        "first": "Leon",
                        "middle": [],
                        "last": "Derczynski",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "Proceedings of SemEval",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. SemEval-2020 Task 12: Multilingual Offen- sive Language Identification in Social Media (Offen- sEval 2020). In Proceedings of SemEval.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "An excerpt from a cyberbullying episode(Van Hee et al., 2015)",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF1": {
                "text": "Outer Model: Classifies a post as Bullying or Defending 2. Bullying Model: Classifies a post as 'Harasser' or 'Bystander assistant' 3. Defending Model: Classifies a post as 'Victim' or 'Bystander defender'",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF2": {
                "text": "Model architecture Figure 3: Overview of the Ensemble model model was experimented with different sampling strategies and cost functions to obtain optimal performance.",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "FIGREF3": {
                "text": "1 https://ask.fm/ An example of BRAT annotation(Van Hee et al., 2015)",
                "num": null,
                "type_str": "figure",
                "uris": null
            },
            "TABREF0": {
                "type_str": "table",
                "content": "<table><tr><td>along</td></tr></table>",
                "text": "OffensEval ensemble 0.83 0.84 0.82 Van Hee et al. (2018) 0.64 0.74 0.56 Hold-out scores of cyberbullying classification et al.",
                "html": null,
                "num": null
            },
            "TABREF3": {
                "type_str": "table",
                "content": "<table><tr><td>: 10-fold cross-validation scores of our models;</td></tr><tr><td>WF: Weight F1</td></tr></table>",
                "text": "",
                "html": null,
                "num": null
            }
        }
    }
}