{
    "paper_id": "2021",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T02:10:16.309637Z"
    },
    "title": "Team Enigma at ArgMining-EMNLP 2021: Leveraging Pre-trained Language Models for Key Point Matching",
    "authors": [
        {
            "first": "Manav",
            "middle": [
                "Nitin"
            ],
            "last": "Kapadnis",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Indian Institute of Technology Kharagpur",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Sohan",
            "middle": [],
            "last": "Patnaik",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Indian Institute of Technology Kharagpur",
                "location": {}
            },
            "email": "sohanpatnaik106@gmail.com"
        },
        {
            "first": "Siba",
            "middle": [
                "Smarak"
            ],
            "last": "Panigrahi",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Indian Institute of Technology Kharagpur",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Varun",
            "middle": [],
            "last": "Madhavan",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Indian Institute of Technology Kharagpur",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Abhilash",
            "middle": [],
            "last": "Nandy",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Indian Institute of Technology Kharagpur",
                "location": {}
            },
            "email": "nandyabhilash@gmail.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We present the system description for our submission towards the Key Point Analysis Shared Task at ArgMining 2021. Track 1 of the shared task requires participants to develop methods to predict the match score between each pair of arguments and keypoints, provided they belong to the same topic under the same stance. We leveraged existing state of the art pre-trained language models along with incorporating additional data and features extracted from the inputs (topics, key points, and arguments) to improve performance. We were able to achieve mAP strict and mAP relaxed score of 0.872 and 0.966 respectively in the evaluation phase, securing 5th place 1 on the leaderboard. In the post evaluation phase, we achieved a mAP strict and mAP relaxed score of 0.921 and 0.982 respectively. All the codes to generate reproducible results on our models are available on Github 2 .",
    "pdf_parse": {
        "paper_id": "2021",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We present the system description for our submission towards the Key Point Analysis Shared Task at ArgMining 2021. Track 1 of the shared task requires participants to develop methods to predict the match score between each pair of arguments and keypoints, provided they belong to the same topic under the same stance. We leveraged existing state of the art pre-trained language models along with incorporating additional data and features extracted from the inputs (topics, key points, and arguments) to improve performance. We were able to achieve mAP strict and mAP relaxed score of 0.872 and 0.966 respectively in the evaluation phase, securing 5th place 1 on the leaderboard. In the post evaluation phase, we achieved a mAP strict and mAP relaxed score of 0.921 and 0.982 respectively. All the codes to generate reproducible results on our models are available on Github 2 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The Quantitative Summarization -Key Point Analysis (KPA) Shared Task requires participants to identify the keypoints in a given corpus. Formally, given an input corpus of relatively short, opinionated texts focused on a particular topic, KPA aims to identify the most prominent keypoints in the corpus. Hence the goal is to condense free-form text into a set of concise bullet points using a welldefined quantitative framework. In track 1, given a debatable topic, a set of keypoints per stance, and a set of crowd arguments supporting or contesting the topic, participants must report for each argument the corresponding match score for each keypoint under the same stance towards the topic. In track 2, we are required to build a language model that would generate keypoints given a set of arguments * Equal contribution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "1 All results and leaderboard standings are reported using the default evaluation method (explained in section 5)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "2 https://github.com/manavkapadnis/ Enigma_ArgMining and a topic and finally find the match score of that particular keypoint with the argument. We mainly focused on the first track.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We frame the task of identifying the most prominent keypoints as a sentence similarity task, obtaining the most similar keypoints corresponding to a given argument.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Sentence similarity is gaining much attention in the research community due to its versatility in various natural language applications such as text summarization (Abujar et al., 2019), question answering (Ashok et al., 2020) , sentiment analysis (Khamphakdee and Seresangtakul, 2021) and plagarisim detection (Lo and Simard, 2019) . Two major approaches to quantitatively measure similarity have been proposed -",
                "cite_spans": [
                    {
                        "start": 205,
                        "end": 225,
                        "text": "(Ashok et al., 2020)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 310,
                        "end": 331,
                        "text": "(Lo and Simard, 2019)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "\u2022 Lexical similarity, as the name suggests, is a measure of the extent or degree of lexicon overlap between two given sentences, ignoring the semantics of the lexicons.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "\u2022 Semantic similarity takes into account the meaning or semantics of the sentences. Deep Learning based approaches are typically leveraged to create dense representations of sentences, which are then compared using statistical methods like cosine similarity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Since the ArgKP-2021 dataset (Friedman et al., 2021) contains crowd arguments for or against a particular stance, naturally, we expect some paraphrasing in the arguments put forth by different people. This indicates that semantic similarity would be an appropriate measure of similarity. However, we observe the problem of semantic drift (Jansen, 2018) in keypoint -argument pairs. Hence, we add additional lexical overlap and syntactic parse based features to improve performance (details on the features can be found in Section 4).",
                "cite_spans": [
                    {
                        "start": 29,
                        "end": 52,
                        "text": "(Friedman et al., 2021)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 338,
                        "end": 352,
                        "text": "(Jansen, 2018)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "The ArgKP-2021 dataset (Friedman et al., 2021) which was the main dataset used for the shared task consists of approximately 27,520 argument/keypoint pairs for 31 controversial topics. Each of the pairs is labeled as matching or nonmatching, along with a stance towards the topic. The train data comprises of 5583 arguments and 207 keypoints, the validation data comprises of 932 arguments and 36 keypoints and the test data comprises of 723 arguments and 33 keypoints. Additionally, since external datasets were permitted, we experimented with two more datasets i.e., the IBM Rank 30k dataset (Gretz et al., 2019) and the Semantic Textual Similarity or STS dataset (Cer et al., 2017 ) (described in section 4.5) to train our model before fine-tuning on the ArgKP-2021 dataset. The STS dataset comprises of 8020 pairs of sentences, whereas the IBM Rank 30k dataset comprises of 30497 pairs of arguments and keypoints.",
                "cite_spans": [
                    {
                        "start": 23,
                        "end": 46,
                        "text": "(Friedman et al., 2021)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 594,
                        "end": 614,
                        "text": "(Gretz et al., 2019)",
                        "ref_id": null
                    },
                    {
                        "start": 666,
                        "end": 683,
                        "text": "(Cer et al., 2017",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dataset Description",
                "sec_num": "3"
            },
            {
                "text": "In this section, we elaborate on our experiments and methodology to find the best-performing models. The section is organized to describe the addition of dependency parsing features in Section 4.2, parts of speech features in Section 4.3, Tf-idf features in Section 4.4, and the use of external datasets in Section 4.5.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Implementation Details",
                "sec_num": "4"
            },
            {
                "text": "In recent work, Transformer (Vaswani et al., 2017) based pre-trained language models like BERT (Devlin et al., 2019), RoBERTa , BART , and DeBERTa (He et al., 2021) , have proven to be very powerful in learning robust context-based representations of lexicons and applying these to achieve state of the art performance on a variety of downstream tasks. We leverage these models for learning contextual representations of a keypoint -argument pair. The keypoints and arguments are individually concatenated, along with the topic (in the same order) for additional context information. We then obtain the contextual representation of this triplet and concatenate to it an encoded feature vector of additional features (one of Dependency Parse based features, Parts-of-Speech based features, and Tf-idf vectors). This concatenated vector was then passed through dense layers and a sigmoid activation to get a final similarity score in the desired range of [0, 1], as shown in Figure 1 .",
                "cite_spans": [
                    {
                        "start": 28,
                        "end": 50,
                        "text": "(Vaswani et al., 2017)",
                        "ref_id": null
                    },
                    {
                        "start": 147,
                        "end": 164,
                        "text": "(He et al., 2021)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 973,
                        "end": 981,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Baseline Transformer Model Architecture",
                "sec_num": "4.1"
            },
            {
                "text": "To capture the syntactic structure of the sentences, we added the dependency parse tree of the sentence as an additional feature.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dependency Parsing Features",
                "sec_num": "4.2"
            },
            {
                "text": "To obtain the same, we used the open-source tool spacy 3 . The dependency features are then label encoded according to descending order of occurrences. Consider three unique dependency features in all the concatenated sentences of the original dataset, namely, 'aux', 'amod', and 'nsubj'. Let 'aux', 'nsubj', and 'amod' be the descending order of count in the dataset, then 'aux' is encoded as one, 'nsubj' as two and 'amod' is encoded as three. All the names of unique features can be found in the supplementary material.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Dependency Parsing Features",
                "sec_num": "4.2"
            },
            {
                "text": "These encoded dependency features are then concatenated to the output of the transformer model and passed to subsequent layers as shown in Figure  1 . ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 139,
                        "end": 148,
                        "text": "Figure  1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Dependency Parsing Features",
                "sec_num": "4.2"
            },
            {
                "text": "With a similar motive as before, i.e., to better capture the syntactic structure of the sentences, we experimented with Part-Of-Speech (POS) Features as well.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parts of Speech Features",
                "sec_num": "4.3"
            },
            {
                "text": "As before, we used the open-source tool Spacy to obtain POS labels for each lexicon, which were then label encoded according to descending order of occurrences. The encoded feature vector is then concatenated to the output of the transformer model and fed to the subsequent layers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parts of Speech Features",
                "sec_num": "4.3"
            },
            {
                "text": "In addition to semantic overlap, we wished to see if adding lexical overlap-based features would improve the ability of the model to identify similar sentences. To this end, we obtained the Tf-idf vector of the (keypoint, argument, topic) triplet (with padding). As before, the encoded feature vector is then concatenated to the output of the transformer model and fed further to the subsequent layers.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Tf-idf features",
                "sec_num": "4.4"
            },
            {
                "text": "We further tried to experiment with sentence similarity pre-training task on two additional datasets. The two datasets used were the STS benchmark dataset and the IBM Debater\u00ae -IBM Rank 30k dataset.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "External Datasets",
                "sec_num": "4.5"
            },
            {
                "text": "For the STS dataset, we normalized the target similarity score to bring the scores between 0 and 1. No additional preprocessing was done to the text. The two input sentences were concatenated into a single sentence and then directly fed to the model. We trained our model on STS dataset for 6 epochs and on the main dataset for 3 epochs.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "External Datasets",
                "sec_num": "4.5"
            },
            {
                "text": "For the IBM Rank 30k dataset, we used the MACE (Hovy et al., 2013) Probability score as the target column, which signifies the argument quality score for the corresponding topic. This is analogous to our approach for main task, wherein we output a similarity score for each argumentkeypoint pair. No preprocessing was done to the text, the argument and topic were concatenated into a single sentence and then fed to the model. We trained our model on the IBM Rank 30k dataset for 3 epochs and on the main dataset for 3 epochs.",
                "cite_spans": [
                    {
                        "start": 47,
                        "end": 66,
                        "text": "(Hovy et al., 2013)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "External Datasets",
                "sec_num": "4.5"
            },
            {
                "text": "Due to resource constraints, we were not able to perform pre-training on both the additional datasets one after another.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "External Datasets",
                "sec_num": "4.5"
            },
            {
                "text": "After we had concluded our experiments, a new evaluation method was proposed by organizers, which removes the positive bias towards a system that predict less true positives in high confidence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussions",
                "sec_num": "5"
            },
            {
                "text": "In the default evaluation metric a perfect recall is attained only when all positive ground truth labels are predicted, whereas the new method allows a perfect recall score when the top 50% of the predictions (ranked by confidence) are positive. However, since we had completed all our experiments at this point, it was not feasible to rerun all our experiments in the given time frame. Hence we have reported all our results according to the default evaluation method.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Discussions",
                "sec_num": "5"
            },
            {
                "text": "Among all the transformer models without the use of external datasets, we found BART-large to perform best, along with DeBERTa-large with Tfidf as additional features, achieving the best mAP strict and mAP relaxed score of 0.909, 0.982 and 0.911, 0.987 respectively. All the reported results are averaged over three seeds. Table 1 describes our experiments with different Transformer-based contextual language models without using any additional features. Recent improvements to the state-of-the-art in contextual language models in BART and DeBERTa perform significantly better than BERT. Further, BART is pre-trained using various self-supervised objectives such as token masking, sentence permutation, document rotation, token deletion and text infilling, unlike other models that mostly use either masked language modelling or next sentence prediction. In our opinion, the tasks of sentence permutation and document rotation help the model get a better understanding of context at the sentence level, and thus, are helpful when considering the keypoint matching task. We also observe that the large version of the models, trained on more data with more parameters, perform significantly better than the base versions, as expected. Table 2 shows the best performing results obtained by concatenating one of the following -Dependency Parse features, POS features, and Tfidf features. We note that out of the three feature vectors methods, Tf-idf features performs the best. Tf-idf gives a relation/measure of lexical overlap between the argument and keypoint, whereas the other features (POS and Dependency Parse) just expand on the sentence structures of the argument and the keypoint, without expressing the relation between the same. Thus it is observed that Tfidf performs better than the other two feature vectors. In table 2, we report the best-performing transformer-based models for each feature vector. Detailed results (each transformer model with each feature) can be found in the Appendix which is present in the supplementary material. We could not perform combination of all the syntactic features due to limited GPU memory availability. BART-large 0.868 \u00b1 0.023 0.977 \u00b1 0.015 POS 5 BART-large 0.906 \u00b1 0.011 0.987 \u00b1 0.005 Tf-idf DeBERTa-large 0.911 \u00b1 0.005 0.987 \u00b1 0.008 Table 3 shows the outcome of training on additional datasets such as the STS and the IBM Rank 30k dataset without using any feature vectors. We find that the best performing scores using both these datasets are almost equal and are achieved by the same BART-large model architecture. Thus training on additional datasets led to a substantial increase in both mAP strict and mAP relaxed scores. The best results of pre-training on the additional datasets were almost similar, which might be because the ground truth scores in both the datasets effectively reflect the semantic overlap between two sentences (i.e., if two sentences of a data sample are semantically similar, they would have a higher score, and vice versa), thus making the datasets similar to one another.",
                "cite_spans": [
                    {
                        "start": 2197,
                        "end": 2198,
                        "text": "5",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 323,
                        "end": 330,
                        "text": "Table 1",
                        "ref_id": null
                    },
                    {
                        "start": 1235,
                        "end": 1242,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    },
                    {
                        "start": 2287,
                        "end": 2294,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Results and Discussions",
                "sec_num": "5"
            },
            {
                "text": "We also tried adding feature vectors plus training on additional datasets 6 , but there was no significant change in the performance than the existing results. Transformers themselves are able to learn syntactic and semantic features on their own during the training process (Clark et al., 2019) . Adding these features only increases redundancy, as a result of which the performance of the model isn't affected much. This observation could also be seen in the difference in the results of table 1 and 2. In Figures 2 and 3 , we plot the results of the best-performing transformer-based models using different feature vectors.",
                "cite_spans": [
                    {
                        "start": 275,
                        "end": 295,
                        "text": "(Clark et al., 2019)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 508,
                        "end": 523,
                        "text": "Figures 2 and 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Model",
                "sec_num": null
            },
            {
                "text": "We designed different settings to compare and validate our approach and its performance. This section consists of results on excluding of topics from input in Section 6.1, incorporating average of hidden states before feeding to dense layers in Section 6.2, and boosting in Section 6.3. Since we obtain best results with BART-large and DeBERTalarge with Tf-idf features, thus the following ablation study is done with these class of models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Ablation Study",
                "sec_num": "6"
            },
            {
                "text": "We incorporate the combination of keypoints and arguments as input to the pre-trained language models to analyze the importance of the topic towards generating the matching score. Comparing Table 1 and Table 4 , incorporating topic provides more context in the input, thus improving both mAP strict score and mAP relaxed score.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 190,
                        "end": 197,
                        "text": "Table 1",
                        "ref_id": null
                    },
                    {
                        "start": 202,
                        "end": 209,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Exclusion of topic from input",
                "sec_num": "6.1"
            },
            {
                "text": "mAP Strict mAP Relaxed BART-base 0.803 \u00b1 0.028 0.898 \u00b1 0.015 DeBERTa-base 0.823 \u00b1 0.030 0.922 \u00b1 0.012 BART-large 0.880 \u00b1 0.006 0.946 \u00b1 0.010 DeBERTa-large 0.874 \u00b1 0.025 0.946 \u00b1 0.027 Table 4 : Results with input as keypoint plus argument",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 183,
                        "end": 190,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Model",
                "sec_num": null
            },
            {
                "text": "We average the last two and the last three hidden states of the pre-trained language model. The average hidden states were then fed into the dense layers to obtain the match score. It can be observed that for both BART-large and DeBERTa-large, the performance decreases as we incorporate more hidden states for the output. The intuition behind this observation can be attributed to the fact that taskspecific information encoded in hidden states is less as compared to the last layer, resulting in decreased performance. The results are shown in Table 5 ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 546,
                        "end": 553,
                        "text": "Table 5",
                        "ref_id": "TABREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Average of hidden states",
                "sec_num": "6.2"
            },
            {
                "text": "We implemented the AdaBoost algorithm by considering our baseline transformer architecture as the base model for this sequential paradigm. BARTlarge and DeBERTa-large were the transformers used for this study. The first base model was trained with the whole training set, whereas the other four models were trained by sampling data points from a probability distribution. Initially, all the data points were assigned an equal probability. However, the distribution was updated in a way such that the erroneous data points for the previous base models were given a higher probability to be sampled. The top 10, 000 most probable data points were sampled for each base model except for the first one. It can be observed from Table 1 and Table  6 that for DeBERTa large model, the mAP Strict has indeed been boosted from 0.889 to 0.904. The results are mentioned in Table 6 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 723,
                        "end": 743,
                        "text": "Table 1 and Table  6",
                        "ref_id": "TABREF5"
                    },
                    {
                        "start": 863,
                        "end": 870,
                        "text": "Table 6",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Boosting",
                "sec_num": "6.3"
            },
            {
                "text": "mAP Strict mAP Relaxed BART-large 0.832 \u00b1 0.020 0.960 \u00b1 0.010 DeBERTa-large 0.904 \u00b1 0.021 0.973 \u00b1 0.017 ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Model",
                "sec_num": null
            },
            {
                "text": "In this work, we used Pre-trained Language Models (PLMs) to predict the match score for each argument and keypoint pair under the same stance towards the topic. We observed the state-of-theart PLMs such as BART and DeBERTa perform the best compared to other models. We further improve the performance with additional datasets (IBM Rank 30k and STS) to perform additional pre-training (with sentence similarity) before finetuning on ArgKP-2021 dataset. We experimented with POS, Dependency and Tf-idf features to evaluate the addition of extra syntactic features. We support the selection of our final models with various ablation studies. It would be a good future direction to generate appropriate explanations from concatenated input and propose methods to use explanations in the training process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            },
            {
                "text": "https://spacy.io/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Encoded dependency features (section 4.2) 5 Encoded parts of speech features (section 4.3)6 The results of these experiments can be found in Appendix available in the supplementary material.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank the organizers Roni Friedman-Melamed, Lena Dankin, Yufang Hou, and Noam Slonim for holding this shared task. It was a great learning experience for us. We would also like to thank our fellow participants at ArgMining 2021; we look forward to learning more about their approaches and interacting with them at EMNLP. Finally, we would like to extend a big thanks to makers and maintainers of the exemplary HuggingFace (Wolf et al., 2020) repository, without which most of our research would have been impossible.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": "8"
            },
            {
                "text": "Complete results of these experiments can be found in the Appendix available in the supplementary material. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "annex",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Sentence similarity estimation for text summarization using deep learning",
                "authors": [
                    {
                        "first": "Mahmudul",
                        "middle": [],
                        "last": "Sheikh Abujar",
                        "suffix": ""
                    },
                    {
                        "first": "Syed",
                        "middle": [],
                        "last": "Hasan",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Akhter Hossain",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 2nd International Conference on Data Engineering and Communication Technology",
                "volume": "",
                "issue": "",
                "pages": "155--164",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sheikh Abujar, Mahmudul Hasan, and Syed Akhter Hossain. 2019. Sentence similarity estimation for text summarization using deep learning. In Proceed- ings of the 2nd International Conference on Data Engineering and Communication Technology, pages 155-164, Singapore. Springer Singapore.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "SimsterQ: A similarity based clustering approach to opinion question answering",
                "authors": [
                    {
                        "first": "Aishwarya",
                        "middle": [],
                        "last": "Ashok",
                        "suffix": ""
                    },
                    {
                        "first": "Ganapathy",
                        "middle": [],
                        "last": "Natarajan",
                        "suffix": ""
                    },
                    {
                        "first": "Ramez",
                        "middle": [],
                        "last": "Elmasri",
                        "suffix": ""
                    },
                    {
                        "first": "Laurel",
                        "middle": [],
                        "last": "Smith-Stvan",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of The 3rd Workshop on e-Commerce and NLP",
                "volume": "",
                "issue": "",
                "pages": "69--76",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/2020.ecnlp-1.11"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Aishwarya Ashok, Ganapathy Natarajan, Ramez El- masri, and Laurel Smith-Stvan. 2020. SimsterQ: A similarity based clustering approach to opinion ques- tion answering. In Proceedings of The 3rd Workshop on e-Commerce and NLP, pages 69-76, Seattle, WA, USA. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation",
                "authors": [
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Cer",
                        "suffix": ""
                    },
                    {
                        "first": "Mona",
                        "middle": [],
                        "last": "Diab",
                        "suffix": ""
                    },
                    {
                        "first": "Eneko",
                        "middle": [],
                        "last": "Agirre",
                        "suffix": ""
                    },
                    {
                        "first": "I\u00f1igo",
                        "middle": [],
                        "last": "Lopez-Gazpio",
                        "suffix": ""
                    },
                    {
                        "first": "Lucia",
                        "middle": [],
                        "last": "Specia",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "Proceedings of the 11th International Workshop on Semantic Evaluation",
                "volume": "",
                "issue": "",
                "pages": "1--14",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daniel Cer, Mona Diab, Eneko Agirre, I\u00f1igo Lopez- Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evalu- ation (SemEval-2017), pages 1-14.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "What does bert look at? an analysis of bert's attention",
                "authors": [
                    {
                        "first": "Kevin",
                        "middle": [],
                        "last": "Clark",
                        "suffix": ""
                    },
                    {
                        "first": "Urvashi",
                        "middle": [],
                        "last": "Khandelwal",
                        "suffix": ""
                    },
                    {
                        "first": "Omer",
                        "middle": [],
                        "last": "Levy",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [
                            "D"
                        ],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does bert look at? an analysis of bert's attention.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "BERT: Pre-training of deep bidirectional transformers for language understanding",
                "authors": [
                    {
                        "first": "Jacob",
                        "middle": [],
                        "last": "Devlin",
                        "suffix": ""
                    },
                    {
                        "first": "Ming-Wei",
                        "middle": [],
                        "last": "Chang",
                        "suffix": ""
                    },
                    {
                        "first": "Kenton",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "Kristina",
                        "middle": [],
                        "last": "Toutanova",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "1",
                "issue": "",
                "pages": "4171--4186",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/N19-1423"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Overview of kpa-2021 shared task: Key point based quantitative summarization",
                "authors": [
                    {
                        "first": "Roni",
                        "middle": [],
                        "last": "Friedman",
                        "suffix": ""
                    },
                    {
                        "first": "Lena",
                        "middle": [],
                        "last": "Dankin",
                        "suffix": ""
                    },
                    {
                        "first": "Yoav",
                        "middle": [],
                        "last": "Katz",
                        "suffix": ""
                    },
                    {
                        "first": "Yufang",
                        "middle": [],
                        "last": "Hou",
                        "suffix": ""
                    },
                    {
                        "first": "Noam",
                        "middle": [],
                        "last": "Slonim",
                        "suffix": ""
                    }
                ],
                "year": 2021,
                "venue": "Proceedings of the 8th Workshop on Argumentation Mining",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Roni Friedman, Lena Dankin, Yoav Katz, Yufang Hou, and Noam Slonim. 2021. Overview of kpa-2021 shared task: Key point based quantitative summa- rization. In Proceedings of the 8th Workshop on Ar- gumentation Mining. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Ranit Aharonov, and Noam Slonim. 2019. A large-scale dataset for argument quality ranking: Construction and analysis",
                "authors": [
                    {
                        "first": "Shai",
                        "middle": [],
                        "last": "Gretz",
                        "suffix": ""
                    },
                    {
                        "first": "Roni",
                        "middle": [],
                        "last": "Friedman",
                        "suffix": ""
                    },
                    {
                        "first": "Edo",
                        "middle": [],
                        "last": "Cohen-Karlik",
                        "suffix": ""
                    },
                    {
                        "first": "Assaf",
                        "middle": [],
                        "last": "Toledo",
                        "suffix": ""
                    },
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Lahav",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Shai Gretz, Roni Friedman, Edo Cohen-Karlik, As- saf Toledo, Dan Lahav, Ranit Aharonov, and Noam Slonim. 2019. A large-scale dataset for argument quality ranking: Construction and analysis.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Deberta: Decoding-enhanced bert with disentangled attention",
                "authors": [
                    {
                        "first": "Pengcheng",
                        "middle": [],
                        "last": "He",
                        "suffix": ""
                    },
                    {
                        "first": "Xiaodong",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    },
                    {
                        "first": "Jianfeng",
                        "middle": [],
                        "last": "Gao",
                        "suffix": ""
                    },
                    {
                        "first": "Weizhu",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    }
                ],
                "year": 2021,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. Deberta: Decoding-enhanced bert with disentangled attention.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Learning whom to trust with MACE",
                "authors": [
                    {
                        "first": "Dirk",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    },
                    {
                        "first": "Taylor",
                        "middle": [],
                        "last": "Berg-Kirkpatrick",
                        "suffix": ""
                    },
                    {
                        "first": "Ashish",
                        "middle": [],
                        "last": "Vaswani",
                        "suffix": ""
                    },
                    {
                        "first": "Eduard",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "1120--1130",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dirk Hovy, Taylor Berg-Kirkpatrick, Ashish Vaswani, and Eduard Hovy. 2013. Learning whom to trust with MACE. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1120-1130, Atlanta, Georgia. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Multi-hop inference for sentencelevel TextGraphs: How challenging is meaningfully combining information for science question answering?",
                "authors": [
                    {
                        "first": "Peter",
                        "middle": [],
                        "last": "Jansen",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Proceedings of the Twelfth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-12)",
                "volume": "",
                "issue": "",
                "pages": "12--17",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/W18-1703"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Peter Jansen. 2018. Multi-hop inference for sentence- level TextGraphs: How challenging is meaningfully combining information for science question answer- ing? In Proceedings of the Twelfth Workshop on Graph-Based Methods for Natural Language Pro- cessing (TextGraphs-12), pages 12-17, New Or- leans, Louisiana, USA. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "A framework for constructing thai sentiment corpus using the cosine similarity technique",
                "authors": [
                    {
                        "first": "Nattawat",
                        "middle": [],
                        "last": "Khamphakdee",
                        "suffix": ""
                    },
                    {
                        "first": "Pusadee",
                        "middle": [],
                        "last": "Seresangtakul",
                        "suffix": ""
                    }
                ],
                "year": 2021,
                "venue": "2021 13th International Conference on Knowledge and Smart Technology (KST)",
                "volume": "",
                "issue": "",
                "pages": "202--207",
                "other_ids": {
                    "DOI": [
                        "10.1109/KST51265.2021.9415802"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Nattawat Khamphakdee and Pusadee Seresangtakul. 2021. A framework for constructing thai sentiment corpus using the cosine similarity technique. In 2021 13th International Conference on Knowledge and Smart Technology (KST), pages 202-207.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension",
                "authors": [
                    {
                        "first": "Mike",
                        "middle": [],
                        "last": "Lewis",
                        "suffix": ""
                    },
                    {
                        "first": "Yinhan",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    },
                    {
                        "first": "Naman",
                        "middle": [],
                        "last": "Goyal",
                        "suffix": ""
                    },
                    {
                        "first": "Marjan",
                        "middle": [],
                        "last": "Ghazvininejad",
                        "suffix": ""
                    },
                    {
                        "first": "Abdelrahman",
                        "middle": [],
                        "last": "Mohamed",
                        "suffix": ""
                    },
                    {
                        "first": "Omer",
                        "middle": [],
                        "last": "Levy",
                        "suffix": ""
                    },
                    {
                        "first": "Ves",
                        "middle": [],
                        "last": "Stoyanov",
                        "suffix": ""
                    },
                    {
                        "first": "Luke",
                        "middle": [],
                        "last": "Zettlemoyer",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Fully unsupervised crosslingual semantic textual similarity metric based on BERT for identifying parallel data",
                "authors": [
                    {
                        "first": "Chi-Kiu",
                        "middle": [],
                        "last": "Lo",
                        "suffix": ""
                    },
                    {
                        "first": "Michel",
                        "middle": [],
                        "last": "Simard",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)",
                "volume": "",
                "issue": "",
                "pages": "206--215",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/K19-1020"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Chi-kiu Lo and Michel Simard. 2019. Fully unsuper- vised crosslingual semantic textual similarity metric based on BERT for identifying parallel data. In Pro- ceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 206- 215, Hong Kong, China. Association for Computa- tional Linguistics.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "Model Architecture (\"+\" implies concatenation)"
            },
            "FIGREF2": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "All preprocessing methods with BART large"
            },
            "FIGREF3": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "All preprocessing methods with DeBERTa large"
            },
            "TABREF1": {
                "html": null,
                "num": null,
                "content": "<table/>",
                "text": "",
                "type_str": "table"
            },
            "TABREF2": {
                "html": null,
                "num": null,
                "content": "<table/>",
                "text": "Results with pretraining on additional datasets",
                "type_str": "table"
            },
            "TABREF3": {
                "html": null,
                "num": null,
                "content": "<table><tr><td>Model</td><td>No. of Hidden States</td><td>mAP Strict</td><td>mAP Relaxed</td></tr><tr><td>BART-large</td><td>2</td><td colspan=\"2\">0.868 \u00b1 0.016 0.941 \u00b1 0.004</td></tr><tr><td>DeBERTa-large</td><td>2</td><td colspan=\"2\">0.871 \u00b1 0.039 0.949 \u00b1 0.015</td></tr><tr><td>BART-large</td><td>3</td><td colspan=\"2\">0.837 \u00b1 0.020 0.933 \u00b1 0.012</td></tr><tr><td>DeBERTa-large</td><td>3</td><td colspan=\"2\">0.850 \u00b1 0.014 0.934 \u00b1 0.022</td></tr></table>",
                "text": ".",
                "type_str": "table"
            },
            "TABREF4": {
                "html": null,
                "num": null,
                "content": "<table/>",
                "text": "Results with average of hidden states",
                "type_str": "table"
            },
            "TABREF5": {
                "html": null,
                "num": null,
                "content": "<table/>",
                "text": "Boosting Results on Transformer model",
                "type_str": "table"
            }
        }
    }
}