{
    "paper_id": "2021",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T02:10:07.747124Z"
    },
    "title": "Exploring Methodologies for Collecting High-Quality Implicit Reasoning in Arguments",
    "authors": [
        {
            "first": "Keshav",
            "middle": [],
            "last": "Singh",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Tohoku University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Farjana",
            "middle": [],
            "last": "Sultana",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Tohoku University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Naoya",
            "middle": [],
            "last": "Inoue",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "RIKEN Stony Brook University Ricoh Company, Ltd",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Shoichi",
            "middle": [],
            "last": "Naito",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Tohoku University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Kentaro",
            "middle": [],
            "last": "Inui",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Tohoku University",
                "location": {}
            },
            "email": "inui@tohoku.ac.jp"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Annotation of implicit reasoning (i.e., warrant) in arguments is a critical resource to train models in gaining deeper understanding and correct interpretation of arguments. However, warrants are usually annotated in unstructured form, having no restriction on their lexical structure which sometimes makes it difficult to interpret how warrants relate to any of the information given in claim and premise. Moreover, assessing and determining better warrants from the large variety of reasoning patterns of unstructured warrants becomes a formidable task. Therefore, in order to annotate warrants in a more interpretative and restrictive way, we propose two methodologies to annotate warrants in a semi-structured form. To the best of our knowledge, we are the first to show how such semi-structured warrants can be annotated on a large scale via crowdsourcing. We demonstrate through extensive quality evaluation that our methodologies enable collecting better quality warrants in comparison to unstructured annotations. To further facilitate research towards the task of explicating warrants in arguments, we release our materials publicly (i.e., crowdsourcing guidelines and collected warrants).",
    "pdf_parse": {
        "paper_id": "2021",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Annotation of implicit reasoning (i.e., warrant) in arguments is a critical resource to train models in gaining deeper understanding and correct interpretation of arguments. However, warrants are usually annotated in unstructured form, having no restriction on their lexical structure which sometimes makes it difficult to interpret how warrants relate to any of the information given in claim and premise. Moreover, assessing and determining better warrants from the large variety of reasoning patterns of unstructured warrants becomes a formidable task. Therefore, in order to annotate warrants in a more interpretative and restrictive way, we propose two methodologies to annotate warrants in a semi-structured form. To the best of our knowledge, we are the first to show how such semi-structured warrants can be annotated on a large scale via crowdsourcing. We demonstrate through extensive quality evaluation that our methodologies enable collecting better quality warrants in comparison to unstructured annotations. To further facilitate research towards the task of explicating warrants in arguments, we release our materials publicly (i.e., crowdsourcing guidelines and collected warrants).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Implicit reasonings, commonly referred to as warrants (Toulmin, 1958) , have long been studied to understand the grounds on which a premise lends support to the claim (Freeman, 1992) . In other words, a warrant, when made explicit, clearly shows the inferential link between claim and premise (Pineau, 2013) . As depicted in Figure 1 , identification of such warrants by students has been shown to aid them in making better arguments (Erduran et al., 2004) , as well as improving their critical thinking skills (von der M\u00fchlen et al., 2019) and argument comprehension process (Hitchcock and Verheij, 2006) .",
                "cite_spans": [
                    {
                        "start": 54,
                        "end": 69,
                        "text": "(Toulmin, 1958)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 167,
                        "end": 182,
                        "text": "(Freeman, 1992)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 293,
                        "end": 307,
                        "text": "(Pineau, 2013)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 434,
                        "end": 456,
                        "text": "(Erduran et al., 2004)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 511,
                        "end": 540,
                        "text": "(von der M\u00fchlen et al., 2019)",
                        "ref_id": "BIBREF19"
                    },
                    {
                        "start": 576,
                        "end": 605,
                        "text": "(Hitchcock and Verheij, 2006)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 325,
                        "end": 333,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "While explication of warrants with assistance from teachers has been shown to be useful for improving students' argumentation skills, automating this explication process would not only help students to be less dependent on teachers, but it can also be beneficial for different downstream educational applications such as argument analysis (Becker et al., 2020) , enthymeme reconstruction (Razuvayevskaya and Teufel, 2017; Hulpus et al., 2019) and essay scoring (Williamson, 2013) . However, building an automated warrant explication system has been a challenge due to the difficulty of collecting warrants in a form that explicitly manifests the way a warrant relates the information between claim and premise. Generally, warrants are annotated in an unstructured (i.e., freetext) format which lays no restriction on its lexical structure (Boltu\u017ei\u0107 and \u0160najder, 2016) . As a result, sometimes the warrant consist of no information that overlaps with claim or premise which makes it difficult to understand how the warrant connects the claim to its premise. Furthermore, for a given argument, unstructured warrants can be framed in diverse ways that would have a wide variety of reasoning patterns (Kock, 2006 ) (given no restriction on the lexical structure), and identifying the correct ones from this large pool of warrants can be a preposterous task.",
                "cite_spans": [
                    {
                        "start": 339,
                        "end": 360,
                        "text": "(Becker et al., 2020)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 388,
                        "end": 421,
                        "text": "(Razuvayevskaya and Teufel, 2017;",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 422,
                        "end": 442,
                        "text": "Hulpus et al., 2019)",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 461,
                        "end": 479,
                        "text": "(Williamson, 2013)",
                        "ref_id": "BIBREF21"
                    },
                    {
                        "start": 839,
                        "end": 867,
                        "text": "(Boltu\u017ei\u0107 and \u0160najder, 2016)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 1197,
                        "end": 1208,
                        "text": "(Kock, 2006",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In order to annotate warrants on a large-scale and in a way that overcomes the aforementioned challenges, we propose two novel warrant annotation methodologies: Pre-defined Keyword-based Warrant (PKW) and User-defined Keyword-based Warrant (UKW), which restrict a warrant's lexical structure to a semi-structured form. In contrast to approaches that crowdsource unstructured warrants, these methodologies explicate warrant by enforcing it to have the key information (i.e., keywords) from both claim and premise. The intuition behind our semi-structured approach is to restrict the structure of warrants to specific keyword-based Figure 1 : A typical example of warrant explication. Feedback provided by teachers or automated warrant explication system can help students identify correct warrants and leverage it in their revised argument to make the argument more reasonable.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 630,
                        "end": 638,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "reasoning patterns. We hypothesize that leveraging such keyword-based pattern could assist in collecting high quality warrants, where keywords are derived from the original argument (i.e., claim and premise). Our assumption follows the formal definition of warrants in the sense that warrants act as a inferential link between the contents of claim and its premise (Toulmin, 1958; Freeman, 1992) .",
                "cite_spans": [
                    {
                        "start": 365,
                        "end": 380,
                        "text": "(Toulmin, 1958;",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 381,
                        "end": 395,
                        "text": "Freeman, 1992)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Many previous work demonstrated different strategies of collecting high quality warrants (Becker et al., 2017; Habernal et al., 2018; Becker et al., 2020) , but did not apply any restriction on the structure of warrants to handle the variety of reasoning patterns in which warrants can be explicated. In contrast, our annotation methodologies are designed to restrict the reasoning pattern and ensure that warrant explicates the reasoning link between claim and premise.",
                "cite_spans": [
                    {
                        "start": 89,
                        "end": 110,
                        "text": "(Becker et al., 2017;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 111,
                        "end": 133,
                        "text": "Habernal et al., 2018;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 134,
                        "end": 154,
                        "text": "Becker et al., 2020)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In order to evaluate the warrants annotated via our proposed UKW and PKW methodologies, we devise specific guidelines to judge their quality through scoring. We also collect unstructured warrants (i.e., Natural Language Warrants (NLW)) and perform quality evaluation on them in order to compare with our annotated warrants. Our results suggest that in comparison to NLW, high quality warrants can be annotated via our proposed UKW methodology. To the best of our knowledge, this is the first study which targets large scale annotation of warrants in semi-structured form. To facilitate further research in warrant explication, we publish our crowdsourcing guidelines and the preliminary corpus of around 1700 warrants that are annotated via UKW methodology, covering over 600 arguments 1 .",
                "cite_spans": [
                    {
                        "start": 786,
                        "end": 787,
                        "text": "1",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Explication of warrants in arguments has already been approached in many previous researches. In an initial attempt, Feng and Hirst (2011) proposed leveraging argumentation schemes (Walton et al., 2008) as a means to automatically reconstruct warrant, but did not approach the task due to the absence of training datasets. To overcome the unavailability of dataset, Boltu\u017ei\u0107 and \u0160najder (2016) leverage crowdsourcing and ask non-expert workers to annotate all possible variations of warrants for a given claim-premise pair. However, they concluded that the annotation of warrants varied both in number annotated per argument and in content due to no restrictions imposed on in the annotation process.",
                "cite_spans": [
                    {
                        "start": 181,
                        "end": 202,
                        "text": "(Walton et al., 2008)",
                        "ref_id": "BIBREF20"
                    },
                    {
                        "start": 366,
                        "end": 393,
                        "text": "Boltu\u017ei\u0107 and \u0160najder (2016)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "In order to overcome the prior difficulties, recent approaches leverage crowdsourcing to restrict the number of warrants collected per argument and either employ a step by step filtering process to weed out bad warrants (Habernal et al., 2018) or hire ex-Argument Warrant",
                "cite_spans": [
                    {
                        "start": 220,
                        "end": 243,
                        "text": "(Habernal et al., 2018)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Claim: We should abolish zoos. Premise: Zoos are notorious for animal abuse.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "1.",
                "sec_num": null
            },
            {
                "text": "Abolishing zoos leads to animals being in their natural habitat which results in no animal abuse.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "1.",
                "sec_num": null
            },
            {
                "text": "Claim: We should ban whaling. Premise: Whaling is considered to be unacceptable cruelty towards animals.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2.",
                "sec_num": null
            },
            {
                "text": "Banning whaling would stop the inhumane methods of stabbing whales which is unacceptable cruelty towards animals.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2.",
                "sec_num": null
            },
            {
                "text": "Claim: We should introduce compulsory voting. Premise: Compulsory voting can help obtain better results during elections.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3.",
                "sec_num": null
            },
            {
                "text": "Introducing compulsory voting leads to every citizen exercising the right to vote which can help obtain better results during elections. perts who iteratively converge to a single warrant annotation (Becker et al., 2017) . In an advanced attempt, Razuvayevskaya and Teufel (2017) explored whether it is feasible for human annotators to explicate warrants in arguments. They propose the idea of template-based warrant reconstruction using information from premise and claim, and employ experts to perform the task. In contrast, our annotation methodology is designed for expert as well as non-expert workers. Additionally, our annotation aims to restrict the warrant's structure to semi-structured format such that it comprises knowledge that is necessary to form an inferential link between the key contents of claim and premise. Furthermore, we do not restrict the number of warrants to one, but collect set of warrants per argument that fulfill the criteria of qualifying as a high quality warrant.",
                "cite_spans": [
                    {
                        "start": 199,
                        "end": 220,
                        "text": "(Becker et al., 2017)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 247,
                        "end": 279,
                        "text": "Razuvayevskaya and Teufel (2017)",
                        "ref_id": "BIBREF17"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3.",
                "sec_num": null
            },
            {
                "text": "We define warrant that we want to annotate by characterizing its desired properties. Properties such as structure of warrant, quality in terms of how well a warrant links claim and premise, and feasibility of annotating warrant for an argument. In this work, we define this feasibility in terms of whether a warrant can be annotated for an argument, regardless of whether it is good or bad. We assume that explication of a warrant might not be possible if the argument is too good (i.e., warrant is explicated in the premise) or if the argument is too bad (i.e., no warrant can explicate the link between claim and premise).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Warrant Desiderata",
                "sec_num": "3"
            },
            {
                "text": "Structure Warrants are implicit reasoning that logically link the contents of claim and premise (Toulmin, 1958; Freeman, 1992) . Therefore, we hypothesize that a warrant should have a structure that (a) comprises the key information given in claim and premise, and (b) explicates logical connection between the aforementioned key information with some implicit knowledge that is relevant to the argument. We define the warrants framed in such a way as semi-structured warrants. Examples of such semi-structured warrants are provided in Table 1 , where key information or keywords from claim and premise is linked with relevant implicit knowledge which all together forms a semi-structured warrant.",
                "cite_spans": [
                    {
                        "start": 96,
                        "end": 111,
                        "text": "(Toulmin, 1958;",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 112,
                        "end": 126,
                        "text": "Freeman, 1992)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 536,
                        "end": 543,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Warrant Desiderata",
                "sec_num": "3"
            },
            {
                "text": "Feasibility Warrants may not be explicable for all the arguments. Specifically, for an argument with bad premise there may be no feasible way to explicate the logical link between claim and premise. For example, the warrant for the argument \"We should introduce multi-party system because it's the right thing to do\" is not feasible, since the argument is a fallacy (i.e. begging the question) where the premise: \"it's the right thing to do\" provides no adequate support to the claim: \"We should introduce multi-party system\". Similarly, for arguments with very good premise, it might not be necessary to explicate the warrant since the warrant might already be explicated in the premise. In contrast, as shown in Table 1 , for arguments with moderately good/bad premise, we assume that one can frame a warrant by leveraging argument relevant external knowledge. 2 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 714,
                        "end": 721,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Warrant Desiderata",
                "sec_num": "3"
            },
            {
                "text": "Quality A key factor distinguishing warrants from any other type of implicit knowledge or statement (e.g., commonsense knowledge) is their ability to justify the flow of reasoning between claim and premise. For example, in Table 1 , warrant (3) explicitly answers how introducing compulsory voting can help obtain better results in elections. These are the type of warrants we would like to annotate. Conversely, statements which do not serve this purpose cannot be qualified as a warrant. For example, given argument (3) from Table 1 , the statement \"Introducing compulsory voting enables people to freely choose their favourite candidate which results in encouraging better results during elections\" cannot be labeled as a warrant because it offers little to no help in bridging the implicit reasoning link between claim and premise.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 223,
                        "end": 230,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 527,
                        "end": 534,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Warrant Desiderata",
                "sec_num": "3"
            },
            {
                "text": "In this section, we discuss the development and design of our proposed semi-structured annotation methodologies. In particular, we consider two methodologies for annotating warrants: Predefined Keyword-based Warrant and User-defined Keyword-based Warrants.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Annotation Methodologies",
                "sec_num": "4"
            },
            {
                "text": "Pre-defined Keyword-based Warrants (PKW) In order to annotate semi-structured warrants that encompass information from claim and premise, we propose using keywords which encode the key information given in claim and premise. As shown in Table 1 , the purpose of these keywords is to create a semi-structured format for completing a warrant annotation such that keywords from claim form the initial (shown in red) and keywords from premise (shown in blue) form the latter part of the warrant. The keywords are linked by implicit knowledge that is necessary to connect the keywords in a meaningful way. Example annotation and task design of PKW annotation is shown in Figure 2 . For PKW methodology, the annotator is initially provided with keywords and is tasked to explain the flow of reasoning between them by writing implicit knowledge (i.e., hidden reasoning). In order to provide pre-defining keywords to the annotator, we employ spaCy (Honnibal et al., 2020) and automatically extract the key information from claim and premise by parsing the sentence into verb/noun phrases. For example, for claim \"We should ban whaling\", the verb phrase \"Banning whaling\" and for premise \"Whales are necessary for ecological sustainability of the oceans\", the noun phrase \"ecological sustainability of the oceans\" is extracted. To ensure the feasibility of framing a warrant with extracted keywords, we perform a manual check and if needed, make minimal changes to its tense or word-order.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 237,
                        "end": 244,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    },
                    {
                        "start": 666,
                        "end": 674,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Annotation Methodologies",
                "sec_num": "4"
            },
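The keyword-extraction step described above can be approximated with a short spaCy sketch. This is a minimal illustration under our own assumptions: the function names and the phrase-selection heuristics (root verb plus object subtree for the claim; longest expanded noun chunk for the premise) are ours, not the authors' code, and, as the paper notes, a manual check of tense and word order still follows.

```python
# pip install spacy && python -m spacy download en_core_web_sm
import spacy

nlp = spacy.load("en_core_web_sm")

def claim_keywords(claim: str) -> str:
    """Heuristic: extract the claim's root verb together with its object subtree,
    e.g. 'We should ban whaling' -> 'ban whaling' (the paper then manually
    adjusts tense/word order, yielding 'Banning whaling')."""
    doc = nlp(claim)
    root = next(tok for tok in doc if tok.dep_ == "ROOT")
    objs = [tok for tok in root.children if tok.dep_ in ("dobj", "obj", "attr", "oprd")]
    obj_text = " ".join(t.text for o in objs for t in o.subtree)
    return f"{root.text} {obj_text}".strip()

def premise_keywords(premise: str) -> str:
    """Heuristic: expand each base noun chunk to its full dependency subtree
    and keep the longest, e.g. 'Whales are necessary for ecological
    sustainability of the oceans' -> 'ecological sustainability of the oceans'."""
    doc = nlp(premise)
    spans = [doc[c.root.left_edge.i : c.root.right_edge.i + 1] for c in doc.noun_chunks]
    return max(spans, key=len).text if spans else ""

if __name__ == "__main__":
    print(claim_keywords("We should ban whaling"))
    print(premise_keywords("Whales are necessary for ecological sustainability of the oceans"))
```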
            {
                "text": "User-defined Keyword-based Warrants (UKW) While PKW methodology introduces restrictions on warrant annotation via pre-defined keywords, they might be too restrictive or not provide sufficient flexibility for annotators to annotate the warrant. Moreover, automatically extracting keywords from claim and premise can be sometimes challenging due to varied syntactic structure of the argument. Therefore as an alternate approach, we ask the annotators to derive their own keywords from claim and premise. To do this, we provide detailed guidelines and concrete examples in our interface for annotators so that they can correctly understand the process of deriving keywords.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Annotation Methodologies",
                "sec_num": "4"
            },
            {
                "text": "The annotation design and example annotation is shown in Figure 3 , where for the given claim and premise, the keywords from claim can be a verb phrase: \"Introducing compulsory voting\", and keywords from premise can be verb phrase: \"obtain better results during elections\". To avoid annotations where annotator might write keywords with information from outside claim/premise, annotators were strictly advised not to use any external knowledge when writing the keywords, although the use of external knowledge was permitted for writing the hidden reasoning.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 57,
                        "end": 65,
                        "text": "Figure 3",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Annotation Methodologies",
                "sec_num": "4"
            },
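For concreteness, the semi-structured format that both methodologies target can be thought of as a three-slot template: claim keywords, hidden reasoning, premise keywords. The sketch below is our reading of the Table 1 examples, not an official format from the paper; the function name is hypothetical.

```python
# Minimal sketch of how a semi-structured warrant is assembled from its three
# parts; the template wording follows the Table 1 examples.
def assemble_warrant(claim_kw: str, hidden_reasoning: str, premise_kw: str) -> str:
    return f"{claim_kw} {hidden_reasoning} {premise_kw}".strip()

warrant = assemble_warrant(
    "Introducing compulsory voting",
    "leads to every citizen exercising the right to vote which can help",
    "obtain better results during elections.",
)
print(warrant)
# Introducing compulsory voting leads to every citizen exercising the right
# to vote which can help obtain better results during elections.
```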
            {
                "text": "Our goal is to establish a procedure for collecting semi-structured warrants and their annotations at large-scale. In order to collect such warrants from each methodology, we build a multi-step crowdsourcing process designed for encouraging annotator's creativity, while preventing biases in the annotations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
            {
                "text": "In general, we break the warrant collection procedure into three steps of simple tasks: (i) Deriving keywords, (ii) Judging feasibility and (iii) Framing warrant. In addition, we implement several mechanisms for quality assurance and employ manual checks to ensure annotators understand and perform the final task correctly.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
            {
                "text": "(i) Deriving keywords In order to collect semistructured warrants, we require keywords derived from each of claim and premise. These keywords Figure 2 : The interface used for warrant annotations, along with an example of a annotator's annotation for Predefined Keyword-based Warrant. To avoid using complicated terminology, we used the terms stance, supporting statement and hidden reasoning to convey the notion of claim, premise and warrant respectively. act as the skeleton of the final annotated warrant. For example, as shown on the left side of Figure 3 , \"Introducing compulsory voting\" and \"obtain better results during elections\" are keywords derived from claim and premise respectively. To derive these keywords, annotators are instructed to strictly include only key information conveyed in their respective counterparts and no external information. For PKW methodology, keywords in this step are already derived as shown in Figure 2 . To ensure annotators understand the notion of keywords, we provide sufficient variety of examples for them to get used to this sub-task.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 142,
                        "end": 150,
                        "text": "Figure 2",
                        "ref_id": null
                    },
                    {
                        "start": 552,
                        "end": 560,
                        "text": "Figure 3",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 937,
                        "end": 945,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
            {
                "text": "(b) Judging feasibility Since warrants may not be explicable for all the arguments, i.e., if a premise is very bad or contrastingly very good, we explicitly ask annotators to judge the feasibility of writing a warrant by asking if they can complete the hidden reasoning. This step is rather tricky since annotators may be biased to answer \"No\" or \"Unsure\" (See Question in Figure 2 and 3) to avoid doing the task and finish the task quickly. To avoid this, we treat this step as bonus question and depending on majority response i.e., if majority of annotators believe a warrant can be explicated for the given argument, then the majority annotators get bonus. Similarly, if majority of annotators believe a warrant cannot be explicated for a given argument, then again majority annotators get bonus. We keep a high bonus for this step in order to compel anno-tators to do task as instructed instead of providing low quality response. This step helps us get a better judgement of feasibility of warrants and also identify annotators who are not doing the task properly.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 373,
                        "end": 381,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
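The majority-based bonus rule can be summarized in a few lines. The sketch below is our illustration: the response labels mirror the Yes/No/Unsure options in the interface, but the helper name and the tie-breaking behavior are assumptions.

```python
from collections import Counter

def bonus_recipients(responses: dict[str, str]) -> list[str]:
    """responses maps annotator id -> feasibility answer ('Yes'/'No'/'Unsure').
    Annotators who agree with the majority answer receive the bonus
    (ties are broken arbitrarily here)."""
    majority_answer, _ = Counter(responses.values()).most_common(1)[0]
    return [worker for worker, answer in responses.items() if answer == majority_answer]

print(bonus_recipients({"A1": "Yes", "A2": "Yes", "A3": "No", "A4": "Yes", "A5": "Unsure"}))
# -> ['A1', 'A2', 'A4']
```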
            {
                "text": "(c) Framing Warrant The last step in warrant collection procedure is for the annotators to frame the hidden reasoning to complete the warrant. This step is the most challenging since it requires the annotator to be logical and use his background knowledge to complete warrant annotation. To complete this via PKW methodology, annotator's are restricted in terms of pre-defined keywords, while for UKW methodology the annotator can annotate the warrant by minimally changing the keywords as well as hidden reasoning.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
            {
                "text": "Auxiliary verification measures For each task, we hold preliminary qualification test that consists of several basic questions to judge the understanding of annotator's reasoning skills. Annotators who score more than a pre-defined threshold (\u2265 80%) are granted access to the main task. Our qualifications are open to annotators from major English speaking countries, namely USA, UK, New Zealand and Canada. Additionally, to address any ethical issues (Adda et al., 2011) raised by our task, we actively monitored multiple pilot tests to ensure annotators were satisfied with our task. Simultaneously, we corresponded directly to annotators that had questions/comments on our task. Annotators were paid in accordance with the minimum wage calculated by conducting many trials and based on average work-time. Additional bonus was paid to annotators that provided quality annotations.",
                "cite_spans": [
                    {
                        "start": 452,
                        "end": 471,
                        "text": "(Adda et al., 2011)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Warrant Collection Procedure",
                "sec_num": "5"
            },
            {
                "text": "We choose Amazon Mechanical Turk (AMT) 3 as our crowdsourcing platform due to its success in previous argumentation mining tasks (Habernal et al., 2018) . As an initial step, we only allowed annotators who had \u2265 98% acceptance rate and \u2265 5,000 approved Human Intelligence Tasks (HITs). Each annotator was paid $0.30 for doing the task and workers who qualified for bonus were paid an additional $0.25 per HIT. For the task involving UKW annotations, the payment was increased to $0.40 for doing the task and $0.35 as bonus.",
                "cite_spans": [
                    {
                        "start": 129,
                        "end": 152,
                        "text": "(Habernal et al., 2018)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Crowdsourcing",
                "sec_num": "6"
            },
            {
                "text": "Source data For the purpose of annotating warrants for a given set of arguments, we utilize a wellknown argumentation dataset, IBM-Rank-30K corpus (Gretz et al., 2019) , which already consists arguments in the form of claim and premise. The dataset contains around 15K crowd-sourced arguments covering 71 topics, annotated for supporting as well as opposing stance. 4 The arguments were collected with strict length limitations and accompanied by extensive quality control measures. Our inspection of arguments from IBM-Rank-30k revealed that a for large proportion of the arguments we can explicate warrants. To proceed with our warrant annotation, we selected a subset of three 3 well-known debatable topics: We should abolish zoos, We should ban whaling and We should introduce compulsory voting.",
                "cite_spans": [
                    {
                        "start": 147,
                        "end": 167,
                        "text": "(Gretz et al., 2019)",
                        "ref_id": null
                    },
                    {
                        "start": 366,
                        "end": 367,
                        "text": "4",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Crowdsourcing",
                "sec_num": "6"
            },
            {
                "text": "Annotations In addition to warrants annotated via PKW and UKW methodologies, we also annotated unstructured warrants via crowdsourcing to compare the quality of annotated warrants across different methodologies. We followed similar crowdsourcing procedure as employed by Habernal et al. (2018) and refer to the warrants annotated via this process as Natural language Warrants (NLW). For each methodology, we annotate 40 unique claim and premise pairs, randomly chosen from the three pre-selected topics. Each argument was annotated by 5 annotators resulting in a total of 200 annotations per methodology and all warrants were limited to have a length between 60 and 200 characters excluding keywords.",
                "cite_spans": [
                    {
                        "start": 271,
                        "end": 293,
                        "text": "Habernal et al. (2018)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Crowdsourcing",
                "sec_num": "6"
            },
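The stated length constraint (60 to 200 characters, excluding the keywords) amounts to a simple validation check. A minimal sketch follows, under the assumption that the keyword spans appear verbatim in the warrant; the helper name is ours.

```python
def within_length_limit(warrant: str, claim_kw: str, premise_kw: str) -> bool:
    """Check the 60-200 character limit on the warrant text, excluding the
    claim/premise keyword spans (assumed to occur verbatim in the warrant)."""
    hidden = warrant.replace(claim_kw, "").replace(premise_kw, "").strip()
    return 60 <= len(hidden) <= 200
```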
            {
                "text": "Filtering For each methodology, it is possible to collect at most 200 warrants (given 40 arguments and 5 annotators). However, not all annotators Table 2 : Comparison between the different warrant crowdsourcing methodologies. Avg. corresponds to the average score given by both expert annotators.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 146,
                        "end": 153,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Crowdsourcing",
                "sec_num": "6"
            },
            {
                "text": "Warrant is unrelated to the claim and its premise.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Score Explanation 0",
                "sec_num": null
            },
            {
                "text": "Warrant is related to the claim and premise but does not make the relationship between them easy to understand and/or strengthen the argument. In addition, the warrant may overlap or be a paraphrase of the premise.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "1",
                "sec_num": null
            },
            {
                "text": "The relationship between the claim and premise is easier to understand and/or strengthened because of the warrant. Table 3 : Guidelines used by our expert annotators for scoring the quality of warrants on a scale of 0-2.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 115,
                        "end": 122,
                        "text": "Table 3",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "chose \"Yes\" when they were asked to judge if they can write a warrant. After filtering the negative responses at this step, we discover that, in total, annotators wrote 155, 101 and 65 warrants out of possible 200 warrants for Natural language Warrant, PKW and UKW methodologies respectively. We utilize these 321 collected warrants for further analysis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "In order to analyze the quality of the annotated warrants, we hired two annotators who are experts in the field of argumentation to score the crowdsourced warrants. To frame quality scoring guidelines for judging warrants, we ran several pilot tests and take expert advice to make our guidelines easier to interpret. Our final quality annotation guidelines are shown in Table 3 . The experts were asked to score a given warrant on a scale of (0-2), with 0 being the lowest and 2 being the highest. Both annotators were given 50 warrants randomly chosen from the pool of collected warrants for each methodology. Each topic was represented fairly in the quality annotation step with each topic having at least 15 warrants. In total, our experts annotated 150 warrants out of a total 321 annotated warrants. As shown in Table 2 , the agreement between both experts as judged by Krippendorff's alpha \u03b1 (Krippendorff, 2011 ) was found to be fairly good. We find that the average scores given by two expert annotators (Avg.) on a scale of (0-2) indicated userdefined and pre-defined methodologies with overall higher quality warrants as compared to natural language warrants. We also measure the combined average score given to the warrants for each topic to measure if the warrants belonging to one topic was of higher quality. We find that on average warrants annotated for Introducing compulsory voting were scored the lowest while for other topics was fairly high.",
                "cite_spans": [
                    {
                        "start": 875,
                        "end": 917,
                        "text": "Krippendorff's alpha \u03b1 (Krippendorff, 2011",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 370,
                        "end": 377,
                        "text": "Table 3",
                        "ref_id": null
                    },
                    {
                        "start": 817,
                        "end": 824,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "7"
            },
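Inter-annotator agreement of the kind reported in Table 2 can be computed with the open-source krippendorff package; the paper does not specify its tooling, and the scores below are made up purely for illustration.

```python
# pip install krippendorff numpy
import numpy as np
import krippendorff

# Two expert annotators scoring the same warrants on the ordinal 0-2 scale
# of Table 3; np.nan marks a missing rating.
ratings = np.array([
    [2, 1, 0, 2, 2, 1, np.nan, 0],   # expert 1
    [2, 1, 1, 2, 2, 1, 2,      0],   # expert 2
], dtype=float)

alpha = krippendorff.alpha(reliability_data=ratings,
                           level_of_measurement="ordinal")
print(f"Krippendorff's alpha: {alpha:.3f}")
```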
            {
                "text": "To further analyze the quality of warrants and the quality of the entire crowdsourcing process, we analyze sample of the warrants collected via each methodology and which were annotated by experts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Qualitative Analysis",
                "sec_num": "7.1"
            },
            {
                "text": "In Table 4, we can see that the warrants scored highest by the experts have a fair amount of keyword overlap with the claim/premise. This follows from our initial motivation for using a semi-structured annotation methodology, where we hypothesized that including keyword information from the claim and premise can assist in framing better-quality warrants. As shown in Table 4, the warrants UKW (1) and PKW (1) share the keywords \"Banning whaling\" and \"ecological sustainability of the oceans\"; hence the annotated implicit knowledge is also the same. This indicates that our keyword-based PKW and UKW methodologies restrict the diverse ways in which warrants can be explicated. On the contrary, the warrant for NLW (1) is analogous to a general statement, yet it explicates implicit knowledge similar to that of the UKW (1) and PKW (1) warrants. This hints that natural language warrants can also be of higher quality, but their quality can vary to a larger extent because no restrictions are imposed. In Table 5, we can see that even though a warrant encodes claim-premise information, its quality can still be poor. For example, the warrants PKW (1) and UKW (1) explicate implicit knowledge that does not make the inferential link between the claim and the premise clear. Additionally, we note that while keyword-based methods restrict most warrants to a single sentence, natural language warrants often consist of multiple shorter sentences; overall, we found that 23% of natural language warrants were composed of more than one sentence. Such warrants were scored low and were often found to be paraphrased from the information in the premise, or annotators rephrased the premise in place of the warrant. Although this was mostly observed in natural language warrants, we found a few such instances among our keyword-based collected warrants (see UKW (1) in Table 5). This suggests that while keyword-based methods assist in collecting warrants that explicate the inferential link between claim and premise, they still do not guarantee high-quality warrant annotations and might require further adjustments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Qualitative Analysis",
                "sec_num": "7.1"
            },
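            {
                "text": "As a rough illustration of the keyword-overlap observation above, the following Python sketch scores the token overlap between a warrant and its claim/premise pair; the stopword list and the overlap heuristic are hypothetical simplifications, not the procedure our experts used:\n# Fraction of a warrant's content tokens that also appear in the claim or premise.\nSTOPWORDS = {\"the\", \"a\", \"an\", \"of\", \"to\", \"is\", \"are\", \"for\", \"which\", \"we\", \"should\"}\n\ndef content_tokens(text):\n    return {tok.strip(\".,\").lower() for tok in text.split()} - STOPWORDS\n\ndef keyword_overlap(warrant, claim, premise):\n    warrant_toks = content_tokens(warrant)\n    source_toks = content_tokens(claim) | content_tokens(premise)\n    return len(warrant_toks & source_toks) / max(len(warrant_toks), 1)\n\nclaim = \"We should ban whaling.\"\npremise = \"Whales are necessary for ecological sustainability of the oceans.\"\nukw = \"Banning whaling prevents the decreasing of whale population which is necessary for ecological sustainability of the oceans.\"\nprint(f\"overlap = {keyword_overlap(ukw, claim, premise):.2f}\")",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Qualitative Analysis",
                "sec_num": "7.1"
            },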
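            {
                "text": "The multi-sentence statistic above can be computed, for example, by segmenting each warrant with spaCy (Honnibal et al., 2020); a minimal sketch on toy warrants rather than our collected data:\nimport spacy\n\n# Lightweight rule-based sentence segmentation; no trained model required.\nnlp = spacy.blank(\"en\")\nnlp.add_pipe(\"sentencizer\")\n\ndef fraction_multi_sentence(warrants):\n    multi = sum(1 for w in warrants if len(list(nlp(w).sents)) > 1)\n    return multi / len(warrants)\n\nwarrants = [\n    \"Marine life in the ocean cannot survive without ecological sustainability.\",\n    \"Whales could soon die off completely. It is our duty to ban whaling.\",\n]\nprint(f\"{fraction_multi_sentence(warrants):.0%} of warrants contain more than one sentence\")",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Qualitative Analysis",
                "sec_num": "7.1"
            },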
            {
                "text": "Based on our finding that user-defined keywordbased warrant methodology comparatively results in better warrants, we follow this method to collect a total of 1700 warrants across 3 topics, annotated for 600 claim-premise pairs. All warrants are limited to have a length between 60 and 200 characters. Since this is an ongoing work, we plan to make further analysis on our preliminary dataset in future.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preliminary large-scale corpus",
                "sec_num": "7.2"
            },
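            {
                "text": "A minimal sketch of the length constraint applied as a validation step during collection; the helper name and its use are hypothetical:\ndef is_valid_length(warrant, lo=60, hi=200):\n    # Enforces the 60-200 character window used for the collected warrants.\n    return lo <= len(warrant.strip()) <= hi\n\nassert is_valid_length(\"Banning whaling prevents the decreasing of whale population which is necessary for ecological sustainability of the oceans.\")",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preliminary large-scale corpus",
                "sec_num": "7.2"
            },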
            {
                "text": "In this work, we tackle the difficult task of explicating warrants in arguments and propose two novel methodologies to annotate warrants in semistructured format. We conduct extensive analysis and perform annotation study for determining the appropriate methodology for collecting warrants and show that user-defined keyword based approach produces the highest quality warrants as compared to pre-defined keyword-based warrant and natural language warrants. In future, we plan to extend the annotation of warrants for more diverse topics. Moreover, we plan to cover warrant annotations for claim-premise pairs with premises attacking the claim in addition to premises supporting the original claim. We would also like to test the usefulness of our annotations for constructing a model for automatic warrant explication which can be used to explicate warrant for any given argument. We believe that such a model can be useful in a pedagogical setting to perform downstream tasks such as argument analysis or giving constructive feedback to students.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future work",
                "sec_num": "8"
            },
            {
                "text": "Our crowdsourcing guidelines and annotated warrants are publicly available at https://github.com/ cl-tohoku/ukw-warrants",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The arguments shown inTable 1were already annotated with moderate quality scores in a larger study(Gretz et al., 2019)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "www.mturk.com 4 In our work we only focus on arguments with supporting stance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "This work was partially supported by JST CREST Grant Number JPMJCR20D2 and NEDO Grant Number J200001946. The authors would like to thank Paul Reisert, other members of the Tohoku NLP Lab, and the anonymous reviewers for their insightful feedback.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "NLW: Whales could soon die off completely. It is our duty to ban whaling. PKW: Banning whaling leads to whales not dying which has cause decrease in whale population over the years. UKW: Banning whaling is a harmful practice which results in decrease in whale population over the years. 2. Claim: We should abolish zoos. Premise:It is unfair to trap animals from their natural habitat and confine them to small spaces for human entertainment",
                "authors": [],
                "year": null,
                "venue": "Claim: We should ban whaling. Premise: Whaling has led to a major decrease in whale populations over the years",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Claim: We should ban whaling. Premise: Whaling has led to a major decrease in whale populations over the years. NLW: Whales could soon die off completely. It is our duty to ban whaling. PKW: Banning whaling leads to whales not dying which has cause decrease in whale population over the years. UKW: Banning whaling is a harmful practice which results in decrease in whale population over the years. 2. Claim: We should abolish zoos. Premise:It is unfair to trap animals from their natural habitat and confine them to small spaces for human entertain- ment. NLW: Zoos keep animals captive where where they cannot run free and thrive.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "UKW: Abolishing zoos makes sure that animals are not being treated unfair and in small spaces. 3. Claim: We should introduce compulsory voting. Premise: Compulsive voting is a patriotic act that must be fully complied with. NLW: We enjoy the freedom and liberty enjoyed by expressing our opinions. We should take advantage of such compulsion. PKW: Introducing compulsory voting makes you feel that you belong to your country which is a patriotic act. UKW: Compulsory voting will force people to engage in politics and choose a",
                "authors": [],
                "year": null,
                "venue": "PKW: Abolishing zoos is a bad practice because it is unfair to trap animals from their natural habitat and confine them to small spaces for human entertainment",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "PKW: Abolishing zoos is a bad practice because it is unfair to trap animals from their natural habitat and confine them to small spaces for human entertainment. UKW: Abolishing zoos makes sure that animals are not being treated unfair and in small spaces. 3. Claim: We should introduce compulsory voting. Premise: Compulsive voting is a patriotic act that must be fully complied with. NLW: We enjoy the freedom and liberty enjoyed by expressing our opinions. We should take advantage of such compulsion. PKW: Introducing compulsory voting makes you feel that you belong to your country which is a patriotic act. UKW: Compulsory voting will force people to engage in politics and choose a right leader that can lead their country in the future.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Crowdsourcing for language resource development: Critical analysis of amazon mechanical turk overpowering use",
                "authors": [
                    {
                        "first": "Gilles",
                        "middle": [],
                        "last": "Adda",
                        "suffix": ""
                    },
                    {
                        "first": "Beno\u00eet",
                        "middle": [],
                        "last": "Sagot",
                        "suffix": ""
                    },
                    {
                        "first": "Kar\u00ebn",
                        "middle": [],
                        "last": "Fort",
                        "suffix": ""
                    },
                    {
                        "first": "Joseph",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "5th Language and Technology Conference",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gilles Adda, Beno\u00eet Sagot, Kar\u00ebn Fort, and Joseph Mar- iani. 2011. Crowdsourcing for language resource de- velopment: Critical analysis of amazon mechanical turk overpowering use. In 5th Language and Tech- nology Conference.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Explaining arguments with background knowledge",
                "authors": [
                    {
                        "first": "Maria",
                        "middle": [],
                        "last": "Becker",
                        "suffix": ""
                    },
                    {
                        "first": "Ioana",
                        "middle": [],
                        "last": "Hulpu\u015f",
                        "suffix": ""
                    },
                    {
                        "first": "Juri",
                        "middle": [],
                        "last": "Opitz",
                        "suffix": ""
                    },
                    {
                        "first": "Debjit",
                        "middle": [],
                        "last": "Paul",
                        "suffix": ""
                    },
                    {
                        "first": "Jonathan",
                        "middle": [],
                        "last": "Kobbe",
                        "suffix": ""
                    },
                    {
                        "first": "Heiner",
                        "middle": [],
                        "last": "Stuckenschmidt",
                        "suffix": ""
                    },
                    {
                        "first": "Anette",
                        "middle": [],
                        "last": "Frank",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Datenbank-Spektrum",
                "volume": "20",
                "issue": "2",
                "pages": "131--141",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Maria Becker, Ioana Hulpu\u015f, Juri Opitz, Debjit Paul, Jonathan Kobbe, Heiner Stuckenschmidt, and Anette Frank. 2020. Explaining arguments with background knowledge. Datenbank-Spektrum, 20(2):131-141.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Enriching argumentative texts with implicit knowledge",
                "authors": [
                    {
                        "first": "Maria",
                        "middle": [],
                        "last": "Becker",
                        "suffix": ""
                    },
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Staniek",
                        "suffix": ""
                    },
                    {
                        "first": "Vivi",
                        "middle": [],
                        "last": "Nastase",
                        "suffix": ""
                    },
                    {
                        "first": "Anette",
                        "middle": [],
                        "last": "Frank",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "International Conference on Applications of Natural Language to Information Systems",
                "volume": "",
                "issue": "",
                "pages": "84--96",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Maria Becker, Michael Staniek, Vivi Nastase, and Anette Frank. 2017. Enriching argumentative texts with implicit knowledge. In International Confer- ence on Applications of Natural Language to Infor- mation Systems, pages 84-96. Springer.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Fill the gap! analyzing implicit premises between claims from online debates",
                "authors": [
                    {
                        "first": "Filip",
                        "middle": [],
                        "last": "Boltu\u017ei\u0107",
                        "suffix": ""
                    },
                    {
                        "first": "Jan",
                        "middle": [],
                        "last": "\u0160najder",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Proceedings of the Third Workshop on Argument Mining (ArgMining2016)",
                "volume": "",
                "issue": "",
                "pages": "124--133",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Filip Boltu\u017ei\u0107 and Jan \u0160najder. 2016. Fill the gap! an- alyzing implicit premises between claims from on- line debates. In Proceedings of the Third Work- shop on Argument Mining (ArgMining2016), pages 124-133, Berlin, Germany. Association for Compu- tational Linguistics.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Tapping into argumentation: Developments in the application of toulmin's argument pattern for studying science discourse",
                "authors": [
                    {
                        "first": "Sibel",
                        "middle": [],
                        "last": "Erduran",
                        "suffix": ""
                    },
                    {
                        "first": "Shirley",
                        "middle": [],
                        "last": "Simon",
                        "suffix": ""
                    },
                    {
                        "first": "Jonathan",
                        "middle": [],
                        "last": "Osborne",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Science education",
                "volume": "88",
                "issue": "6",
                "pages": "915--933",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sibel Erduran, Shirley Simon, and Jonathan Osborne. 2004. Tapping into argumentation: Developments in the application of toulmin's argument pattern for studying science discourse. Science education, 88(6):915-933.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Classifying arguments by scheme",
                "authors": [
                    {
                        "first": "Vanessa",
                        "middle": [],
                        "last": "Wei Feng",
                        "suffix": ""
                    },
                    {
                        "first": "Graeme",
                        "middle": [],
                        "last": "Hirst",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "987--996",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vanessa Wei Feng and Graeme Hirst. 2011. Classify- ing arguments by scheme. In Proceedings of the 49th Annual Meeting of the Association for Com- putational Linguistics: Human Language Technolo- gies, pages 987-996, Portland, Oregon, USA. Asso- ciation for Computational Linguistics.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Relevance, warrants, backing, inductive support",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "James B Freeman",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Argumentation",
                "volume": "6",
                "issue": "2",
                "pages": "219--275",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "James B Freeman. 1992. Relevance, warrants, backing, inductive support. Argumentation, 6(2):219-275.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Ranit Aharonov, and Noam Slonim. 2019. A large-scale dataset for argument quality ranking: Construction and analysis",
                "authors": [
                    {
                        "first": "Shai",
                        "middle": [],
                        "last": "Gretz",
                        "suffix": ""
                    },
                    {
                        "first": "Roni",
                        "middle": [],
                        "last": "Friedman",
                        "suffix": ""
                    },
                    {
                        "first": "Edo",
                        "middle": [],
                        "last": "Cohen-Karlik",
                        "suffix": ""
                    },
                    {
                        "first": "Assaf",
                        "middle": [],
                        "last": "Toledo",
                        "suffix": ""
                    },
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Lahav",
                        "suffix": ""
                    },
                    {
                        "first": "Ranit",
                        "middle": [],
                        "last": "Aharonov",
                        "suffix": ""
                    },
                    {
                        "first": "Noam",
                        "middle": [],
                        "last": "Slonim",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "arXiv": [
                        "arXiv:1911.11408"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Shai Gretz, Roni Friedman, Edo Cohen-Karlik, As- saf Toledo, Dan Lahav, Ranit Aharonov, and Noam Slonim. 2019. A large-scale dataset for argument quality ranking: Construction and analysis. arXiv preprint arXiv:1911.11408.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "SemEval-2018 task 12: The argument reasoning comprehension task",
                "authors": [
                    {
                        "first": "Ivan",
                        "middle": [],
                        "last": "Habernal",
                        "suffix": ""
                    },
                    {
                        "first": "Henning",
                        "middle": [],
                        "last": "Wachsmuth",
                        "suffix": ""
                    },
                    {
                        "first": "Iryna",
                        "middle": [],
                        "last": "Gurevych",
                        "suffix": ""
                    },
                    {
                        "first": "Benno",
                        "middle": [],
                        "last": "Stein",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Proceedings of The 12th International Workshop on Semantic Evaluation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ivan Habernal, Henning Wachsmuth, Iryna Gurevych, and Benno Stein. 2018. SemEval-2018 task 12: The argument reasoning comprehension task. In Pro- ceedings of The 12th International Workshop on Se- mantic Evaluation, New Orleans, Louisiana. Associ- ation for Computational Linguistics.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Arguing on the Toulmin model",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Hitchcock",
                        "suffix": ""
                    },
                    {
                        "first": "Bart",
                        "middle": [],
                        "last": "Verheij",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "",
                "volume": "10",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Hitchcock and Bart Verheij. 2006. Arguing on the Toulmin model, volume 10. Springer.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "spaCy: Industrial-strength Natural Language Processing in Python",
                "authors": [],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Matthew Honnibal, Ines Montani, Sofie Van Lan- deghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Towards explaining natural language arguments with background knowledge",
                "authors": [
                    {
                        "first": "Ioana",
                        "middle": [],
                        "last": "Hulpus",
                        "suffix": ""
                    },
                    {
                        "first": "Jonathan",
                        "middle": [],
                        "last": "Kobbe",
                        "suffix": ""
                    },
                    {
                        "first": "Christian",
                        "middle": [],
                        "last": "Meilicke",
                        "suffix": ""
                    },
                    {
                        "first": "Heiner",
                        "middle": [],
                        "last": "Stuckenschmidt",
                        "suffix": ""
                    },
                    {
                        "first": "Maria",
                        "middle": [],
                        "last": "Becker",
                        "suffix": ""
                    },
                    {
                        "first": "Juri",
                        "middle": [],
                        "last": "Opitz",
                        "suffix": ""
                    },
                    {
                        "first": "Vivi",
                        "middle": [],
                        "last": "Nastase",
                        "suffix": ""
                    },
                    {
                        "first": "Anette",
                        "middle": [],
                        "last": "Frank",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "PROFILES/SEMEX@ ISWC",
                "volume": "",
                "issue": "",
                "pages": "62--77",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ioana Hulpus, Jonathan Kobbe, Christian Meilicke, Heiner Stuckenschmidt, Maria Becker, Juri Opitz, Vivi Nastase, and Anette Frank. 2019. Towards explaining natural language arguments with back- ground knowledge. In PROFILES/SEMEX@ ISWC, pages 62-77.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Multiple warrants in practical reasoning",
                "authors": [
                    {
                        "first": "Christian",
                        "middle": [],
                        "last": "Kock",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Arguing on the Toulmin model",
                "volume": "",
                "issue": "",
                "pages": "247--259",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Christian Kock. 2006. Multiple warrants in practical reasoning. In Arguing on the Toulmin model, pages 247-259. Springer.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Computing krippendorff's alpha-reliability",
                "authors": [
                    {
                        "first": "Klaus",
                        "middle": [],
                        "last": "Krippendorff",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Klaus Krippendorff. 2011. Computing krippendorff's alpha-reliability.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "The abuses of argument: Understanding fallacies on toulmin's layout of argument",
                "authors": [
                    {
                        "first": "Andrew",
                        "middle": [],
                        "last": "Pineau",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Informal Logic",
                "volume": "33",
                "issue": "4",
                "pages": "531--546",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Andrew Pineau. 2013. The abuses of argument: Under- standing fallacies on toulmin's layout of argument. Informal Logic, 33(4):531-546.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Finding enthymemes in real-world texts: A feasibility study",
                "authors": [
                    {
                        "first": "Olesya",
                        "middle": [],
                        "last": "Razuvayevskaya",
                        "suffix": ""
                    },
                    {
                        "first": "Simone",
                        "middle": [],
                        "last": "Teufel",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "Argument & computation",
                "volume": "8",
                "issue": "2",
                "pages": "113--129",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Olesya Razuvayevskaya and Simone Teufel. 2017. Finding enthymemes in real-world texts: A feasibil- ity study. Argument & computation, 8(2):113-129.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "The use of argument",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "Stephen Edelston Toulmin",
                        "suffix": ""
                    }
                ],
                "year": 1958,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Stephen Edelston Toulmin. 1958. The use of argument. Cambridge University Press.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "How to improve argumentation comprehension in university students: Experimental test of a training approach",
                "authors": [
                    {
                        "first": "Sarah",
                        "middle": [],
                        "last": "von der M\u00fchlen",
                        "suffix": ""
                    },
                    {
                        "first": "Tobias",
                        "middle": [],
                        "last": "Richter",
                        "suffix": ""
                    },
                    {
                        "first": "Sebastian",
                        "middle": [],
                        "last": "Schmid",
                        "suffix": ""
                    },
                    {
                        "first": "Kirsten",
                        "middle": [],
                        "last": "Berthold",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Instructional Science",
                "volume": "47",
                "issue": "2",
                "pages": "215--237",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sarah von der M\u00fchlen, Tobias Richter, Sebastian Schmid, and Kirsten Berthold. 2019. How to im- prove argumentation comprehension in university students: Experimental test of a training approach. Instructional Science, 47(2):215-237.",
                "links": null
            },
            "BIBREF20": {
                "ref_id": "b20",
                "title": "Argumentation schemes",
                "authors": [
                    {
                        "first": "Douglas",
                        "middle": [],
                        "last": "Walton",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [],
                        "last": "Reed",
                        "suffix": ""
                    },
                    {
                        "first": "Fabrizio",
                        "middle": [],
                        "last": "Macagno",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Douglas Walton, Christopher Reed, and Fabrizio Macagno. 2008. Argumentation schemes. Cam- bridge University Press.",
                "links": null
            },
            "BIBREF21": {
                "ref_id": "b21",
                "title": "Probable cause: Developing warrants for automated scoring of essays",
                "authors": [
                    {
                        "first": "",
                        "middle": [],
                        "last": "David M Williamson",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Handbook of automated essay evaluation",
                "volume": "",
                "issue": "",
                "pages": "175--202",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David M Williamson. 2013. Probable cause: De- veloping warrants for automated scoring of essays. In Handbook of automated essay evaluation, pages 175-202. Routledge.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "num": null,
                "type_str": "figure",
                "text": "The interface used for our warrant annotations, along with an example of a annotator's annotation for User-defined Keyword-based Warrant."
            },
            "TABREF0": {
                "num": null,
                "content": "<table/>",
                "text": "Example warrants demonstrating the semi-structured form used for annotation.",
                "html": null,
                "type_str": "table"
            },
            "TABREF1": {
                "num": null,
                "content": "<table/>",
                "text": "Table 4: Examples of warrants scored 2 by both experts. The majority of warrants belonging to the UKW methodology were found to be of good quality. 1. Claim: We should ban whaling. Premise: Whales are necessary for ecological sustainability of the oceans. NLW: Marine life in the ocean cannot survive without ecological sustainability. PKW: Banning whaling would prevent the decreasing of whale population on which marine life thrives which will result in ecological sustainability of the oceans. UKW: Banning whaling prevents the decreasing of whale population which is necessary for ecological sustainability of the oceans. 2. Claim: We should abolish zoos. Premise: We should abolish zoos to prevent the cruel confinement of wild animals. NLW: Animals in confinement suffer physically and emotionally. PKW: Abolishing zoos enables animals to live a more stimulating and fulfilling life in the wild which prevents the cruel confinement of wild animals. UKW: Zoos force many animals to live in prison-like environment with unhygienic conditions which is a cruel way of confining wild animals. 3. Claim: We should introduce compulsory voting. Premise: Compulsory voting can help encourage better results during elections. NLW: Mandatory voting will reflect people's actual preferences in election results. PKW: Compulsory voting produces a winning candidate that is more accurately representative of all the voters which ensures better election results. UKW: Introducing compulsory voting stops one side from rigging the process by canvasing more people which results in better elections.",
                "html": null,
                "type_str": "table"
            },
            "TABREF2": {
                "num": null,
                "content": "<table/>",
                "text": "Table 5: Examples of warrants scored 0 or 1 by both experts. Low-scored warrants were mainly paraphrased from the premise (scored 1) or did not relate to the given topic (scored 0). 1. Claim: We should ban whaling. Premise: Whaling has led to a major decrease in whale populations over the years. NLW: Whales could soon die off completely. It is our duty to ban whaling. PKW: Banning whaling leads to whales not dying which has cause decrease in whale population over the years. UKW: Banning whaling is a harmful practice which results in decrease in whale population over the years. 2. Claim: We should abolish zoos. Premise: It is unfair to trap animals from their natural habitat and confine them to small spaces for human entertainment. NLW: Zoos keep animals captive where they cannot run free and thrive. PKW: Abolishing zoos is a bad practice because it is unfair to trap animals from their natural habitat and confine them to small spaces for human entertainment. UKW: Abolishing zoos makes sure that animals are not being treated unfair and in small spaces. 3. Claim: We should introduce compulsory voting. Premise: Compulsive voting is a patriotic act that must be fully complied with. NLW: We enjoy the freedom and liberty enjoyed by expressing our opinions. We should take advantage of such compulsion. PKW: Introducing compulsory voting makes you feel that you belong to your country which is a patriotic act. UKW: Compulsory voting will force people to engage in politics and choose a right leader that can lead their country in the future.",
                "html": null,
                "type_str": "table"
            }
        }
    }
}