ivykopal committed
Commit 92536d5 · verified · parent: c03e3e3

Delete templates.json

Files changed (1)
  1. templates.json +0 -1063
templates.json DELETED
@@ -1,1063 +0,0 @@
{
  "train": [
    {"dataset": "aeslc", "model": "T5", "name": "aeslc-prompt-t5",
     "input": "summarize: {{email_body}}",
     "target": "{{subject_line}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "anli", "model": "T5", "name": "anli-prompt-t5",
     "input": "premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "billsum", "model": "T5", "name": "billsum-prompt-t5",
     "input": "summarize: {{text}}",
     "target": "{{summary}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "boolq", "model": "T5", "name": "boolq-prompt-t5",
     "input": "boolq question: {{question}} passage: {{passage}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "c4", "model": "T5", "name": "c4-prompt-t5",
     "input": "",
     "target": "{{text}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "cb", "model": "T5", "name": "cb-prompt-t5",
     "input": "cb premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]},
     "languages": ["en"], "metrics": ["accuracy", "multiclass_f1"]},
    {"dataset": "cnn_dailymail", "model": "T5", "name": "cnn_dailymail-prompt-t5",
     "input": "summarize: {{article}}",
     "target": "{{highlights}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "cola", "model": "T5", "name": "cola-prompt-t5",
     "input": "cola sentence: {{sentence}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["matthews_corrcoef"]},
     "languages": ["en"], "metrics": ["matthews_corrcoef"]},
    {"dataset": "common_gen", "model": "T5", "name": "common_gen-prompt-t5",
     "input": "generate: {{lambda concepts: \" \".join(concepts)}}",
     "target": "{{target}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "copa", "model": "T5", "name": "copa-prompt-t5",
     "input": "copa premise: {{premise}} choice1: {{choice1}} choice2: {{choice2}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "cosmos_qa", "model": "T5", "name": "cosmos_qa-prompt-t5",
     "input": "question: {{question}} context: {{context}} choice0: {{answer0}} choice1: {{answer1}} choice2: {{answer2}} choice3: {{answer3}}",
     "target": "{{lambda label: str(label)}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "cxc", "model": "T5", "name": "cxc-prompt-t5",
     "input": "sentence1: {{sentence1}} sentence2: {{sentence2}}",
     "target": "{{lambda score: np.round((score * 5) / 5, decimals=1)}}",
     "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]},
     "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]},
    {"dataset": "doc_nli", "model": "T5", "name": "doc_nli-prompt-t5",
     "input": "premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{label}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "drop", "model": "T5", "name": "drop-prompt-t5",
     "input": "question: {{question}} context: {{passage}}",
     "target": "{{answers_spans[\"spans\"][0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "gigaword", "model": "T5", "name": "gigaword-prompt-t5",
     "input": "summarize: {{document}}",
     "target": "{{summary}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "hellaswag", "model": "T5", "name": "hellaswag-prompt-t5",
     "input": "context: {{ctx}} ending0: {{endings[0]}} ending1: {{endings[1]}} ending2: {{endings[2]}} ending3: {{endings[3]}}",
     "target": "{{lambda label: str(label)}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "hotpot_qa", "model": "T5", "name": "hotpot_qa-prompt-t5",
     "input": "question: {{question}} context: {{\"\".join(context[\"sentences\"][0])}}",
     "target": "{{answer}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "mnli", "model": "T5", "name": "mnli-prompt-t5",
     "input": "mnli premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "mrpc", "model": "T5", "name": "mrpc-prompt-t5",
     "input": "mrpc sentence1: {{sentence1}} sentence2: {{sentence2}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]},
     "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]},
    {"dataset": "mrqa", "model": "T5", "name": "mrqa-prompt-t5",
     "input": "question: {{question}} context: {{context}}",
     "target": "{{answers[0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "multirc", "model": "T5", "name": "multirc-prompt-t5",
     "input": "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["exact_match", "multirc_f1"], "preprocessing": ["remove_markup"]},
     "languages": ["en"], "metrics": ["exact_match", "multirc_f1"]},
    {"dataset": "multi_news", "model": "T5", "name": "multi_news-prompt-t5",
     "input": "summarize: {{document}}",
     "target": "{{summary}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "multi_nli", "model": "T5", "name": "multi_nli-prompt-t5",
     "input": "premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5",
     "input": "question: {{question}} context: {{context}}",
     "target": "{{answer[0]}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "newsqa", "model": "T5", "name": "newsqa-prompt-t5-without-context",
     "input": "question: {{question}}",
     "target": "{{answer[0]}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "newsroom", "model": "T5", "name": "newsroom-prompt-t5",
     "input": "summarize: {{text}}",
     "target": "{{summary}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "nq_open", "model": "T5", "name": "nq_open-prompt-t5",
     "input": "nq question: {{question}}",
     "target": "{{answer[0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "piqa", "model": "T5", "name": "piqa-prompt-t5",
     "input": "question: {{goal}} choice1: {{sol1}} choice2: {{sol2}}",
     "target": "{{lambda label: str(label)}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "qnli", "model": "T5", "name": "qnli-prompt-t5",
     "input": "qnli question: {{question}} sentence: {{sentence}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "qqp", "model": "T5", "name": "qqp-prompt-t5",
     "input": "qqp question1: {{question1}} question2: {{question2}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy", "f1_invalid"]},
     "languages": ["en"], "metrics": ["accuracy", "f1_invalid"]},
    {"dataset": "race", "model": "T5", "name": "race-prompt-t5",
     "input": "question: {{question}} context: {{article}} choice0: {{options[0]}} choice1: {{options[1]}} choice2: {{options[2]}} choice3: {{options[3]}}",
     "target": "{{lambda answer: str(ord(answer) - ord(\"A\"))}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "record", "model": "T5", "name": "record-prompt-t5",
     "input": "record query: {{query}} entities: {{\", \".join(entities)}} passage: {{passage}}",
     "target": "{{answers[0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["record_preprocess"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "rte", "model": "T5", "name": "rte-prompt-t5",
     "input": "rte sentence1: {{sentence1}} sentence2: {{sentence2}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "samsum", "model": "T5", "name": "samsum-prompt-t5",
     "input": "summarize: {{dialogue}}",
     "target": "{{summary}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "search_qa", "model": "T5", "name": "search_qa-prompt-t5",
     "input": "question: {{question}}",
     "target": "{{answer}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "snli", "model": "T5", "name": "snli-prompt-t5",
     "input": "premise: {{premise}} hypothesis: {{hypothesis}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "social_i_qa", "model": "T5", "name": "social_i_qa-prompt-t5",
     "input": "question: {{question}} context: {{context}} || choice0: {{answerA}} || choice1: {{answerB}} || choice2: {{answerC}}",
     "target": "{{lambda label: str(int(label) - 1)}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "squad", "model": "T5", "name": "squad-prompt-t5",
     "input": "question: {{question}} context: {{context}}",
     "target": "{{answers[\"text\"][0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "squad", "model": "T5", "name": "squad-prompt-trivia-t5",
     "input": "squad trivia question: {{question}}",
     "target": "{{answers[\"text\"][0]}}",
     "metadata": {"languages": ["en"], "metrics": ["squad"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["squad"]},
    {"dataset": "sst2", "model": "T5", "name": "sst2-prompt-t5",
     "input": "sst2 sentence: {{sentence}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["pad_punctuation"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "stsb", "model": "T5", "name": "stsb-prompt-t5",
     "input": "stsb sentence1: {{sentence1}} sentence2: {{sentence2}}",
     "target": "{{lambda label: np.round((label * 5) / 5, decimals=1)}}",
     "metadata": {"languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]},
     "languages": ["en"], "metrics": ["pearson_corrcoef", "spearman_corrcoef"]},
    {"dataset": "wic", "model": "T5", "name": "wic-prompt-t5",
     "input": "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "wiki_auto", "model": "T5", "name": "wiki_auto-prompt-t5",
     "input": "{{source}}",
     "target": "{{target}}",
     "metadata": {"languages": ["en"], "metrics": ["bleu"]},
     "languages": ["en"], "metrics": ["bleu"]},
    {"dataset": "wiki_lingua", "model": "T5", "name": "wiki_lingua-prompt-t5",
     "input": "{{source_aligned[\"en\"]}}",
     "target": "{{target_aligned[\"en\"]}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]},
    {"dataset": "winogrande", "model": "T5", "name": "winogrande-prompt-t5",
     "input": "sentence: {{sentence}} option0: {{option1}} option1: {{option2}}",
     "target": "{{lambda answer: str(int(answer) - 1)}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "wnli", "model": "T5", "name": "wnli-prompt-t5",
     "input": "wnli sentence1: {{sentence1}} sentence2: {{sentence2}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "wsc", "model": "T5", "name": "wsc-prompt-t5",
     "input": "wsc text: {{text}}",
     "target": "{{lambda label: \"<unk>\" if label == -1 else choices[label]}}",
     "metadata": {"languages": ["en"], "metrics": ["accuracy"], "preprocessing": ["wsc_preprocess"]},
     "languages": ["en"], "metrics": ["accuracy"]},
    {"dataset": "xsum", "model": "T5", "name": "xsum-prompt-t5",
     "input": "summarize: {{document}}",
     "target": "{{target}}",
     "metadata": {"languages": ["en"], "metrics": ["rouge"]},
     "languages": ["en"], "metrics": ["rouge"]}
  ]
}
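
The deleted file was a registry of T5-style prompt templates: each entry names a dataset, an input/target pair built from {{...}} spans, and the metrics used for evaluation. The code that consumed this registry is not part of this commit, so the following is only a minimal, hypothetical sketch of how such a template could be rendered. It assumes each {{...}} span is a Python expression evaluated over an example's fields, and that a bare lambda is applied to the field matching its parameter name (as the label/answer lambdas in the templates suggest); the example record and choices list below are made-up illustrations.

    import re

    # Hypothetical example for the "boolq" template; the real loader that
    # consumed templates.json is not shown in this commit.
    template_input = "boolq question: {{question}} passage: {{passage}}"
    template_target = '{{lambda label: "<unk>" if label == -1 else choices[label]}}'
    example = {"question": "is the sky blue", "passage": "The sky is blue.", "label": 1}
    choices = ["False", "True"]  # assumed label names for boolq

    def render(template, example, choices):
        """Replace each {{...}} span: a bare lambda is applied to the field
        named by its parameter; any other expression is evaluated directly."""
        namespace = {**example, "choices": choices}
        def substitute(match):
            expr = match.group(1).strip()
            if expr.startswith("lambda"):
                fn = eval(expr, namespace)          # e.g. lambda label: ...
                field = fn.__code__.co_varnames[0]  # parameter name -> field name
                return str(fn(example[field]))
            return str(eval(expr, namespace))       # e.g. answers["text"][0]
        return re.sub(r"\{\{(.*?)\}\}", substitute, template)

    print(render(template_input, example, choices))
    # boolq question: is the sky blue passage: The sky is blue.
    print(render(template_target, example, choices))
    # True

An eval-based renderer like this is for illustration only; a real implementation would restrict the expression language rather than evaluate arbitrary template strings.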