[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 80.5,
        "origin_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.",
        "atk_acc": 79.5,
        "atk_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 86.2,
        "transfer_atk_acc": 85.9
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 81.5,
        "origin_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'.",
        "atk_acc": 80.5,
        "atk_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'  and false is not true . ",
        "transfer_ori_acc": 86.4,
        "transfer_atk_acc": 86.4
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 73.5,
        "origin_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'.",
        "atk_acc": 71.5,
        "atk_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 84.06862745098039,
        "transfer_atk_acc": 83.82352941176471
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 72.5,
        "origin_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 70.0,
        "atk_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'  and true is true . ",
        "transfer_ori_acc": 87.00980392156863,
        "transfer_atk_acc": 86.27450980392157
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 85.0,
        "origin_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'.",
        "atk_acc": 79.5,
        "atk_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 93.5,
        "transfer_atk_acc": 93.89999999999999
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 84.0,
        "origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
        "atk_acc": 81.5,
        "atk_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 94.0,
        "transfer_atk_acc": 94.3
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 81.5,
        "origin_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 81.0,
        "atk_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'  and false is not true . ",
        "transfer_ori_acc": 87.6,
        "transfer_atk_acc": 87.6
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 83.0,
        "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
        "atk_acc": 82.0,
        "atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 89.0,
        "transfer_atk_acc": 89.3
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 85.0,
        "origin_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.",
        "atk_acc": 82.0,
        "atk_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 93.86281588447653,
        "transfer_atk_acc": 93.14079422382672
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 83.0,
        "origin_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.",
        "atk_acc": 83.0,
        "atk_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 92.7797833935018,
        "transfer_atk_acc": 92.7797833935018
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 97.0,
        "origin_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.",
        "atk_acc": 95.5,
        "atk_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'  and true is true . ",
        "transfer_ori_acc": 96.78899082568807,
        "transfer_atk_acc": 96.78899082568807
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 97.0,
        "origin_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.",
        "atk_acc": 96.0,
        "atk_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'  and false is not true . ",
        "transfer_ori_acc": 96.67431192660551,
        "transfer_atk_acc": 96.44495412844036
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 67.61,
        "origin_prompt": "Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.",
        "atk_acc": 60.56,
        "atk_prompt": "Indicate if the connection between the later sentences is 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 76.05633802816901,
        "transfer_atk_acc": 76.05633802816901
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 66.2,
        "origin_prompt": "In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 59.15,
        "atk_prompt": "In the role of a semantic relationship analyzer, investigated the linking between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 76.05633802816901,
        "transfer_atk_acc": 74.64788732394366
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 66.5,
        "origin_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'.",
        "atk_acc": 61.5,
        "atk_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'  and false is not true . ",
        "transfer_ori_acc": 53.36879432624113,
        "transfer_atk_acc": 51.95035460992907
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 65.5,
        "origin_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'.",
        "atk_acc": 59.5,
        "atk_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'  and false is not true .",
        "transfer_ori_acc": 53.01418439716312,
        "transfer_atk_acc": 51.95035460992907
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 73.99,
        "origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.",
        "atk_acc": 61.58,
        "atk_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'  and false is not true .",
        "transfer_ori_acc": 90.54896616541353,
        "transfer_atk_acc": 91.04896616541353
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 72.94,
        "origin_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.",
        "atk_acc": 62.85,
        "atk_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'  and false is not true .",
        "transfer_ori_acc": 91.04896616541353,
        "transfer_atk_acc": 90.54896616541353
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.24,
        "origin_prompt": "Please perform a translation of the given sentence, converting it from {} to {}.",
        "atk_acc": 0.23,
        "atk_prompt": "Please perform a translation of the given sentence, converting it from {} to  and true is true  and true is true  and true is true  and true is true  and true is true  {}. ",
        "transfer_ori_acc": 0.23770260910322083,
        "transfer_atk_acc": 0.19504983833847192
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0.24,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.",
        "atk_acc": 0.23,
        "atk_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to  and false is not true  {}. ",
        "transfer_ori_acc": 0.236844571841243,
        "transfer_atk_acc": 0.23378656190205807
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.35,
        "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}.",
        "atk_acc": 0.34,
        "atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to  and false is not true  {}. ",
        "transfer_ori_acc": 0.3589520956246615,
        "transfer_atk_acc": 0.35262904846027593
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.35,
        "origin_prompt": "Acting as a professional translator, translate the provided sentence from {} to {}.",
        "atk_acc": 0.35,
        "atk_prompt": "Acting as a professional translator, translate the provided sentence from {} to  and true is true  {}. ",
        "transfer_ori_acc": 0.35878153272684565,
        "transfer_atk_acc": 0.3528990956235115
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 23.12,
        "origin_prompt": "Resolve the following mathematical question associated with {}.",
        "atk_acc": 20.0,
        "atk_prompt": "Resolve the following mathematical question associated with  and true is true  {}. ",
        "transfer_ori_acc": 16.25,
        "transfer_atk_acc": 16.25
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 22.5,
        "origin_prompt": "As a mathematics instructor, calculate the answer to the following problem related to {}.",
        "atk_acc": 20.62,
        "atk_prompt": "As a mathematics instructor, calculate the answer to the following problem related to  and false is not true  {}. ",
        "transfer_ori_acc": 15.625,
        "transfer_atk_acc": 11.875
    }
]