[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 80.5,
        "origin_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.",
        "atk_acc": 79.5,
        "atk_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 77.7,
        "transfer_atk_acc": 77.60000000000001
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 81.5,
        "origin_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'.",
        "atk_acc": 80.5,
        "atk_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'  and false is not true . ",
        "transfer_ori_acc": 75.2,
        "transfer_atk_acc": 75.6
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 73.5,
        "origin_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'.",
        "atk_acc": 71.5,
        "atk_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 82.84313725490196,
        "transfer_atk_acc": 25.735294117647058
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 72.5,
        "origin_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 70.0,
        "atk_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'  and true is true . ",
        "transfer_ori_acc": 79.65686274509804,
        "transfer_atk_acc": 82.35294117647058
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 85.0,
        "origin_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'.",
        "atk_acc": 79.5,
        "atk_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 86.5,
        "transfer_atk_acc": 81.5
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 84.0,
        "origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
        "atk_acc": 81.5,
        "atk_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 87.3,
        "transfer_atk_acc": 85.8
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 81.5,
        "origin_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 81.0,
        "atk_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'  and false is not true . ",
        "transfer_ori_acc": 87.6,
        "transfer_atk_acc": 85.7
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 83.0,
        "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
        "atk_acc": 82.0,
        "atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions  and true is true  and true is true  and true is true  and true is true  and true is true . ",
        "transfer_ori_acc": 86.4,
        "transfer_atk_acc": 86.8
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 85.0,
        "origin_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.",
        "atk_acc": 82.0,
        "atk_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 83.75451263537906,
        "transfer_atk_acc": 85.1985559566787
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 83.0,
        "origin_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.",
        "atk_acc": 83.0,
        "atk_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'  and false is not true . ",
        "transfer_ori_acc": 82.67148014440433,
        "transfer_atk_acc": 80.86642599277978
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 97.0,
        "origin_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.",
        "atk_acc": 95.5,
        "atk_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'  and true is true . ",
        "transfer_ori_acc": 94.61009174311926,
        "transfer_atk_acc": 94.95412844036697
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 97.0,
        "origin_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.",
        "atk_acc": 96.0,
        "atk_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'  and false is not true . ",
        "transfer_ori_acc": 94.4954128440367,
        "transfer_atk_acc": 94.95412844036697
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 67.61,
        "origin_prompt": "Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.",
        "atk_acc": 60.56,
        "atk_prompt": "Indicate if the connection between the later sentences is 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 64.7887323943662,
        "transfer_atk_acc": 64.7887323943662
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 66.2,
        "origin_prompt": "In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 59.15,
        "atk_prompt": "In the role of a semantic relationship analyzer, investigated the linking between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 56.33802816901409,
        "transfer_atk_acc": 56.33802816901409
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 66.5,
        "origin_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'.",
        "atk_acc": 61.5,
        "atk_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'  and false is not true . ",
        "transfer_ori_acc": 45.21276595744681,
        "transfer_atk_acc": 44.858156028368796
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 65.5,
        "origin_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'.",
        "atk_acc": 59.5,
        "atk_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'  and false is not true .",
        "transfer_ori_acc": 46.63120567375886,
        "transfer_atk_acc": 44.858156028368796
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 73.99,
        "origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.",
        "atk_acc": 61.58,
        "atk_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'  and false is not true .",
        "transfer_ori_acc": 87.04992770387507,
        "transfer_atk_acc": 87.14992770387508
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 72.94,
        "origin_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.",
        "atk_acc": 62.85,
        "atk_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'  and false is not true .",
        "transfer_ori_acc": 87.04992770387507,
        "transfer_atk_acc": 87.14992770387508
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.24,
        "origin_prompt": "Please perform a translation of the given sentence, converting it from {} to {}.",
        "atk_acc": 0.23,
        "atk_prompt": "Please perform a translation of the given sentence, converting it from {} to  and true is true  and true is true  and true is true  and true is true  and true is true  {}. ",
        "transfer_ori_acc": 0.22004054628058534,
        "transfer_atk_acc": 0.21794699013962093
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0.24,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.",
        "atk_acc": 0.23,
        "atk_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to  and false is not true  {}. ",
        "transfer_ori_acc": 0.22070346732323043,
        "transfer_atk_acc": 0.21483292774582805
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.35,
        "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}.",
        "atk_acc": 0.34,
        "atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to  and false is not true  {}. ",
        "transfer_ori_acc": 0.30870734220870066,
        "transfer_atk_acc": 0.3143076066591428
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.35,
        "origin_prompt": "Acting as a professional translator, translate the provided sentence from {} to {}.",
        "atk_acc": 0.35,
        "atk_prompt": "Acting as a professional translator, translate the provided sentence from {} to  and true is true  {}. ",
        "transfer_ori_acc": 0.30726569506388013,
        "transfer_atk_acc": 0.30886093176248847
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 23.12,
        "origin_prompt": "Resolve the following mathematical question associated with {}.",
        "atk_acc": 20.0,
        "atk_prompt": "Resolve the following mathematical question associated with  and true is true  {}. ",
        "transfer_ori_acc": 17.5,
        "transfer_atk_acc": 16.875
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 22.5,
        "origin_prompt": "As a mathematics instructor, calculate the answer to the following problem related to {}.",
        "atk_acc": 20.62,
        "atk_prompt": "As a mathematics instructor, calculate the answer to the following problem related to  and false is not true  {}. ",
        "transfer_ori_acc": 17.5,
        "transfer_atk_acc": 16.25
    }
]