[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 85.9,
        "origin_prompt": "Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable':",
        "atk_acc": 66.1,
        "atk_prompt": "Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable' EAB4KP2NVY:",
        "transfer_ori_acc": 75.5,
        "transfer_atk_acc": 74.0
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 86.1,
        "origin_prompt": "In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect:",
        "atk_acc": 55.0,
        "atk_prompt": "In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect bhrRSokrfa:",
        "transfer_ori_acc": 77.5,
        "transfer_atk_acc": 77.0
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 86.52,
        "origin_prompt": "Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent'.",
        "atk_acc": 85.05,
        "atk_prompt": "Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent' MSAddJ4D2a. ",
        "transfer_ori_acc": 76.0,
        "transfer_atk_acc": 74.0
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 87.01,
        "origin_prompt": "As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.",
        "atk_acc": 87.01,
        "atk_prompt": "As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent' MQnugHcaoy. ",
        "transfer_ori_acc": 76.5,
        "transfer_atk_acc": 73.0
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 94.0,
        "origin_prompt": "Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment'.",
        "atk_acc": 93.9,
        "atk_prompt": "Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment' CuVpbbkC4c. ",
        "transfer_ori_acc": 70.0,
        "transfer_atk_acc": 75.0
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 94.2,
        "origin_prompt": "In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'.",
        "atk_acc": 93.4,
        "atk_prompt": "In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment' 6a4Yn3RGVc. ",
        "transfer_ori_acc": 70.0,
        "transfer_atk_acc": 75.0
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 88.3,
        "origin_prompt": "Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 87.3,
        "atk_prompt": "Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent' MSAddJ4D2a. ",
        "transfer_ori_acc": 79.5,
        "transfer_atk_acc": 80.0
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 88.3,
        "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
        "atk_acc": 88.4,
        "atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions SFPCqpiClT. ",
        "transfer_ori_acc": 82.0,
        "transfer_atk_acc": 80.5
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 94.22,
        "origin_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 93.14,
        "atk_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' EAB4KP2NVY.",
        "transfer_ori_acc": 83.5,
        "transfer_atk_acc": 82.5
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 93.86,
        "origin_prompt": "Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment':",
        "atk_acc": 93.5,
        "atk_prompt": "Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' ofw9fEkN5R:",
        "transfer_ori_acc": 83.0,
        "transfer_atk_acc": 82.5
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 96.56,
        "origin_prompt": "Analyze the tone of this statement and respond with either 'positive' or 'negative':",
        "atk_acc": 95.76,
        "atk_prompt": "Analyze the tone of this statement and respond with either 'positive' or 'negative' Cev0s49fbe: ",
        "transfer_ori_acc": 89.5,
        "transfer_atk_acc": 93.0
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 96.33,
        "origin_prompt": "Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'. Classify:",
        "atk_acc": 95.76,
        "atk_prompt": "Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'. Classify 9vGXgnbWB8: ",
        "transfer_ori_acc": 92.0,
        "transfer_atk_acc": 94.0
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 80.28,
        "origin_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.",
        "atk_acc": 74.65,
        "atk_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment' EAB4KP2NVY.",
        "transfer_ori_acc": 53.52112676056338,
        "transfer_atk_acc": 50.70422535211267
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 80.28,
        "origin_prompt": "Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment':",
        "atk_acc": 74.65,
        "atk_prompt": "Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment' 5yWbBXztUY:",
        "transfer_ori_acc": 49.29577464788733,
        "transfer_atk_acc": 50.70422535211267
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 52.84,
        "origin_prompt": "For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.",
        "atk_acc": 51.95,
        "atk_prompt": "For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' XStsD2pUzu. ",
        "transfer_ori_acc": 63.5,
        "transfer_atk_acc": 61.0
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 53.19,
        "origin_prompt": "As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.",
        "atk_acc": 51.77,
        "atk_prompt": "As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D' IhuBIhoPGc.",
        "transfer_ori_acc": 61.0,
        "transfer_atk_acc": 59.0
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 90.05,
        "origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'.",
        "atk_acc": 88.88,
        "atk_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable' PDCGfz6DL9.",
        "transfer_ori_acc": 69.02399528817593,
        "transfer_atk_acc": 70.80489295331273
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 90.05,
        "origin_prompt": "As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable'.",
        "atk_acc": 88.15,
        "atk_prompt": "As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable' LkF0FZxMZ4.",
        "transfer_ori_acc": 57.89928434523745,
        "transfer_atk_acc": 53.2588072784446
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.17,
        "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}:",
        "atk_acc": 0.16,
        "atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to SvAp8RlOFn {}:",
        "transfer_ori_acc": 0.22174689224486577,
        "transfer_atk_acc": 0.22674738052104712
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0.18,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}:",
        "atk_acc": 0.15,
        "atk_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to BWmsLx8xOA {}: ",
        "transfer_ori_acc": 0.21989608046166395,
        "transfer_atk_acc": 0.22221554624583303
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.32,
        "origin_prompt": "Please perform a translation of the given sentence, converting it from {} to {}:",
        "atk_acc": 0.29,
        "atk_prompt": "Please perform a translation of the given sentence, converting it from {} to AMsRIKZniY {}:",
        "transfer_ori_acc": 0.33291570957929106,
        "transfer_atk_acc": 0.3299088578303098
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.32,
        "origin_prompt": "Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}:",
        "atk_acc": 0.26,
        "atk_prompt": "Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to IhuBIhoPGc {}: ",
        "transfer_ori_acc": 0.3345429801281788,
        "transfer_atk_acc": 0.3306113219304306
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 15.0,
        "origin_prompt": "Compute the result of this math task concerning {}:",
        "atk_acc": 11.25,
        "atk_prompt": "Compute the result of this math task concerning BWmsLx8xOA {}:",
        "transfer_ori_acc": 6.25,
        "transfer_atk_acc": 6.875000000000001
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 15.0,
        "origin_prompt": "In the shoes of a mathematics master, obtain the result for this math question that includes {}:",
        "atk_acc": 11.25,
        "atk_prompt": "In the shoes of a mathematics master, obtain the result for this math question that includes BWmsLx8xOA {}: ",
        "transfer_ori_acc": 5.625,
        "transfer_atk_acc": 3.75
    }
]