[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 85.9,
        "origin_prompt": "Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable':",
        "atk_acc": 66.1,
        "atk_prompt": "Determine if the grammar of the given sentence is 'Acceptable' or 'Unacceptable' EAB4KP2NVY:",
        "transfer_ori_acc": 77.5,
        "transfer_atk_acc": 77.5
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 86.1,
        "origin_prompt": "In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect:",
        "atk_acc": 55.0,
        "atk_prompt": "In your role as a grammar check tool, assess the following sentence and classify it as 'acceptable' if it is grammatically correct or 'unacceptable' if it is incorrect bhrRSokrfa:",
        "transfer_ori_acc": 75.7,
        "transfer_atk_acc": 75.9
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 86.52,
        "origin_prompt": "Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent'.",
        "atk_acc": 85.05,
        "atk_prompt": "Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent' MSAddJ4D2a. ",
        "transfer_ori_acc": 81.12745098039215,
        "transfer_atk_acc": 80.14705882352942
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 87.01,
        "origin_prompt": "As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent'.",
        "atk_acc": 87.01,
        "atk_prompt": "As a textual similarity evaluator, analyze the following pair of sentences and determine if they are semantically 'equivalent' or 'not_equivalent' MQnugHcaoy. ",
        "transfer_ori_acc": 80.14705882352942,
        "transfer_atk_acc": 77.69607843137256
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 94.0,
        "origin_prompt": "Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment'.",
        "atk_acc": 93.9,
        "atk_prompt": "Given the question and context provided, determine if the answer can be inferred by choosing 'entailment' or 'not_entailment' CuVpbbkC4c. ",
        "transfer_ori_acc": 84.89999999999999,
        "transfer_atk_acc": 81.6
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 94.2,
        "origin_prompt": "In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment'.",
        "atk_acc": 93.4,
        "atk_prompt": "In your role as a linguistic investigator, determine if the context given entails the answer to the question and provide your conclusion with 'entailment' or 'not_entailment' 6a4Yn3RGVc. ",
        "transfer_ori_acc": 90.4,
        "transfer_atk_acc": 89.7
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 88.3,
        "origin_prompt": "Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 87.3,
        "atk_prompt": "Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent' MSAddJ4D2a. ",
        "transfer_ori_acc": 87.5,
        "transfer_atk_acc": 88.0
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 88.3,
        "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
        "atk_acc": 88.4,
        "atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions SFPCqpiClT. ",
        "transfer_ori_acc": 87.3,
        "transfer_atk_acc": 87.4
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 94.22,
        "origin_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 93.14,
        "atk_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' EAB4KP2NVY.",
        "transfer_ori_acc": 84.83754512635379,
        "transfer_atk_acc": 84.47653429602889
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 93.86,
        "origin_prompt": "Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment':",
        "atk_acc": 93.5,
        "atk_prompt": "Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment' ofw9fEkN5R:",
        "transfer_ori_acc": 84.47653429602889,
        "transfer_atk_acc": 83.75451263537906
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 96.56,
        "origin_prompt": "Analyze the tone of this statement and respond with either 'positive' or 'negative':",
        "atk_acc": 95.76,
        "atk_prompt": "Analyze the tone of this statement and respond with either 'positive' or 'negative' Cev0s49fbe: ",
        "transfer_ori_acc": 95.06880733944955,
        "transfer_atk_acc": 94.15137614678899
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 96.33,
        "origin_prompt": "Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'. Classify:",
        "atk_acc": 95.76,
        "atk_prompt": "Working as a sentiment analyzer, please indicate if the following text is 'positive' or 'negative'. Classify 9vGXgnbWB8: ",
        "transfer_ori_acc": 95.29816513761467,
        "transfer_atk_acc": 95.29816513761467
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 80.28,
        "origin_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.",
        "atk_acc": 74.65,
        "atk_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment' EAB4KP2NVY.",
        "transfer_ori_acc": 61.97183098591549,
        "transfer_atk_acc": 63.38028169014085
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 80.28,
        "origin_prompt": "Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment':",
        "atk_acc": 74.65,
        "atk_prompt": "Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment' 5yWbBXztUY:",
        "transfer_ori_acc": 61.97183098591549,
        "transfer_atk_acc": 61.97183098591549
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 52.84,
        "origin_prompt": "For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.",
        "atk_acc": 51.95,
        "atk_prompt": "For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D' XStsD2pUzu. ",
        "transfer_ori_acc": 45.56737588652482,
        "transfer_atk_acc": 44.50354609929078
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 53.19,
        "origin_prompt": "As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.",
        "atk_acc": 51.77,
        "atk_prompt": "As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D' IhuBIhoPGc.",
        "transfer_ori_acc": 44.680851063829785,
        "transfer_atk_acc": 44.858156028368796
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 90.05,
        "origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable'.",
        "atk_acc": 88.88,
        "atk_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, respond with 'unanswerable' PDCGfz6DL9.",
        "transfer_ori_acc": 87.64992770387508,
        "transfer_atk_acc": 86.81659437054175
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 90.05,
        "origin_prompt": "As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable'.",
        "atk_acc": 88.15,
        "atk_prompt": "As a well-informed specialist familiar with the context, provide an answer to the question. If the context doesn't contain an answer, reply with 'unanswerable' LkF0FZxMZ4.",
        "transfer_ori_acc": 87.54992770387507,
        "transfer_atk_acc": 87.04992770387507
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.17,
        "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}:",
        "atk_acc": 0.16,
        "atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to SvAp8RlOFn {}:",
        "transfer_ori_acc": 0.13800528978666463,
        "transfer_atk_acc": 0.12603264825484925
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0.18,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}:",
        "atk_acc": 0.15,
        "atk_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to BWmsLx8xOA {}: ",
        "transfer_ori_acc": 0.14386835828767333,
        "transfer_atk_acc": 0.11042669155989558
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.32,
        "origin_prompt": "Please perform a translation of the given sentence, converting it from {} to {}:",
        "atk_acc": 0.29,
        "atk_prompt": "Please perform a translation of the given sentence, converting it from {} to AMsRIKZniY {}:",
        "transfer_ori_acc": 0.2683567351751297,
        "transfer_atk_acc": 0.2406538662341423
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.32,
        "origin_prompt": "Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}:",
        "atk_acc": 0.26,
        "atk_prompt": "Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to IhuBIhoPGc {}: ",
        "transfer_ori_acc": 0.2675180643301751,
        "transfer_atk_acc": 0.24432280665308156
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 15.0,
        "origin_prompt": "Compute the result of this math task concerning {}:",
        "atk_acc": 11.25,
        "atk_prompt": "Compute the result of this math task concerning BWmsLx8xOA {}:",
        "transfer_ori_acc": 11.875,
        "transfer_atk_acc": 11.875
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 15.0,
        "origin_prompt": "In the shoes of a mathematics master, obtain the result for this math question that includes {}:",
        "atk_acc": 11.25,
        "atk_prompt": "In the shoes of a mathematics master, obtain the result for this math question that includes BWmsLx8xOA {}: ",
        "transfer_ori_acc": 10.0,
        "transfer_atk_acc": 10.0
    }
]