[
  {
    "dataset": "cola",
    "type": "task",
    "origin_acc": 78.5,
    "origin_prompt": "Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable':",
    "atk_acc": 72.5,
    "atk_prompt": "Review the sentence below and identify whether its grammar is 'Acceptable' or 'Unacceptable' zexKUTWJYG:",
    "transfer_ori_acc": 76.5,
    "transfer_atk_acc": 75.5
  },
  {
    "dataset": "cola",
    "type": "role",
    "origin_acc": 79.0,
    "origin_prompt": "While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar:",
    "atk_acc": 45.0,
    "atk_prompt": "While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar zexKUTWJYG:",
    "transfer_ori_acc": 75.5,
    "transfer_atk_acc": 74.1
  },
  {
    "dataset": "mrpc",
    "type": "task",
    "origin_acc": 75.5,
    "origin_prompt": "Evaluate whether the two provided sentences convey the same meaning by answering 'equivalent' or 'not_equivalent'.",
    "atk_acc": 72.0,
    "atk_prompt": "Evaluate whether the two provided sentences convey the same meaning by answering 'equivalent' or 'not_equivalent' SvAp8RlOFn. ",
    "transfer_ori_acc": 81.86274509803921,
    "transfer_atk_acc": 82.35294117647058
  },
  {
    "dataset": "mrpc",
    "type": "role",
    "origin_acc": 75.5,
    "origin_prompt": "In your capacity as a semantic assessment tool, evaluate the provided sentences and classify their meanings as 'equivalent' or 'not_equivalent'.",
    "atk_acc": 73.5,
    "atk_prompt": "In your capacity as a semantic assessment tool, evaluate the provided sentences and classify their meanings as 'equivalent' or 'not_equivalent' Kw6nrs57gH. ",
    "transfer_ori_acc": 76.47058823529412,
    "transfer_atk_acc": 74.75490196078431
  },
  {
    "dataset": "qnli",
    "type": "task",
    "origin_acc": 78.0,
    "origin_prompt": "Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'.",
    "atk_acc": 75.5,
    "atk_prompt": "Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment' ItGDrrA1Sc. ",
    "transfer_ori_acc": 90.4,
    "transfer_atk_acc": 89.5
  },
  {
    "dataset": "qnli",
    "type": "role",
    "origin_acc": 78.0,
    "origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
    "atk_acc": 74.5,
    "atk_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment' 6a4Yn3RGVc. ",
    "transfer_ori_acc": 89.1,
    "transfer_atk_acc": 88.7
  },
  {
    "dataset": "qqp",
    "type": "task",
    "origin_acc": 81.5,
    "origin_prompt": "Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent'.",
    "atk_acc": 78.0,
    "atk_prompt": "Assess whether the following statements are identical in meaning by answering 'equivalent' or 'not_equivalent' iia2yL9Uzm. ",
    "transfer_ori_acc": 87.6,
    "transfer_atk_acc": 87.7
  },
  {
    "dataset": "qqp",
    "type": "role",
    "origin_acc": 83.0,
    "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
    "atk_acc": 80.5,
    "atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions Cev0s49fbe. ",
    "transfer_ori_acc": 87.3,
    "transfer_atk_acc": 87.5
  },
  {
    "dataset": "rte",
    "type": "task",
    "origin_acc": 82.5,
    "origin_prompt": "Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment'.",
    "atk_acc": 80.0,
    "atk_prompt": "Determine if the given pair of sentences displays entailment or not_entailment. Respond with 'entailment' or 'not_entailment' MSAddJ4D2a.",
    "transfer_ori_acc": 84.11552346570397,
    "transfer_atk_acc": 84.11552346570397
  },
  {
    "dataset": "rte",
    "type": "role",
    "origin_acc": 81.5,
    "origin_prompt": "As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment':",
    "atk_acc": 80.0,
    "atk_prompt": "As an instrument for entailment evaluation, consider the two sentences and determine if their relationship is 'entailment' or 'not_entailment'. Respond with 'entailment' or 'not_entailment' 6a4Yn3RGVc:",
    "transfer_ori_acc": 85.5595667870036,
    "transfer_atk_acc": 85.92057761732852
  },
  {
    "dataset": "sst2",
    "type": "task",
    "origin_acc": 93.0,
    "origin_prompt": "Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative':",
    "atk_acc": 90.0,
    "atk_prompt": "Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative' AMsRIKZniY: ",
    "transfer_ori_acc": 94.26605504587155,
    "transfer_atk_acc": 94.38073394495413
  },
  {
    "dataset": "sst2",
    "type": "role",
    "origin_acc": 92.5,
    "origin_prompt": "Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify:",
    "atk_acc": 92.0,
    "atk_prompt": "Functioning as a sentiment identification tool, assess if the following expression is 'positive' or 'negative'. Classify Cev0s49fbe: ",
    "transfer_ori_acc": 94.83944954128441,
    "transfer_atk_acc": 94.83944954128441
  },
  {
    "dataset": "wnli",
    "type": "task",
    "origin_acc": 64.79,
    "origin_prompt": "Are the following two sentences entailment or not_entailment? Answer me with \"entailment\" or \"not_entailment\", just one word.",
    "atk_acc": 59.15,
    "atk_prompt": "Are the following two sentences entailment or not_entailment? Answer me with \"entailment\" or \"not_entailment\", just one word zexKUTWJYG. ",
    "transfer_ori_acc": 66.19718309859155,
    "transfer_atk_acc": 66.19718309859155
  },
  {
    "dataset": "wnli",
    "type": "role",
    "origin_acc": 63.38,
    "origin_prompt": "In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment':",
    "atk_acc": 57.75,
    "atk_prompt": "In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment' 5XeflW1ZJc:",
    "transfer_ori_acc": 54.929577464788736,
    "transfer_atk_acc": 53.52112676056338
  },
  {
    "dataset": "mmlu",
    "type": "task",
    "origin_acc": 64.0,
    "origin_prompt": "Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D'.",
    "atk_acc": 58.0,
    "atk_prompt": "Regarding the following multiple-choice question on {}, pick the correct answer from the options 'A', 'B', 'C', or 'D' Pbl3tYuoRo. ",
    "transfer_ori_acc": 44.32624113475177,
    "transfer_atk_acc": 44.50354609929078
  },
  {
    "dataset": "mmlu",
    "type": "role",
    "origin_acc": 61.5,
    "origin_prompt": "As a skilled person in the field of {}, please respond to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'.",
    "atk_acc": 58.0,
    "atk_prompt": "As a skilled person in the field of {}, please respond to the multiple-choice question by choosing 'A', 'B', 'C', or 'D' IhuBIhoPGc.",
    "transfer_ori_acc": 44.680851063829785,
    "transfer_atk_acc": 45.0354609929078
  },
  {
    "dataset": "squad_v2",
    "type": "task",
    "origin_acc": 69.16,
    "origin_prompt": "Based on the given context, provide the best possible answer. If there's no answer available in the context, respond with 'unanswerable'.",
    "atk_acc": 64.99,
    "atk_prompt": "Based on the given context, provide the best possible answer. If there's no answer available in the context, respond with 'unanswerable' ItGDrrA1Sc.",
    "transfer_ori_acc": 87.04992770387507,
    "transfer_atk_acc": 87.54992770387507
  },
  {
    "dataset": "squad_v2",
    "type": "role",
    "origin_acc": 64.31,
    "origin_prompt": "As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable'.",
    "atk_acc": 60.2,
    "atk_prompt": "As an expert with a deep understanding of the context, find the best answer. If the context doesn't include an answer, say 'unanswerable' d6ZQ3u0GBQ.",
    "transfer_ori_acc": 88.04992770387507,
    "transfer_atk_acc": 88.04992770387507
  },
  {
    "dataset": "iwslt",
    "type": "task",
    "origin_acc": 0.22,
    "origin_prompt": "Translate the provided sentence from {} to {} while maintaining the original meaning and context:",
    "atk_acc": 0.21,
    "atk_prompt": "Translate the provided sentence from {} to {} while maintaining the original meaning and context zq0DcZ5dnI:",
    "transfer_ori_acc": 0.12847010019593297,
    "transfer_atk_acc": 0.13452105947739143
  },
  {
    "dataset": "iwslt",
    "type": "role",
    "origin_acc": 0.22,
    "origin_prompt": "In the capacity of a proficient translator, please change the following sentence from {} to {}:",
    "atk_acc": 0.21,
    "atk_prompt": "In the capacity of a proficient translator, please change the following sentence from {} to SFPCqpiClT {}: ",
    "transfer_ori_acc": 0.13904950536673938,
    "transfer_atk_acc": 0.12181412545498613
  },
  {
    "dataset": "un_multi",
    "type": "task",
    "origin_acc": 0.34,
    "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}:",
    "atk_acc": 0.33,
    "atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to Kw6nrs57gH {}:",
    "transfer_ori_acc": 0.26814405340158265,
    "transfer_atk_acc": 0.26255854470098927
  },
  {
    "dataset": "un_multi",
    "type": "role",
    "origin_acc": 0.34,
    "origin_prompt": "While acting as an experienced language expert, translate the following text from {} to {}:",
    "atk_acc": 0.33,
    "atk_prompt": "While acting as an experienced language expert, translate the following text from {} to Kw6nrs57gH {}: ",
    "transfer_ori_acc": 0.26489202618745367,
    "transfer_atk_acc": 0.25410191629878426
  },
  {
    "dataset": "math",
    "type": "task",
    "origin_acc": 6.88,
    "origin_prompt": "Calculate the answer to the following math query about {}:",
    "atk_acc": 5.0,
    "atk_prompt": "Calculate the answer to the following math query about 6a4Yn3RGVc {}:",
    "transfer_ori_acc": 13.125,
    "transfer_atk_acc": 11.875
  },
  {
    "dataset": "math",
    "type": "role",
    "origin_acc": 5.0,
    "origin_prompt": "Embodying a mathematics advisor, find the answer to the following math problem with {}:",
    "atk_acc": 2.5,
    "atk_prompt": "Embodying a mathematics advisor, find the answer to the following math problem with 9vGXgnbWB8 {}: ",
    "transfer_ori_acc": 11.25,
    "transfer_atk_acc": 12.5
  }
]