[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 77.8,
        "origin_prompt": "Is the provided sentence grammatically correct? Respond with 'Acceptable' or 'Unacceptable'.",
        "atk_acc": 70.3,
        "atk_prompt": "Become the brought sentence grammatically correction? Reactions with 'Acceptable' or 'Unacceptable'. ",
        "transfer_ori_acc": 81.0,
        "transfer_atk_acc": 80.5
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 75.7,
        "origin_prompt": "While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.",
        "atk_acc": 70.5,
        "atk_prompt": "If performing pronunciation analyzing, classify the grammar of the following condemnation as 'acceptable' for adequate jargon or 'unacceptable' for misspelled dictionary. ",
        "transfer_ori_acc": 81.5,
        "transfer_atk_acc": 83.0
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 83.09,
        "origin_prompt": "Evaluate whether the two provided sentences convey the same meaning by answering 'equivalent' or 'not_equivalent'.",
        "atk_acc": 78.92,
        "atk_prompt": "Estimation whether the two provided convictions despatch the same meaning by answers 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 67.5,
        "transfer_atk_acc": 69.5
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 80.15,
        "origin_prompt": "In the role of a sentence comparison analyst, assess the provided sentences and indicate if they convey the same meaning by selecting 'equivalent' or 'not_equivalent'.",
        "atk_acc": 75.25,
        "atk_prompt": "In the role of a sentence likened psychoanalyst, assess the awarded sentences and indicate if they convey the same feeling by selecting 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 71.0,
        "transfer_atk_acc": 67.5
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 88.8,
        "origin_prompt": "Evaluate whether the given context supports the answer to the question by responding with 'entailment' or 'not_entailment'.",
        "atk_acc": 36.0,
        "atk_prompt": "Valuing whether the bestowed framework perpetuates the responded to the tura by retaliated with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 83.5,
        "transfer_atk_acc": 74.0
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 88.8,
        "origin_prompt": "In the capacity of a language evaluator, examine if the given context justifies the answer to the question and indicate your assessment with 'entailment' or 'not_entailment'.",
        "atk_acc": 29.8,
        "atk_prompt": "Of the jurisdictional of a phraseology evaluator, exams if the handed regard vindicated the solution to the matter and clarified your examinations with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 85.5,
        "transfer_atk_acc": 80.0
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 88.1,
        "origin_prompt": "textfooler",
        "atk_acc": 70.9,
        "atk_prompt": "Become the farther two subjects equivalent or not? Retaliated me with \"equivalent\" or \"not_equivalent\". ",
        "transfer_ori_acc": 81.0,
        "transfer_atk_acc": 81.5
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 86.5,
        "origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
        "atk_acc": 63.4,
        "atk_prompt": "As an tool for topic relation valuing, look the theme and identify if their connotation is the same, retaliated with 'equivalent' for similar question or 'not_equivalent' for other trouble. ",
        "transfer_ori_acc": 83.0,
        "transfer_atk_acc": 83.0
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 84.84,
        "origin_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 80.51,
        "atk_prompt": "Defined whether the made peers of sentences showings entailment or not_entailment. Reaction with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 82.0,
        "transfer_atk_acc": 82.0
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 84.12,
        "origin_prompt": "Acting as an entailment detection instrument, determine if the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
        "atk_acc": 79.78,
        "atk_prompt": "Acting as an entailment detects instrument, defined if the given torque of sentences showcases entailment or not_entailment. Replies with 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 83.5,
        "transfer_atk_acc": 83.0
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 94.95,
        "origin_prompt": "Analyze the tone of this statement and respond with either 'positive' or 'negative'.",
        "atk_acc": 94.27,
        "atk_prompt": "Explore the tone of this affirmations and react with either 'positive' or 'negative'. ",
        "transfer_ori_acc": 95.0,
        "transfer_atk_acc": 96.0
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 95.53,
        "origin_prompt": "In the role of a sentiment analysis tool, respond with 'positive' or 'negative' to classify this statement.",
        "atk_acc": 94.04,
        "atk_prompt": "In the role of a emotions exploring toolbox, respond with 'positive' or 'negative' to sorted this affirmations. ",
        "transfer_ori_acc": 97.0,
        "transfer_atk_acc": 96.5
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 64.79,
        "origin_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.",
        "atk_acc": 54.93,
        "atk_prompt": "Assess the connecting between the subsequent phrases and classify it as 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 64.7887323943662,
        "transfer_atk_acc": 64.7887323943662
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 66.2,
        "origin_prompt": "Functioning as an entailment evaluation tool, analyze the provided sentences and decide if their relationship is 'entailment' or 'not_entailment'.",
        "atk_acc": 56.34,
        "atk_prompt": "Functioning as an entailment evaluation tool, analyze the afforded sentences and chosen if their liaise is 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 64.7887323943662,
        "transfer_atk_acc": 64.7887323943662
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 46.45,
        "origin_prompt": "For each multiple-choice question about {}, identify the correct answer by selecting 'A', 'B', 'C', or 'D'.",
        "atk_acc": 37.41,
        "atk_prompt": "About each multiple-choice question about {}, find the rectify rebuttal by choices 'A', 'B', 'C', or 'D'. ",
        "transfer_ori_acc": 66.5,
        "transfer_atk_acc": 66.0
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 46.99,
        "origin_prompt": "Considering your familiarity with {}, attend to the following multiple-choice question by picking 'A', 'B', 'C', or 'D'.",
        "atk_acc": 43.44,
        "atk_prompt": "Explored your familiarity with {}, participate to the next multiple-choice issues by pickup 'A', 'B', 'C', or 'D'.",
        "transfer_ori_acc": 66.0,
        "transfer_atk_acc": 65.5
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 87.91,
        "origin_prompt": "From the context, provide the most precise answer. If the answer is not in the context, respond with 'unanswerable'.",
        "atk_acc": 86.05,
        "atk_prompt": "Against the context, give the most correct answer. If the solution is not in the context, respond with 'unanswerable'.",
        "transfer_ori_acc": 70.28878149477491,
        "transfer_atk_acc": 71.7844655492825
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 87.05,
        "origin_prompt": "As a subject matter expert, extract the correct answer from the context. If an answer is not present, indicate 'unanswerable'.",
        "atk_acc": 85.45,
        "atk_prompt": "As a subject matter expert, extract the correct reacts from the context. If an answer is not present, indicate 'unanswerable'.",
        "transfer_ori_acc": 73.17953545522198,
        "transfer_atk_acc": 72.94818347386997
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.22,
        "origin_prompt": "Convert the following sentence from its original {} language to the target language {}.",
        "atk_acc": 0.13,
        "atk_prompt": "Conversion the later conviction from its indigenous {} vocabulary to the blanco tongues {}. ",
        "transfer_ori_acc": 0.23990979374110985,
        "transfer_atk_acc": 0.23906409239640158
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0.22,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.",
        "atk_acc": 0.16,
        "atk_prompt": "In the role of a languages specializing, exerting a machine converting for the handed pity, metamorphosis it from {} to {}. ",
        "transfer_ori_acc": 0.24299367421289694,
        "transfer_atk_acc": 0.23804368301402434
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.31,
        "origin_prompt": "Convert the following sentence from its original {} language to the target language {}.",
        "atk_acc": 0.22,
        "atk_prompt": "Convert the below sentence from its indigenous {} language to the blanco language {}. ",
        "transfer_ori_acc": 0.3492457164800376,
        "transfer_atk_acc": 0.3470429396594703
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.31,
        "origin_prompt": "Taking on the role of a certified translator, perform a translation of the given sentence from {} to {}.",
        "atk_acc": 0.26,
        "atk_prompt": "Taking on the role of a attest translator, implement a interpreters of the gave sanction from {} to {}. ",
        "transfer_ori_acc": 0.3492996664505888,
        "transfer_atk_acc": 0.3561186993493318
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 17.5,
        "origin_prompt": "Find the solution for this mathematical challenge with {}.",
        "atk_acc": 15.0,
        "atk_prompt": "Find the resolutions for this mathematical challenge with {}. ",
        "transfer_ori_acc": 22.5,
        "transfer_atk_acc": 23.125
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 18.12,
        "origin_prompt": "Embodying a mathematics advisor, find the answer to the following math problem with {}.",
        "atk_acc": 13.75,
        "atk_prompt": "Embodying a mathematics advice, find the rebuttal to the following math problem with {}. ",
        "transfer_ori_acc": 23.125,
        "transfer_atk_acc": 13.125
    }
]