[
    {
        "dataset": "cola",
        "type": "task",
        "origin_acc": 86.1,
        "origin_prompt": "Is the provided sentence grammatically correct? Respond with 'Acceptable' or 'Unacceptable':",
        "atk_acc": 30.9,
        "atk_prompt": "Is the brought chastisement grammatically appropriate? Retaliated with 'Acceptable' or 'Unacceptable':",
        "transfer_ori_acc": 70.89999999999999,
        "transfer_atk_acc": 31.900000000000002
    },
    {
        "dataset": "cola",
        "type": "role",
        "origin_acc": 86.2,
        "origin_prompt": "As an instrument for grammar evaluation, consider the sentence and determine if its grammar is correct, responding with 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar:",
        "atk_acc": 33.1,
        "atk_prompt": "As an instrument for grammar appraisals, considerations the convicting and determine if its verbs is correct, responding with 'acceptable' for proper grammar or 'unacceptable' for misspelled vocabulary:",
        "transfer_ori_acc": 76.3,
        "transfer_atk_acc": 72.89999999999999
    },
    {
        "dataset": "mrpc",
        "type": "task",
        "origin_acc": 86.76,
        "origin_prompt": "Please analyze the provided sentences and indicate if their meanings are the same by choosing 'equivalent' or 'not_equivalent'.",
        "atk_acc": 75.98,
        "atk_prompt": "Asks analysis the conferred priors and specified if their meanings are the same by selecting 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 81.12745098039215,
        "transfer_atk_acc": 81.12745098039215
    },
    {
        "dataset": "mrpc",
        "type": "role",
        "origin_acc": 87.99,
        "origin_prompt": "In your capacity as a language analyst, assess the following sentences and classify their similarity as 'equivalent' or 'not_equivalent'.",
        "atk_acc": 76.23,
        "atk_prompt": "At your power as a language psych, assess the later judgements and categories their metaphor as 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 78.18627450980392,
        "transfer_atk_acc": 78.43137254901961
    },
    {
        "dataset": "qnli",
        "type": "task",
        "origin_acc": 94.1,
        "origin_prompt": "Based on the information in the context, decide if the answer to the question is justified by choosing 'entailment' or 'not_entailment'.",
        "atk_acc": 67.1,
        "atk_prompt": "Baseline on the detail in the framework, chosen if the answers to the topic is motivated by selector 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 89.4,
        "transfer_atk_acc": 80.30000000000001
    },
    {
        "dataset": "qnli",
        "type": "role",
        "origin_acc": 94.2,
        "origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
        "atk_acc": 52.7,
        "atk_prompt": "Iike a semantic savant, determine whether the provided framework encourages the rebuttal to the topics and option 'entailment' or 'not_entailment'. ",
        "transfer_ori_acc": 89.1,
        "transfer_atk_acc": 87.7
    },
    {
        "dataset": "qqp",
        "type": "task",
        "origin_acc": 88.3,
        "origin_prompt": "Do these two sentences convey the same meaning? Indicate with 'equivalent' or 'not_equivalent'.",
        "atk_acc": 77.6,
        "atk_prompt": "Doing these two condemning communicate the same feeling? Stating with 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 87.5,
        "transfer_atk_acc": 83.1
    },
    {
        "dataset": "qqp",
        "type": "role",
        "origin_acc": 88.5,
        "origin_prompt": "As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.",
        "atk_acc": 83.4,
        "atk_prompt": "As a utility for definition questions equivalence, revising the question and ranks their metaphor as either 'equivalent' or 'not_equivalent'. ",
        "transfer_ori_acc": 86.5,
        "transfer_atk_acc": 85.6
    },
    {
        "dataset": "rte",
        "type": "task",
        "origin_acc": 94.22,
        "origin_prompt": "Are the following two sentences entailment or not_entailment? Answer me with \"entailment\" or \"not_entailment\", just one word.",
        "atk_acc": 91.34,
        "atk_prompt": "Represented the aftermath two phrases entailment or not_entailment? Address me with \"entailment\" or \"not_entailment\", just one verb. ",
        "transfer_ori_acc": 88.08664259927798,
        "transfer_atk_acc": 83.39350180505414
    },
    {
        "dataset": "rte",
        "type": "role",
        "origin_acc": 93.86,
        "origin_prompt": "In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment':",
        "atk_acc": 90.61,
        "atk_prompt": "At your role as an entailment analysis tool, assess the affairs between the given condemnation and classify it as 'entailment' or 'not_entailment':",
        "transfer_ori_acc": 84.47653429602889,
        "transfer_atk_acc": 83.39350180505414
    },
    {
        "dataset": "sst2",
        "type": "task",
        "origin_acc": 96.33,
        "origin_prompt": "Review this statement and decide whether it has a 'positive' or 'negative' sentiment:",
        "atk_acc": 94.72,
        "atk_prompt": "Considering this speech and decide whether it has a 'positive' or 'negative' foreboding: ",
        "transfer_ori_acc": 95.06880733944955,
        "transfer_atk_acc": 94.26605504587155
    },
    {
        "dataset": "sst2",
        "type": "role",
        "origin_acc": 96.33,
        "origin_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'. Please classify:",
        "atk_acc": 92.89,
        "atk_prompt": "As a emotion classifier, detects whether the suite texts is 'positive' or 'negative'. Prayers classify: ",
        "transfer_ori_acc": 94.4954128440367,
        "transfer_atk_acc": 93.57798165137615
    },
    {
        "dataset": "wnli",
        "type": "task",
        "origin_acc": 80.28,
        "origin_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.",
        "atk_acc": 67.61,
        "atk_prompt": "Assess the connections between the afterward punishment and classifies it as 'entailment' or 'not_entailment'.",
        "transfer_ori_acc": 61.97183098591549,
        "transfer_atk_acc": 61.97183098591549
    },
    {
        "dataset": "wnli",
        "type": "role",
        "origin_acc": 78.87,
        "origin_prompt": "In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment':",
        "atk_acc": 71.83,
        "atk_prompt": "In your role as an entailment analysis tool, assess the coitus between the given sentencing and classify it as 'entailment' or 'not_entailment':",
        "transfer_ori_acc": 56.33802816901409,
        "transfer_atk_acc": 59.154929577464785
    },
    {
        "dataset": "mmlu",
        "type": "task",
        "origin_acc": 52.84,
        "origin_prompt": "Evaluate the multiple-choice question about {} and select the most fitting response from 'A', 'B', 'C', or 'D'.",
        "atk_acc": 32.09,
        "atk_prompt": "Evaluation the multiple-choice topic about {} and select the most splice rebuttal from 'A', 'B', 'C', or 'D'. ",
        "transfer_ori_acc": 44.50354609929078,
        "transfer_atk_acc": 41.843971631205676
    },
    {
        "dataset": "mmlu",
        "type": "role",
        "origin_acc": 53.19,
        "origin_prompt": "As a knowledgeable individual in {}, provide your response to the multiple-choice question by choosing 'A', 'B', 'C', or 'D'.",
        "atk_acc": 39.36,
        "atk_prompt": "As a knowledgeable everybody in {}, prescribes your rebuttal to the multiple-choice item by chooses 'A', 'B', 'C', or 'D'.",
        "transfer_ori_acc": 44.50354609929078,
        "transfer_atk_acc": 42.730496453900706
    },
    {
        "dataset": "squad_v2",
        "type": "task",
        "origin_acc": 89.99,
        "origin_prompt": "Find the correct answer in the context provided. If an answer cannot be found, please respond with 'unanswerable'.",
        "atk_acc": 46.83,
        "atk_prompt": "Find the correct rebuttal in the context offer. Until an answering cannot be found, please respond with 'unanswerable'.",
        "transfer_ori_acc": 87.54992770387507,
        "transfer_atk_acc": 86.58932164326902
    },
    {
        "dataset": "squad_v2",
        "type": "role",
        "origin_acc": 89.88,
        "origin_prompt": "As a proficient expert in the given context, search for the most relevant answer. If the answer cannot be found, respond by saying 'unanswerable'.",
        "atk_acc": 44.5,
        "atk_prompt": "As a clever whiz in the offered context, find for the most corresponding rebuttal. If the answer cannot exist discovering, answer by claim 'unanswerable'.",
        "transfer_ori_acc": 88.18023073417811,
        "transfer_atk_acc": 87.31659437054175
    },
    {
        "dataset": "iwslt",
        "type": "task",
        "origin_acc": 0.18,
        "origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}:",
        "atk_acc": 0.14,
        "atk_prompt": "In the feature of a language expert, perform a typewriters converts for the provided pity, changing it from {} to {}: ",
        "transfer_ori_acc": 0.14386835828767333,
        "transfer_atk_acc": 0.1043261414423813
    },
    {
        "dataset": "iwslt",
        "type": "role",
        "origin_acc": 0,
        "origin_prompt": "",
        "atk_acc": 100,
        "atk_prompt": "",
        "transfer_ori_acc": 0.03223192659122938,
        "transfer_atk_acc": 0.03223192659122938
    },
    {
        "dataset": "un_multi",
        "type": "task",
        "origin_acc": 0.32,
        "origin_prompt": "Translate the provided sentence from {} to {} while maintaining the original meaning and context:",
        "atk_acc": 0.25,
        "atk_prompt": "Translate the provided sentence from {} to {} while protections the original signify and background:",
        "transfer_ori_acc": 0.2678951646057538,
        "transfer_atk_acc": 0.23906375713121092
    },
    {
        "dataset": "un_multi",
        "type": "role",
        "origin_acc": 0.31,
        "origin_prompt": "In the capacity of a proficient translator, please change the following sentence from {} to {}:",
        "atk_acc": 0.26,
        "atk_prompt": "Towards the skills of a proficient performers, please evolving the following denounces from {} to {}: ",
        "transfer_ori_acc": 0.26592302056033257,
        "transfer_atk_acc": 0.23053164042025812
    },
    {
        "dataset": "math",
        "type": "task",
        "origin_acc": 14.37,
        "origin_prompt": "Determine the solution to this mathematical problem related to {}:",
        "atk_acc": 0.62,
        "atk_prompt": "Determine the address to this arithmetic issuing links to {}:",
        "transfer_ori_acc": 10.625,
        "transfer_atk_acc": 2.5
    },
    {
        "dataset": "math",
        "type": "role",
        "origin_acc": 15.0,
        "origin_prompt": "As a mathematics instructor, calculate the answer to the following problem related to {}:",
        "atk_acc": 0.62,
        "atk_prompt": "As a algebra schoolteacher, calculate the address to the following question linking to {}: ",
        "transfer_ori_acc": 11.875,
        "transfer_atk_acc": 2.5
    }
]