[
  {
    "dataset": "cola",
    "type": "task",
    "origin_acc": 86.4,
    "origin_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.",
    "atk_acc": 39.5,
    "atk_prompt": "Inspect the made condemning and select if it is grammatically healthier. Answers with either 'Acceptable' or 'Unacceptable'. ",
    "transfer_ori_acc": 77.7,
    "transfer_atk_acc": 76.3
  },
  {
    "dataset": "cola",
    "type": "role",
    "origin_acc": 86.6,
    "origin_prompt": "While performing grammar analysis, classify the grammar of the following sentence as 'acceptable' for correct grammar or 'unacceptable' for incorrect grammar.",
    "atk_acc": 73.0,
    "atk_prompt": "While performing grammar scans, classify the verbs of the afterward condemned as 'acceptable' for correctness grammar or 'unacceptable' for inappropriate vocabulary. ",
    "transfer_ori_acc": 75.7,
    "transfer_atk_acc": 74.0
  },
  {
    "dataset": "mrpc",
    "type": "task",
    "origin_acc": 86.52,
    "origin_prompt": "Compare the following sentences and determine if they share the same semantic meaning by responding with 'equivalent' or 'not_equivalent'.",
    "atk_acc": 83.58,
    "atk_prompt": "Compare the later judgements and defines if they exchange the same semantic sensation by answered with 'equivalent' or 'not_equivalent'. ",
    "transfer_ori_acc": 80.3921568627451,
    "transfer_atk_acc": 79.90196078431373
  },
  {
    "dataset": "mrpc",
    "type": "role",
    "origin_acc": 89.22,
    "origin_prompt": "As a sentence similarity evaluator, analyze the provided sentences and indicate if their meanings are 'equivalent' or 'not_equivalent'.",
    "atk_acc": 85.29,
    "atk_prompt": "As a sentence metaphor evaluator, analyze the brought priors and demonstrating if their meanings are 'equivalent' or 'not_equivalent'. ",
    "transfer_ori_acc": 79.16666666666666,
    "transfer_atk_acc": 81.86274509803921
  },
  {
    "dataset": "qnli",
    "type": "task",
    "origin_acc": 94.0,
    "origin_prompt": "Analyze the context and question, and indicate if the context entails the answer by choosing 'entailment' or 'not_entailment'.",
    "atk_acc": 54.2,
    "atk_prompt": "Explore the context and question, and suggest if the context involves the rebuttal by selected 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 87.3,
    "transfer_atk_acc": 83.5
  },
  {
    "dataset": "qnli",
    "type": "role",
    "origin_acc": 93.9,
    "origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
    "atk_acc": 61.5,
    "atk_prompt": "Because a semantic inquisitors, appraise whether the provided context alleges the rebuttal to the issuing and selector 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 87.3,
    "transfer_atk_acc": 85.1
  },
  {
    "dataset": "qqp",
    "type": "task",
    "origin_acc": 88.7,
    "origin_prompt": "Examine the following expressions and tell me if they are alike in meaning by using 'equivalent' or 'not_equivalent'.",
    "atk_acc": 85.9,
    "atk_prompt": "Examine the afterward words and saying me if they are uniformly in connotation by using 'equivalent' or 'not_equivalent'. ",
    "transfer_ori_acc": 86.7,
    "transfer_atk_acc": 85.39999999999999
  },
  {
    "dataset": "qqp",
    "type": "role",
    "origin_acc": 90.0,
    "origin_prompt": "As a tool for determining question equivalence, review the questions and categorize their similarity as either 'equivalent' or 'not_equivalent'.",
    "atk_acc": 87.6,
    "atk_prompt": "Because a tool for determining question equivalence, examination the queries and classifications their likeness as either 'equivalent' or 'not_equivalent'. ",
    "transfer_ori_acc": 86.3,
    "transfer_atk_acc": 86.8
  },
  {
    "dataset": "rte",
    "type": "task",
    "origin_acc": 93.5,
    "origin_prompt": "Identify whether the given pair of sentences demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
    "atk_acc": 92.42,
    "atk_prompt": "Deciding whether the given pair of sentencing demonstrates entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 84.83754512635379,
    "transfer_atk_acc": 85.1985559566787
  },
  {
    "dataset": "rte",
    "type": "role",
    "origin_acc": 93.5,
    "origin_prompt": "As a tool for determining entailment relationships, review the two statements and categorize their connection as either 'entailment' or 'not_entailment'.",
    "atk_acc": 91.34,
    "atk_prompt": "Because a device for determining entailment relationships, review the two words and categorize their connection as either 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 83.39350180505414,
    "transfer_atk_acc": 81.94945848375451
  },
  {
    "dataset": "sst2",
    "type": "task",
    "origin_acc": 96.79,
    "origin_prompt": "Determine the overall sentiment of this sentence, categorizing it as 'positive' or 'negative'.",
    "atk_acc": 93.46,
    "atk_prompt": "Determine the plenary senses of this condemning, categorizing it as 'positive' or 'negative'. ",
    "transfer_ori_acc": 94.38073394495413,
    "transfer_atk_acc": 94.38073394495413
  },
  {
    "dataset": "sst2",
    "type": "role",
    "origin_acc": 96.56,
    "origin_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.",
    "atk_acc": 95.18,
    "atk_prompt": "As a emotion classifier, ascertain whether the suite wording is 'positive' or 'negative'. ",
    "transfer_ori_acc": 94.4954128440367,
    "transfer_atk_acc": 94.61009174311926
  },
  {
    "dataset": "wnli",
    "type": "task",
    "origin_acc": 77.46,
    "origin_prompt": "Assess the connection between the following sentences and classify it as 'entailment' or 'not_entailment'.",
    "atk_acc": 73.24,
    "atk_prompt": "Appraisal the attach between the suite condemns and classify it as 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 64.7887323943662,
    "transfer_atk_acc": 66.19718309859155
  },
  {
    "dataset": "wnli",
    "type": "role",
    "origin_acc": 76.06,
    "origin_prompt": "In your role as an entailment analysis tool, assess the relationship between the given sentences and classify it as 'entailment' or 'not_entailment'.",
    "atk_acc": 73.24,
    "atk_prompt": "In your role as an entailment analysis tool, assess the reports between the given sentences and classify it as 'entailment' or 'not_entailment'. ",
    "transfer_ori_acc": 57.74647887323944,
    "transfer_atk_acc": 61.97183098591549
  },
  {
    "dataset": "mmlu",
    "type": "task",
    "origin_acc": 54.08,
    "origin_prompt": "For the multiple-choice question related to {}, please choose the most accurate answer from 'A', 'B', 'C', or 'D'.",
    "atk_acc": 24.82,
    "atk_prompt": "Within the multiple-choice item relevant to {}, please decides the most careful rebuttal from 'A', 'B', 'C', or 'D'. ",
    "transfer_ori_acc": 45.56737588652482,
    "transfer_atk_acc": 42.5531914893617
  },
  {
    "dataset": "mmlu",
    "type": "role",
    "origin_acc": 53.72,
    "origin_prompt": "As an expert in {}, respond to the following multiple-choice question by selecting 'A', 'B', 'C', or 'D'.",
    "atk_acc": 51.95,
    "atk_prompt": "As an whiz in {}, respond to the following multiple-choice question by decided 'A', 'B', 'C', or 'D'.",
    "transfer_ori_acc": 46.98581560283688,
    "transfer_atk_acc": 45.39007092198582
  },
  {
    "dataset": "squad_v2",
    "type": "task",
    "origin_acc": 91.05,
    "origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.",
    "atk_acc": 55.32,
    "atk_prompt": "Ask subtract the most fitting rebuttal from the context. If there isn't an rebuttal in the context, use 'unanswerable'.",
    "transfer_ori_acc": 87.04992770387507,
    "transfer_atk_acc": 87.31659437054175
  },
  {
    "dataset": "squad_v2",
    "type": "role",
    "origin_acc": 90.55,
    "origin_prompt": "Drawing upon your expertise in the context, determine the most suitable answer. If an answer isn't available, state 'unanswerable'.",
    "atk_acc": 55.41,
    "atk_prompt": "Drawing upon your acquaintances in the context, identify the most suitable rebuttal. If an answer isn't available, state 'unanswerable'.",
    "transfer_ori_acc": 87.04992770387507,
    "transfer_atk_acc": 85.42265497660233
  },
  {
    "dataset": "iwslt",
    "type": "task",
    "origin_acc": 0.27,
    "origin_prompt": "Accurately translate the sentence from {} to {}, ensuring the meaning remains intact.",
    "atk_acc": 0.19,
    "atk_prompt": "Rightly translate the convicts from {} to {}, insurance the signify sojourn unaffected. ",
    "transfer_ori_acc": 0.2195336520895316,
    "transfer_atk_acc": 0.2204127497581867
  },
  {
    "dataset": "iwslt",
    "type": "role",
    "origin_acc": 0.27,
    "origin_prompt": "Assuming the responsibilities of a professional translator, translate the subsequent text passage from {} to {}.",
    "atk_acc": 0.19,
    "atk_prompt": "Adopt the indebted of a vocational artist, translate the subsequent laws transition from {} to {}. ",
    "transfer_ori_acc": 0.22030199520456395,
    "transfer_atk_acc": 0.2233226772101081
  },
  {
    "dataset": "un_multi",
    "type": "task",
    "origin_acc": 0.36,
    "origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}.",
    "atk_acc": 0.35,
    "atk_prompt": "Perform a computer translate for the tabled sentence, changing it from {} to {}. ",
    "transfer_ori_acc": 0.30870734220870066,
    "transfer_atk_acc": 0.3112750115865155
  },
  {
    "dataset": "un_multi",
    "type": "role",
    "origin_acc": 0.36,
    "origin_prompt": "As a skilled linguist, please convert the following sentence from {} to {}.",
    "atk_acc": 0.35,
    "atk_prompt": "As a skilled linguist, please translate the following punishment from {} to {}. ",
    "transfer_ori_acc": 0.3046140486156621,
    "transfer_atk_acc": 0.30805246751439636
  },
  {
    "dataset": "math",
    "type": "task",
    "origin_acc": 16.25,
    "origin_prompt": "Calculate the answer to the following math query about {}.",
    "atk_acc": 10.0,
    "atk_prompt": "Estimate the rebuttal to the following math view about {}. ",
    "transfer_ori_acc": 16.25,
    "transfer_atk_acc": 15.625
  },
  {
    "dataset": "math",
    "type": "role",
    "origin_acc": 16.25,
    "origin_prompt": "Act as a math expert and compute the result of the following mathematical task about {}.",
    "atk_acc": 13.75,
    "atk_prompt": "Act as a math expert and estimate the conclusion of the following mathematical task about {}. ",
    "transfer_ori_acc": 15.625,
    "transfer_atk_acc": 15.625
  }
]