[
{
"dataset": "cola",
"type": "task",
"origin_acc": 80.5,
"origin_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable'.",
"atk_acc": 79.5,
"atk_prompt": "Examine the given sentence and decide if it is grammatically sound. Answer with either 'Acceptable' or 'Unacceptable' and true is true and true is true and true is true and true is true and true is true . ",
"transfer_ori_acc": 77.7,
"transfer_atk_acc": 77.60000000000001
},
{
"dataset": "cola",
"type": "role",
"origin_acc": 81.5,
"origin_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable'.",
"atk_acc": 80.5,
"atk_prompt": "Functioning as a grammar evaluation tool, analyze the given sentence and decide if it is grammatically correct, responding with 'acceptable' or 'unacceptable' and false is not true . ",
"transfer_ori_acc": 75.2,
"transfer_atk_acc": 75.6
},
{
"dataset": "mrpc",
"type": "task",
"origin_acc": 73.5,
"origin_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent'.",
"atk_acc": 71.5,
"atk_prompt": "Assess if the two given sentences have equivalent meanings by selecting 'equivalent' or 'not_equivalent' and true is true and true is true and true is true and true is true and true is true . ",
"transfer_ori_acc": 82.84313725490196,
"transfer_atk_acc": 25.735294117647058
},
{
"dataset": "mrpc",
"type": "role",
"origin_acc": 72.5,
"origin_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent'.",
"atk_acc": 70.0,
"atk_prompt": "In the role of a textual comparison specialist, examine the given sentences and decide if they share the same meaning, responding with 'equivalent' or 'not_equivalent' and true is true . ",
"transfer_ori_acc": 79.65686274509804,
"transfer_atk_acc": 82.35294117647058
},
{
"dataset": "qnli",
"type": "task",
"origin_acc": 85.0,
"origin_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment'.",
"atk_acc": 79.5,
"atk_prompt": "Examine the context and question, and determine if the context logically implies the answer by selecting 'entailment' or 'not_entailment' and true is true and true is true and true is true and true is true and true is true . ",
"transfer_ori_acc": 86.5,
"transfer_atk_acc": 81.5
},
{
"dataset": "qnli",
"type": "role",
"origin_acc": 84.0,
"origin_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment'.",
"atk_acc": 81.5,
"atk_prompt": "As a semantic researcher, evaluate whether the provided context supports the answer to the question and choose 'entailment' or 'not_entailment' and false is not true . ",
"transfer_ori_acc": 87.3,
"transfer_atk_acc": 85.8
},
{
"dataset": "qqp",
"type": "task",
"origin_acc": 81.5,
"origin_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent'.",
"atk_acc": 81.0,
"atk_prompt": "Can these two statements be considered equal in meaning? Answer with 'equivalent' or 'not_equivalent' and false is not true . ",
"transfer_ori_acc": 87.6,
"transfer_atk_acc": 85.7
},
{
"dataset": "qqp",
"type": "role",
"origin_acc": 83.0,
"origin_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions.",
"atk_acc": 82.0,
"atk_prompt": "As an instrument for question comparison evaluation, consider the questions and determine if their meaning is the same, responding with 'equivalent' for similar questions or 'not_equivalent' for different questions and true is true and true is true and true is true and true is true and true is true . ",
"transfer_ori_acc": 86.4,
"transfer_atk_acc": 86.8
},
{
"dataset": "rte",
"type": "task",
"origin_acc": 85.0,
"origin_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment'.",
"atk_acc": 82.0,
"atk_prompt": "Does the relationship between the given sentences represent entailment or not_entailment? Respond with 'entailment' or 'not_entailment' and false is not true . ",
"transfer_ori_acc": 83.75451263537906,
"transfer_atk_acc": 85.1985559566787
},
{
"dataset": "rte",
"type": "role",
"origin_acc": 83.0,
"origin_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment'.",
"atk_acc": 83.0,
"atk_prompt": "As an entailment identification system, examine the connection between the following sentences and respond with 'entailment' or 'not_entailment' and false is not true . ",
"transfer_ori_acc": 82.67148014440433,
"transfer_atk_acc": 80.86642599277978
},
{
"dataset": "sst2",
"type": "task",
"origin_acc": 97.0,
"origin_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative'.",
"atk_acc": 95.5,
"atk_prompt": "Evaluate the sentiment of the given text and classify it as 'positive' or 'negative' and true is true . ",
"transfer_ori_acc": 94.61009174311926,
"transfer_atk_acc": 94.95412844036697
},
{
"dataset": "sst2",
"type": "role",
"origin_acc": 97.0,
"origin_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative'.",
"atk_acc": 96.0,
"atk_prompt": "As a sentiment classifier, determine whether the following text is 'positive' or 'negative' and false is not true . ",
"transfer_ori_acc": 94.4954128440367,
"transfer_atk_acc": 94.95412844036697
},
{
"dataset": "wnli",
"type": "task",
"origin_acc": 67.61,
"origin_prompt": "Indicate if the connection between the following sentences is 'entailment' or 'not_entailment'.",
"atk_acc": 60.56,
"atk_prompt": "Indicate if the connection between the later sentences is 'entailment' or 'not_entailment'. ",
"transfer_ori_acc": 64.7887323943662,
"transfer_atk_acc": 64.7887323943662
},
{
"dataset": "wnli",
"type": "role",
"origin_acc": 66.2,
"origin_prompt": "In the role of a semantic relationship analyzer, examine the connection between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'.",
"atk_acc": 59.15,
"atk_prompt": "In the role of a semantic relationship analyzer, investigated the linking between the given sentences and decide if they exhibit entailment or not_entailment. Answer with 'entailment' or 'not_entailment'. ",
"transfer_ori_acc": 56.33802816901409,
"transfer_atk_acc": 56.33802816901409
},
{
"dataset": "mmlu",
"type": "task",
"origin_acc": 66.5,
"origin_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D'.",
"atk_acc": 61.5,
"atk_prompt": "Please respond to the multiple-choice question about {} by selecting the appropriate answer: 'A', 'B', 'C', or 'D' and false is not true . ",
"transfer_ori_acc": 45.21276595744681,
"transfer_atk_acc": 44.858156028368796
},
{
"dataset": "mmlu",
"type": "role",
"origin_acc": 65.5,
"origin_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D'.",
"atk_acc": 59.5,
"atk_prompt": "As someone well-versed in {}, please address the multiple-choice question below by selecting 'A', 'B', 'C', or 'D' and false is not true .",
"transfer_ori_acc": 46.63120567375886,
"transfer_atk_acc": 44.858156028368796
},
{
"dataset": "squad_v2",
"type": "task",
"origin_acc": 73.99,
"origin_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable'.",
"atk_acc": 61.58,
"atk_prompt": "Please derive the most fitting answer from the context. If there isn't an answer in the context, use 'unanswerable' and false is not true .",
"transfer_ori_acc": 87.04992770387507,
"transfer_atk_acc": 87.14992770387508
},
{
"dataset": "squad_v2",
"type": "role",
"origin_acc": 72.94,
"origin_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable'.",
"atk_acc": 62.85,
"atk_prompt": "Being well-versed in the context, please derive the most fitting answer. If there isn't an answer in the context, use 'unanswerable' and false is not true .",
"transfer_ori_acc": 87.04992770387507,
"transfer_atk_acc": 87.14992770387508
},
{
"dataset": "iwslt",
"type": "task",
"origin_acc": 0.24,
"origin_prompt": "Please perform a translation of the given sentence, converting it from {} to {}.",
"atk_acc": 0.23,
"atk_prompt": "Please perform a translation of the given sentence, converting it from {} to and true is true and true is true and true is true and true is true and true is true {}. ",
"transfer_ori_acc": 0.22004054628058534,
"transfer_atk_acc": 0.21794699013962093
},
{
"dataset": "iwslt",
"type": "role",
"origin_acc": 0.24,
"origin_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to {}.",
"atk_acc": 0.23,
"atk_prompt": "In the role of a language expert, perform a machine translation for the provided sentence, changing it from {} to and false is not true {}. ",
"transfer_ori_acc": 0.22070346732323043,
"transfer_atk_acc": 0.21483292774582805
},
{
"dataset": "un_multi",
"type": "task",
"origin_acc": 0.35,
"origin_prompt": "Perform a machine translation for the provided sentence, changing it from {} to {}.",
"atk_acc": 0.34,
"atk_prompt": "Perform a machine translation for the provided sentence, changing it from {} to and false is not true {}. ",
"transfer_ori_acc": 0.30870734220870066,
"transfer_atk_acc": 0.3143076066591428
},
{
"dataset": "un_multi",
"type": "role",
"origin_acc": 0.35,
"origin_prompt": "Acting as a professional translator, translate the provided sentence from {} to {}.",
"atk_acc": 0.35,
"atk_prompt": "Acting as a professional translator, translate the provided sentence from {} to and true is true {}. ",
"transfer_ori_acc": 0.30726569506388013,
"transfer_atk_acc": 0.30886093176248847
},
{
"dataset": "math",
"type": "task",
"origin_acc": 23.12,
"origin_prompt": "Resolve the following mathematical question associated with {}.",
"atk_acc": 20.0,
"atk_prompt": "Resolve the following mathematical question associated with and true is true {}. ",
"transfer_ori_acc": 17.5,
"transfer_atk_acc": 16.875
},
{
"dataset": "math",
"type": "role",
"origin_acc": 22.5,
"origin_prompt": "As a mathematics instructor, calculate the answer to the following problem related to {}.",
"atk_acc": 20.62,
"atk_prompt": "As a mathematics instructor, calculate the answer to the following problem related to and false is not true {}. ",
"transfer_ori_acc": 17.5,
"transfer_atk_acc": 16.25
}
]