{
  "results": {
    "multimedqa": {
      "alias": "stem",
      "acc,none": 0.4063875088715401,
      "acc_stderr,none": 0.07686893603849891,
      "acc_norm,none": 0.37056429084523906,
      "acc_norm_stderr,none": 0.00010241543372288454
    },
    "medmcqa": {
      "acc,none": 0.3691130767391824,
      "acc_stderr,none": 0.007462141229317634,
      "acc_norm,none": 0.3691130767391824,
      "acc_norm_stderr,none": 0.007462141229317634,
      "alias": " - medmcqa"
    },
    "medqa_4options": {
      "acc,none": 0.373134328358209,
      "acc_stderr,none": 0.013560518364022962,
      "acc_norm,none": 0.373134328358209,
      "acc_norm_stderr,none": 0.013560518364022962,
      "alias": " - medqa_4options"
    },
    "mmlu_anatomy": {
      "alias": " - anatomy (mmlu)",
      "acc,none": 0.4148148148148148,
      "acc_stderr,none": 0.04256193767901407
    },
    "mmlu_clinical_knowledge": {
      "alias": " - clinical_knowledge (mmlu)",
      "acc,none": 0.4830188679245283,
      "acc_stderr,none": 0.030755120364119905
    },
    "mmlu_college_biology": {
      "alias": " - college_biology (mmlu)",
      "acc,none": 0.4722222222222222,
      "acc_stderr,none": 0.04174752578923185
    },
    "mmlu_college_medicine": {
      "alias": " - college_medicine (mmlu)",
      "acc,none": 0.4161849710982659,
      "acc_stderr,none": 0.03758517775404947
    },
    "mmlu_medical_genetics": {
      "alias": " - medical_genetics (mmlu)",
      "acc,none": 0.6,
      "acc_stderr,none": 0.04923659639173309
    },
    "mmlu_professional_medicine": {
      "alias": " - professional_medicine (mmlu)",
      "acc,none": 0.43014705882352944,
      "acc_stderr,none": 0.030074971917302875
    },
    "pubmedqa": {
      "acc,none": 0.686,
      "acc_stderr,none": 0.020776701920308997,
      "alias": " - pubmedqa"
    }
  },
  "groups": {
    "multimedqa": {
      "alias": "stem",
      "acc,none": 0.4063875088715401,
      "acc_stderr,none": 0.07686893603849891,
      "acc_norm,none": 0.37056429084523906,
      "acc_norm_stderr,none": 0.00010241543372288454
    }
  },
  "configs": {
    "medmcqa": {
      "task": "medmcqa",
      "dataset_path": "medmcqa",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "validation",
      "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Question: <question>\n Choices:\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n Answer:\n \"\"\"\n choices = [doc[\"opa\"], doc[\"opb\"], doc[\"opc\"], doc[\"opd\"]]\n option_choices = {'A': choices[0], 'B': choices[1], 'C': choices[2], 'D': choices[3]}\n\n prompt = \"Question: \" + doc[\"question\"] + \"\\nChoices:\\n\"\n for choice, option in option_choices.items():\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"Answer:\"\n return prompt\n",
      "doc_to_target": "cop",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{question}}"
    },
    "medqa_4options": {
      "task": "medqa_4options",
      "dataset_path": "GBaker/MedQA-USMLE-4-options-hf",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n option_choices = {'A': doc[\"ending0\"], 'B': doc[\"ending1\"], 'C': doc[\"ending2\"], 'D': doc[\"ending3\"]}\n answers = \"\".join((f\"{k}. {v}\\n\") for k, v in option_choices.items())\n return f\"Question: {doc['sent1']}\\n{answers}Answer:\"\n",
      "doc_to_target": "def doc_to_target(doc) -> int:\n return doc[\"label\"]\n",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false
    },
    "mmlu_anatomy": {
      "task": "mmlu_anatomy",
      "task_alias": "anatomy (mmlu)",
      "group": "multimedqa",
      "group_alias": "stem",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "anatomy",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "mmlu_clinical_knowledge": {
      "task": "mmlu_clinical_knowledge",
      "task_alias": "clinical_knowledge (mmlu)",
      "group": "multimedqa",
      "group_alias": "other",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "clinical_knowledge",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "mmlu_college_biology": {
      "task": "mmlu_college_biology",
      "task_alias": "college_biology (mmlu)",
      "group": "multimedqa",
      "group_alias": "stem",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "college_biology",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "mmlu_college_medicine": {
      "task": "mmlu_college_medicine",
      "task_alias": "college_medicine (mmlu)",
      "group": "multimedqa",
      "group_alias": "other",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "college_medicine",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "mmlu_medical_genetics": {
      "task": "mmlu_medical_genetics",
      "task_alias": "medical_genetics (mmlu)",
      "group": "multimedqa",
      "group_alias": "other",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "medical_genetics",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "mmlu_professional_medicine": {
      "task": "mmlu_professional_medicine",
      "task_alias": "professional_medicine (mmlu)",
      "group": "multimedqa",
      "group_alias": "other",
      "dataset_path": "hails/mmlu_no_train",
      "dataset_name": "professional_medicine",
      "test_split": "test",
      "fewshot_split": "dev",
      "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
      "doc_to_target": "answer",
      "doc_to_choice": [
        "A",
        "B",
        "C",
        "D"
      ],
      "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 0.0
      }
    },
    "pubmedqa": {
      "task": "pubmedqa",
      "dataset_path": "bigbio/pubmed_qa",
      "dataset_name": "pubmed_qa_labeled_fold0_source",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "def doc_to_text(doc) -> str:\n ctxs = \"\\n\".join(doc[\"CONTEXTS\"])\n return \"Abstract: {}\\nQuestion: {}\\nAnswer:\".format(\n ctxs,\n doc[\"QUESTION\"],\n )\n",
      "doc_to_target": "final_decision",
      "doc_to_choice": [
        "yes",
        "no",
        "maybe"
      ],
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 1.0
      }
    }
  },
  "versions": {
    "medmcqa": "Yaml",
    "medqa_4options": "Yaml",
    "mmlu_anatomy": 0.0,
    "mmlu_clinical_knowledge": 0.0,
    "mmlu_college_biology": 0.0,
    "mmlu_college_medicine": 0.0,
    "mmlu_medical_genetics": 0.0,
    "mmlu_professional_medicine": 0.0,
    "multimedqa": "N/A",
    "pubmedqa": 1.0
  },
  "n-shot": {
    "medmcqa": 0,
    "medqa_4options": 0,
    "mmlu_anatomy": 0,
    "mmlu_clinical_knowledge": 0,
    "mmlu_college_biology": 0,
    "mmlu_college_medicine": 0,
    "mmlu_medical_genetics": 0,
    "mmlu_professional_medicine": 0,
    "multimedqa": 0,
    "pubmedqa": 0
  },
  "config": {
    "model": "hf",
    "model_args": "pretrained=bigscience/bloomz-7b1-mt,dtype=bfloat16,trust_remote_code=True",
    "batch_size": "auto",
    "batch_sizes": [
      4
    ],
    "device": null,
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "62513ca"
}