run_id | timestamp_utc | timestamp_day_hour_utc | model_name_or_path | unitxt_card | unitxt_recipe | quantization_type | quantization_bit_count | inference_runtime_s | generation_args | model_args | inference_engine | packages_versions | scores | num_gpu | device |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6ffa11418c24d5ad71ee147d72b0143549942e67e01b147c85eb762db54f7404 | 1,736,452,788,278 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.347738 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.38, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
9a9514d13f0d1d27d9b5b59a3d38b26760afd67921d83f57ed29ecc2887c556c | 1,736,452,793,287 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.303906 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.8, 'score_ci_low': 0.63, 'num_of_instances': 100} | 1 | a100_80gb |
732287a6a810ee5cbcabb8cc9ad74dbe4b26de090dc8aec432ef60e1d1894b98 | 1,736,452,796,997 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.117088 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.35, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.45, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
67a2dcb7da645926d76f11657eb678401da2d2526d365173a855904f70b246e0 | 1,736,452,738,023 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.697294 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.31, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.31, 'score_ci_high': 0.4, 'score_ci_low': 0.23, 'num_of_instances': 100} | 1 | a100_80gb |
55a25f3403e35cce6bc6af9cc7940aa335c9df88939c4f907c6a8887e88c2ea9 | 1,736,452,743,970 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.hellaswag | card=cards.hellaswag,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.HellaSwag.MultipleChoiceTemplatesInstructionsStandard.enumerator_capitals_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.257932 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.66, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
235f108a2d5375a92fe96ef5c56993404ac0f11050c32bcafc0ccc5d2938be83 | 1,736,452,748,421 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_physics | card=cards.mmlu.high_school_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.823301 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.14, 'accuracy_ci_low': 0.08, 'accuracy_ci_high': 0.21, 'score_name': 'accuracy', 'score': 0.14, 'score_ci_high': 0.21, 'score_ci_low': 0.08, 'num_of_instances': 100} | 1 | a100_80gb |
f273ee1d60f24df07cf0a3622902ad541abb6cefb4efd726d84d77d354022ae9 | 1,736,452,752,704 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.moral_scenarios | card=cards.mmlu.moral_scenarios,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.696296 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.33, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.33, 'score_ci_high': 0.42, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
4f40f9d53161c0aac8d94890cc43d104c0ec2a3a9541dc83765ddc768ac23bc2 | 1,736,452,756,393 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.marketing | card=cards.mmlu.marketing,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.116687 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.7, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.7, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
37a2b278c33471fea2a9bbec163d6b68ea8e457c436268b12e25f2423e74dc67 | 1,736,452,761,992 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.other | card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.253184 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37364459710577874, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.37364459710577874, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
ae6c49f75d89f9c87b838ff41ce7f12f69d358a107398f5d2a7b734e984954d3 | 1,736,452,769,324 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.620578 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.61, 'num_of_instances': 100} | 1 | a100_80gb |
ef1d888d10be6db04e141c43ae2a86ac4c86fe29b88568a7166ea4c9213f3855 | 1,736,452,773,716 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.564801 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.29, 'accuracy_ci_low': 0.21, 'accuracy_ci_high': 0.38, 'score_name': 'accuracy', 'score': 0.29, 'score_ci_high': 0.38, 'score_ci_low': 0.21, 'num_of_instances': 100} | 1 | a100_80gb |
1d18a89fd235c2973ffab18f13903159e2b7ee95aa6921299a90c9059772bd9e | 1,736,452,781,158 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.miscellaneous | card=cards.mmlu.miscellaneous,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.859958 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.63, 'num_of_instances': 100} | 1 | a100_80gb |
231dc2097114b0e759fdfbbb6cf17f4de22ccbe05476db378ade638cb29378fd | 1,736,452,786,531 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSACould.enumerator_roman_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.690829 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.35, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.35, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
06c44f4bfa4355ff11a629ea1ab25eb8776bdd3cf266307e89fc1b2ceb469bdc | 1,736,452,703,962 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.692634 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.2, 'accuracy_ci_low': 0.13, 'accuracy_ci_high': 0.29, 'score_name': 'accuracy', 'score': 0.2, 'score_ci_high': 0.29, 'score_ci_low': 0.13, 'num_of_instances': 100} | 1 | a100_80gb |
42311cc03b31e23f085c0f4430cf237b815b58641eef9de8368ce931efe6f3aa | 1,736,452,734,251 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 28.658253 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.74, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
c92a424e68f5c6a41dadc881470bde5b02b0fbacf4006e5a1f42ed2d29e3bc51 | 1,736,452,741,208 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.conceptual_physics | card=cards.mmlu.conceptual_physics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.242481 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
3cf2e8ab0f4b76c34e798857187dece58bda8ff4ce11a6f7569e6294aad6455a | 1,736,452,745,401 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.679791 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
3853705df30f043df76f7d3e7384a5d217af49dbf5208181d777d06689d586ec | 1,736,452,753,822 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.elementary_mathematics | card=cards.mmlu.elementary_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.310651 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.24, 'accuracy_ci_high': 0.42, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.42, 'score_ci_low': 0.24, 'num_of_instances': 100} | 1 | a100_80gb |
7ed3f3335e9fdbc3138ce69f590785a3d83b10e8566d96ca698a3d2eb2453dfa | 1,736,452,758,480 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_greek_choicesSeparator_comma_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.849396 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.8, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
63516b846970ef8e10e15079388e07fc898880063ece6fdd827676000ba6c2ab | 1,736,452,765,183 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.world_religions | card=cards.mmlu.world_religions,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.213439 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.77, 'accuracy_ci_low': 0.68, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.77, 'score_ci_high': 0.85, 'score_ci_low': 0.68, 'num_of_instances': 100} | 1 | a100_80gb |
a1349f77b94fb287b9f1252792054d1168354206e9f09b5f7498b44561d600d0 | 1,736,452,771,033 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.computer_science | card=cards.mmlu_pro.computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.185964 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.23, 'accuracy_ci_low': 0.16, 'accuracy_ci_high': 0.3103932477410149, 'score_name': 'accuracy', 'score': 0.23, 'score_ci_high': 0.3103932477410149, 'score_ci_low': 0.16, 'num_of_instances': 100} | 1 | a100_80gb |
3487eb2a7e755966af95333274766747bd6914097cccff2479fd257173b05d40 | 1,736,452,777,375 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.6373 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.75, 'accuracy_ci_low': 0.65, 'accuracy_ci_high': 0.83, 'score_name': 'accuracy', 'score': 0.75, 'score_ci_high': 0.83, 'score_ci_low': 0.65, 'num_of_instances': 100} | 1 | a100_80gb |
b9d92cc32e106fb00ae8178f4b8a133c013f410a305334e465033a3057626f94 | 1,736,452,784,565 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.826671 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
84723e1fd4abbcef33ecba44c1b4ec5a22650cacc1b61b75e25b25730db23947 | 1,736,452,691,242 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.523775 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.4, 'accuracy_ci_low': 0.31, 'accuracy_ci_high': 0.5, 'score_name': 'accuracy', 'score': 0.4, 'score_ci_high': 0.5, 'score_ci_low': 0.31, 'num_of_instances': 100} | 1 | a100_80gb |
91d28fd23786fe017d568619005cac64276cb1e42b71a40a55f8cd7718310e86 | 1,736,452,704,137 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.301933 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.57, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.57, 'score_ci_high': 0.65, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
b984d3d471cff7ee7308a063807c2e38f320aa8e5612040c661a4281ba323a39 | 1,736,452,713,667 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.739143 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.35, 'accuracy_ci_low': 0.26, 'accuracy_ci_high': 0.44, 'score_name': 'accuracy', 'score': 0.35, 'score_ci_high': 0.44, 'score_ci_low': 0.26, 'num_of_instances': 100} | 1 | a100_80gb |
3448ae226810e345899bbee8609f583b90065ac720f8418f74d842522dfb7e9d | 1,736,452,722,290 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.831136 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.49, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.69, 'score_ci_low': 0.49, 'num_of_instances': 100} | 1 | a100_80gb |
8ab04cf3ea329cf26329698f965e9e6201aa49dee15c2a65e5d1da14f1c111da | 1,736,452,726,942 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.280304 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.55, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.73, 'score_ci_low': 0.55, 'num_of_instances': 100} | 1 | a100_80gb |
113cacfc0d37df13e2bf5f4b2d92bfcf91b9cf4a69b820bcbb876269d1be0b20 | 1,736,452,733,990 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.467945 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
219fb7071feac6f107786ec9e93f27d15830ee11bd07f7a75507535a86255770 | 1,736,452,746,942 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.569208 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.71, 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.79, 'score_name': 'accuracy', 'score': 0.71, 'score_ci_high': 0.79, 'score_ci_low': 0.62, 'num_of_instances': 100} | 1 | a100_80gb |
bdc46f7a811addd3973089a699abbe32de79c7056a0ef6a7b71f7cf965aca73b | 1,736,452,739,706 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.409825 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.18, 'accuracy_ci_low': 0.11, 'accuracy_ci_high': 0.26, 'score_name': 'accuracy', 'score': 0.18, 'score_ci_high': 0.26, 'score_ci_low': 0.11, 'num_of_instances': 100} | 1 | a100_80gb |
5995367922473cf088578cc6accb22789fc1df75281f92173a2f948138a71a2a | 1,736,452,760,607 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.chemistry | card=cards.mmlu_pro.chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.276021 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.15, 'accuracy_ci_low': 0.09, 'accuracy_ci_high': 0.23, 'score_name': 'accuracy', 'score': 0.15, 'score_ci_high': 0.23, 'score_ci_low': 0.09, 'num_of_instances': 100} | 1 | a100_80gb |
a9af303365a6d9c39b4660f3177c814007496a3f72ca15eb0b53060a5df2dd9e | 1,736,452,781,481 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 18.944517 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.53, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.53, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
8102463342ca934497044f5d277d80c41a51455df17be34ba6c7e1e207996c50 | 1,736,452,703,347 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.philosophy | card=cards.mmlu_pro.philosophy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.325395 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
d350b365df91a30c110d6f3c57a8478c95b9a346aac89e0a3d13f6c14767d8fd | 1,736,452,709,265 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_space_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.511444 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.25, 'accuracy_ci_low': 0.17, 'accuracy_ci_high': 0.34, 'score_name': 'accuracy', 'score': 0.25, 'score_ci_high': 0.34, 'score_ci_low': 0.17, 'num_of_instances': 100} | 1 | a100_80gb |
21d274eb6e79054682c8fe987fea8b53a2293ede33c34d12fe49161f0a2bfe09 | 1,736,452,713,572 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.652309 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.47, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.47, 'score_ci_low': 0.28, 'num_of_instances': 100} | 1 | a100_80gb |
823d1cf3daa28560250d62623a4c80493ff1f02dbc0a1f2bced6fe78de1e14dc | 1,736,452,717,110 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.management | card=cards.mmlu.management,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.017728 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.73, 'score_ci_low': 0.54, 'num_of_instances': 100} | 1 | a100_80gb |
90efbc1dad5a9f6f7fca4a1b2f333cc32baf607c5330bcac5faa5711e0be1c59 | 1,736,452,726,282 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.business_ethics | card=cards.mmlu.business_ethics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.653665 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.5244540970302355, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.71, 'score_ci_low': 0.5244540970302355, 'num_of_instances': 100} | 1 | a100_80gb |
11ec8586673f2d25230d1f0ac0a5898b9d93a18967d7a1971c61c810ee1aff82 | 1,736,452,731,067 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.863693 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.36, 'accuracy_ci_low': 0.27, 'accuracy_ci_high': 0.45, 'score_name': 'accuracy', 'score': 0.36, 'score_ci_high': 0.45, 'score_ci_low': 0.27, 'num_of_instances': 100} | 1 | a100_80gb |
1de8f0fe3b5d8a00a18a925843b9a3d9b3e0db2b5e8c021af1cd108a41eaa124 | 1,736,452,748,734 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_world_history | card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 16.552771 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.73, 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_name': 'accuracy', 'score': 0.73, 'score_ci_high': 0.81, 'score_ci_low': 0.64, 'num_of_instances': 100} | 1 | a100_80gb |
5f14b3b0c4a282a5dbaf3f98e87dedf901a34645da9e5f6281a06bc7cf585cc3 | 1,736,452,754,267 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_numbers_choicesSeparator_newline_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.268896 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
649a0657e5c01eb2fdb3854051d0f1b68e4c005e7f2be2ac76f3c2cd5f83d7cf | 1,736,452,760,762 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 5.969304 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
33443cca3ee9eb88de3937aeb509f7caf18d88aec272cf9e094ff7873967b8d0 | 1,736,452,780,673 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_greek_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 19.278426 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.53, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.53, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
635b25434f861390b84622216a2f7c09d7c68512400a0d9204978d3505906ad7 | 1,736,452,707,732 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.math | card=cards.mmlu_pro.math,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 12.644429 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.19, 'accuracy_ci_low': 0.12, 'accuracy_ci_high': 0.27, 'score_name': 'accuracy', 'score': 0.19, 'score_ci_high': 0.27, 'score_ci_low': 0.12, 'num_of_instances': 100} | 1 | a100_80gb |
203561c65f83cc8593361d61e66a5054e6fb2c27ad3594269a8071d146f97e55 | 1,736,452,715,855 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_european_history | card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.464317 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.55, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
6572c6865c41ae28bc606575d63acd5052bf54d545dec77e86160e3b6bb1df57 | 1,736,452,723,939 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.219037 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.7, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.7, 'num_of_instances': 100} | 1 | a100_80gb |
c9c1590f56ad9483af61763ab01827ebd3fcce5004d986441d20926f2a3b5fc3 | 1,736,452,731,876 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_roman_choicesSeparator_comma_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.985636 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.5444997559944489, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.5444997559944489, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
47f0d797d7fd445952ab448969d9d8feb1de0b8ac05dc728aafab57378dbc1c3 | 1,736,452,736,627 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_lowercase_choicesSeparator_comma_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.300973 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.4777512981379606, 'accuracy_ci_high': 0.67, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.67, 'score_ci_low': 0.4777512981379606, 'num_of_instances': 100} | 1 | a100_80gb |
465ce7822ad51695105249b2f1fa62d637a6fe1cb9dd3c024fbd7e65497b58ca | 1,736,452,741,747 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.health | card=cards.mmlu_pro.health,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.533268 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.3, 'accuracy_ci_low': 0.22, 'accuracy_ci_high': 0.4, 'score_name': 'accuracy', 'score': 0.3, 'score_ci_high': 0.4, 'score_ci_low': 0.22, 'num_of_instances': 100} | 1 | a100_80gb |
2f43ecad8e5f3ddd9a71710c127dc87a1f579b07283194fb9e9a8112a8c0f3a3 | 1,736,452,759,225 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.security_studies | card=cards.mmlu.security_studies,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 16.750788 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.76, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.76, 'score_ci_low': 0.57, 'num_of_instances': 100} | 1 | a100_80gb |
eb3b761e38703d17c400032cc93d6547b47a03c7e42c458c4813a756323a411b | 1,736,452,768,457 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.medical_genetics | card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.678761 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.69, 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_name': 'accuracy', 'score': 0.69, 'score_ci_high': 0.78, 'score_ci_low': 0.6, 'num_of_instances': 100} | 1 | a100_80gb |
43943d5264ac7ead6f3d24ba94be6048cc0f387c1365d07b1a59aa20def7427b | 1,736,452,773,200 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_psychology | card=cards.mmlu.high_school_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.390742 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.48, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.48, 'score_ci_high': 0.58, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
b9c4b7ae6a8c0fefbca0bfa8a4c6b7b72cfa7c523008fe1f65d1ff89d16b73d8 | 1,736,452,777,124 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.310357 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.51, 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.6, 'score_name': 'accuracy', 'score': 0.51, 'score_ci_high': 0.6, 'score_ci_low': 0.41, 'num_of_instances': 100} | 1 | a100_80gb |
9c8647b68dbccfbca6eda5a7fe2bafd67dd295a1e58be9bb40dc339171c2ceb4 | 1,736,452,716,870 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_biology | card=cards.mmlu.high_school_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.868458 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.66, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.66, 'score_ci_high': 0.75, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
21c654ec420bf71e949a96d46cce4dfe765c0c4cf8120898ffba208dedbc54a9 | 1,736,452,721,820 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.psychology | card=cards.mmlu_pro.psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.339202 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.39, 'accuracy_ci_low': 0.3, 'accuracy_ci_high': 0.49, 'score_name': 'accuracy', 'score': 0.39, 'score_ci_high': 0.49, 'score_ci_low': 0.3, 'num_of_instances': 100} | 1 | a100_80gb |
8d258706f49f998833ea8e98f99fc5bd8a8d95e2fdcd242e239a1b5b503d2d1b | 1,736,452,729,628 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.electrical_engineering | card=cards.mmlu.electrical_engineering,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.034106 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
afea72f8b2f855d8bc3a5a604380556a2fdc5454a5945505cc4ad8eade6b3394 | 1,736,452,734,552 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_biology | card=cards.mmlu.college_biology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_roman_choicesSeparator_newline_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.340683 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.61, 'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.71, 'score_name': 'accuracy', 'score': 0.61, 'score_ci_high': 0.71, 'score_ci_low': 0.51, 'num_of_instances': 100} | 1 | a100_80gb |
c50ab754201ba2ef27aaf740b46645091091a9e41c117f9d2dbc6f8d797bbbd3 | 1,736,452,746,449 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_computer_science | card=cards.mmlu.high_school_computer_science,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.318791 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.57, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.66, 'score_name': 'accuracy', 'score': 0.57, 'score_ci_high': 0.66, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
7f520423abfb701bf171a95eabb1f1d8300e4471cfea759c2d2cd7f421b820cf | 1,736,452,750,884 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.clinical_knowledge | card=cards.mmlu.clinical_knowledge,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.233998 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.63, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.63, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
5d08c28b9f99c7e5f9c1dafd7217c93a49b7ff932ccbefb055294f17255e4198 | 1,736,452,755,635 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.jurisprudence | card=cards.mmlu.jurisprudence,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.171759 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
cabdde3a44f05d4ffda59432950e91fca54e4f108bf309c8d4ecc84727feb1ee | 1,736,452,759,459 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.anatomy | card=cards.mmlu.anatomy,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.247089 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.55, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.55, 'score_ci_high': 0.64, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
8ea407efe5b6362aaacc227a8c597f9ad85ecb5955e8895e59f264f32bfc08bf | 1,736,452,763,328 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.medical_genetics | card=cards.mmlu.medical_genetics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.332552 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.62, 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.7127395004877394, 'score_name': 'accuracy', 'score': 0.62, 'score_ci_high': 0.7127395004877394, 'score_ci_low': 0.52, 'num_of_instances': 100} | 1 | a100_80gb |
723ee4586fdaa3c4ea5ab8236ba05d6e3994c3b7829d8ba4c48dc43300e0eac4 | 1,736,452,773,167 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_accounting | card=cards.mmlu.professional_accounting,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_lowercase_choicesSeparator_newline_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.294735 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.51, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
91939b6371a4e0dc8f0207713040290adbc3b9d549a0070e2035f825b43b4abd | 1,736,452,698,141 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.anatomy | card=cards.mmlu.anatomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_numbers_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.072021 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.83, 'accuracy_ci_low': 0.7488878282242509, 'accuracy_ci_high': 0.89, 'score_name': 'accuracy', 'score': 0.83, 'score_ci_high': 0.89, 'score_ci_low': 0.7488878282242509, 'num_of_instances': 100} | 1 | a100_80gb |
3c3f539abf27bbd2d741219d5337ed9dbe8792e41725df2efbb6a8f653742863 | 1,736,452,707,667 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_mathematics | card=cards.mmlu.high_school_mathematics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.438934 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.34, 'accuracy_ci_low': 0.25, 'accuracy_ci_high': 0.43, 'score_name': 'accuracy', 'score': 0.34, 'score_ci_high': 0.43, 'score_ci_low': 0.25, 'num_of_instances': 100} | 1 | a100_80gb |
c9e77fcbb32475957e95b78c06dcae5ce6a6541098c06564e181b0d71690d8e3 | 1,736,452,715,897 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.us_foreign_policy | card=cards.mmlu.us_foreign_policy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_numbers_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.839452 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.67, 'accuracy_ci_low': 0.58, 'accuracy_ci_high': 0.75, 'score_name': 'accuracy', 'score': 0.67, 'score_ci_high': 0.75, 'score_ci_low': 0.58, 'num_of_instances': 100} | 1 | a100_80gb |
85e0796816fba9ff307d7bde7997ab7d3cfe994c36f4ff496007bfec1a9bf91a | 1,736,452,723,633 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_microeconomics | card=cards.mmlu.high_school_microeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.468077 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.6760966862333271, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.6760966862333271, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
f648e4d77e032e84779adcd9834f1a00147ee24d620956bf8a1e3a129f26ce41 | 1,736,452,728,793 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.859596 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.46, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.46, 'score_ci_low': 0.28, 'num_of_instances': 100} | 1 | a100_80gb |
564ccfc6b6565ee00f6a7b73dd6d48f941659fc344d0f9e2e9ab07a34187d7bb | 1,736,452,736,388 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.computer_security | card=cards.mmlu.computer_security,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_lowercase_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.918518 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.46, 'num_of_instances': 100} | 1 | a100_80gb |
303588a1e52564fdce8811adbab2e0a9f34f51c38cb64490a5b60fbb96c39726 | 1,736,452,742,427 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.other | card=cards.mmlu_pro.other,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.633475 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.36983733791353074, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.36983733791353074, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
61e9d8ab6be14c25208beb0d6f69259d7c6a3e4c096d5e916b8052c5050570a0 | 1,736,452,749,284 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.human_aging | card=cards.mmlu.human_aging,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_keyboard_choicesSeparator_space_shuffleChoices_alphabeticalSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.144577 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.59, 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.59, 'score_ci_high': 0.69, 'score_ci_low': 0.5, 'num_of_instances': 100} | 1 | a100_80gb |
4283d31058cdf67da12b5b5f802f2502cd9e6edd25342c364c181a317aedd955 | 1,736,452,758,323 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_medicine | card=cards.mmlu.college_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.309763 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.77, 'accuracy_ci_low': 0.68, 'accuracy_ci_high': 0.85, 'score_name': 'accuracy', 'score': 0.77, 'score_ci_high': 0.85, 'score_ci_low': 0.68, 'num_of_instances': 100} | 1 | a100_80gb |
37d3321ea95efbe707b0b4b0d73145ba01bf3c44991b6f2a374be7e1dd394dcd | 1,736,452,762,757 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.moral_disputes | card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelow.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.51059 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.52, 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.61, 'score_name': 'accuracy', 'score': 0.52, 'score_ci_high': 0.61, 'score_ci_low': 0.42, 'num_of_instances': 100} | 1 | a100_80gb |
9815dde4bcf9e19041f4a2443cec1e45d06171e0eae83460a4353e60dc483ae5 | 1,736,452,697,557 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_geography | card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_lowercase_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.870735 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.6, 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_name': 'accuracy', 'score': 0.6, 'score_ci_high': 0.69, 'score_ci_low': 0.5, 'num_of_instances': 100} | 1 | a100_80gb |
3bd485eccdbb9fa480d8f50798aca74d1216ff9e8ac7bb2cc345501379f375e6 | 1,736,452,705,264 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.openbook_qa | card=cards.openbook_qa,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.OpenBookQA.MultipleChoiceTemplatesInstructionsStateHere.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.086931 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.79, 'accuracy_ci_low': 0.71, 'accuracy_ci_high': 0.86, 'score_name': 'accuracy', 'score': 0.79, 'score_ci_high': 0.86, 'score_ci_low': 0.71, 'num_of_instances': 100} | 1 | a100_80gb |
2c3954890248c5ddf09af8ceb8d3be48c044ad23d3939d680b8cae4c4c0ca468 | 1,736,452,724,640 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.biology | card=cards.mmlu_pro.biology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 18.624823 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.55, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.55, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
7c75e0a63047e322a9f92624ba40d61e39749822009d4c67c7f6e9e2c293df69 | 1,736,452,732,625 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.law | card=cards.mmlu_pro.law,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.526686 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.26, 'accuracy_ci_low': 0.18, 'accuracy_ci_high': 0.36, 'score_name': 'accuracy', 'score': 0.26, 'score_ci_high': 0.36, 'score_ci_low': 0.18, 'num_of_instances': 100} | 1 | a100_80gb |
cd312d9333e70e62b42d65efe8835e6fca6b3ff8f681453a7f81ec0d754223e1 | 1,736,452,737,507 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_psychology | card=cards.mmlu.high_school_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.733056 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.72, 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.8, 'score_name': 'accuracy', 'score': 0.72, 'score_ci_high': 0.8, 'score_ci_low': 0.63, 'num_of_instances': 100} | 1 | a100_80gb |
82055dd5474c6b4dde56d7cb45a228e8bba50e3ee75a154fa238593f3a077bc4 | 1,736,452,741,521 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.377172 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.35, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.56, 'score_ci_low': 0.35, 'num_of_instances': 100} | 1 | a100_80gb |
01cb947d3b043f8eaa569e36c6b4d71a14d2fa6a9ada98de11b816919e6bc2ab | 1,736,452,746,185 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_chemistry | card=cards.mmlu.college_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.524242 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
2e4c9086330f143398761b61f43e1d5e2b0aeafa0671146a034d481709358f08 | 1,736,452,750,312 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.510259 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.56, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.65, 'score_name': 'accuracy', 'score': 0.56, 'score_ci_high': 0.65, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
4e85addba0699b947ad6cdc13d027b3dfe7affbb60089334983d4ab08a57e35d | 1,736,452,754,448 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.nutrition | card=cards.mmlu.nutrition,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.509001 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.58, 'accuracy_ci_low': 0.48, 'accuracy_ci_high': 0.68, 'score_name': 'accuracy', 'score': 0.58, 'score_ci_high': 0.68, 'score_ci_low': 0.48, 'num_of_instances': 100} | 1 | a100_80gb |
352969e5d90d371c092606df06752afa02fb49bcf0aa04138679f365fe3c2d00 | 1,736,452,759,052 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateHere.enumerator_keyboard_choicesSeparator_space_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.931499 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.28, 'accuracy_ci_low': 0.2, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.28, 'score_ci_high': 0.37, 'score_ci_low': 0.2, 'num_of_instances': 100} | 1 | a100_80gb |
47f96576a13ea84ef58dcb797e41566123d5ce2949ddec607c405be0f8b852ec | 1,736,452,701,082 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_law | card=cards.mmlu.professional_law,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 19.674149 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.45, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.45, 'score_ci_high': 0.54, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
67cb728a46c318ed3b02175a8d17b44a9abce473e2374f15e1a13f87ad305f23 | 1,736,452,713,796 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_numbers_choicesSeparator_pipe_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 9.842059 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.46, 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.56, 'score_name': 'accuracy', 'score': 0.46, 'score_ci_high': 0.56, 'score_ci_low': 0.36, 'num_of_instances': 100} | 1 | a100_80gb |
a1a716d7a12e22d5e3d66f2fde305d5910ed6357878b9ffbb4543089d5761664 | 1,736,452,722,475 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_greek_choicesSeparator_space_shuffleChoices_lengthSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.207061 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.37, 'accuracy_ci_low': 0.28, 'accuracy_ci_high': 0.48, 'score_name': 'accuracy', 'score': 0.37, 'score_ci_high': 0.48, 'score_ci_low': 0.28, 'num_of_instances': 100} | 1 | a100_80gb |
c490fac470f52e5daf1977831547c40b07204b5daee5585775d8848592506cda | 1,736,452,727,397 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.sociology | card=cards.mmlu.sociology,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHarness.enumerator_greek_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.384278 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.65, 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.74, 'score_name': 'accuracy', 'score': 0.65, 'score_ci_high': 0.74, 'score_ci_low': 0.56, 'num_of_instances': 100} | 1 | a100_80gb |
5283dcb571187e0758fb5a4fc5d880bd10b18b22d401a9a15b9cc284836090c2 | 1,736,452,731,448 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.human_sexuality | card=cards.mmlu.human_sexuality,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_capitals_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.357805 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.5136645338360245, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.5136645338360245, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
b0bedd15dce3567963e87db0d6da4048852216e369897e5c26b1efb8dfa6ea85 | 1,736,452,735,466 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.abstract_algebra | card=cards.mmlu.abstract_algebra,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicHelmFixed.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.395799 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.45, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.45, 'num_of_instances': 100} | 1 | a100_80gb |
6726532466184bd87781ee4dabbd9a49ea753c24099e2e931ed261945cd31042 | 1,736,452,739,787 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_macroeconomics | card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.174946 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.63, 'accuracy_ci_low': 0.53, 'accuracy_ci_high': 0.72, 'score_name': 'accuracy', 'score': 0.63, 'score_ci_high': 0.72, 'score_ci_low': 0.53, 'num_of_instances': 100} | 1 | a100_80gb |
f687ea3f8c9b21708233c7d916f74123ccbb6bfe93f6c9dd80696b88b3998178 | 1,736,452,744,988 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu_pro.business | card=cards.mmlu_pro.business,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU_PRO.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_keyboard_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.624962 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.16, 'accuracy_ci_low': 0.1, 'accuracy_ci_high': 0.23949470601421693, 'score_name': 'accuracy', 'score': 0.16, 'score_ci_high': 0.23949470601421693, 'score_ci_low': 0.1, 'num_of_instances': 100} | 1 | a100_80gb |
77f22340a80a9b3b6399a6c1a695b8d07803ef0536b0755bbfe20a4e2915617a | 1,736,452,749,703 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_computer_science | card=cards.mmlu.college_computer_science,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsStateBelowPlease.enumerator_greek_choicesSeparator_orLower_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.018414 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.27, 'accuracy_ci_low': 0.19, 'accuracy_ci_high': 0.37, 'score_name': 'accuracy', 'score': 0.27, 'score_ci_high': 0.37, 'score_ci_low': 0.19, 'num_of_instances': 100} | 1 | a100_80gb |
01533d03f12b223159ecfa8401c4ba20823c09cf7f7b19e3c5425ad85650bce8 | 1,736,452,757,375 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.anatomy | card=cards.mmlu.anatomy,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_greek_choicesSeparator_orLower_shuffleChoices_alphabeticalSort,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 7.040663 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.54, 'accuracy_ci_low': 0.44, 'accuracy_ci_high': 0.64, 'score_name': 'accuracy', 'score': 0.54, 'score_ci_high': 0.64, 'score_ci_low': 0.44, 'num_of_instances': 100} | 1 | a100_80gb |
e2582b7de24883fa3ff90367021abd10e0a6e95d044868267f3cef49e1d14e3d | 1,736,452,697,735 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_chemistry | card=cards.mmlu.high_school_chemistry,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithoutTopicFixed.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.054428 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.32, 'accuracy_ci_low': 0.23, 'accuracy_ci_high': 0.4149673265996648, 'score_name': 'accuracy', 'score': 0.32, 'score_ci_high': 0.4149673265996648, 'score_ci_low': 0.23, 'num_of_instances': 100} | 1 | a100_80gb |
d11f5d95357cb0a171f7d5dc4680e040d616cc1fbf232c266647484b98658020 | 1,736,452,706,889 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.professional_psychology | card=cards.mmlu.professional_psychology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSASimple.enumerator_roman_choicesSeparator_space_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 8.581813 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.53, 'accuracy_ci_low': 0.43, 'accuracy_ci_high': 0.62, 'score_name': 'accuracy', 'score': 0.53, 'score_ci_high': 0.62, 'score_ci_low': 0.43, 'num_of_instances': 100} | 1 | a100_80gb |
d150feb3e22ca0bf18b26b8db7a37d24af8c40e206c359bf87a93cdbd8f04ac4 | 1,736,452,712,214 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.formal_logic | card=cards.mmlu.formal_logic,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithTopic.enumerator_greek_choicesSeparator_semicolon_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 4.35552 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.51, 'score_ci_low': 0.32, 'num_of_instances': 100} | 1 | a100_80gb |
3d6e63d66e7d5a490a23d275ded243b351e4029d219b911f1b67199f4fb31ec4 | 1,736,452,719,182 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.virology | card=cards.mmlu.virology,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_numbers_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 6.358858 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.44, 'accuracy_ci_low': 0.34, 'accuracy_ci_high': 0.54, 'score_name': 'accuracy', 'score': 0.44, 'score_ci_high': 0.54, 'score_ci_low': 0.34, 'num_of_instances': 100} | 1 | a100_80gb |
4712c171f3beb8451c9091f5999189d2aac6f9d73d6812e9b768adb8a62a4525 | 1,736,452,723,840 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.college_mathematics | card=cards.mmlu.college_mathematics,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSACould.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFourth,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.394456 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.42, 'accuracy_ci_low': 0.33, 'accuracy_ci_high': 0.52, 'score_name': 'accuracy', 'score': 0.42, 'score_ci_high': 0.52, 'score_ci_low': 0.33, 'num_of_instances': 100} | 1 | a100_80gb |
a498d8181f98a4273c37b1bbe56ea86a8d380841da7c4e9f5c463dc3147b66e6 | 1,736,452,735,664 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.high_school_statistics | card=cards.mmlu.high_school_statistics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesStructuredWithoutTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_lengthSortReverse,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 11.275068 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.49, 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.58, 'score_name': 'accuracy', 'score': 0.49, 'score_ci_high': 0.58, 'score_ci_low': 0.39, 'num_of_instances': 100} | 1 | a100_80gb |
b39392dc5b98df92018cdecfca68745091a3a27d84b17191f85a7109340b1e24 | 1,736,452,740,661 | 1,736,449,200,000 | mistralai_Mistral-7B-Instruct-v0.3 | cards.mmlu.global_facts | card=cards.mmlu.global_facts,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MMLU.MultipleChoiceTemplatesInstructionsProSAAddress.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_placeCorrectChoiceFirst,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100 | None | half | 3.780409 | {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1} | {"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} | VLLM | {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.16.4"} | {'accuracy': 0.13, 'accuracy_ci_low': 0.07, 'accuracy_ci_high': 0.20122668902065244, 'score_name': 'accuracy', 'score': 0.13, 'score_ci_high': 0.20122668902065244, 'score_ci_low': 0.07, 'num_of_instances': 100} | 1 | a100_80gb |