Dataset schema (column name, dtype, observed string lengths / number of distinct values / numeric range):

| column | dtype | values / range |
|---|---|---|
| run_id | large_string | string length 64 |
| timestamp | unknown | (no statistics shown) |
| model_name_or_path | large_string | 5 distinct values |
| unitxt_recipe | large_string | string length 326–371 |
| quantization_type | large_string | 1 distinct value |
| quantization_bit_count | large_string | 1 distinct value |
| inference_runtime_s | float64 | 1.05–37.4 |
| generation_args | large_string | 1 distinct value |
| model_args | large_string | 5 distinct values |
| inference_engine | large_string | 1 distinct value |
| packages_versions | large_string | 1 distinct value |
| scores | large_string | string length 174–240 |
| num_gpu | int64 | 1–1 |
| device | large_string | 1 distinct value |

Fields that are identical in every row shown below (factored out of the per-row tables):

- quantization_type: None; quantization_bit_count: half; num_gpu: 1; device: a100_80gb; inference_engine: VLLM
- generation_args: {"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
- packages_versions: {"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
- model_args: {"model": <model_name_or_path>, "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1} (only the "model" field differs between rows)
- unitxt_recipe: every recipe sets demos_pool_size=100, format=formats.chat_api, system_prompt=system_prompts.empty, demos_taken_from=train, demos_removed_from_data=True, max_test_instances=100; the card, num_demos, and template (enumerator, choices separator, choice shuffling) vary and are listed per group below
- scores: every record has num_of_instances: 100 and score_name: 'accuracy'; score and score_ci_low / score_ci_high duplicate accuracy and accuracy_ci_low / accuracy_ci_high
- timestamp: all runs are dated 2024-12-22 (UTC); only the time of day is shown in the tables below
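The generation_args, model_args, and packages_versions columns hold JSON strings, scores holds a Python-literal dict string (single quotes), and unitxt_recipe is a comma-separated key=value string. A minimal parsing sketch using only the standard library; the `record` dict below is illustrative, with its values copied from the first row of the first table:

```python
import ast
import json

# Illustrative record, values taken from the first row shown below.
record = {
    "unitxt_recipe": (
        "card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,"
        "format=formats.chat_api,"
        "template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic."
        "enumerator_keyboard_choicesSeparator_newline_shuffleChoices_True,"
        "system_prompt=system_prompts.empty,demos_taken_from=train,"
        "demos_removed_from_data=True,max_test_instances=100"
    ),
    "generation_args": (
        '{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, '
        '"top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}'
    ),
    "scores": (
        "{'num_of_instances': 100, 'accuracy': 0.61, 'score': 0.61, 'score_name': 'accuracy', "
        "'accuracy_ci_low': 0.51, 'accuracy_ci_high': 0.71, 'score_ci_low': 0.51, 'score_ci_high': 0.71}"
    ),
}

# generation_args / model_args / packages_versions are plain JSON.
gen_args = json.loads(record["generation_args"])

# scores is a Python repr (single quotes), so json.loads fails; literal_eval handles it.
scores = ast.literal_eval(record["scores"])

# unitxt_recipe is "key=value" pairs separated by commas (no commas appear inside
# the values in the rows shown, so a plain split is enough).
recipe = dict(part.split("=", 1) for part in record["unitxt_recipe"].split(","))

print(gen_args["max_tokens"], scores["accuracy"], recipe["card"], recipe["template"])
```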
meta-llama/Meta-Llama-3-8B-Instruct, card=cards.mmlu.moral_disputes, num_demos=5, template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| f9e7db8699ddd03c010e4986ae9530240888cc976ab22a47bc91f5659c377254 | 20:04:00.314000 | keyboard | newline | True | 8.160624 | 0.61 | 0.51 | 0.71 |
| 3d50a30a97d1fd5cbdaa1da228b5e32d10b8bee0e82686f9ee38d784dfa2de95 | 20:04:09.539000 | keyboard | comma | False | 7.924787 | 0.58 | 0.49 | 0.67 |
| f152cd9e85521bf1a6713c33fe7e7bbd58647bb334890f4aba79bf74f87d1af2 | 20:04:17.815000 | keyboard | comma | True | 6.963796 | 0.54 | 0.44 | 0.64 |
| b6bffae62bea950db2bcb7ce2c90af82c1834318c7958529d09c2460ce722439 | 20:04:26.402000 | keyboard | semicolon | False | 7.195327 | 0.56 | 0.4665592907589815 | 0.65 |
| 4f8ea359173df32569912266684b4a6445b61421dc0a2465a50a7dc252369cd2 | 20:04:35.347000 | keyboard | semicolon | True | 7.58557 | 0.55 | 0.4521225193112265 | 0.65 |
| 2d4236c31c94426db4835932780a284f622eda71b2059f5912ba176544d1774c | 20:04:43.643000 | keyboard | pipe | False | 6.961962 | 0.51 | 0.42 | 0.61 |
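Within a group the rows differ only in prompt formatting (enumerator, choices separator, choice shuffling), so the natural comparison is accuracy across these variations. A small pandas sketch, assuming the records have been collected into a DataFrame; the column names ("separator", "shuffled", "accuracy") are illustrative and the values are copied from the table above:

```python
import pandas as pd

# Illustrative frame built from the Meta-Llama-3-8B-Instruct / moral_disputes rows above.
df = pd.DataFrame([
    {"separator": "newline",   "shuffled": True,  "accuracy": 0.61},
    {"separator": "comma",     "shuffled": False, "accuracy": 0.58},
    {"separator": "comma",     "shuffled": True,  "accuracy": 0.54},
    {"separator": "semicolon", "shuffled": False, "accuracy": 0.56},
    {"separator": "semicolon", "shuffled": True,  "accuracy": 0.55},
    {"separator": "pipe",      "shuffled": False, "accuracy": 0.51},
])

# Mean accuracy per separator and per shuffling setting.
print(df.groupby("separator")["accuracy"].mean())
print(df.groupby("shuffled")["accuracy"].mean())
```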
meta-llama/Meta-Llama-3-8B-Instruct, card=cards.mmlu.college_computer_science, num_demos=5, template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| e7fceabe382b01a929d35d30a7ed9bf78f3e4926583ac360f61c6ebab6bcc1e8 | 20:02:28.649000 | roman | orLower | False | 13.752969 | 0.57 | 0.47475314559841414 | 0.66 |
| 2a718b4ffef9bac130e1697ffeefbeeacc1ca3b20998604688800089721ff3d7 | 20:02:43.127000 | roman | orLower | True | 12.76079 | 0.48 | 0.38 | 0.58 |
| 8312311ab007f2b0751240bbe9748e30e77e8bb897c21e2c30cbb731904bc8dd | 20:02:56.726000 | keyboard | space | False | 11.937653 | 0.5 | 0.39 | 0.6 |
| 79a7ab19fbcd1b9554e3cb97eb72e3b2ec133769d43ab2542a21f9cd8dee646d | 20:03:09.207000 | keyboard | space | True | 10.797551 | 0.45 | 0.35 | 0.55 |
| 5b2a6997ab9d6fc6cada07bc5b3097efb770e22226d23bcfb0cb3bef992400db | 20:03:20.750000 | keyboard | newline | False | 9.861475 | 0.57 | 0.47 | 0.66 |
| 5fbad19323be43bb08ff3fa19f4f2c678a3442c75c0ed5551c825d9add70bb5c | 20:03:33.185000 | keyboard | newline | True | 10.739922 | 0.47 | 0.3693366892479934 | 0.56 |
| 7075be1ab106cc8632aeb98334dcb3c15d4f5ff8e085d22274fefa7b4c411309 | 20:03:44.788000 | keyboard | comma | False | 9.885393 | 0.56 | 0.46 | 0.67 |
| 0dcb94273ee51f94dc0a1bfdbae243433d3c7feb9343dccf225f18ebbfbda3ea | 20:03:57.480000 | keyboard | comma | True | 10.991844 | 0.5 | 0.4 | 0.6 |
| ea9fdaaa138c57a5c66a6f7a13dc42e905c783f4239866170f5a644bfcb6437e | 20:04:10.174000 | keyboard | semicolon | False | 10.986981 | 0.53 | 0.43 | 0.63 |
| 8d3ff90dc08ad62efda728cbfc2ed59f81df7b5a3d061c5eb5d7ff961e4c4a81 | 20:04:21.796000 | keyboard | semicolon | True | 9.917623 | 0.48 | 0.38 | 0.58 |
mistralai/Mistral-7B-Instruct-v0.3, card=cards.mmlu.moral_disputes, num_demos=5, template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| 806bbf5dd35cbb0b997c4458bdc9c2ae8c4ecda704310be53db7ad6d601bc327 | 20:03:16.785000 | roman | comma | True | 9.349254 | 0.6 | 0.5 | 0.6908216470530442 |
| 1f7b8b17ba14c2c1832a11aa6a5c8305b1d0fb8c5696fac65f7afe4ce2355f66 | 20:03:24.734000 | roman | semicolon | False | 6.580453 | 0.61 | 0.51 | 0.7 |
| 6db1183056731ead42d0233bb78f33d0f361260a37e599196451c6400f8af6df | 20:03:36.371000 | roman | semicolon | True | 10.268451 | 0.59 | 0.49 | 0.6877317922595841 |
| 3bd18d339f9a75355e5ba448328af70e50451bb271e6e848771096e2ae777aa7 | 20:03:44.543000 | roman | pipe | False | 6.728171 | 0.67 | 0.57 | 0.76 |
| 1fe8669bd8671cc04f6b042b8eb71f8354ea52769164b2213637b0e7c357361c | 20:03:54.142000 | roman | pipe | True | 8.209625 | 0.62 | 0.51 | 0.7 |
| 5daa36fe102aa351aea6a427ebd54f0af8e31e53e7ed9b6e345d617cde62fe7f | 20:04:02.906000 | roman | OrCapital | False | 6.765262 | 0.65 | 0.55 | 0.73 |
| b1949877f31424d44f7a4bd499bfe2378f8fa3b07f22833aec238fa86252d72b | 20:04:11.133000 | roman | OrCapital | True | 6.834814 | 0.59 | 0.49 | 0.68 |
| d54c6750da7cbcf83e3ae685a2e46e6e78b3c0b6f581f6b5ce7c14800f7ca68e | 20:04:19.947000 | roman | orLower | False | 6.758447 | 0.62 | 0.51 | 0.71 |
| b7cae5ff762e1d65430717ab5a84d6681abd1264dae4bbbe63a9981bb12f65dd | 20:04:28.182000 | roman | orLower | True | 6.847261 | 0.57 | 0.47 | 0.67 |
| b9dd3943f8f5f6025ac1c75daee28c4eb6cee418ced69a5c65e45351f642a0d6 | 20:04:36.073000 | keyboard | space | False | 6.540066 | 0.56 | 0.46 | 0.65 |
meta-llama/Meta-Llama-3-8B-Instruct, card=cards.mmlu.high_school_geography, num_demos=0 (zero-shot), template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| a110524069a23f708dc1a5029b5280880ccbd7fa7a36a71562fb91bb14149265 | 20:03:45.134000 | capitals | newline | True | 2.915006 | 0.75 | 0.65 | 0.83 |
| f18b2acf532aaa416f75c1b0093f40d213a556fcb21902e1648b01d982b2bd63 | 20:03:49.122000 | capitals | comma | False | 3.246072 | 0.67 | 0.57 | 0.76 |
| a786cc0d6a7caba46211723689152764c75685f9a1ce34a37e88fe601c06bbc6 | 20:03:53.003000 | capitals | comma | True | 3.195929 | 0.71 | 0.62 | 0.79 |
| 25e0efeab19c252ea08885dcd3224bc1d6660f77632cc34b899c90156a4855ef | 20:03:56.029000 | capitals | semicolon | False | 2.3434 | 0.71 | 0.61 | 0.8 |
| 637c8e9c90fbe9114e8713d19f6d4579ed0862a27f6b206609d4aee26668f69a | 20:03:59.035000 | capitals | semicolon | True | 2.330318 | 0.75 | 0.67 | 0.83 |
| db765e8270a579e21e448d125983282fbe44ea9333b68e698389c59db250a49e | 20:04:02.142000 | capitals | pipe | False | 2.404403 | 0.69 | 0.59 | 0.78 |
| 4f71d40104704fd8aa097186076daa9db16a0c6b9b2c38f4311a458af76aa7c9 | 20:04:07.202000 | capitals | pipe | True | 4.359667 | 0.71 | 0.61 | 0.79 |
| f0c5bfd7f4191bcec84018c14bfcce9c2f9a54f87d7611a46617b2c91aaf1c05 | 20:04:11.516000 | capitals | OrCapital | False | 3.624409 | 0.7 | 0.61 | 0.78 |
| 7aab4f15e32306e107000f156f196aeae45129d349726abea55f9458bfec1f34 | 20:04:14.559000 | capitals | OrCapital | True | 2.359349 | 0.66 | 0.57 | 0.75 |
| d709789b1f00362ec5a87bcd7e866a2c5a1ed5e75a64f9c015edd3f0b35ed964 | 20:04:17.611000 | capitals | orLower | False | 2.359055 | 0.73 | 0.63 | 0.81 |
meta-llama/Llama-3.2-3B-Instruct, card=cards.mmlu.high_school_macroeconomics, num_demos=5, template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| b153b7f1d7793bc193b94f7817ef552bfe5fc84798514a64411188abf973c8b3 | 20:03:32.525000 | keyboard | semicolon | False | 4.183957 | 0.52 | 0.42 | 0.62 |
| fbfd1dffd73571c826e2ac5e2a97c7072163e3c2026bb1ead29fd5accf62a8b0 | 20:03:37.991000 | keyboard | semicolon | True | 4.313374 | 0.46 | 0.36 | 0.56 |
| 27fc7a8372f857a7fa1685d56f3f40ed655382ba1fe05f61c4477b188ed25cd9 | 20:03:43.962000 | keyboard | pipe | False | 4.78269 | 0.53 | 0.43 | 0.6213692186774596 |
| b79a4bcf79e03b7a24661072f3eacccd28ecc13dfdca957fb57912d4b0b2c2ff | 20:03:51.945000 | keyboard | pipe | True | 5.485323 | 0.45 | 0.35 | 0.55 |
| c86b600c639ee44613677dc0c89031611f7f2ab32cac2e31e74ced7346b9cc70 | 20:03:56.947000 | keyboard | OrCapital | False | 3.85152 | 0.54 | 0.44 | 0.63 |
| 850ce029571e8b8c4f613ba27854ae19c7dc32af59dacbf62ae7ec8237e2172f | 20:04:01.899000 | keyboard | OrCapital | True | 3.783661 | 0.43 | 0.34 | 0.53 |
| 81a417d37f3d189e37eac6889c38c7dab479bcd307f73a191764a688835b68e9 | 20:04:08.263000 | keyboard | orLower | False | 5.21072 | 0.53 | 0.42 | 0.62 |
| 73e64d1b9660dd05cf68691a4fe3ee7f5ffa8e840431a3e6b75d3e5c3063ff1c | 20:04:13.736000 | keyboard | orLower | True | 4.332586 | 0.45 | 0.36 | 0.55 |
| 54612f9e93cc975465ad37e2a70e21226faf3b5c6fa99790617b98ee07416e73 | 20:04:18.705000 | greek | space | False | 3.787754 | 0.55 | 0.45 | 0.6510968727614284 |
| af6a53c7aa31a6cb16f453799e2746baf42b395d83795b4b2deccbaed6d71120 | 20:04:24.328000 | greek | space | True | 4.449775 | 0.48 | 0.38 | 0.57 |
allenai/OLMoE-1B-7B-0924-Instruct, card=cards.mmlu.high_school_world_history, num_demos=5, template family templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm:

| run_id | time (UTC) | enumerator | choices separator | shuffle choices | inference_runtime_s | accuracy | accuracy_ci_low | accuracy_ci_high |
|---|---|---|---|---|---|---|---|---|
| 03249410c686cafea1ff79484a972830d04ea237584f54a35de6f0b88af7ece4 | 20:02:32.050000 | capitals | space | True | 9.99594 | 0.6 | 0.51 | 0.7 |
| e4dc01f218c72b4a2e68eb1636cecc02c95d0741b629d7f0c185332f3c446391 | 20:02:47.588000 | capitals | newline | False | 11.454712 | 0.72 | 0.63 | 0.81 |
| 8a5dd4fb40308037936207427da5d75c9b0262ac7ab968fa4a03af6e72ba41a4 | 20:02:58.106000 | capitals | newline | True | 7.657995 | 0.71 | 0.62 | 0.79 |
| 008ebfb0ab4e44e8c0ce14c9fc848029b4c0d987dd6b4872c415554a931ecab7 | 20:03:11.122000 | capitals | comma | False | 10.1766 | 0.72 | 0.63 | 0.8 |
| 263cd38552341f417bce577a21a1b4d93cbd63995ea5eb7e7b90177010d6d0f6 | 20:03:22.509000 | capitals | comma | True | 7.670111 | 0.7 | 0.61 | 0.78 |
5387431a608216be7892e7dfcb5a60fe9873f473f2419e6ac2e660cc6acccb56
"2024-12-22T20:03:33.818000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.551315
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.73, 'score': 0.73, 'score_name': 'accuracy', 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.81, 'score_ci_low': 0.63, 'score_ci_high': 0.81}
1
a100_80gb
a1fec3a5f41797af3195b1e38d6e5a9fdd764b47912981eb161ff5ab2ea5a8df
"2024-12-22T20:03:45.129000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.624707
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.68, 'score': 0.68, 'score_name': 'accuracy', 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.77, 'score_ci_low': 0.59, 'score_ci_high': 0.77}
1
a100_80gb
941e817fc9dffb0bbe33a74a24b95c6a17cd6717f73858c38455b89a79719841
"2024-12-22T20:03:56.306000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.358121
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.71, 'score': 0.71, 'score_name': 'accuracy', 'accuracy_ci_low': 0.62, 'accuracy_ci_high': 0.8, 'score_ci_low': 0.62, 'score_ci_high': 0.8}
1
a100_80gb
a1f39052d51f251ebb2a9c98d4605a19a7648bf3b1a31cd8a759c74b15e451b5
"2024-12-22T20:04:06.453000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.364936
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.7, 'score': 0.7, 'score_name': 'accuracy', 'accuracy_ci_low': 0.61, 'accuracy_ci_high': 0.79, 'score_ci_low': 0.61, 'score_ci_high': 0.79}
1
a100_80gb
b5133bdca83985fee6ea36db8f08c3fb63bf70e83a9da6312dc5922a2c567bb6
"2024-12-22T20:04:17.497000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.high_school_world_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_capitals_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.20117
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.71, 'score': 0.71, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6122499095810682, 'accuracy_ci_high': 0.79, 'score_ci_low': 0.6122499095810682, 'score_ci_high': 0.79}
1
a100_80gb
f186784fabda41ac2eae783adcd31974058cc9299d9d8085e1c48f83e68b3552
"2024-12-22T20:02:57.852000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.243951
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.73, 'score': 0.73, 'score_name': 'accuracy', 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_ci_low': 0.64, 'score_ci_high': 0.81}
1
a100_80gb
5adf05d4a5a5edc78f433af837edb965b6c34a08381cd142712bae60d7ab2df3
"2024-12-22T20:03:04.457000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.501329
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.69, 'score': 0.69, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.77, 'score_ci_low': 0.6, 'score_ci_high': 0.77}
1
a100_80gb
d946cb1515d6b9b366072d1d9b475ea513e1243c5be15bc95025508a7917b0a2
"2024-12-22T20:03:12.568000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.327091
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.75, 'score': 0.75, 'score_name': 'accuracy', 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.82, 'score_ci_low': 0.66, 'score_ci_high': 0.82}
1
a100_80gb
020734e1e5ea0926838d4e3e9e8a7a3fbbab535bf3be6b51b0098e809cde080e
"2024-12-22T20:03:21.490000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.816137
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.7, 'score': 0.7, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_ci_low': 0.6, 'score_ci_high': 0.78}
1
a100_80gb
6c874c0cd048944636fcc10a83acb2e0f7f4dd6179c7aa321e52ded66b6993c1
"2024-12-22T20:03:28.259000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.577148
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.75, 'score': 0.75, 'score_name': 'accuracy', 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_ci_low': 0.66, 'score_ci_high': 0.83}
1
a100_80gb
9be52df434e1a5ab6d7cc60ebec513644733ad3b96ca64e84f541c3c436acf3a
"2024-12-22T20:03:36.555000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.170494
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.69, 'score': 0.69, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5863047493112463, 'accuracy_ci_high': 0.77, 'score_ci_low': 0.5863047493112463, 'score_ci_high': 0.77}
1
a100_80gb
98dea5d2c50c11d38231c0f1f0d4c2ad59e147d10d65095c7c39d5a67614b91b
"2024-12-22T20:03:43.290000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.621926
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.74, 'score': 0.74, 'score_name': 'accuracy', 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.82, 'score_ci_low': 0.64, 'score_ci_high': 0.82}
1
a100_80gb
4ede799842456188ba69d67ad6b533b990e53d217ceeddda18930279c9449393
"2024-12-22T20:03:50.658000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.644809
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.67, 'score': 0.67, 'score_name': 'accuracy', 'accuracy_ci_low': 0.57, 'accuracy_ci_high': 0.75, 'score_ci_low': 0.57, 'score_ci_high': 0.75}
1
a100_80gb
fa6ea009797ae783c2babd391ed79e70c051a4011e76e8e27d6344bc8d2f9211
"2024-12-22T20:03:57.443000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.653296
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.73, 'score': 0.73, 'score_name': 'accuracy', 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.81, 'score_ci_low': 0.64, 'score_ci_high': 0.81}
1
a100_80gb
e374f2defae1bf0bf4710df55c0e6c5df5c0630692e5b10b357a823078b92b82
"2024-12-22T20:04:04.356000Z"
mistralai/Mistral-7B-Instruct-v0.3
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopicHelm.enumerator_keyboard_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.764419
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "mistralai/Mistral-7B-Instruct-v0.3", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.66, 'score': 0.66, 'score_name': 'accuracy', 'accuracy_ci_low': 0.56, 'accuracy_ci_high': 0.75, 'score_ci_low': 0.56, 'score_ci_high': 0.75}
1
a100_80gb
d4a77b8d6d4b1fb11334ad75a2b5a2381bd6a8785c2c0e71efb434d09cbc9cd5
"2024-12-22T20:00:33.849000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_pipe_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
31.00589
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.73, 'score': 0.73, 'score_name': 'accuracy', 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.81, 'score_ci_low': 0.63, 'score_ci_high': 0.81}
1
a100_80gb
184c66af253512c375201c546751f1c2e9dbf0ef79dd5cb46ea6e3a527200e5f
"2024-12-22T20:01:09.041000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_pipe_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
30.02003
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.74, 'score': 0.74, 'score_name': 'accuracy', 'accuracy_ci_low': 0.64, 'accuracy_ci_high': 0.82, 'score_ci_low': 0.64, 'score_ci_high': 0.82}
1
a100_80gb
ebe03022c1b9fce2a820b2b9413dc1ffb572442269d3d340845a2b2d50018060
"2024-12-22T20:01:43.732000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
30.228547
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.7, 'score': 0.7, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.79, 'score_ci_low': 0.6, 'score_ci_high': 0.79}
1
a100_80gb
cb023b5389f6f6a9df6e61c4c4a7daa7996582d406f68f5d8136fcb20778d82d
"2024-12-22T20:02:19.334000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
31.144426
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.73, 'score': 0.73, 'score_name': 'accuracy', 'accuracy_ci_low': 0.63, 'accuracy_ci_high': 0.81, 'score_ci_low': 0.63, 'score_ci_high': 0.81}
1
a100_80gb
9b914b9c6bf8ab066656f86b4cdae7acec5e1dd72177eb85fbddffc2749428bd
"2024-12-22T20:02:54.774000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
30.12662
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.69, 'score': 0.69, 'score_name': 'accuracy', 'accuracy_ci_low': 0.59, 'accuracy_ci_high': 0.78, 'score_ci_low': 0.59, 'score_ci_high': 0.78}
1
a100_80gb
cc8a85c6e701bdc4cd177369f1f2c469e09be3bbac9e5c0760deaa85cd3c0f02
"2024-12-22T20:03:29.877000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_european_history,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_greek_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
30.255144
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.7, 'score': 0.7, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6061853320038664, 'accuracy_ci_high': 0.7822588366492358, 'score_ci_low': 0.6061853320038664, 'score_ci_high': 0.7822588366492358}
1
a100_80gb
707ddc6133eaf39dfad5cf8e1ae8ee59d7645fe62c8b5987a80f81a54832b9a3
"2024-12-22T20:03:34.745000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
4.189183
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.7, 'score': 0.7, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_ci_low': 0.6, 'score_ci_high': 0.78}
1
a100_80gb
9ee9c92f5dce7537f9dee8284fa05311fa0b36226e2d2a4d16935e1819391272
"2024-12-22T20:03:37.804000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.383264
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.69, 'score': 0.69, 'score_name': 'accuracy', 'accuracy_ci_low': 0.6, 'accuracy_ci_high': 0.78, 'score_ci_low': 0.6, 'score_ci_high': 0.78}
1
a100_80gb
1e406403bf2bffbcfa9e817cd2c68cc4c19e7b75ffbec1b31ac6f6583c9cba58
"2024-12-22T20:03:40.859000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.high_school_geography,demos_pool_size=100,num_demos=0,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_capitals_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
2.369779
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.75, 'score': 0.75, 'score_name': 'accuracy', 'accuracy_ci_low': 0.66, 'accuracy_ci_high': 0.83, 'score_ci_low': 0.66, 'score_ci_high': 0.83}
1
a100_80gb
fb106c2f25a84e75f40067a252b9a8f5593b3de8582abe46e6b9ab31889e6ceb
"2024-12-22T20:02:33.587000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.919297
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.49, 'score': 0.49, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.38, 'score_ci_high': 0.59}
1
a100_80gb
ab7e08591fdaf8929b9cf41cfad737d58658e209df25954a8b716f5781f9ff96
"2024-12-22T20:02:41.271000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_roman_choicesSeparator_OrCapital_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.487658
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.6, 'score_ci_low': 0.4, 'score_ci_high': 0.6}
1
a100_80gb
b76dd567fdb9512c6964dfbab527fd68e0eefc9d1c26ba8396374f4bdaed94a5
"2024-12-22T20:02:46.352000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.875929
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.57, 'score_ci_low': 0.38, 'score_ci_high': 0.57}
1
a100_80gb
06f6b4a1c79e793a6cf29180b5c9fbb86cfe86563b4cfe20199063eaae2d75e5
"2024-12-22T20:02:51.412000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_roman_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.866576
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
5b6211a9ffe922dcc8b1dd1cca48ddc0d2fb534e310fe34616d6251ad7b8298b
"2024-12-22T20:02:57.626000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.047966
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.52, 'score': 0.52, 'score_name': 'accuracy', 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.61, 'score_ci_low': 0.42, 'score_ci_high': 0.61}
1
a100_80gb
fb23fcfdb227a75fa69c5173a56124a7c6842d66666afbdba45721ff1221c872
"2024-12-22T20:03:04.883000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.10303
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.55, 'score_ci_low': 0.36, 'score_ci_high': 0.55}
1
a100_80gb
5f5c33e9b0a69cc37831dd88aebecb8b26bc52b8df36f44a408bb1089aa791c2
"2024-12-22T20:03:09.757000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.705621
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.4, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.4, 'score_ci_high': 0.59}
1
a100_80gb
24832b2fb18d22aa1fd3c6751f1bcdbc494e322f580cf01615222e474344ccba
"2024-12-22T20:03:14.710000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.764518
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.47, 'score': 0.47, 'score_name': 'accuracy', 'accuracy_ci_low': 0.37981775399094003, 'accuracy_ci_high': 0.57, 'score_ci_low': 0.37981775399094003, 'score_ci_high': 0.57}
1
a100_80gb
dfe2d2bfcaa1f4e95cee9ea6fce76975924cab7dbddebc2339b6bb3df88a2285
"2024-12-22T20:03:20.904000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.034328
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.51, 'score': 0.51, 'score_name': 'accuracy', 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.6, 'score_ci_low': 0.41, 'score_ci_high': 0.6}
1
a100_80gb
3c938c4e0c4e5b67ececf5330c17e70d2a375773b774d3eb1f22630e97785346
"2024-12-22T20:03:26.377000Z"
meta-llama/Llama-3.2-3B-Instruct
card=cards.mmlu.high_school_macroeconomics,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopicHelm.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
3.687035
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Llama-3.2-3B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.48, 'score': 0.48, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
47196dfea578310ca4c85eacfdeb4a3bd1cab9d3a47c87f204823237d9685fdc
"2024-12-22T20:02:04.329000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.297384
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.47, 'score': 0.47, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.38, 'score_ci_high': 0.56}
1
a100_80gb
75975aedd0a760aba9f918759843ea8b5fe60e77485eb80f2f7eb9da5a8fdcef
"2024-12-22T20:02:13.850000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_roman_choicesSeparator_orLower_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.133557
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.41, 'score': 0.41, 'score_name': 'accuracy', 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.51, 'score_ci_low': 0.32, 'score_ci_high': 0.51}
1
a100_80gb
ea2ed7a65ee798301a9eca8d7d0b5acb5a5e3942fb8d0266857af82e754e877f
"2024-12-22T20:02:25.374000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_space_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.46558
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.37, 'accuracy_ci_high': 0.56, 'score_ci_low': 0.37, 'score_ci_high': 0.56}
1
a100_80gb
c22b2e7b085f9372a17ef2cc968dec5b600fe958ccf5c688d758379a4f04da83
"2024-12-22T20:02:32.924000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_space_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.427692
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.47, 'score': 0.47, 'score_name': 'accuracy', 'accuracy_ci_low': 0.38, 'accuracy_ci_high': 0.58, 'score_ci_low': 0.38, 'score_ci_high': 0.58}
1
a100_80gb
08fc27b31c74a257403706b3d8eebbda845f7e10bd68c6779140682f44d7c773
"2024-12-22T20:02:42.543000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.560428
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.5, 'score': 0.5, 'score_name': 'accuracy', 'accuracy_ci_low': 0.400172543031032, 'accuracy_ci_high': 0.6, 'score_ci_low': 0.400172543031032, 'score_ci_high': 0.6}
1
a100_80gb
0762ddc31de5ed391ad38771547783eab1d1cf061dbcdc3b565a85acb2072f4a
"2024-12-22T20:02:50.070000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.473553
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.46, 'score': 0.46, 'score_name': 'accuracy', 'accuracy_ci_low': 0.36, 'accuracy_ci_high': 0.5616534102221169, 'score_ci_low': 0.36, 'score_ci_high': 0.5616534102221169}
1
a100_80gb
d9d56fd32965011711f9c0b2347c8d7e96b72405630d6163c248aca1d036841f
"2024-12-22T20:02:58.733000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.584232
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.41, 'score': 0.41, 'score_name': 'accuracy', 'accuracy_ci_low': 0.32, 'accuracy_ci_high': 0.5, 'score_ci_low': 0.32, 'score_ci_high': 0.5}
1
a100_80gb
1e1c3d04c6112eac710ffa2370df21771199b3f0a552acb8f5f57ee1174cb17e
"2024-12-22T20:03:07.348000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.569505
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.51, 'score': 0.51, 'score_name': 'accuracy', 'accuracy_ci_low': 0.41, 'accuracy_ci_high': 0.61, 'score_ci_low': 0.41, 'score_ci_high': 0.61}
1
a100_80gb
e2091166eb8d836dc8ab4922ba6da8e36318d3d15aa7febda48bfc8674df6368
"2024-12-22T20:03:14.992000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
5.557297
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.52, 'score': 0.52, 'score_name': 'accuracy', 'accuracy_ci_low': 0.42, 'accuracy_ci_high': 0.62, 'score_ci_low': 0.42, 'score_ci_high': 0.62}
1
a100_80gb
d7d23308c2658661802e33e865d16fe22f1d3ef9e7995068cb5bdac59eae2f6d
"2024-12-22T20:03:23.718000Z"
allenai/OLMoE-1B-7B-0924-Instruct
card=cards.mmlu.professional_medicine,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithoutTopic.enumerator_keyboard_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.641837
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "allenai/OLMoE-1B-7B-0924-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.49, 'score': 0.49, 'score_name': 'accuracy', 'accuracy_ci_low': 0.39, 'accuracy_ci_high': 0.59, 'score_ci_low': 0.39, 'score_ci_high': 0.59}
1
a100_80gb
6a3803792abae6cd52210fb11e4a38e5113fbcec2b8cccb305c84f1042698f19
"2024-12-22T20:01:47.413000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_newline_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.712016
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.57, 'score': 0.57, 'score_name': 'accuracy', 'accuracy_ci_low': 0.46, 'accuracy_ci_high': 0.66, 'score_ci_low': 0.46, 'score_ci_high': 0.66}
1
a100_80gb
6564fbda63fedfb1400278f3d5b737b5f1e9c8df39163412df000f1f53217e45
"2024-12-22T20:01:55.824000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
6.990763
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.64, 'score': 0.64, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_ci_low': 0.54, 'score_ci_high': 0.73}
1
a100_80gb
265ae3db1c9c08019ad6f653ff98ce7d34a68feed773d3893aaa9132139970ab
"2024-12-22T20:02:07.013000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_comma_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
9.813726
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.62, 'score': 0.62, 'score_name': 'accuracy', 'accuracy_ci_low': 0.52, 'accuracy_ci_high': 0.71, 'score_ci_low': 0.52, 'score_ci_high': 0.71}
1
a100_80gb
6af39bc7e6000e9b642f00503515e8d0d7fae66a0e07a585b22274a09e0d40be
"2024-12-22T20:02:16.617000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_False,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
8.264774
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.64, 'score': 0.64, 'score_name': 'accuracy', 'accuracy_ci_low': 0.54, 'accuracy_ci_high': 0.73, 'score_ci_low': 0.54, 'score_ci_high': 0.73}
1
a100_80gb
a20c9f30d37481cee0be73df4908258c9b50a1d1ac47c09bb8aa6d3677739f76
"2024-12-22T20:02:25.168000Z"
meta-llama/Meta-Llama-3-8B-Instruct
card=cards.mmlu.moral_disputes,demos_pool_size=100,num_demos=5,format=formats.chat_api,template=templates.huji_workshop.MultipleChoiceTemplatesInstructionsWithTopic.enumerator_roman_choicesSeparator_semicolon_shuffleChoices_True,system_prompt=system_prompts.empty,demos_taken_from=train,demos_removed_from_data=True,max_test_instances=100
None
half
7.115928
{"n": 1, "skip_special_tokens": false, "max_tokens": 64, "seed": 42, "top_p": null, "top_k": -1, "temperature": null, "logprobs": 5, "prompt_logprobs": 1}
{"model": "meta-llama/Meta-Llama-3-8B-Instruct", "seed": 0, "device": "auto", "max_num_batched_tokens": 4096, "gpu_memory_utilization": 0.7, "max_model_len": 4096, "tensor_parallel_size": 1}
VLLM
{"torch": "2.5.1", "transformers": "4.46.3", "evaluate": "0.4.0", "datasets": "2.21.0", "vllm": "0.6.4.post1", "unitxt": "1.15.9"}
{'num_of_instances': 100, 'accuracy': 0.6, 'score': 0.6, 'score_name': 'accuracy', 'accuracy_ci_low': 0.5, 'accuracy_ci_high': 0.69, 'score_ci_low': 0.5, 'score_ci_high': 0.69}
1
a100_80gb