eduagarcia
committed on
Commit
•
4e430f3
1
Parent(s):
218b9ea
Remove old version evals results (1.0.0)
Browse files — This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitignore +3 -0
- 01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json +0 -12
- 01-ai/Yi-34B_eval_request_False_bfloat16_Original.json +0 -12
- 01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json +1 -13
- 22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json +1 -13
- AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json +1 -13
- AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json +1 -13
- AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json +1 -13
- BAAI/Aquila-7B_eval_request_False_float16_Original.json +1 -13
- BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +0 -12
- BAAI/Aquila2-7B_eval_request_False_float16_Original.json +1 -13
- DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json +1 -13
- Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json +1 -13
- EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json +1 -13
- EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +1 -13
- EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +1 -13
- EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +1 -13
- EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json +1 -13
- EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-14m_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +1 -13
- EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +1 -13
- HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json +1 -13
- NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json +1 -13
- OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json +1 -13
- OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json +1 -13
- OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json +1 -13
- Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json +1 -13
- THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json +1 -13
- baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json +1 -13
- baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json +1 -13
- baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json +1 -13
- bigscience/bloom-3b_eval_request_False_float16_Original.json +1 -13
- deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json +0 -12
- deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json +1 -13
- deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json +1 -13
- facebook/opt-1.3b_eval_request_False_float16_Original.json +1 -13
- facebook/opt-125m_eval_request_False_float16_Original.json +1 -13
- facebook/opt-13b_eval_request_False_float16_Original.json +1 -13
- facebook/opt-2.7b_eval_request_False_float16_Original.json +1 -13
- facebook/opt-350m_eval_request_False_float16_Original.json +1 -13
- google/umt5-base_eval_request_False_bfloat16_Original.json +1 -13
- google/umt5-small_eval_request_False_bfloat16_Original.json +1 -13
- gpt2_eval_request_False_float16_Original.json +1 -13
- huggyllama/llama-30b_eval_request_False_float16_Original.json +0 -12
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
.ipynb_checkpoints
|
2 |
+
*/.ipynb_checkpoints/*
|
3 |
+
|
01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json
CHANGED
@@ -13,18 +13,6 @@
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-21T12-10-33.914064",
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.7186843946815955,
|
19 |
-
"bluex": 0.6634214186369958,
|
20 |
-
"oab_exams": 0.571753986332574,
|
21 |
-
"assin2_rte": 0.7858403678133732,
|
22 |
-
"assin2_sts": 0.5583683246827316,
|
23 |
-
"faquad_nli": 0.7800338409475465,
|
24 |
-
"sparrow_pt": 0.37142261482383704
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.6356464211312362,
|
27 |
-
"result_metrics_npm": 0.4967380342980293,
|
28 |
"error_msg": "CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
29 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3558, in from_pretrained\n 
dispatch_model(model, **device_map_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 445, in dispatch_model\n model.to(device)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 2556, in to\n return super().to(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1160, in to\n return self._apply(convert)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 810, in _apply\n module._apply(fn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 857, in _apply\n self._buffers[key] = fn(buf)\n ^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1158, in convert\n return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
|
30 |
}
|
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-21T12-10-33.914064",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"error_msg": "CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
17 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3558, in from_pretrained\n 
dispatch_model(model, **device_map_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 445, in dispatch_model\n model.to(device)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 2556, in to\n return super().to(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1160, in to\n return self._apply(convert)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 810, in _apply\n module._apply(fn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 857, in _apply\n self._buffers[key] = fn(buf)\n ^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1158, in convert\n return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
|
18 |
}
|
01-ai/Yi-34B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -13,18 +13,6 @@
|
|
13 |
"source": "script",
|
14 |
"job_id": 281,
|
15 |
"job_start_time": "2024-02-28T16-27-11.805205",
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.7214835549335199,
|
19 |
-
"bluex": 0.6842837273991655,
|
20 |
-
"oab_exams": 0.566742596810934,
|
21 |
-
"assin2_rte": 0.7095337812960236,
|
22 |
-
"assin2_sts": 0.6212032386293976,
|
23 |
-
"faquad_nli": 0.7969022005981341,
|
24 |
-
"sparrow_pt": 0.3916234220734354
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.6416817888200871,
|
27 |
-
"result_metrics_npm": 0.4958265468665359,
|
28 |
"error_msg": "CUDA out of memory. Tried to allocate 98.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 1027082 has 63.32 GiB memory in use. Process 2746447 has 9.96 GiB memory in use. Process 3815607 has 3.34 GiB memory in use. Process 3812636 has 2.60 GiB memory in use. Process 3812253 has 73.00 MiB memory in use. Of the allocated memory 62.82 GiB is allocated by PyTorch, and 396.00 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
29 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 98.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 1027082 has 63.32 GiB memory in use. Process 2746447 has 9.96 GiB memory in use. Process 3815607 has 3.34 GiB memory in use. Process 3812636 has 2.60 GiB memory in use. Process 3812253 has 73.00 MiB memory in use. Of the allocated memory 62.82 GiB is allocated by PyTorch, and 396.00 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
|
30 |
}
|
|
|
13 |
"source": "script",
|
14 |
"job_id": 281,
|
15 |
"job_start_time": "2024-02-28T16-27-11.805205",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"error_msg": "CUDA out of memory. Tried to allocate 98.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 1027082 has 63.32 GiB memory in use. Process 2746447 has 9.96 GiB memory in use. Process 3815607 has 3.34 GiB memory in use. Process 3812636 has 2.60 GiB memory in use. Process 3812253 has 73.00 MiB memory in use. Of the allocated memory 62.82 GiB is allocated by PyTorch, and 396.00 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
17 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 98.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 96.19 MiB is free. Process 1027082 has 63.32 GiB memory in use. Process 2746447 has 9.96 GiB memory in use. Process 3815607 has 3.34 GiB memory in use. Process 3812636 has 2.60 GiB memory in use. Process 3812253 has 73.00 MiB memory in use. Of the allocated memory 62.82 GiB is allocated by PyTorch, and 396.00 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
|
18 |
}
|
01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 121,
|
15 |
-
"job_start_time": "2024-02-09T07-10-30.923872"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.5703289013296011,
|
19 |
-
"bluex": 0.49791376912378305,
|
20 |
-
"oab_exams": 0.4419134396355353,
|
21 |
-
"assin2_rte": 0.7574463542222696,
|
22 |
-
"assin2_sts": 0.3059276997249324,
|
23 |
-
"faquad_nli": 0.4471267110923455,
|
24 |
-
"sparrow_pt": 0.31180320008181783
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.47606572503004063,
|
27 |
-
"result_metrics_npm": 0.2713265790562599
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 121,
|
15 |
+
"job_start_time": "2024-02-09T07-10-30.923872"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 213,
|
15 |
-
"job_start_time": "2024-02-16T13-19-48.657595"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2218334499650105,
|
19 |
-
"bluex": 0.24895688456189152,
|
20 |
-
"oab_exams": 0.2783599088838269,
|
21 |
-
"assin2_rte": 0.6996743545023234,
|
22 |
-
"assin2_sts": 0.007646758869425693,
|
23 |
-
"faquad_nli": 0.17721518987341772,
|
24 |
-
"sparrow_pt": 0.26278701409945776
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.27092479439362194,
|
27 |
-
"result_metrics_npm": -0.00787880781904908
|
28 |
}
|
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 213,
|
15 |
+
"job_start_time": "2024-02-16T13-19-48.657595"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 102,
|
15 |
-
"job_start_time": "2024-02-08T16-32-05.080295"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.19454163750874737,
|
19 |
-
"bluex": 0.21835883171070933,
|
20 |
-
"oab_exams": 0.24009111617312073,
|
21 |
-
"assin2_rte": 0.5097556100727492,
|
22 |
-
"assin2_sts": 0.04176979032426292,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.2261793330522781
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2671930701793801,
|
27 |
-
"result_metrics_npm": -0.013569520485616151
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 102,
|
15 |
+
"job_start_time": "2024-02-08T16-32-05.080295"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 101,
|
15 |
-
"job_start_time": "2024-02-08T15-37-30.575084"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2344296710986704,
|
19 |
-
"bluex": 0.2086230876216968,
|
20 |
-
"oab_exams": 0.23006833712984054,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.015469193291869708,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.21165120316324165
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2390328568646351,
|
27 |
-
"result_metrics_npm": -0.06725785488427993
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 101,
|
15 |
+
"job_start_time": "2024-02-08T15-37-30.575084"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 100,
|
15 |
-
"job_start_time": "2024-02-08T14-27-00.255851"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20363890832750176,
|
19 |
-
"bluex": 0.24478442280945759,
|
20 |
-
"oab_exams": 0.23006833712984054,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.018735799862626468,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.19168746791328134
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.23741477739854774,
|
27 |
-
"result_metrics_npm": -0.06966919981551431
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 100,
|
15 |
+
"job_start_time": "2024-02-08T14-27-00.255851"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
BAAI/Aquila-7B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 182,
|
15 |
-
"job_start_time": "2024-02-13T21-53-04.495865"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.3002099370188943,
|
19 |
-
"bluex": 0.29763560500695413,
|
20 |
-
"oab_exams": 0.2988610478359909,
|
21 |
-
"assin2_rte": 0.48191507425133256,
|
22 |
-
"assin2_sts": 0.2947702523754177,
|
23 |
-
"faquad_nli": 0.47700641417918904,
|
24 |
-
"sparrow_pt": 0.2413705187861493
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.341681264207704,
|
27 |
-
"result_metrics_npm": 0.07218268472291701
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 182,
|
15 |
+
"job_start_time": "2024-02-13T21-53-04.495865"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -13,18 +13,6 @@
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-20T17-38-24.783283",
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.543037088873338,
|
19 |
-
"bluex": 0.4297635605006954,
|
20 |
-
"oab_exams": 0.3981776765375854,
|
21 |
-
"assin2_rte": 0.7334164728077959,
|
22 |
-
"assin2_sts": 0.4958376987221018,
|
23 |
-
"faquad_nli": 0.45449901481427424,
|
24 |
-
"sparrow_pt": 0.3121381894628628
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.48098138595980766,
|
27 |
-
"result_metrics_npm": 0.26782837303937407,
|
28 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
29 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
30 |
}
|
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-20T17-38-24.783283",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
17 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
18 |
}
|
BAAI/Aquila2-7B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 183,
|
15 |
-
"job_start_time": "2024-02-13T23-17-21.976390"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2470258922323303,
|
19 |
-
"bluex": 0.20445062586926285,
|
20 |
-
"oab_exams": 0.2874715261958998,
|
21 |
-
"assin2_rte": 0.5642307632772472,
|
22 |
-
"assin2_sts": 0.32002670516594683,
|
23 |
-
"faquad_nli": 0.46611451506525536,
|
24 |
-
"sparrow_pt": 0.2587475557591692
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.3354382262235873,
|
27 |
-
"result_metrics_npm": 0.07112689461870844
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 183,
|
15 |
+
"job_start_time": "2024-02-13T23-17-21.976390"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 215,
|
15 |
-
"job_start_time": "2024-02-16T14-47-24.223296"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.19244226731980407,
|
19 |
-
"bluex": 0.24200278164116829,
|
20 |
-
"oab_exams": 0.24646924829157174,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.004714541859212106,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.23356086755006158
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.24173974462984918,
|
27 |
-
"result_metrics_npm": -0.0625754139616488
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 215,
|
15 |
+
"job_start_time": "2024-02-16T14-47-24.223296"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🔶 : fine-tuned",
|
13 |
"source": "script",
|
14 |
"job_id": 39,
|
15 |
-
"job_start_time": "2024-02-07T06-30-45.967063"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.529741077676697,
|
19 |
-
"bluex": 0.41168289290681503,
|
20 |
-
"oab_exams": 0.3990888382687927,
|
21 |
-
"assin2_rte": 0.7470003950561543,
|
22 |
-
"assin2_sts": 0.5145659724171266,
|
23 |
-
"faquad_nli": 0.7851487912814374,
|
24 |
-
"sparrow_pt": 0.3532299102024751
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.5343511254013569,
|
27 |
-
"result_metrics_npm": 0.3640082745590046
|
28 |
}
|
|
|
12 |
"model_type": "🔶 : fine-tuned",
|
13 |
"source": "script",
|
14 |
"job_id": 39,
|
15 |
+
"job_start_time": "2024-02-07T06-30-45.967063"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 77,
|
15 |
-
"job_start_time": "2024-02-07T15-56-06.233510"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.21903428971308608,
|
19 |
-
"bluex": 0.19749652294853964,
|
20 |
-
"oab_exams": 0.24419134396355352,
|
21 |
-
"assin2_rte": 0.3512359875045344,
|
22 |
-
"assin2_sts": 0.09337260889279221,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.19440498214886542
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.24848441536930918,
|
27 |
-
"result_metrics_npm": -0.05661848481231452
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 77,
|
15 |
+
"job_start_time": "2024-02-07T15-56-06.233510"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 201,
|
15 |
-
"job_start_time": "2024-02-14T20-42-35.885763"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.19034289713086075,
|
19 |
-
"bluex": 0.20445062586926285,
|
20 |
-
"oab_exams": 0.23462414578587698,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.0070782273697892,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.24161040553185745
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.23587068677639625,
|
27 |
-
"result_metrics_npm": -0.07015978644825559
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 201,
|
15 |
+
"job_start_time": "2024-02-14T20-42-35.885763"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 200,
|
15 |
-
"job_start_time": "2024-02-14T20-18-41.235422"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.18964310706787962,
|
19 |
-
"bluex": 0.17524339360222532,
|
20 |
-
"oab_exams": 0.010933940774487472,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.10713084465982439,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.171051914297268
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.20385595802125872,
|
27 |
-
"result_metrics_npm": -0.11828056424440532
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 200,
|
15 |
+
"job_start_time": "2024-02-14T20-18-41.235422"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 202,
|
15 |
-
"job_start_time": "2024-02-14T21-23-17.848186"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.1952414275717285,
|
19 |
-
"bluex": 0.19193324061196107,
|
20 |
-
"oab_exams": 0.23234624145785876,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.006388450736995319,
|
23 |
-
"faquad_nli": 0.42535491344201753,
|
24 |
-
"sparrow_pt": 0.26425572526209995
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2355504760594278,
|
27 |
-
"result_metrics_npm": -0.0712914298859
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 202,
|
15 |
+
"job_start_time": "2024-02-14T21-23-17.848186"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 78,
|
15 |
-
"job_start_time": "2024-02-07T16-49-06.259210"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20083974807557733,
|
19 |
-
"bluex": 0.22531293463143254,
|
20 |
-
"oab_exams": 0.2560364464692483,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.1133653560992191,
|
23 |
-
"faquad_nli": 0.4718588278919615,
|
24 |
-
"sparrow_pt": 0.16031340502399352
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2515800073606808,
|
27 |
-
"result_metrics_npm": -0.053194066948652974
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 78,
|
15 |
+
"job_start_time": "2024-02-07T16-49-06.259210"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 203,
|
15 |
-
"job_start_time": "2024-02-14T22-19-56.698691"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.06717984604618614,
|
19 |
-
"bluex": 0.05285118219749652,
|
20 |
-
"oab_exams": 0.2355353075170843,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.019117347817184663,
|
23 |
-
"faquad_nli": 0.33515801835418463,
|
24 |
-
"sparrow_pt": 0.2154843220889051
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.17980847962205354,
|
27 |
-
"result_metrics_npm": -0.15093987610503895
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 203,
|
15 |
+
"job_start_time": "2024-02-14T22-19-56.698691"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 199,
|
15 |
-
"job_start_time": "2024-02-14T18-51-32.259622"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.1973407977606718,
|
19 |
-
"bluex": 0.22809457579972184,
|
20 |
-
"oab_exams": 0.23599088838268792,
|
21 |
-
"assin2_rte": 0.33424046164366045,
|
22 |
-
"assin2_sts": 0.062042189457896954,
|
23 |
-
"faquad_nli": 0.43771626297577854,
|
24 |
-
"sparrow_pt": 0.23859602003664446
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.24771731372243738,
|
27 |
-
"result_metrics_npm": -0.05730030865088216
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 199,
|
15 |
+
"job_start_time": "2024-02-14T18-51-32.259622"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-14m_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 192,
|
15 |
-
"job_start_time": "2024-02-14T15-03-57.922027"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.0,
|
19 |
-
"bluex": 0.0,
|
20 |
-
"oab_exams": 0.0,
|
21 |
-
"assin2_rte": 0.0,
|
22 |
-
"assin2_sts": NaN,
|
23 |
-
"faquad_nli": 0.0,
|
24 |
-
"sparrow_pt": 0.0
|
25 |
-
},
|
26 |
-
"result_metrics_average": NaN,
|
27 |
-
"result_metrics_npm": NaN
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 192,
|
15 |
+
"job_start_time": "2024-02-14T15-03-57.922027"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 194,
|
15 |
-
"job_start_time": "2024-02-14T15-27-43.932506"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.0,
|
19 |
-
"bluex": 0.0,
|
20 |
-
"oab_exams": 0.0,
|
21 |
-
"assin2_rte": 0.0,
|
22 |
-
"assin2_sts": NaN,
|
23 |
-
"faquad_nli": 0.0,
|
24 |
-
"sparrow_pt": 0.0
|
25 |
-
},
|
26 |
-
"result_metrics_average": NaN,
|
27 |
-
"result_metrics_npm": NaN
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 194,
|
15 |
+
"job_start_time": "2024-02-14T15-27-43.932506"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 196,
|
15 |
-
"job_start_time": "2024-02-14T16-20-18.432424"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2106368089573128,
|
19 |
-
"bluex": 0.18776077885952713,
|
20 |
-
"oab_exams": 0.24145785876993167,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.015287790416728658,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.1527947965732252
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.225846648474836,
|
27 |
-
"result_metrics_npm": -0.08513497266217881
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 196,
|
15 |
+
"job_start_time": "2024-02-14T16-20-18.432424"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 197,
|
15 |
-
"job_start_time": "2024-02-14T16-49-11.216028"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20433869839048285,
|
19 |
-
"bluex": 0.23504867872044508,
|
20 |
-
"oab_exams": 0.23280182232346242,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.032750005063908794,
|
23 |
-
"faquad_nli": 0.17721518987341772,
|
24 |
-
"sparrow_pt": 0.2216627443828188
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.20530721029826696,
|
27 |
-
"result_metrics_npm": -0.13166019646895838
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 197,
|
15 |
+
"job_start_time": "2024-02-14T16-49-11.216028"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 195,
|
15 |
-
"job_start_time": "2024-02-14T15-42-42.785588"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.1980405878236529,
|
19 |
-
"bluex": 0.1571627260083449,
|
20 |
-
"oab_exams": 0.23599088838268792,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.0393820346422743,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.16268055200707188
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.22374932780159407,
|
27 |
-
"result_metrics_npm": -0.08862059215156827
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 195,
|
15 |
+
"job_start_time": "2024-02-14T15-42-42.785588"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 198,
|
15 |
-
"job_start_time": "2024-02-14T17-44-29.767283"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20503848845346395,
|
19 |
-
"bluex": 0.21835883171070933,
|
20 |
-
"oab_exams": 0.2669703872437358,
|
21 |
-
"assin2_rte": 0.33514630774633175,
|
22 |
-
"assin2_sts": 0.015459703136524651,
|
23 |
-
"faquad_nli": 0.5032594590990455,
|
24 |
-
"sparrow_pt": 0.16024035452533222
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.24349621884502046,
|
27 |
-
"result_metrics_npm": -0.056880866402642415
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 198,
|
15 |
+
"job_start_time": "2024-02-14T17-44-29.767283"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 193,
|
15 |
-
"job_start_time": "2024-02-14T15-15-51.530711"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.0,
|
19 |
-
"bluex": 0.0,
|
20 |
-
"oab_exams": 0.0,
|
21 |
-
"assin2_rte": 0.0,
|
22 |
-
"assin2_sts": NaN,
|
23 |
-
"faquad_nli": 0.0,
|
24 |
-
"sparrow_pt": 0.0
|
25 |
-
},
|
26 |
-
"result_metrics_average": NaN,
|
27 |
-
"result_metrics_npm": NaN
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 193,
|
15 |
+
"job_start_time": "2024-02-14T15-15-51.530711"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 93,
|
15 |
-
"job_start_time": "2024-02-08T01-23-52.798469"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.16235129461161651,
|
19 |
-
"bluex": 0.11265646731571627,
|
20 |
-
"oab_exams": 0.008656036446469248,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.04753851053349196,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.17064375718073424
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.18211922454787924,
|
27 |
-
"result_metrics_npm": -0.14372064652620473
|
28 |
}
|
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 93,
|
15 |
+
"job_start_time": "2024-02-08T01-23-52.798469"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 191,
|
15 |
-
"job_start_time": "2024-02-14T13-14-20.585602"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2232330300909727,
|
19 |
-
"bluex": 0.1808066759388039,
|
20 |
-
"oab_exams": 0.269248291571754,
|
21 |
-
"assin2_rte": 0.33965642015109887,
|
22 |
-
"assin2_sts": 0.14105865232500697,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.14936340466616818
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2490030924510854,
|
27 |
-
"result_metrics_npm": -0.05979554926598268
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 191,
|
15 |
+
"job_start_time": "2024-02-14T13-14-20.585602"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 105,
|
15 |
-
"job_start_time": "2024-02-08T19-59-33.207703"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.21203638908327502,
|
19 |
-
"bluex": 0.24617524339360222,
|
20 |
-
"oab_exams": 0.24738041002277905,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.043451880521346076,
|
23 |
-
"faquad_nli": 0.4471267110923455,
|
24 |
-
"sparrow_pt": 0.2339910645108228
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.25192786170821485,
|
27 |
-
"result_metrics_npm": -0.05055064976935545
|
28 |
}
|
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 105,
|
15 |
+
"job_start_time": "2024-02-08T19-59-33.207703"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 104,
|
15 |
-
"job_start_time": "2024-02-08T18-48-18.223603"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.5864240727781665,
|
19 |
-
"bluex": 0.4603616133518776,
|
20 |
-
"oab_exams": 0.41275626423690204,
|
21 |
-
"assin2_rte": 0.3930811717227137,
|
22 |
-
"assin2_sts": 0.5428546315437224,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.26612905967383516
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.4430374265315729,
|
27 |
-
"result_metrics_npm": 0.18025007075022711
|
28 |
}
|
|
|
12 |
"model_type": "🆎 : language adapted models (FP, FT, ...)",
|
13 |
"source": "script",
|
14 |
"job_id": 104,
|
15 |
+
"job_start_time": "2024-02-08T18-48-18.223603"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 179,
|
15 |
-
"job_start_time": "2024-02-13T19-40-35.556453"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.6627011896431071,
|
19 |
-
"bluex": 0.5702364394993046,
|
20 |
-
"oab_exams": 0.47289293849658315,
|
21 |
-
"assin2_rte": 0.6307200282684031,
|
22 |
-
"assin2_sts": 0.5853055022861218,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.32190683171450984
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.5262025860459747,
|
27 |
-
"result_metrics_npm": 0.3108427262693092
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 179,
|
15 |
+
"job_start_time": "2024-02-13T19-40-35.556453"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
-
"job_start_time": "2024-02-13T13-13-14.633268"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.6242127361791463,
|
19 |
-
"bluex": 0.4937413073713491,
|
20 |
-
"oab_exams": 0.4104783599088838,
|
21 |
-
"assin2_rte": 0.6640797445169774,
|
22 |
-
"assin2_sts": 0.109966284351211,
|
23 |
-
"faquad_nli": 0.5733609182848322,
|
24 |
-
"sparrow_pt": 0.337302679211532
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.45902028997484745,
|
27 |
-
"result_metrics_npm": 0.25783808569270167
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
+
"job_start_time": "2024-02-13T13-13-14.633268"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 184,
|
15 |
-
"job_start_time": "2024-02-14T00-29-11.353268"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.6081175647305809,
|
19 |
-
"bluex": 0.49235048678720444,
|
20 |
-
"oab_exams": 0.40501138952164006,
|
21 |
-
"assin2_rte": 0.6004172619619615,
|
22 |
-
"assin2_sts": 0.7205871294107395,
|
23 |
-
"faquad_nli": 0.8115869651008071,
|
24 |
-
"sparrow_pt": 0.37512447333212245
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.5733136101207224,
|
27 |
-
"result_metrics_npm": 0.39293186808439523
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 184,
|
15 |
+
"job_start_time": "2024-02-14T00-29-11.353268"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
-
"job_start_time": "2024-02-13T16-00-29.790614"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20573827851644508,
|
19 |
-
"bluex": 0.20584144645340752,
|
20 |
-
"oab_exams": 0.23690205011389523,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.16664859797501688,
|
23 |
-
"faquad_nli": 0.41377140792945644,
|
24 |
-
"sparrow_pt": 0.25291474912683454
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.25930712334976985,
|
27 |
-
"result_metrics_npm": -0.048431144903189324
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 176,
|
15 |
+
"job_start_time": "2024-02-13T16-00-29.790614"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 178,
|
15 |
-
"job_start_time": "2024-02-13T18-20-08.619292"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.37788663400979705,
|
19 |
-
"bluex": 0.3129346314325452,
|
20 |
-
"oab_exams": 0.34943052391799545,
|
21 |
-
"assin2_rte": 0.5360021760870939,
|
22 |
-
"assin2_sts": 0.1052548005298192,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.2957930022375439
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.34527956294694107,
|
27 |
-
"result_metrics_npm": 0.08810495779838001
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 178,
|
15 |
+
"job_start_time": "2024-02-13T18-20-08.619292"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 177,
|
15 |
-
"job_start_time": "2024-02-13T18-20-08.619747"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.4989503149055283,
|
19 |
-
"bluex": 0.4019471488178025,
|
20 |
-
"oab_exams": 0.371753986332574,
|
21 |
-
"assin2_rte": 0.5418181818181818,
|
22 |
-
"assin2_sts": 0.18098885021409325,
|
23 |
-
"faquad_nli": 0.45391686964720673,
|
24 |
-
"sparrow_pt": 0.24948648514446742
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.3855516909828363,
|
27 |
-
"result_metrics_npm": 0.137226170969913
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 177,
|
15 |
+
"job_start_time": "2024-02-13T18-20-08.619747"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
bigscience/bloom-3b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 27,
|
15 |
-
"job_start_time": "2024-02-07T01-42-50.987972"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.2001399580125962,
|
19 |
-
"bluex": 0.18776077885952713,
|
20 |
-
"oab_exams": 0.25740318906605925,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.007784203462752267,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.16983005721811856
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2279866703380257,
|
27 |
-
"result_metrics_npm": -0.08159221837126322
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 27,
|
15 |
+
"job_start_time": "2024-02-07T01-42-50.987972"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -13,18 +13,6 @@
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-20T17-31-20.642276",
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.7333799860041987,
|
19 |
-
"bluex": 0.6244784422809457,
|
20 |
-
"oab_exams": 0.532118451025057,
|
21 |
-
"assin2_rte": 0.6950181531654831,
|
22 |
-
"assin2_sts": 0.5337084187331966,
|
23 |
-
"faquad_nli": 0.6754332808144468,
|
24 |
-
"sparrow_pt": 0.36690087459679466
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.5944339438028746,
|
27 |
-
"result_metrics_npm": 0.4267772506411939,
|
28 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
29 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
30 |
}
|
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-20T17-31-20.642276",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
17 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
18 |
}
|
deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 180,
|
15 |
-
"job_start_time": "2024-02-13T20-08-54.493662"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.4086773967809657,
|
19 |
-
"bluex": 0.3908205841446453,
|
20 |
-
"oab_exams": 0.35353075170842824,
|
21 |
-
"assin2_rte": 0.36087257670772555,
|
22 |
-
"assin2_sts": 0.3246306803889829,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.3046492630055339
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.3689766321642964,
|
27 |
-
"result_metrics_npm": 0.0918381264358727
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 180,
|
15 |
+
"job_start_time": "2024-02-13T20-08-54.493662"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 181,
|
15 |
-
"job_start_time": "2024-02-13T21-07-50.712249"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.36599020293911827,
|
19 |
-
"bluex": 0.3087621696801113,
|
20 |
-
"oab_exams": 0.3316628701594533,
|
21 |
-
"assin2_rte": 0.3669298673297707,
|
22 |
-
"assin2_sts": 0.2023483280938947,
|
23 |
-
"faquad_nli": 0.45391686964720673,
|
24 |
-
"sparrow_pt": 0.24294255630381092
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.32465040916476656,
|
27 |
-
"result_metrics_npm": 0.0404271761205112
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 181,
|
15 |
+
"job_start_time": "2024-02-13T21-07-50.712249"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
facebook/opt-1.3b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 81,
|
15 |
-
"job_start_time": "2024-02-07T17-55-47.448141"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20643806857942618,
|
19 |
-
"bluex": 0.20305980528511822,
|
20 |
-
"oab_exams": 0.2296127562642369,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.014294092039832406,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.23645916821985138
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.2375503423050845,
|
27 |
-
"result_metrics_npm": -0.06850955080705787
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 81,
|
15 |
+
"job_start_time": "2024-02-07T17-55-47.448141"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
facebook/opt-125m_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 79,
|
15 |
-
"job_start_time": "2024-02-07T17-00-47.502507"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20363890832750176,
|
19 |
-
"bluex": 0.18219749652294853,
|
20 |
-
"oab_exams": 0.0009111617312072893,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.045915967954967385,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.16100858918916303
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.19523723278184493,
|
27 |
-
"result_metrics_npm": -0.12718865648894817
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 79,
|
15 |
+
"job_start_time": "2024-02-07T17-00-47.502507"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
facebook/opt-13b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 84,
|
15 |
-
"job_start_time": "2024-02-07T19-29-22.779324"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.18964310706787962,
|
19 |
-
"bluex": 0.20584144645340752,
|
20 |
-
"oab_exams": 0.23416856492027335,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.013938417309944717,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.18566533417845207
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.22889219652529766,
|
27 |
-
"result_metrics_npm": -0.08047151398067605
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 84,
|
15 |
+
"job_start_time": "2024-02-07T19-29-22.779324"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
facebook/opt-2.7b_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 82,
|
15 |
-
"job_start_time": "2024-02-07T18-37-10.420552"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.19384184744576627,
|
19 |
-
"bluex": 0.19193324061196107,
|
20 |
-
"oab_exams": 0.2296127562642369,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.016315511418924157,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.2232283026501764
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.23256002344831303,
|
27 |
-
"result_metrics_npm": -0.07520210793362635
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 82,
|
15 |
+
"job_start_time": "2024-02-07T18-37-10.420552"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
facebook/opt-350m_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 80,
|
15 |
-
"job_start_time": "2024-02-07T17-20-17.729922"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.20363890832750176,
|
19 |
-
"bluex": 0.13351877607788595,
|
20 |
-
"oab_exams": 0.00683371298405467,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.018818660102831564,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.2158151928608465
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.1930876794428924,
|
27 |
-
"result_metrics_npm": -0.12779893672772347
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 80,
|
15 |
+
"job_start_time": "2024-02-07T17-20-17.729922"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
google/umt5-base_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 206,
|
15 |
-
"job_start_time": "2024-02-15T05-17-17.944550"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.02939118264520644,
|
19 |
-
"bluex": 0.022253129346314324,
|
20 |
-
"oab_exams": 0.08337129840546698,
|
21 |
-
"assin2_rte": 0.008049597487606372,
|
22 |
-
"assin2_sts": 0.11702145458274152,
|
23 |
-
"faquad_nli": 0.005177993527508091,
|
24 |
-
"sparrow_pt": 0.06908735918169881
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.04776457359664892,
|
27 |
-
"result_metrics_npm": -0.38758303220535195
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 206,
|
15 |
+
"job_start_time": "2024-02-15T05-17-17.944550"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
google/umt5-small_eval_request_False_bfloat16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 205,
|
15 |
-
"job_start_time": "2024-02-15T02-03-26.881523"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.09447165850244926,
|
19 |
-
"bluex": 0.02642559109874826,
|
20 |
-
"oab_exams": 0.1489749430523918,
|
21 |
-
"assin2_rte": 0.19257980378257433,
|
22 |
-
"assin2_sts": 0.043458369255268595,
|
23 |
-
"faquad_nli": 0.014741228752745068,
|
24 |
-
"sparrow_pt": 0.011063279665311027
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.07595926772992691,
|
27 |
-
"result_metrics_npm": -0.32972887684471675
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 205,
|
15 |
+
"job_start_time": "2024-02-15T02-03-26.881523"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
gpt2_eval_request_False_float16_Original.json
CHANGED
@@ -12,17 +12,5 @@
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 187,
|
15 |
-
"job_start_time": "2024-02-14T01-24-06.058943"
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.1364590622813156,
|
19 |
-
"bluex": 0.1933240611961057,
|
20 |
-
"oab_exams": 0.21685649202733484,
|
21 |
-
"assin2_rte": 0.3333333333333333,
|
22 |
-
"assin2_sts": 0.039726178664430394,
|
23 |
-
"faquad_nli": 0.4396551724137931,
|
24 |
-
"sparrow_pt": 0.1782807939159107
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.21966215626174623,
|
27 |
-
"result_metrics_npm": -0.09338594948484692
|
28 |
}
|
|
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
14 |
"job_id": 187,
|
15 |
+
"job_start_time": "2024-02-14T01-24-06.058943"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
huggyllama/llama-30b_eval_request_False_float16_Original.json
CHANGED
@@ -13,18 +13,6 @@
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-21T12-12-04.504997",
|
16 |
-
"eval_version": "1.0.0",
|
17 |
-
"result_metrics": {
|
18 |
-
"enem_challenge": 0.6186144156752974,
|
19 |
-
"bluex": 0.5034770514603616,
|
20 |
-
"oab_exams": 0.4214123006833713,
|
21 |
-
"assin2_rte": 0.6994823029869264,
|
22 |
-
"assin2_sts": 0.521939545377829,
|
23 |
-
"faquad_nli": 0.5100755946706865,
|
24 |
-
"sparrow_pt": 0.32914824721309877
|
25 |
-
},
|
26 |
-
"result_metrics_average": 0.514878494009653,
|
27 |
-
"result_metrics_npm": 0.3114125756635208,
|
28 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
29 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
30 |
}
|
|
|
13 |
"source": "script",
|
14 |
"job_id": 253,
|
15 |
"job_start_time": "2024-02-21T12-12-04.504997",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"error_msg": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!",
|
17 |
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", 
line 1524, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2361, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 165, in new_forward\n output = module._old_forward(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1148, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 964, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1055, in _update_causal_mask\n padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)\n 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n"
|
18 |
}
|