Retry 28 FAILED models
- AbacusResearch/Jallabi-34B_eval_request_False_bfloat16_Original.json +2 -4
- GritLM/GritLM-8x7B-KTO_eval_request_False_bfloat16_Original.json +2 -4
- GritLM/GritLM-8x7B_eval_request_False_bfloat16_Original.json +2 -4
- OpenBuddy/openbuddy-mixtral-7bx8-v18.1-32k_eval_request_False_bfloat16_Original.json +2 -4
- OpenBuddy/openbuddy-yi1.5-34b-v21.3-32k_eval_request_False_bfloat16_Original.json +2 -4
- OpenBuddy/openbuddy-yi1.5-34b-v21.6-32k-fp16_eval_request_False_float16_Original.json +2 -4
- Ramikan-BR/Qwen2-0.5B-v9_eval_request_51d076f_False_float16_Original.json +2 -4
- SenseLLM/ReflectionCoder-CL-34B_eval_request_False_bfloat16_Original.json +2 -4
- SenseLLM/ReflectionCoder-DS-33B_eval_request_False_bfloat16_Original.json +2 -4
- TIGER-Lab/MAmmoTH2-8x7B-Plus_eval_request_False_bfloat16_Original.json +2 -4
- abacusai/Smaug-Mixtral-v0.1_eval_request_False_bfloat16_Original.json +2 -4
- anthracite-org/magnum-v3-34b_eval_request_False_bfloat16_Original.json +2 -4
- brucethemoose/Yi-34B-200K-DARE-merge-v7_eval_request_False_bfloat16_Original.json +2 -4
- byroneverson/Yi-1.5-34B-Chat-abliterated_eval_request_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_626c825_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_9ad9d14_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_False_bfloat16_Original.json +2 -4
- cognitivecomputations/dolphin-2.9.3-Yi-1.5-34B-32k_eval_request_False_bfloat16_Original.json +2 -4
- deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_bfloat16_Original.json +2 -4
- dnhkng/RYS-Gemma-2-27b-it_eval_request_False_bfloat16_Original.json +2 -4
- hotmailuser/RombosBeagle-v2beta-MGS-32B_eval_request_False_bfloat16_Original.json +2 -4
- microsoft/GRIN-MoE_eval_request_False_bfloat16_Original.json +2 -4
- microsoft/Phi-3.5-MoE-instruct_eval_request_False_bfloat16_Original.json +2 -4
- oobabooga/CodeBooga-34B-v0.1_eval_request_False_float16_Original.json +2 -4
- vilm/Quyen-Pro-Max-v0.1_eval_request_1366c92_False_bfloat16_Original.json +2 -4
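The change is identical in all 28 request files: the status field is flipped from FAILED back to RERUN, and the error_msg and traceback fields recorded by the failed run are dropped (the trailing comma on job_start_time goes with them, since it becomes the last field). A minimal sketch of such a reset pass is shown below; the script is illustrative, not part of this dataset, but it uses only the field names visible in the diffs that follow.

import json
from pathlib import Path

def reset_failed_request(path: Path) -> None:
    """Flip a FAILED eval request back to RERUN and drop its stale error fields."""
    request = json.loads(path.read_text())
    if request.get("status") != "FAILED":
        return  # only touch requests whose last run failed
    request["status"] = "RERUN"
    request.pop("error_msg", None)   # stored CUDA OOM message from the failed run
    request.pop("traceback", None)   # stored stack trace from the failed run
    path.write_text(json.dumps(request, indent=4, ensure_ascii=False) + "\n")

# Hypothetical invocation over a local checkout of the requests dataset:
for path in Path(".").glob("*/*_eval_request_*.json"):
    reset_failed_request(path)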
AbacusResearch/Jallabi-34B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-05T13:41:51Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 1285,
-    "job_start_time": "2024-12-02T14-28-38.149093",
-    "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 147.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 10.01 GiB memory in use. Process 2580743 has 8.34 GiB memory in use. Process 2617830 has 57.75 GiB memory in use. Of the allocated memory 9.61 GiB is allocated by PyTorch, and 1.77 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 147.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 10.01 GiB memory in use. Process 2580743 has 8.34 GiB memory in use. Process 2617830 has 57.75 GiB memory in use. Of the allocated memory 9.61 GiB is allocated by PyTorch, and 1.77 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T14-28-38.149093"
 }
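Every error_msg removed by this commit is the same failure mode: transformers tried to materialize weights on GPU 0 while other eval processes already held most of its 79.35 GiB, so from_pretrained died partway through loading. A RERUN only succeeds if the retry lands when enough VRAM is actually free; below is a hypothetical pre-flight check along those lines. torch.cuda.mem_get_info is a real PyTorch API, but the threshold is an illustrative guess for a bf16 34B model, and nothing here is taken from the eval bot itself.

import torch

def gpu_has_headroom(required_gib: float, device: int = 0) -> bool:
    """True if `device` currently has at least `required_gib` of free VRAM."""
    free_bytes, _total_bytes = torch.cuda.mem_get_info(device)
    return free_bytes / 2**30 >= required_gib

# Illustrative guard before re-dispatching a RERUN request: a 34B model in
# bfloat16 needs roughly 64 GiB for weights alone, plus activation headroom.
if gpu_has_headroom(70.0):
    print("enough free VRAM on GPU 0; safe to start the eval job")
else:
    print("GPU 0 is busy; leave the request queued")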
GritLM/GritLM-8x7B-KTO_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-14T16:48:05Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1341,
-    "job_start_time": "2024-12-08T02-37-22.598684",
-    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 559, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-08T02-37-22.598684"
 }
GritLM/GritLM-8x7B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-05-30T22:02:19Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 1276,
-    "job_start_time": "2024-12-02T07-22-52.676206",
-    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 71.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.95 GiB memory in use. Of the allocated memory 2.54 GiB is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 559, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 71.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.95 GiB memory in use. Of the allocated memory 2.54 GiB is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T07-22-52.676206"
 }
OpenBuddy/openbuddy-mixtral-7bx8-v18.1-32k_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-29T02:48:44Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1294,
-    "job_start_time": "2024-12-03T01-58-37.340317",
-    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 94.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.16 GiB memory in use. Of the allocated memory 75.54 GiB is allocated by PyTorch, and 125.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 94.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.16 GiB memory in use. Of the allocated memory 75.54 GiB is allocated by PyTorch, and 125.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-03T01-58-37.340317"
 }
OpenBuddy/openbuddy-yi1.5-34b-v21.3-32k_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-17T04:52:48Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1280,
-    "job_start_time": "2024-12-02T10-04-11.684945",
-    "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 153.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.87 GiB memory in use. Of the allocated memory 2.46 GiB is allocated by PyTorch, and 3.46 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 153.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.87 GiB memory in use. Of the allocated memory 2.46 GiB is allocated by PyTorch, and 3.46 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T10-04-11.684945"
 }
OpenBuddy/openbuddy-yi1.5-34b-v21.6-32k-fp16_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-05T13:01:13Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1284,
-    "job_start_time": "2024-12-02T13-18-11.633324",
-    "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 11.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 15.81 GiB memory in use. Process 2580743 has 8.34 GiB memory in use. Process 2617830 has 52.08 GiB memory in use. Of the allocated memory 15.41 GiB is allocated by PyTorch, and 3.10 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 11.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 15.81 GiB memory in use. Process 2580743 has 8.34 GiB memory in use. Process 2617830 has 52.08 GiB memory in use. Of the allocated memory 15.41 GiB is allocated by PyTorch, and 3.10 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T13-18-11.633324"
 }
Ramikan-BR/Qwen2-0.5B-v9_eval_request_51d076f_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-07-22T01:06:09Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 925,
-    "job_start_time": "2024-07-22T01-12-41.399956",
-    "error_msg": "CUDA out of memory. Tried to allocate 16.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 4.19 MiB is free. Process 1628835 has 2.57 GiB memory in use. Process 3052873 has 76.77 GiB memory in use. Of the allocated memory 1.43 GiB is allocated by PyTorch, and 39.12 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3904, in from_pretrained\n dispatch_model(model, **device_map_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 419, in dispatch_model\n attach_align_device_hook_on_blocks(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 648, in attach_align_device_hook_on_blocks\n attach_align_device_hook_on_blocks(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 648, in attach_align_device_hook_on_blocks\n attach_align_device_hook_on_blocks(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 648, in attach_align_device_hook_on_blocks\n attach_align_device_hook_on_blocks(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 611, in attach_align_device_hook_on_blocks\n attach_align_device_hook(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 504, in attach_align_device_hook\n attach_align_device_hook(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 504, in attach_align_device_hook\n attach_align_device_hook(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 495, in attach_align_device_hook\n add_hook_to_module(module, hook, append=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 157, in add_hook_to_module\n module = hook.init_hook(module)\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/hooks.py\", line 304, in init_hook\n set_module_tensor_to_device(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 392, in set_module_tensor_to_device\n new_value = old_value.to(device)\n ^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 4.19 MiB is free. Process 1628835 has 2.57 GiB memory in use. Process 3052873 has 76.77 GiB memory in use. Of the allocated memory 1.43 GiB is allocated by PyTorch, and 39.12 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-07-22T01-12-41.399956"
 }
SenseLLM/ReflectionCoder-CL-34B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-15T23:47:03Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1286,
-    "job_start_time": "2024-12-02T16-04-53.101011",
-    "error_msg": "CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 337.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 15.08 GiB memory in use. Process 2617830 has 60.84 GiB memory in use. Of the allocated memory 14.67 GiB is allocated by PyTorch, and 3.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 337.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 15.08 GiB memory in use. Process 2617830 has 60.84 GiB memory in use. Of the allocated memory 14.67 GiB is allocated by PyTorch, and 3.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T16-04-53.101011"
 }
SenseLLM/ReflectionCoder-DS-33B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-15T23:49:22Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1287,
-    "job_start_time": "2024-12-02T17-27-44.473160",
-    "error_msg": "CUDA out of memory. Tried to allocate 264.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 113.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 7.04 GiB memory in use. Process 2617830 has 69.09 GiB memory in use. Of the allocated memory 6.61 GiB is allocated by PyTorch, and 30.32 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 264.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 113.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 7.04 GiB memory in use. Process 2617830 has 69.09 GiB memory in use. Of the allocated memory 6.61 GiB is allocated by PyTorch, and 30.32 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T17-27-44.473160"
 }
TIGER-Lab/MAmmoTH2-8x7B-Plus_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-05-17T07:43:39Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 1338,
-    "job_start_time": "2024-12-07T15-18-56.073688",
-    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-07T15-18-56.073688"
 }
abacusai/Smaug-Mixtral-v0.1_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "MixtralForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-05-30T19:58:52Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1339,
-    "job_start_time": "2024-12-07T15-52-30.419185",
-    "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 4104970 has 76.24 GiB memory in use. Of the allocated memory 75.61 GiB is allocated by PyTorch, and 121.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-07T15-52-30.419185"
 }
anthracite-org/magnum-v3-34b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "LlamaForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-09-18T18:52:43Z",
 "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
 "source": "leaderboard",
 "job_id": 1289,
-"job_start_time": "2024-12-02T18-37-40.965377",
-"error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 155.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 7.00 GiB memory in use. Process 2617830 has 69.09 GiB memory in use. Of the allocated memory 6.60 GiB is allocated by PyTorch, and 1.85 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 155.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 7.00 GiB memory in use. Process 2617830 has 69.09 GiB memory in use. Of the allocated memory 6.60 GiB is allocated by PyTorch, and 1.85 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-12-02T18-37-40.965377"
 }

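The error_msg removed above ends with the allocator's own fragmentation hint for this torch build (max_split_size_mb via PYTORCH_CUDA_ALLOC_CONF). A sketch of applying it when loading a model like this one follows; the 128 MiB split size and the device_map fallback are illustrative choices, not settings prescribed by this repo.

# Illustrative only: the allocator option must be set before CUDA initializes.
import os
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "anthracite-org/magnum-v3-34b",
    torch_dtype=torch.bfloat16,  # matches the bfloat16 precision of the request
    device_map="auto",           # let accelerate offload layers instead of OOMing on GPU 0
)
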
brucethemoose/Yi-34B-200K-DARE-merge-v7_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "LlamaForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-06-17T04:44:02Z",
 "model_type": "🤝 : base merges and moerges",
 "source": "leaderboard",
 "job_id": 1279,
-"job_start_time": "2024-12-02T09-22-18.913794",
-"error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 171.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.85 GiB memory in use. Of the allocated memory 2.44 GiB is allocated by PyTorch, and 1.96 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 171.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.85 GiB memory in use. Of the allocated memory 2.44 GiB is allocated by PyTorch, and 1.96 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-12-02T09-22-18.913794"
 }

byroneverson/Yi-1.5-34B-Chat-abliterated_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "LlamaForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2025-01-22T23:46:17Z",
 "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
 "source": "leaderboard",
 "job_id": 1394,
-"job_start_time": "2025-01-31T22-05-12.500511",
-"error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 62.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 3703630 has 36.71 GiB memory in use. Process 3053824 has 1.02 GiB memory in use. Process 3055549 has 1.02 GiB memory in use. Process 3056485 has 1.02 GiB memory in use. Process 3057156 has 1.02 GiB memory in use. Process 3058320 has 3.62 GiB memory in use. Process 3059751 has 2.66 GiB memory in use. Process 3060474 has 1.04 GiB memory in use. Process 3062831 has 2.66 GiB memory in use. Process 3064183 has 1.02 GiB memory in use. Process 3064994 has 1.62 GiB memory in use. Process 3066591 has 4.27 GiB memory in use. Process 3067584 has 1.02 GiB memory in use. Process 3069269 has 2.74 GiB memory in use. Process 3071131 has 2.48 GiB memory in use. Process 3072251 has 4.26 GiB memory in use. Process 3073510 has 4.27 GiB memory in use. Process 3074554 has 4.27 GiB memory in use. Process 3075748 has 1.02 GiB memory in use. Of the allocated memory 36.19 GiB is allocated by PyTorch, and 12.93 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 102, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 305, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 621, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4270, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4848, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 876, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 62.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 3703630 has 36.71 GiB memory in use. Process 3053824 has 1.02 GiB memory in use. Process 3055549 has 1.02 GiB memory in use. Process 3056485 has 1.02 GiB memory in use. Process 3057156 has 1.02 GiB memory in use. Process 3058320 has 3.62 GiB memory in use. Process 3059751 has 2.66 GiB memory in use. Process 3060474 has 1.04 GiB memory in use. Process 3062831 has 2.66 GiB memory in use. Process 3064183 has 1.02 GiB memory in use. Process 3064994 has 1.62 GiB memory in use. Process 3066591 has 4.27 GiB memory in use. Process 3067584 has 1.02 GiB memory in use. Process 3069269 has 2.74 GiB memory in use. Process 3071131 has 2.48 GiB memory in use. Process 3072251 has 4.26 GiB memory in use. Process 3073510 has 4.27 GiB memory in use. Process 3074554 has 4.27 GiB memory in use. Process 3075748 has 1.02 GiB memory in use. Of the allocated memory 36.19 GiB is allocated by PyTorch, and 12.93 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n"
+"job_start_time": "2025-01-31T22-05-12.500511"
 }

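This failure is less about model size than contention: the removed message lists some twenty processes sharing GPU 0, leaving only 62.19 MiB free. A bf16 34B checkpoint needs roughly 34e9 × 2 bytes ≈ 68 GB (≈ 63 GiB) for weights alone, so no allocator tuning can save the run on a card this busy. Below is a sketch of a pre-flight headroom check a scheduler could run first; the function name and the 70 GiB threshold are invented for illustration.

# Hypothetical pre-flight check before dispatching an eval job.
import torch

def gpu_has_headroom(required_gib: float, device: int = 0) -> bool:
    free_bytes, _total_bytes = torch.cuda.mem_get_info(device)
    return free_bytes / 2**30 >= required_gib

# ~63 GiB of bf16 weights for a 34B model, plus activation/KV headroom.
if not gpu_has_headroom(70.0):
    raise RuntimeError("GPU 0 too contended; defer this eval request")
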
cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-06-17T07:12:51Z",
 "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
 "source": "leaderboard",
 "job_id": 902,
-"job_start_time": "2024-07-09T02-53-19.032021",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T02-53-19.032021"
 }

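This and the five dolphin Mixtral retries that follow all died the same way, and here even an idle card would not be enough: in bf16 the weights alone exceed an 80 GB device. A quick back-of-envelope check (the ~46.7B parameter count is the commonly cited figure for Mixtral 8x7B, not something recorded in these request files):

# Why a bf16 Mixtral-8x7B cannot fit on one 79.35 GiB GPU.
params = 46.7e9                  # approximate Mixtral 8x7B parameter count
bytes_per_param = 2              # bfloat16
weights_gib = params * bytes_per_param / 2**30
print(f"{weights_gib:.1f} GiB")  # ~87.0 GiB of weights alone vs. 79.35 GiB total
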
cognitivecomputations/dolphin-2.6-mixtral-8x7b_eval_request_d099b57_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-06-13T18:30:14Z",
 "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
 "source": "leaderboard",
 "job_id": 901,
-"job_start_time": "2024-07-09T02-27-51.604980",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.84 GiB is free. Process 3919661 has 68.50 GiB memory in use. Of the allocated memory 67.31 GiB is allocated by PyTorch, and 98.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.84 GiB is free. Process 3919661 has 68.50 GiB memory in use. Of the allocated memory 67.31 GiB is allocated by PyTorch, and 98.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T02-27-51.604980"
 }

cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_626c825_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-05-29T06:49:40Z",
 "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
 "source": "leaderboard",
 "job_id": 899,
-"job_start_time": "2024-07-09T02-21-14.129252",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T02-21-14.129252"
 }

cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_628c376_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-05-31T08:44:12Z",
 "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
 "source": "leaderboard",
 "job_id": 900,
-"job_start_time": "2024-07-09T02-24-32.164637",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T02-24-32.164637"
 }

cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_9ad9d14_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-05-25T06:10:49Z",
 "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
 "source": "leaderboard",
 "job_id": 898,
-"job_start_time": "2024-07-09T01-56-45.222276",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.84 GiB is free. Process 3919661 has 68.50 GiB memory in use. Of the allocated memory 67.31 GiB is allocated by PyTorch, and 98.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.84 GiB is free. Process 3919661 has 68.50 GiB memory in use. Of the allocated memory 67.31 GiB is allocated by PyTorch, and 98.11 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T01-56-45.222276"
 }

cognitivecomputations/dolphin-2.7-mixtral-8x7b_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "MixtralForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-05-13T16:00:21Z",
 "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
 "source": "leaderboard",
 "job_id": 897,
-"job_start_time": "2024-07-09T01-31-58.297976",
-"error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3838, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4298, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 895, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.87 GiB is free. Process 3919661 has 68.47 GiB memory in use. Of the allocated memory 67.28 GiB is allocated by PyTorch, and 102.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-07-09T01-31-58.297976"
 }

cognitivecomputations/dolphin-2.9.3-Yi-1.5-34B-32k_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
 "architectures": "LlamaForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2024-06-29T15:44:09Z",
 "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
 "source": "leaderboard",
 "job_id": 1281,
-"job_start_time": "2024-12-02T10-48-14.523764",
-"error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 185.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 5.14 GiB memory in use. Process 2405178 has 70.92 GiB memory in use. Of the allocated memory 4.74 GiB is allocated by PyTorch, and 1.92 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 185.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 5.14 GiB memory in use. Process 2405178 has 70.92 GiB memory in use. Of the allocated memory 4.74 GiB is allocated by PyTorch, and 1.92 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+"job_start_time": "2024-12-02T10-48-14.523764"
 }

deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,7 +8,7 @@
 "architectures": "Qwen2ForCausalLM",
 "weight_type": "Original",
 "main_language": "English",
-"status": "FAILED",
+"status": "RERUN",
 "submitted_time": "2025-01-28T23:51:12Z",
 "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
 "source": "manual",
@@ -28,7 +28,5 @@
 "tweetsentbr": 0.3709422121860369
 },
 "result_metrics_average": 0.24260416373877167,
-"result_metrics_npm": -0.1275114377712255,
-"error_msg": "CUDA out of memory. Tried to allocate 2.23 GiB. GPU 0 has a total capacity of 79.35 GiB of which 2.06 GiB is free. Process 4021222 has 43.41 GiB memory in use. Process 4026639 has 33.88 GiB memory in use. Of the allocated memory 41.24 GiB is allocated by PyTorch, and 179.01 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
-"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 235, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 106, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/vllm_causallms.py\", line 126, in __init__\n self.model = LLM(**self.model_args)\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/utils.py\", line 1039, in inner\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/entrypoints/llm.py\", line 239, in __init__\n self.llm_engine = self.engine_class.from_engine_args(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/engine/llm_engine.py\", line 482, in from_engine_args\n engine = cls(\n ^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/engine/llm_engine.py\", line 274, in __init__\n self._initialize_kv_caches()\n File \"/usr/local/lib/python3.12/dist-packages/vllm/engine/llm_engine.py\", line 427, in _initialize_kv_caches\n self.model_executor.initialize_cache(num_gpu_blocks, num_cpu_blocks)\n File \"/usr/local/lib/python3.12/dist-packages/vllm/executor/executor_base.py\", line 119, in initialize_cache\n self.collective_rpc(\"initialize_cache\",\n File \"/usr/local/lib/python3.12/dist-packages/vllm/executor/uniproc_executor.py\", line 49, in collective_rpc\n answer = run_method(self.driver_worker, method, args, kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/utils.py\", line 2208, in run_method\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/worker/worker.py\", line 308, in initialize_cache\n self._init_cache_engine()\n File \"/usr/local/lib/python3.12/dist-packages/vllm/worker/worker.py\", line 314, in _init_cache_engine\n CacheEngine(self.cache_config, self.model_config,\n File \"/usr/local/lib/python3.12/dist-packages/vllm/worker/cache_engine.py\", line 62, in __init__\n self.gpu_cache = self._allocate_kv_cache(\n ^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/usr/local/lib/python3.12/dist-packages/vllm/worker/cache_engine.py\", line 81, in _allocate_kv_cache\n torch.zeros(kv_cache_shape,\ntorch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.23 GiB. GPU 0 has a total capacity of 79.35 GiB of which 2.06 GiB is free. Process 4021222 has 43.41 GiB memory in use. Process 4026639 has 33.88 GiB memory in use. Of the allocated memory 41.24 GiB is allocated by PyTorch, and 179.01 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n"
+"result_metrics_npm": -0.1275114377712255
 }

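Unlike the transformers failures above, this traceback comes from the vLLM backend and fails while pre-allocating the KV cache (CacheEngine._allocate_kv_cache), after the 1.5B model's weights had already loaded. vLLM sizes that cache from gpu_memory_utilization, which defaults to 0.9 of total VRAM and so assumes an otherwise idle GPU. A sketch of the relevant knobs follows; the 0.3 utilization and the 4096-token limit are example values, not settings used by the leaderboard bot.

# Illustrative vLLM setup for a contended GPU; values are examples.
from vllm import LLM

llm = LLM(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    dtype="bfloat16",
    gpu_memory_utilization=0.3,  # claim ~30% of VRAM instead of the 0.9 default
    max_model_len=4096,          # shorter context -> smaller KV cache to allocate
)
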
dnhkng/RYS-Gemma-2-27b-it_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
|
|
8 |
"architectures": "Gemma2ForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
"main_language": "English",
|
11 |
-
"status": "
|
12 |
"submitted_time": "2024-08-22T18:28:39Z",
|
13 |
"model_type": "๐ถ : fine-tuned/fp on domain-specific datasets",
|
14 |
"source": "leaderboard",
|
15 |
"job_id": 1282,
|
16 |
-
"job_start_time": "2024-12-02T11-50-36.484369"
|
17 |
-
"error_msg": "CUDA out of memory. Tried to allocate 36.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 25.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 6.68 GiB memory in use. Process 2486930 has 69.54 GiB memory in use. Of the allocated memory 6.28 GiB is allocated by PyTorch, and 1.79 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
|
18 |
-
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 36.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 25.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 6.68 GiB memory in use. Process 2486930 has 69.54 GiB memory in use. Of the allocated memory 6.28 GiB is allocated by PyTorch, and 1.79 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
|
19 |
}
|
|
|
8 |
"architectures": "Gemma2ForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
"main_language": "English",
|
11 |
+
"status": "RERUN",
|
12 |
"submitted_time": "2024-08-22T18:28:39Z",
|
13 |
"model_type": "๐ถ : fine-tuned/fp on domain-specific datasets",
|
14 |
"source": "leaderboard",
|
15 |
"job_id": 1282,
|
16 |
+
"job_start_time": "2024-12-02T11-50-36.484369"
|
|
|
|
|
17 |
}
|
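Every file in this commit receives the same reset: the stale "error_msg" and "traceback" fields are removed, the trailing comma on "job_start_time" goes with them, and "status" flips from FAILED to RERUN so the eval bot queues the request again. A minimal sketch of how that reset could be scripted is below; the helper name and glob pattern are assumptions for illustration, not the leaderboard's actual tooling.

    import json
    from pathlib import Path

    def reset_request(path: Path) -> None:
        # Reset one failed eval request so the bot will retry it (sketch).
        data = json.loads(path.read_text(encoding="utf-8"))
        if data.get("status") != "FAILED":
            return                      # leave finished/pending requests alone
        data["status"] = "RERUN"        # the status this commit writes
        data.pop("error_msg", None)     # drop the stale OOM message
        data.pop("traceback", None)     # drop the stale stack trace
        path.write_text(json.dumps(data, indent=4, ensure_ascii=False) + "\n",
                        encoding="utf-8")

    if __name__ == "__main__":
        for request_file in Path(".").rglob("*_eval_request_*.json"):
            reset_request(request_file)
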
hotmailuser/RombosBeagle-v2beta-MGS-32B_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2025-01-22T19:47:57Z",
     "model_type": "🤝 : base merges and moerges",
     "source": "leaderboard",
     "job_id": 1393,
-    "job_start_time": "2025-01-31T21-50-33.255793",
-    "error_msg": "CUDA out of memory. Tried to allocate 270.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 94.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 3703630 has 22.11 GiB memory in use. Process 3053824 has 4.27 GiB memory in use. Process 3054681 has 1.60 GiB memory in use. Process 3055549 has 3.83 GiB memory in use. Process 3056485 has 4.27 GiB memory in use. Process 3057156 has 1.02 GiB memory in use. Process 3058320 has 1.02 GiB memory in use. Process 3059751 has 4.26 GiB memory in use. Process 3060474 has 4.26 GiB memory in use. Process 3062831 has 1.02 GiB memory in use. Process 3064183 has 1.02 GiB memory in use. Process 3064994 has 1.02 GiB memory in use. Process 3066591 has 4.27 GiB memory in use. Process 3067584 has 1.02 GiB memory in use. Process 3069269 has 4.27 GiB memory in use. Process 3071131 has 4.26 GiB memory in use. Process 3072251 has 1.43 GiB memory in use. Process 3073510 has 4.26 GiB memory in use. Process 3074554 has 4.26 GiB memory in use. Process 3075748 has 4.27 GiB memory in use. Of the allocated memory 21.60 GiB is allocated by PyTorch, and 3.20 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 102, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 305, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 621, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4270, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4848, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 876, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.OutOfMemoryError: CUDA out of memory. Tried to allocate 270.00 MiB. GPU 0 has a total capacity of 79.35 GiB of which 94.19 MiB is free. Process 2234264 has 1.55 GiB memory in use. Process 3703630 has 22.11 GiB memory in use. Process 3053824 has 4.27 GiB memory in use. Process 3054681 has 1.60 GiB memory in use. Process 3055549 has 3.83 GiB memory in use. Process 3056485 has 4.27 GiB memory in use. Process 3057156 has 1.02 GiB memory in use. Process 3058320 has 1.02 GiB memory in use. Process 3059751 has 4.26 GiB memory in use. Process 3060474 has 4.26 GiB memory in use. Process 3062831 has 1.02 GiB memory in use. Process 3064183 has 1.02 GiB memory in use. Process 3064994 has 1.02 GiB memory in use. Process 3066591 has 4.27 GiB memory in use. Process 3067584 has 1.02 GiB memory in use. Process 3069269 has 4.27 GiB memory in use. Process 3071131 has 4.26 GiB memory in use. Process 3072251 has 1.43 GiB memory in use. Process 3073510 has 4.26 GiB memory in use. Process 3074554 has 4.26 GiB memory in use. Process 3075748 has 4.27 GiB memory in use. Of the allocated memory 21.60 GiB is allocated by PyTorch, and 3.20 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n"
+    "job_start_time": "2025-01-31T21-50-33.255793"
 }

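The RombosBeagle failure above comes from a newer PyTorch whose OOM message recommends PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True instead of max_split_size_mb. Following that hint means setting the variable before CUDA is first initialized in the process, roughly as sketched below; note the log shows sibling jobs already holding nearly all of GPU 0's 79.35 GiB, so an allocator tweak alone would probably not have rescued this run.

    import os

    # Must be set before the CUDA context is created, i.e. before the first
    # torch call that touches the GPU in this process.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

    import torch  # imported after the env var on purpose

    assert torch.cuda.is_available()
    # From here on the caching allocator may grow existing segments instead
    # of failing when no single free block is large enough.
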
microsoft/GRIN-MoE_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "GRIN-MoE",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-19T04:44:39Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1292,
-    "job_start_time": "2024-12-03T01-00-50.171188",
-    "error_msg": "CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 38.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.21 GiB memory in use. Of the allocated memory 75.58 GiB is allocated by PyTorch, and 139.36 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 559, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 38.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.21 GiB memory in use. Of the allocated memory 75.58 GiB is allocated by PyTorch, and 139.36 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-03T01-00-50.171188"
 }

microsoft/Phi-3.5-MoE-instruct_eval_request_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "PhiMoEForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-20T17:15:00Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "manual",
     "job_id": 1314,
-    "job_start_time": "2024-12-04T11-00-27.308279",
-    "error_msg": "CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 30.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.22 GiB memory in use. Of the allocated memory 75.58 GiB is allocated by PyTorch, and 139.36 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 559, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 30.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1894753 has 76.22 GiB memory in use. Of the allocated memory 75.58 GiB is allocated by PyTorch, and 139.36 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-04T11-00-27.308279"
 }

oobabooga/CodeBooga-34B-v0.1_eval_request_False_float16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "LlamaForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-06-17T03:20:02Z",
     "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
     "source": "leaderboard",
     "job_id": 1278,
-    "job_start_time": "2024-12-02T08-40-49.036069",
-    "error_msg": "CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 163.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.86 GiB memory in use. Of the allocated memory 2.45 GiB is allocated by PyTorch, and 1.95 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4225, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4738, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 941, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 163.19 MiB is free. Process 3777301 has 1.55 GiB memory in use. Process 3780253 has 1.55 GiB memory in use. Process 1841569 has 73.24 GiB memory in use. Process 1894753 has 2.86 GiB memory in use. Of the allocated memory 2.45 GiB is allocated by PyTorch, and 1.95 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-12-02T08-40-49.036069"
 }

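Every traceback in this batch dies at the same spot: _load_state_dict_into_meta_model copying weights onto GPU 0 while other eval jobs already occupy most of the card. One defensive pattern, sketched here with illustrative numbers, is to cap per-device usage via from_pretrained's max_memory map so accelerate places the overflow on other devices or in CPU RAM rather than OOM-ing; the model id is just one entry from this commit.

    import torch
    from transformers import AutoModelForCausalLM

    # Sketch: reserve only part of the shared GPU 0 and let accelerate's
    # device_map planner put the remaining weights elsewhere.
    model = AutoModelForCausalLM.from_pretrained(
        "oobabooga/CodeBooga-34B-v0.1",
        torch_dtype=torch.float16,           # this request ran in float16
        device_map="auto",                   # requires accelerate
        max_memory={0: "10GiB", "cpu": "120GiB"},
    )
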
vilm/Quyen-Pro-Max-v0.1_eval_request_1366c92_False_bfloat16_Original.json
CHANGED
@@ -8,12 +8,10 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2024-09-15T04:13:04Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "leaderboard",
     "job_id": 1070,
-    "job_start_time": "2024-09-19T02-39-58.642137",
-    "error_msg": "CUDA out of memory. Tried to allocate 384.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.49 GiB is free. Process 1106294 has 68.85 GiB memory in use. Of the allocated memory 67.33 GiB is allocated by PyTorch, and 56.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 304, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 616, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 564, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3903, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4377, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 933, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 400, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 384.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.49 GiB is free. Process 1106294 has 68.85 GiB memory in use. Of the allocated memory 67.33 GiB is allocated by PyTorch, and 56.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+    "job_start_time": "2024-09-19T02-39-58.642137"
 }