eduagarcia committed on
Commit 3dca3e2 • 1 Parent(s): cdf590c

Retry 7 FAILED models

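Each file diff below makes the same change: `status` flips from `FAILED` back to `RERUN`, and the stale `error_msg` and `traceback` fields recorded by the crashed run are dropped. As a minimal sketch (a hypothetical helper, not a script included in this commit), the reset could be applied to every eval request file like this:

```python
import json
from pathlib import Path

def reset_failed_request(path: Path) -> None:
    """Flip a FAILED eval request back to RERUN and drop stale error fields."""
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") != "FAILED":
        return  # only touch requests that actually failed
    data["status"] = "RERUN"
    data.pop("error_msg", None)   # stale CUDA OOM message from the last run
    data.pop("traceback", None)   # stale stack trace from the last run
    path.write_text(json.dumps(data, indent=1) + "\n", encoding="utf-8")

# Apply to every eval request file in the repo (glob pattern is an assumption).
for request_file in Path(".").glob("*/*_eval_request_*.json"):
    reset_failed_request(request_file)
```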
01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 34.389,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:18:19Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 339,
15
  "job_start_time": "2024-04-02T07-14-39.136107",
16
- "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
  "main_language": "English"
  }
 
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 13.061,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:15:01Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 388,
15
  "job_start_time": "2024-04-05T06-21-32.588191",
16
- "main_language": "Other",
- "error_msg": "CUDA out of memory. Tried to allocate 158.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 4.19 MiB is free. Process 946248 has 2.83 GiB memory in use. Process 1384258 has 76.51 GiB memory in use. Of the allocated memory 72.22 GiB is allocated by PyTorch, and 3.78 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 240, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1527, in generate\n result = self._greedy_search(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2411, in _greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py\", line 1036, in forward\n outputs = self.gpt_neox(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py\", line 927, in forward\n outputs = layer(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return 
self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py\", line 692, in forward\n attention_layer_outputs = self.attention(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/gpt_neox/modeling_gpt_neox.py\", line 209, in forward\n value = torch.cat((past_value, value), dim=-2)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 158.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 4.19 MiB is free. Process 946248 has 2.83 GiB memory in use. Process 1384258 has 76.51 GiB memory in use. Of the allocated memory 72.22 GiB is allocated by PyTorch, and 3.78 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "main_language": "Other"
  }
 
WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
11
  "submitted_time": "2024-03-05T16:38:35Z",
12
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
13
  "source": "leaderboard",
14
  "job_id": 384,
15
  "job_start_time": "2024-04-04T23-14-44.023205",
16
- "main_language": "English",
- "error_msg": "CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 568.19 MiB is free. Process 946248 has 38.41 GiB memory in use. Process 1384258 has 40.38 GiB memory in use. Of the allocated memory 34.40 GiB is allocated by PyTorch, and 3.50 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 240, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1527, in generate\n result = self._greedy_search(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2411, in _greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1196, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1016, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return 
self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 739, in forward\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 670, in forward\n attn_output = torch.nn.functional.scaled_dot_product_attention(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 568.19 MiB is free. Process 946248 has 38.41 GiB memory in use. Process 1384258 has 40.38 GiB memory in use. Of the allocated memory 34.40 GiB is allocated by PyTorch, and 3.50 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "main_language": "English"
  }
 
WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 70.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
11
  "submitted_time": "2024-03-05T16:39:12Z",
12
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
13
  "source": "leaderboard",
14
  "job_id": 339,
15
  "job_start_time": "2024-04-02T03-37-29.146344",
16
- "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
  "main_language": "English"
  }
 
allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,13 +7,11 @@
  "params": 68.977,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
11
  "submitted_time": "2024-03-05T16:38:01Z",
12
  "model_type": "πŸ’¬ : chat models (RLHF, DPO, IFT, ...)",
13
  "source": "leaderboard",
14
  "job_id": 333,
15
  "job_start_time": "2024-04-02T02-29-13.759360",
16
- "error_msg": "CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
  "main_language": "English"
  }
 