eduagarcia committed
Commit 6748916 • 1 Parent(s): 35601c0

Retry 19 FAILED models

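Every file in this commit gets the same two-part edit: the stale "status" (FAILED, or a RUNNING value left over from an interrupted worker) is reset to "RERUN", and the "error_msg"/"traceback" fields recorded by the failed attempt are dropped. A minimal Python sketch of that reset step (the glob pattern and JSON indent are assumptions, not taken from the eval bot):

import json
from pathlib import Path

def reset_request(path: Path) -> None:
    # Flag a failed or stale eval request so the eval bot picks it up again.
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") in ("FAILED", "RUNNING"):
        data["status"] = "RERUN"
        data.pop("error_msg", None)   # drop details from the failed run
        data.pop("traceback", None)
        path.write_text(json.dumps(data, indent=4, ensure_ascii=False) + "\n",
                        encoding="utf-8")

for request_file in Path(".").rglob("*_eval_request_*.json"):
    reset_request(request_file)
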
BAAI/Aquila-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:00Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
BAAI/Aquila2-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:07Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 349,
- "job_start_time": "2024-04-02T09-42-02.662702",
- "error_msg": "CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 62.19 MiB is free. Process 4070277 has 18.01 GiB memory in use. Process 4074833 has 10.09 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 9.57 GiB is allocated by PyTorch, and 13.52 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 62.19 MiB is free. Process 4070277 has 18.01 GiB memory in use. Process 4074833 has 10.09 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 9.57 GiB is allocated by PyTorch, and 13.52 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T09-42-02.662702"
  }
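
The FAILED requests below all share the same signature: a torch.cuda.OutOfMemoryError raised inside from_pretrained while weights were being copied onto a GPU that several other eval processes were already using. The error message itself points at PYTORCH_CUDA_ALLOC_CONF; a hedged example of applying the suggested max_split_size_mb knob (the value 128 is illustrative, and the variable must be set before CUDA is first initialized):

import os

# Must be set before torch initializes CUDA. max_split_size_mb caps how
# large a cached block the allocator will split, which reduces fragmentation
# (the remedy the OOM message above suggests).
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")

import torch

print(torch.cuda.is_available())

Note that each traceback also shows four unrelated processes holding most of the 79.35 GiB card, so allocator tuning alone may not have helped; re-queuing the jobs for a freer GPU, as this commit does, is the simpler remedy.
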
DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.0,
  "architectures": "PolyLMHeadModel",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "RERUN",
  "submitted_time": "2024-02-11T13:34:54Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.044,
  "architectures": "DeciLMForCausalLM",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:06:34Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.0,
  "architectures": "OrionForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 346,
- "job_start_time": "2024-04-02T09-06-00.949513",
- "error_msg": "CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 38.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.97 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.45 GiB is allocated by PyTorch, and 15.57 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 50.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 38.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.97 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.45 GiB is allocated by PyTorch, and 15.57 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T09-06-00.949513"
  }
Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "SkyworkForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:06Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 342,
- "job_start_time": "2024-04-02T08-18-54.035564",
- "error_msg": "CUDA out of memory. Tried to allocate 108.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 12.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.75 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.17 GiB is allocated by PyTorch, and 67.72 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 108.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 12.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.75 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.17 GiB is allocated by PyTorch, and 67.72 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T08-18-54.035564"
  }
THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 6.0,
  "architectures": "ChatGLMModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:16Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 350,
- "job_start_time": "2024-04-02T09-59-02.085745",
- "error_msg": "CUDA out of memory. Tried to allocate 214.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 152.19 MiB is free. Process 4070277 has 18.54 GiB memory in use. Process 4074833 has 9.47 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 8.95 GiB is allocated by PyTorch, and 13.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 214.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 152.19 MiB is free. Process 4070277 has 18.54 GiB memory in use. Process 4074833 has 9.47 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 8.95 GiB is allocated by PyTorch, and 13.31 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T09-59-02.085745"
  }
WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-03-05T16:38:35Z",
  "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
  "source": "leaderboard",
  "job_id": 338,
- "job_start_time": "2024-04-02T03-01-44.245140",
- "error_msg": "CUDA out of memory. Tried to allocate 136.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 46.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 11.03 GiB memory in use. Process 188848 has 6.25 GiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 5.84 GiB is allocated by PyTorch, and 3.32 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 136.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 46.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 11.03 GiB memory in use. Process 188848 has 6.25 GiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 5.84 GiB is allocated by PyTorch, and 3.32 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T03-01-44.245140"
  }
baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "BaiChuanForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:13Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 343,
- "job_start_time": "2024-04-02T08-32-12.445215",
- "error_msg": "CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.70 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.18 GiB is allocated by PyTorch, and 13.73 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.70 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.18 GiB is allocated by PyTorch, and 13.73 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T08-32-12.445215"
  }
baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "BaichuanForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:33Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 345,
- "job_start_time": "2024-04-02T08-49-13.084509",
- "error_msg": "CUDA out of memory. Tried to allocate 134.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.92 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.40 GiB is allocated by PyTorch, and 11.88 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/baichuan-inc/Baichuan2-13B-Base/c6f590cab590cf33e78ad834dbd5f9bd6df34a94/modeling_baichuan.py\", line 672, in from_pretrained\n return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args,\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 134.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.92 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.40 GiB is allocated by PyTorch, and 11.88 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T08-49-13.084509"
  }
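
The two Baichuan2 tracebacks differ from the rest in one detail: the load passes through modeling_baichuan.py under the transformers_modules cache, i.e. these checkpoints ship custom modeling code that is executed via trust_remote_code. A minimal sketch of such a load (model id and dtype come from this request file; device_map="auto" is an illustrative choice, not necessarily what the harness used):

import torch
from transformers import AutoModelForCausalLM

# trust_remote_code=True is what materializes modeling_baichuan.py in the
# transformers_modules cache seen in the traceback above.
model = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-13B-Base",
    torch_dtype=torch.bfloat16,  # the request was submitted as bfloat16
    device_map="auto",           # illustrative; lets accelerate place weights
    trust_remote_code=True,
)
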
baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "BaichuanForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:25Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 344,
- "job_start_time": "2024-04-02T08-40-10.638840",
- "error_msg": "CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 48.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.71 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.19 GiB is allocated by PyTorch, and 13.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/baichuan-inc/Baichuan2-7B-Base/f9d4d8dd2f7a3dbede3bda3b0cf0224e9272bbe5/modeling_baichuan.py\", line 658, in from_pretrained\n return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args,\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 48.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.71 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.19 GiB is allocated by PyTorch, and 13.75 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T08-40-10.638840"
  }
deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:46Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 347,
- "job_start_time": "2024-04-02T09-18-28.675982",
- "error_msg": "CUDA out of memory. Tried to allocate 32.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.78 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.26 GiB is allocated by PyTorch, and 13.86 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 14.19 MiB is free. Process 4070277 has 26.36 GiB memory in use. Process 4074833 has 1.78 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 1.26 GiB is allocated by PyTorch, and 13.86 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T09-18-28.675982"
  }
gpt2_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.137,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "RUNNING",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:25Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.242,
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-02T01:16:18Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 334,
- "job_start_time": "2024-04-02T02-29-32.109952",
- "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 778.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 362.01 MiB is allocated by PyTorch, and 1.99 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 90.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 778.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 362.01 MiB is allocated by PyTorch, and 1.99 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T02-29-32.109952"
  }
nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.163,
  "architectures": "Qwen2ForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-01T23:45:26Z",
  "model_type": "🔶 : fine-tuned/fp on domain-specific datasets",
  "source": "leaderboard",
  "job_id": 335,
- "job_start_time": "2024-04-02T03-36-31.782335",
- "error_msg": "CUDA out of memory. Tried to allocate 134.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 62.19 MiB is free. Process 4070277 has 52.11 GiB memory in use. Process 4074833 has 10.79 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 15.98 GiB memory in use. Of the allocated memory 10.27 GiB is allocated by PyTorch, and 14.21 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 134.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 62.19 MiB is free. Process 4070277 has 52.11 GiB memory in use. Process 4074833 has 10.79 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 15.98 GiB memory in use. Of the allocated memory 10.27 GiB is allocated by PyTorch, and 14.21 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T03-36-31.782335"
  }
pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-03-28T00:46:18Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "leaderboard",
  "job_id": 335,
- "job_start_time": "2024-04-02T02-29-58.329296",
- "error_msg": "CUDA out of memory. Tried to allocate 1000.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1000.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "status": "RERUN",
+ "job_start_time": "2024-04-02T02-29-58.329296"
  }
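
Every file in this batch changes the same way: "status" flips from "FAILED" back to "RERUN", the stale "error_msg" and "traceback" fields are dropped, and "job_id" plus "job_start_time" are kept. A minimal sketch of that bulk edit in Python, assuming a local checkout of this requests dataset (mark_for_rerun is an illustrative helper, not the leaderboard bot's actual code):

    import json
    from pathlib import Path

    def mark_for_rerun(path: Path) -> None:
        # Flip a FAILED eval request back to RERUN and drop the stale failure details.
        data = json.loads(path.read_text())
        if data.get("status") != "FAILED":
            return
        data["status"] = "RERUN"
        data.pop("error_msg", None)   # stale CUDA OOM message
        data.pop("traceback", None)   # stale Python traceback
        # ensure_ascii=False keeps the emoji in "model_type"; the one-space
        # indent matching the files in this repo is an assumption.
        path.write_text(json.dumps(data, ensure_ascii=False, indent=1) + "\n")

    # Request files live at {org}/{model}_eval_request_*.json
    for request in Path(".").glob("*/*_eval_request_*.json"):
        mark_for_rerun(request)
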
semantixai/LloroV2_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 6.739,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-03-13T15:29:58Z",
  "model_type": "🟒 : pretrained",
  "source": "leaderboard",
  "job_id": 328,
- "job_start_time": "2024-04-01T20-53-50.809981",
- "error_msg": "CUDA out of memory. Tried to allocate 32.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 2.19 MiB is free. Process 3931410 has 68.65 GiB memory in use. Process 3994524 has 7.85 GiB memory in use. Process 4055024 has 2.82 GiB memory in use. Of the allocated memory 2.41 GiB is allocated by PyTorch, and 3.78 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 2.19 MiB is free. Process 3931410 has 68.65 GiB memory in use. Process 3994524 has 7.85 GiB memory in use. Process 4055024 has 2.82 GiB memory in use. Of the allocated memory 2.41 GiB is allocated by PyTorch, and 3.78 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-01T20-53-50.809981"
  }
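
Each removed "error_msg" is the CUDA caching allocator's standard OOM report, and every one ends with the same hint about max_split_size_mb. In these logs the reserved-but-unallocated slack is at most a few MiB, so fragmentation is unlikely to be the real cause (the GPUs are simply shared and nearly full), but for reference the hint is applied through the PYTORCH_CUDA_ALLOC_CONF environment variable before the first CUDA allocation. A sketch, with 128 MiB as an arbitrary example value rather than anything the logs recommend:

    import os

    # Must be set before the process touches CUDA for the first time.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

    import torch  # imported afterwards so the caching allocator picks the setting up

    x = torch.zeros(1, device="cuda")  # blocks larger than 128 MiB are now never
                                       # split, reducing fragmentation over time
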
xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:51Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 341,
- "job_start_time": "2024-04-02T08-01-43.539477",
- "error_msg": "CUDA out of memory. Tried to allocate 136.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 110.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.65 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.13 GiB is allocated by PyTorch, and 13.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 136.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 110.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 4.65 GiB memory in use. Process 188848 has 32.26 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 4.13 GiB is allocated by PyTorch, and 13.75 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T08-01-43.539477"
  }
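
The "job_start_time" values use a filesystem-safe variant of ISO 8601, with "-" in place of ":" inside the time component, so datetime.fromisoformat will reject them. A small parsing sketch, with the format string inferred from the values above:

    from datetime import datetime

    started = datetime.strptime("2024-04-02T08-01-43.539477", "%Y-%m-%dT%H-%M-%S.%f")
    print(started.isoformat())  # -> 2024-04-02T08:01:43.539477
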
xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "XverseForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:39Z",
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 340,
- "job_start_time": "2024-04-02T07-49-32.883892",
- "error_msg": "CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 60.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 8.21 GiB memory in use. Process 188848 has 28.75 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 7.69 GiB is allocated by PyTorch, and 13.57 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 60.19 MiB is free. Process 4070277 has 25.40 GiB memory in use. Process 4074833 has 8.21 GiB memory in use. Process 188848 has 28.75 GiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 7.69 GiB is allocated by PyTorch, and 13.57 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
+ "job_start_time": "2024-04-02T07-49-32.883892"
  }
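
All of the tracebacks above fail at the same point, accelerate's set_module_tensor_to_device, while several concurrent eval processes already hold most of the 79.35 GiB card; the models themselves are not too large, the GPU was simply busy. A hedged sketch of a pre-flight headroom check a dispatcher could run before loading weights (gpu_has_headroom is hypothetical; the two-bytes-per-parameter estimate assumes float16/bfloat16 weights):

    import torch

    def gpu_has_headroom(params_billion: float, device: int = 0,
                         overhead_gib: float = 2.0) -> bool:
        # float16/bfloat16 weights take ~2 bytes per parameter, plus working overhead.
        needed_gib = params_billion * 2 * 1e9 / 2**30 + overhead_gib
        free_bytes, _total = torch.cuda.mem_get_info(device)
        return free_bytes / 2**30 >= needed_gib

    # e.g. XVERSE-13B in bfloat16 -> roughly 26 GiB of weights
    if not gpu_has_headroom(13.0):
        raise RuntimeError("GPU 0 is too busy; requeue the job instead of OOMing mid-load")
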