Commit 993c3d9
Parent(s): f702111
Update status of Qwen/Qwen1.5-110B_eval_request_False_bfloat16_Original to FAILED
Qwen/Qwen1.5-110B_eval_request_False_bfloat16_Original.json (CHANGED)
@@ -8,10 +8,12 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "
+    "status": "FAILED",
     "submitted_time": "2024-05-18T22:56:09Z",
     "model_type": "🟢 : pretrained",
     "source": "leaderboard",
     "job_id": 696,
-    "job_start_time": "2024-05-22T20-41-34.060447"
+    "job_start_time": "2024-05-22T20-41-34.060447",
+    "error_msg": "No executable batch and length size found, max_length reached less than 512 at batch_size 1.",
+    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 70, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1433, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 822, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 147, in decorator\n raise RuntimeError(f\"No executable batch and length size found, max_length reached less than {minumum_max_length} at batch_size {batch_size}.\")\nRuntimeError: No executable batch and length size found, max_length reached less than 512 at batch_size 1.\n"
 }
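
For context, the failure recorded in "error_msg" and "traceback" comes from the harness's automatic batch-size/length detection (_detect_batch_size_and_length and its forward_batch closure in lm_eval/models/huggingface.py): the harness shrinks the trial batch size and sequence length until a forward pass fits, and gives up once max_length would drop below 512 at batch_size 1, exactly the state quoted in the error message. Below is a minimal sketch of that backoff loop, assuming it backs off on batch size before sequence length; try_forward is a hypothetical stand-in for the harness's internal forward_batch, and only the names visible in the traceback are taken from the actual source.

import torch

MINIMUM_MAX_LENGTH = 512  # the floor quoted in the error_msg above


def find_executable_batch_and_length(try_forward, batch_size=64, max_length=4096):
    # try_forward(batch_size, max_length) is a hypothetical callable that runs
    # one dummy forward pass and raises torch.cuda.OutOfMemoryError when the
    # configuration does not fit in GPU memory.
    while True:
        try:
            try_forward(batch_size, max_length)
            return batch_size, max_length  # found a configuration that fits
        except torch.cuda.OutOfMemoryError:
            torch.cuda.empty_cache()
            if batch_size > 1:
                batch_size //= 2   # back off on batch size first (assumed order)
            else:
                max_length //= 2   # then on sequence length
            if max_length < MINIMUM_MAX_LENGTH:
                # The state this job ended in: even batch_size 1 with
                # max_length below 512 could not complete a forward pass.
                raise RuntimeError(
                    f"No executable batch and length size found, max_length "
                    f"reached less than {MINIMUM_MAX_LENGTH} at batch_size {batch_size}."
                )

Reaching this point at batch_size 1 is consistent with the model simply not fitting on the eval worker: Qwen1.5-110B in bfloat16 needs roughly 220 GB for the weights alone, before activations or KV cache.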