{
    "model": "Qwen/Qwen1.5-110B",
    "base_model": "",
    "revision": "main",
    "private": false,
    "precision": "bfloat16",
    "params": 111.21,
    "architectures": "Qwen2ForCausalLM",
    "weight_type": "Original",
    "main_language": "English",
    "status": "FAILED",
    "submitted_time": "2024-05-18T22:56:09Z",
    "model_type": "🟢 : pretrained",
    "source": "leaderboard",
    "job_id": 696,
    "job_start_time": "2024-05-22T20-41-34.060447",
    "error_msg": "No executable batch and length size found, max_length reached less than 512 at batch_size 1.",
    "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 70, in run_request\n    results = run_eval_on_model(\n              ^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n    result = evaluate(\n             ^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n    results = evaluator.simple_evaluate(\n              ^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n    results = evaluate(\n              ^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n    resps = getattr(lm, reqtype)(cloned_reqs)\n            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1433, in generate_until\n    batch_size, _ = self._detect_batch_size_and_length()\n                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 822, in _detect_batch_size_and_length\n    batch_size, max_length = forward_batch()\n                             ^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 147, in decorator\n    raise RuntimeError(f\"No executable batch and length size found, max_length reached less than {minumum_max_length} at batch_size {batch_size}.\")\nRuntimeError: No executable batch and length size found, max_length reached less than 512 at batch_size 1.\n"
}