eduagarcia committed
Commit d064ad6
1 Parent(s): ab0d079

Update status of Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original to FAILED

Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,10 +7,12 @@
   "params": 72.0,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "RUNNING",
+  "status": "FAILED",
   "submitted_time": "2024-02-18T11:55:28Z",
   "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
   "source": "leaderboard",
   "job_id": 253,
-  "job_start_time": "2024-02-18T15-43-20.059302"
+  "job_start_time": "2024-02-18T15-43-20.059302",
+  "error_msg": "No executable batch and length size found, max_length reached less than 512 at batch_size 1.",
+  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 137, in decorator\n raise RuntimeError(f\"No executable batch and length size found, max_length reached less than {minumum_max_length} at batch_size {batch_size}.\")\nRuntimeError: No executable batch and length size found, max_length reached less than 512 at batch_size 1.\n"
  }
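
Note on the failure mode: the traceback ends inside the harness's automatic search for a workable (batch_size, max_length) pair (_detect_batch_size_and_length -> forward_batch), which means the 72B model could not complete even a single trial forward pass with a context of at least 512 tokens at batch size 1 on the available GPU memory. A minimal sketch of the backoff loop the error message implies, assuming the usual halve-on-OOM strategy; the function name, starting values, and defaults below are illustrative assumptions, not the fork's actual code:

import torch

def find_executable_batch_and_length(forward_fn,
                                     start_batch_size=64,
                                     start_max_length=4096,
                                     minimum_max_length=512):
    # Hypothetical reconstruction of the probe named in the traceback:
    # attempt a trial forward pass, halving batch_size on CUDA OOM,
    # then halving max_length once batch_size is down to 1, and giving
    # up when max_length falls below the minimum at batch_size 1.
    batch_size, max_length = start_batch_size, start_max_length
    while True:
        if batch_size == 1 and max_length < minimum_max_length:
            raise RuntimeError(
                f"No executable batch and length size found, max_length "
                f"reached less than {minimum_max_length} at batch_size {batch_size}."
            )
        try:
            forward_fn(batch_size, max_length)  # trial forward pass
            return batch_size, max_length
        except torch.cuda.OutOfMemoryError:
            torch.cuda.empty_cache()
            if batch_size > 1:
                batch_size //= 2
            else:
                max_length //= 2

Under these assumed defaults the probe would walk the batch size 64 -> 1, then the length 4096 -> 256, and raise exactly the RuntimeError recorded in error_msg; the job was then marked FAILED rather than retried.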