llm_pt_leaderboard_requests/meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json
{
    "model": "meta-llama/Llama-2-7b-hf",
    "base_model": "",
    "revision": "main",
    "private": false,
    "precision": "float16",
    "params": 6.738,
    "architectures": "LlamaForCausalLM",
    "weight_type": "Original",
    "status": "FAILED",
    "submitted_time": "2024-02-05T22:59:42Z",
    "model_type": "🟢 : pretrained",
    "source": "script",
    "job_id": 0,
    "job_start_time": "2024-02-05T23-36-46.051171",
    "eval_version": "1.0.0",
    "result_metrics": {
        "enem_challenge": 0.31560531840447864,
        "bluex": 0.2906815020862309,
        "oab_exams": 0.34578587699316626,
        "assin2_rte": 0.3632717133913797,
        "assin2_sts": 0.22243979190632546,
        "faquad_nli": 0.5421626984126984,
        "sparrow_pt": 0.3338694424894331
    },
    "result_metrics_average": 0.3448309062405303,
    "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory",
    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n pass\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n try:\n ^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n #upload results\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n add_operations = _prepare_upload_folder_additions(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory\n"
}
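
Note on the failure recorded above: the traceback ends inside huggingface_hub's upload_folder, which raises ValueError("Provided path: '...' is not a directory") when the given folder_path does not exist locally. The sketch below is a minimal, hypothetical illustration of guarding that upload before calling the API; the function name, default paths, and repo id are assumptions and not the leaderboard bot's actual code.

import os
from huggingface_hub import HfApi

def upload_raw_results_safely(model_id: str,
                              downloads_root: str = "downloads/eval-queue",
                              repo_id: str = "my-org/raw-results") -> None:
    # Local folder the eval bot would have written raw results into
    # (layout mirrors the path in error_msg above; exact layout is an assumption).
    folder_path = os.path.join(downloads_root, model_id)

    # upload_folder raises ValueError("Provided path: '...' is not a directory")
    # when folder_path is missing -- the failure recorded in this request file.
    if not os.path.isdir(folder_path):
        raise FileNotFoundError(
            f"No local results for {model_id} at {folder_path}; "
            "not calling upload_folder on a missing path."
        )

    HfApi().upload_folder(
        folder_path=folder_path,
        path_in_repo=model_id,
        repo_id=repo_id,
        repo_type="dataset",
    )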