eduagarcia committed
Commit 9b811b0
1 Parent(s): 0e233d6

Update status of meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original to FAILED

meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
   "params": 6.738,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FINISHED",
+  "status": "FAILED",
   "submitted_time": "2024-02-05T22:59:42Z",
   "model_type": "🟢 : pretrained",
   "source": "script",
@@ -23,5 +23,7 @@
   "faquad_nli": 0.5421626984126984,
   "sparrow_pt": 0.3338694424894331
  },
-  "result_metrics_average": 0.3448309062405303
+  "result_metrics_average": 0.3448309062405303,
+  "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory",
+  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n pass\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n try:\n ^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n #upload results\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n add_operations = _prepare_upload_folder_additions(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory\n"
 }
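
The recorded traceback shows huggingface_hub's upload_folder raising ValueError because the local results path did not exist when upload_raw_results ran. A minimal sketch of an up-front guard for this failure mode, assuming a hypothetical helper name (upload_results_if_present) and placeholder repo_id / folder_path arguments rather than the leaderboard bot's real configuration:

import os

from huggingface_hub import HfApi


def upload_results_if_present(repo_id: str, folder_path: str) -> None:
    # upload_folder() only raises "Provided path ... is not a directory"
    # deep inside hf_api; checking first gives an earlier, clearer error
    # and a natural place to retry or re-create the missing folder.
    if not os.path.isdir(folder_path):
        raise FileNotFoundError(
            f"Expected a results directory at '{folder_path}' before uploading"
        )
    HfApi().upload_folder(
        repo_id=repo_id,
        folder_path=folder_path,
        repo_type="dataset",  # assumption: raw results live in a dataset repo
    )

Failing before the Hub call would let the bot record a more actionable error_msg in the request file than the late ValueError captured above.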