eduagarcia
committed on
Commit
•
aa673b2
1
Parent(s):
3a88acd
Update status of TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original to FAILED
Browse files
TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json
CHANGED
@@ -7,7 +7,7 @@
|
|
7 |
"params": 1.1,
|
8 |
"architectures": "LlamaForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
-
"status": "
|
11 |
"submitted_time": "2024-02-05T22:59:37Z",
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
@@ -23,5 +23,7 @@
|
|
23 |
"faquad_nli": 0.4396551724137931,
|
24 |
"sparrow_pt": 0.21726163284972172
|
25 |
},
|
26 |
-
"result_metrics_average": 0.24837335824446702
|
|
|
|
|
27 |
}
|
|
|
7 |
"params": 1.1,
|
8 |
"architectures": "LlamaForCausalLM",
|
9 |
"weight_type": "Original",
|
10 |
+
"status": "FAILED",
|
11 |
"submitted_time": "2024-02-05T22:59:37Z",
|
12 |
"model_type": "🟢 : pretrained",
|
13 |
"source": "script",
|
|
|
23 |
"faquad_nli": 0.4396551724137931,
|
24 |
"sparrow_pt": 0.21726163284972172
|
25 |
},
|
26 |
+
"result_metrics_average": 0.24837335824446702,
|
27 |
+
"error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T' is not a directory",
|
28 |
+
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n pass\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n try:\n ^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n #upload results\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n add_operations = _prepare_upload_folder_additions(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T' is not a directory\n"
|
29 |
}
|