eduagarcia committed on
Commit e8b33fe • 1 Parent(s): 4e12df9

Retry 4 FAILED models

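The diffs below flip each model's "status" from "FAILED" to "RERUN" and drop the stale "error_msg"/"traceback" fields, while keeping the already-computed "result_metrics_average". A minimal sketch of that reset step, assuming the eval-queue file layout implied by the file names (this script is an illustration, not part of the commit):

```python
# Hypothetical retry-reset script: re-queue FAILED eval requests.
# Field names and file naming match the JSON files in this commit;
# the script itself is an assumption, not the repo's actual tooling.
import json
from pathlib import Path

FAILED_MODELS = [
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
    "huggyllama/llama-7b",
    "meta-llama/Llama-2-13b-hf",
    "meta-llama/Llama-2-7b-hf",
]

for model in FAILED_MODELS:
    path = Path(f"{model}_eval_request_False_float16_Original.json")
    data = json.loads(path.read_text())
    if data.get("status") == "FAILED":
        data["status"] = "RERUN"      # re-queue the model for evaluation
        data.pop("error_msg", None)   # drop stale failure details
        data.pop("traceback", None)
        path.write_text(json.dumps(data, indent=2, ensure_ascii=False))
```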
TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
   "params": 1.1,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "RERUN",
   "submitted_time": "2024-02-05T22:59:37Z",
   "model_type": "🟢 : pretrained",
   "source": "script",
@@ -23,7 +23,5 @@
   "faquad_nli": 0.4396551724137931,
   "sparrow_pt": 0.21726163284972172
   },
-  "result_metrics_average": 0.24837335824446702,
-  "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T' is not a directory",
-  "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n    upload_raw_results(request_data['model'])\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n    pass\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n    try:\n    ^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n    #upload results\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n    return fn(self, *args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n    add_operations = _prepare_upload_folder_additions(\n                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n    raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T' is not a directory\n"
+  "result_metrics_average": 0.24837335824446702
   }
huggyllama/llama-7b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
   "params": 6.738,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "RERUN",
   "submitted_time": "2024-02-05T22:59:55Z",
   "model_type": "🟢 : pretrained",
   "source": "script",
@@ -23,7 +23,5 @@
   "faquad_nli": 0.5548892542799111,
   "sparrow_pt": 0.3432233434254706
   },
-  "result_metrics_average": 0.29724944957428684,
-  "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/huggyllama/llama-7b' is not a directory",
-  "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n    upload_raw_results(request_data['model'])\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n    pass\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n    try:\n    ^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n    #upload results\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n    return fn(self, *args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n    add_operations = _prepare_upload_folder_additions(\n                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n    raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/huggyllama/llama-7b' is not a directory\n"
+  "result_metrics_average": 0.29724944957428684
   }
meta-llama/Llama-2-13b-hf_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
   "params": 13.016,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "RERUN",
   "submitted_time": "2024-02-05T23:00:11Z",
   "model_type": "🟢 : pretrained",
   "source": "script",
@@ -23,7 +23,5 @@
   "faquad_nli": 0.4396551724137931,
   "sparrow_pt": 0.3423268312484707
   },
-  "result_metrics_average": 0.39375029690653635,
-  "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-13b-hf' is not a directory",
-  "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n    upload_raw_results(request_data['model'])\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n    pass\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n    try:\n    ^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n    #upload results\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n    return fn(self, *args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n    add_operations = _prepare_upload_folder_additions(\n                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n    raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-13b-hf' is not a directory\n"
+  "result_metrics_average": 0.39375029690653635
   }
meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
   "params": 6.738,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "RERUN",
   "submitted_time": "2024-02-05T22:59:42Z",
   "model_type": "🟢 : pretrained",
   "source": "script",
@@ -23,7 +23,5 @@
   "faquad_nli": 0.5421626984126984,
   "sparrow_pt": 0.3338694424894331
   },
-  "result_metrics_average": 0.3448309062405303,
-  "error_msg": "Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory",
-  "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n    run_request(\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 107, in run_request\n    upload_raw_results(request_data['model'])\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 89, in upload_raw_results\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 80, in _try_request_again\n    pass\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 74, in _try_request_again\n    try:\n    ^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 61, in _upload_raw_results\n    #upload results\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n    return fn(self, *args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4565, in upload_folder\n    add_operations = _prepare_upload_folder_additions(\n                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 8253, in _prepare_upload_folder_additions\n    raise ValueError(f\"Provided path: '{folder_path}' is not a directory\")\nValueError: Provided path: '/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/downloads/eval-queue/meta-llama/Llama-2-7b-hf' is not a directory\n"
+  "result_metrics_average": 0.3448309062405303
   }
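For context, the removed tracebacks all record the same failure: evaluation finished, but huggingface_hub's upload_folder() raised ValueError because the results path was not a directory, so the whole request was marked FAILED. A hedged sketch of a guard around that call (the function name, repo_id, and path handling are assumptions, not the bot's actual code):

```python
# Hypothetical guard for the failure seen in the removed tracebacks.
# upload_folder() is a real huggingface_hub API; everything else here
# (names, repo_id, error handling) is illustrative.
import os
from huggingface_hub import HfApi

def upload_raw_results_safely(folder_path: str, repo_id: str) -> None:
    # Fail loudly (or create/repopulate the folder) before calling the Hub,
    # instead of letting upload_folder() raise deep inside a retry wrapper.
    if not os.path.isdir(folder_path):
        raise RuntimeError(f"results folder missing, skipping upload: {folder_path}")
    HfApi().upload_folder(folder_path=folder_path, repo_id=repo_id, repo_type="dataset")
```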