eduagarcia committed on
Commit
faf2408
1 Parent(s): fb2444c

fix some languages

Browse files
Files changed (30) hide show
  1. AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json +1 -1
  2. CohereForAI/aya-101_eval_request_False_float16_Original.json +1 -1
  3. FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json +1 -1
  4. FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json +1 -1
  5. HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json +1 -1
  6. JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json +1 -1
  7. JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json +1 -1
  8. NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json +1 -1
  9. Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json +1 -1
  10. PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json +1 -1
  11. PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json +1 -1
  12. WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json +1 -1
  13. WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json +1 -1
  14. WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json +1 -1
  15. allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json +1 -1
  16. allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json +1 -1
  17. allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json +1 -1
  18. berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json +1 -1
  19. cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json +1 -1
  20. fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json +1 -1
  21. lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json +1 -1
  22. nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json +1 -1
  23. nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json +1 -1
  24. nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json +1 -1
  25. nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json +1 -1
  26. nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json +1 -1
  27. pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json +1 -1
  28. recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json +1 -1
  29. recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json +1 -1
  30. semantixai/LloroV2_eval_request_False_bfloat16_Original.json +1 -1
AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6605252682234545,
29
  "result_metrics_npm": 0.49485266203952055,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6605252682234545,
29
  "result_metrics_npm": 0.49485266203952055,
30
+ "main_language": "English"
31
  }
CohereForAI/aya-101_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.5555649777522137,
29
  "result_metrics_npm": 0.35408599648006006,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.5555649777522137,
29
  "result_metrics_npm": 0.35408599648006006,
30
+ "main_language": "English"
31
  }
FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6749035343197202,
29
  "result_metrics_npm": 0.5201529644708365,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6749035343197202,
29
  "result_metrics_npm": 0.5201529644708365,
30
+ "main_language": "English"
31
  }
FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6904086247281405,
29
  "result_metrics_npm": 0.5432596799854227,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6904086247281405,
29
  "result_metrics_npm": 0.5432596799854227,
30
+ "main_language": "English"
31
  }
HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6591230054795971,
29
  "result_metrics_npm": 0.4972532580931312,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6591230054795971,
29
  "result_metrics_npm": 0.4972532580931312,
30
+ "main_language": "English"
31
  }
JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6398422053760237,
29
  "result_metrics_npm": 0.4567270236874747,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6398422053760237,
29
  "result_metrics_npm": 0.4567270236874747,
30
+ "main_language": "Portuguese"
31
  }
JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6441765668293375,
29
  "result_metrics_npm": 0.4715334429973541,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6441765668293375,
29
  "result_metrics_npm": 0.4715334429973541,
30
+ "main_language": "Portuguese"
31
  }
NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.04095462153077381,
29
  "result_metrics_npm": -0.4996940439828647,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.04095462153077381,
29
  "result_metrics_npm": -0.4996940439828647,
30
+ "main_language": "Portuguese"
31
  }
Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6903083539690762,
29
  "result_metrics_npm": 0.541225702715447,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6903083539690762,
29
  "result_metrics_npm": 0.541225702715447,
30
+ "main_language": "English"
31
  }
PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.39464537169007613,
29
  "result_metrics_npm": 0.0737189789524371,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.39464537169007613,
29
  "result_metrics_npm": 0.0737189789524371,
30
+ "main_language": "Portuguese"
31
  }
PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json CHANGED
@@ -29,5 +29,5 @@
29
  "result_metrics_npm": 0.2072835886017701,
30
  "error_msg": "Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.",
31
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 429 Client Error: Too Many Requests for url: https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 400, in _wrapped_lfs_upload\n lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 228, in lfs_upload\n _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action[\"href\"])\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 334, in _upload_multi_part\n hf_raise_for_status(completion_res)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 333, in hf_raise_for_status\n raise HfHubHTTPError(str(e), response=response) from e\nhuggingface_hub.utils._errors.HfHubHTTPError: 429 Client Error: Too Many Requests for url: 
https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 111, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 91, in upload_raw_results\n return _try_request_again(_upload_raw_results, lambda: time.sleep(1), *args)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 82, in _try_request_again\n raise e\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 75, in _try_request_again\n func(*args)\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 62, in _upload_raw_results\n api.upload_folder(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4598, in upload_folder\n commit_info = 
self.create_commit(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 3558, in create_commit\n self.preupload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4058, in preupload_lfs_files\n _upload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 415, in _upload_lfs_files\n thread_map(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 69, in thread_map\n return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 51, in _executor_map\n return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1182, in __iter__\n for obj in iterable:\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 619, in result_iterator\n yield _result_or_cancel(fs.pop())\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 317, in 
_result_or_cancel\n return fut.result(timeout)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/thread.py\", line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 402, in _wrapped_lfs_upload\n raise RuntimeError(f\"Error while uploading '{operation.path_in_repo}' to the Hub.\") from exc\nRuntimeError: Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.\n",
32
- "main_language": "?"
33
  }
 
29
  "result_metrics_npm": 0.2072835886017701,
30
  "error_msg": "Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.",
31
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 429 Client Error: Too Many Requests for url: https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 400, in _wrapped_lfs_upload\n lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 228, in lfs_upload\n _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action[\"href\"])\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 334, in _upload_multi_part\n hf_raise_for_status(completion_res)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 333, in hf_raise_for_status\n raise HfHubHTTPError(str(e), response=response) from e\nhuggingface_hub.utils._errors.HfHubHTTPError: 429 Client Error: Too Many Requests for url: 
https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 111, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 91, in upload_raw_results\n return _try_request_again(_upload_raw_results, lambda: time.sleep(1), *args)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 82, in _try_request_again\n raise e\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 75, in _try_request_again\n func(*args)\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 62, in _upload_raw_results\n api.upload_folder(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4598, in upload_folder\n commit_info = 
self.create_commit(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 3558, in create_commit\n self.preupload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4058, in preupload_lfs_files\n _upload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 415, in _upload_lfs_files\n thread_map(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 69, in thread_map\n return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 51, in _executor_map\n return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1182, in __iter__\n for obj in iterable:\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 619, in result_iterator\n yield _result_or_cancel(fs.pop())\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 317, in 
_result_or_cancel\n return fut.result(timeout)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/thread.py\", line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 402, in _wrapped_lfs_upload\n raise RuntimeError(f\"Error while uploading '{operation.path_in_repo}' to the Hub.\") from exc\nRuntimeError: Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.\n",
32
+ "main_language": "Portuguese"
33
  }
WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T12-19-02.586807",
16
  "error_msg": "CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 
1544, in generate\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2404, in greedy_search\n model_kwargs[\"cache_position\"] = torch.arange(cur_len, device=input_ids.device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1176, in forward\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1019, in forward\n position_ids=position_ids,\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 740, in forward\n hidden_states=hidden_states,\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 671, in forward\n query_states,\n ^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T12-19-02.586807",
16
  "error_msg": "CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 
1544, in generate\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2404, in greedy_search\n model_kwargs[\"cache_position\"] = torch.arange(cur_len, device=input_ids.device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1176, in forward\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1019, in forward\n position_ids=position_ids,\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 740, in forward\n hidden_states=hidden_states,\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 671, in forward\n query_states,\n ^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
+ "main_language": "English"
19
  }
WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T03-37-29.146344",
16
  "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T03-37-29.146344",
16
  "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
+ "main_language": "English"
19
  }
WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T02-44-54.274722",
16
  "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')",
17
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by 
ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n 
return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n 
File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T02-44-54.274722",
16
  "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')",
17
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by 
ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n 
return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n 
File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f34971bf090>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')\n",
18
+ "main_language": "English"
19
  }
allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6577595030605168,
29
  "result_metrics_npm": 0.49102913342997134,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6577595030605168,
29
  "result_metrics_npm": 0.49102913342997134,
30
+ "main_language": "English"
31
  }
allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T02-29-13.759360",
16
  "error_msg": "CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T02-29-13.759360",
16
  "error_msg": "CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = 
cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
18
+ "main_language": "English"
19
  }
allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.63949781885722,
29
  "result_metrics_npm": 0.47383474887492505,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.63949781885722,
29
  "result_metrics_npm": 0.47383474887492505,
30
+ "main_language": "English"
31
  }
berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6789838178640438,
29
  "result_metrics_npm": 0.5252770761531216,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6789838178640438,
29
  "result_metrics_npm": 0.5252770761531216,
30
+ "main_language": "English"
31
  }
cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6470422904888635,
29
  "result_metrics_npm": 0.47240583552942517,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6470422904888635,
29
  "result_metrics_npm": 0.47240583552942517,
30
+ "main_language": "Portuguese"
31
  }
fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.43231598069332267,
29
  "result_metrics_npm": 0.1728623143276737,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.43231598069332267,
29
  "result_metrics_npm": 0.1728623143276737,
30
+ "main_language": "Portuguese"
31
  }
lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T03-13-40.175246",
16
  "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')",
17
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 
1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: 
/repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n 
return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T03-13-40.175246",
16
  "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')",
17
  "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 
1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: 
/repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n 
return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f15a86e6b90>, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')\n",
18
+ "main_language": "English"
19
  }
nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6510067195144671,
29
  "result_metrics_npm": 0.48247175601183667,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6510067195144671,
29
  "result_metrics_npm": 0.48247175601183667,
30
+ "main_language": "Portuguese"
31
  }
nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6865741938302509,
29
  "result_metrics_npm": 0.5166394028213106,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6865741938302509,
29
  "result_metrics_npm": 0.5166394028213106,
30
+ "main_language": "Portuguese"
31
  }
nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6865208563559055,
29
  "result_metrics_npm": 0.5164348805699208,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6865208563559055,
29
  "result_metrics_npm": 0.5164348805699208,
30
+ "main_language": "Portuguese"
31
  }
nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6699463067847506,
29
  "result_metrics_npm": 0.49839457062901427,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6699463067847506,
29
  "result_metrics_npm": 0.49839457062901427,
30
+ "main_language": "Portuguese"
31
  }
nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.6520268207392702,
29
  "result_metrics_npm": 0.4839021383288866,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.6520268207392702,
29
  "result_metrics_npm": 0.4839021383288866,
30
+ "main_language": "Portuguese"
31
  }
pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json CHANGED
@@ -15,5 +15,5 @@
15
  "job_start_time": "2024-04-02T12-16-21.488325",
16
  "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: 
LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'\n",
18
- "main_language": "?"
19
  }
 
15
  "job_start_time": "2024-04-02T12-16-21.488325",
16
  "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'",
17
  "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: 
LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'\n",
18
+ "main_language": "Portuguese"
19
  }
recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.3607908395997714,
29
  "result_metrics_npm": 0.04411491120774061,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.3607908395997714,
29
  "result_metrics_npm": 0.04411491120774061,
30
+ "main_language": "Portuguese"
31
  }
recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.4359386272010365,
29
  "result_metrics_npm": 0.16028744061991526,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.4359386272010365,
29
  "result_metrics_npm": 0.16028744061991526,
30
+ "main_language": "Portuguese"
31
  }
semantixai/LloroV2_eval_request_False_bfloat16_Original.json CHANGED
@@ -27,5 +27,5 @@
27
  },
28
  "result_metrics_average": 0.396846258942384,
29
  "result_metrics_npm": 0.09503536416762777,
30
- "main_language": "?"
31
  }
 
27
  },
28
  "result_metrics_average": 0.396846258942384,
29
  "result_metrics_npm": 0.09503536416762777,
30
+ "main_language": "Portuguese"
31
  }