eduagarcia committed on
Commit fb2444c • 1 Parent(s): 054cf69

Add main_language to model requests

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. 01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json +2 -1
  2. 01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json +2 -1
  3. 01-ai/Yi-34B_eval_request_False_bfloat16_Original.json +2 -1
  4. 01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json +2 -1
  5. 01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json +2 -1
  6. 01-ai/Yi-6B_eval_request_False_bfloat16_Original.json +2 -1
  7. 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +2 -1
  8. 22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json +2 -1
  9. 22h/open-cabrita3b_eval_request_False_float16_Original.json +2 -1
  10. AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json +2 -1
  11. AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json +2 -1
  12. AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json +2 -1
  13. AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json +2 -1
  14. AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json +17 -1
  15. AdaptLLM/finance-LLM_eval_request_False_float16_Original.json +17 -1
  16. AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json +17 -1
  17. AdaptLLM/law-LLM_eval_request_False_float16_Original.json +17 -1
  18. AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json +17 -1
  19. AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json +17 -1
  20. AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json +2 -1
  21. BAAI/Aquila-7B_eval_request_False_float16_Original.json +2 -1
  22. BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +2 -1
  23. BAAI/Aquila2-7B_eval_request_False_float16_Original.json +2 -1
  24. Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json +2 -1
  25. Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json +2 -1
  26. CohereForAI/aya-101_eval_request_False_float16_Original.json +2 -1
  27. DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json +2 -1
  28. DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json +2 -1
  29. Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json +2 -1
  30. Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json +2 -1
  31. EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json +2 -1
  32. EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +2 -1
  33. EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +2 -1
  34. EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +2 -1
  35. EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json +2 -1
  36. EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +2 -1
  37. EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +2 -1
  38. EleutherAI/pythia-12b_eval_request_False_float16_Original.json +17 -1
  39. EleutherAI/pythia-14m_eval_request_False_float16_Original.json +2 -1
  40. EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +2 -1
  41. EleutherAI/pythia-160m_eval_request_False_float16_Original.json +17 -1
  42. EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +2 -1
  43. EleutherAI/pythia-1b_eval_request_False_float16_Original.json +2 -1
  44. EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +2 -1
  45. EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json +17 -1
  46. EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +2 -1
  47. EleutherAI/pythia-410m_eval_request_False_float16_Original.json +17 -1
  48. EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +2 -1
  49. EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json +2 -1
  50. EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +2 -1
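The change itself is mechanical: each eval request JSON gains a "main_language" field (for example "English", "Portuguese", or "?" when the language is unknown), and a few files that were stored as one compact line are rewritten as pretty-printed JSON at the same time. Below is a minimal sketch of how such a batch update could be scripted; the directory layout, the language mapping, and the serialization options are assumptions for illustration, not the actual tooling behind this commit.

```python
import json
from pathlib import Path

# Hypothetical batch update: add a "main_language" key to every eval request
# file. The directory layout and the org-to-language mapping are assumptions.
REQUESTS_DIR = Path("requests")           # assumed: <org>/<model>_eval_request_*.json
LANGUAGE_BY_ORG = {"22h": "Portuguese"}   # assumed mapping; "?" marks unknown

for path in REQUESTS_DIR.glob("*/*_eval_request_*.json"):
    data = json.loads(path.read_text(encoding="utf-8"))
    data.setdefault("main_language", LANGUAGE_BY_ORG.get(path.parent.name, "?"))
    # indent and ensure_ascii=False are consistent with the rewritten files in
    # this commit (escaped "\ud83d\udd36" becomes a literal emoji); the exact
    # options used by the author are not visible in the diff.
    path.write_text(json.dumps(data, indent=4, ensure_ascii=False) + "\n", encoding="utf-8")
```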
01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -14,5 +14,6 @@
  "job_id": 339,
  "job_start_time": "2024-04-02T07-14-39.136107",
  "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
 
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n",
+ "main_language": "English"
  }
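The out-of-memory failure above ends with PyTorch's standard hint about allocator fragmentation. A minimal, hedged sketch of that mitigation, setting PYTORCH_CUDA_ALLOC_CONF before CUDA is initialized (the 128 MiB split size is an arbitrary example, not a value recommended by this repository):

```python
import os

# Must be set before torch initializes CUDA so the caching allocator sees it.
# max_split_size_mb caps the size of cached blocks to reduce fragmentation.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")

import torch  # imported after the environment variable is in place

print(torch.cuda.is_available())
```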
01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.6880686233555414
  },
  "result_metrics_average": 0.7076191361692378,
- "result_metrics_npm": 0.5577893098264872
+ "result_metrics_npm": 0.5577893098264872,
+ "main_language": "English"
  }
01-ai/Yi-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -14,5 +14,6 @@
  "job_id": 337,
  "job_start_time": "2024-04-02T04-04-14.271750",
  "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')",
- "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call 
last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')\n"
 
+ "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call 
last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7fc1d4183fd0>, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')\n",
+ "main_language": "English"
  }
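This failure is a transient connect timeout against the Hugging Face CDN rather than a problem with the model itself. One way to make the download step more robust is to pre-fetch the snapshot with a simple retry loop before starting the evaluation; the sketch below is illustrative only and is not the leaderboard bot's actual logic.

```python
import time
from huggingface_hub import snapshot_download

def fetch_with_retries(repo_id: str, attempts: int = 3, wait_s: float = 30.0) -> str:
    """Download a model snapshot, retrying on transient network errors."""
    for attempt in range(1, attempts + 1):
        try:
            # resume_download=True lets an interrupted shard continue instead
            # of restarting from zero after a timeout.
            return snapshot_download(repo_id, resume_download=True)
        except Exception as err:  # e.g. requests.exceptions.ConnectTimeout
            if attempt == attempts:
                raise
            print(f"attempt {attempt} failed ({err!r}); retrying in {wait_s:.0f}s")
            time.sleep(wait_s)

local_path = fetch_with_retries("01-ai/Yi-34B")
```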
01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 121,
- "job_start_time": "2024-02-09T07-10-30.923872"
+ "job_start_time": "2024-02-09T07-10-30.923872",
+ "main_language": "English"
  }
01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.5864804330790114
  },
  "result_metrics_average": 0.600468220733431,
- "result_metrics_npm": 0.40260964945878
+ "result_metrics_npm": 0.40260964945878,
+ "main_language": "English"
  }
01-ai/Yi-6B_eval_request_False_bfloat16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.5081067075683067
  },
  "result_metrics_average": 0.5947625326022572,
- "result_metrics_npm": 0.39162578790069535
+ "result_metrics_npm": 0.39162578790069535,
+ "main_language": "English"
  }
22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json CHANGED
@@ -14,5 +14,6 @@
  "job_id": 336,
  "job_start_time": "2024-04-02T03-54-36.291839",
  "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
 
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n",
+ "main_language": "Portuguese"
  }
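The adapter fails to load because its adapter_config.json was written by an older PEFT release and still contains an "enable_lora" key that current LoraConfig no longer accepts. A hedged sketch of one possible workaround, filtering the saved config down to the parameters the installed PEFT version understands before calling PeftModel.from_pretrained (illustrative only, not what the evaluation bot does):

```python
import inspect
import json
from pathlib import Path

from peft import LoraConfig

def strip_unknown_lora_keys(adapter_dir: str) -> None:
    """Drop adapter_config.json keys that the installed LoraConfig rejects."""
    config_path = Path(adapter_dir) / "adapter_config.json"
    config = json.loads(config_path.read_text(encoding="utf-8"))
    accepted = set(inspect.signature(LoraConfig.__init__).parameters) - {"self"}
    cleaned = {key: value for key, value in config.items() if key in accepted}
    config_path.write_text(json.dumps(cleaned, indent=2), encoding="utf-8")

# Usage (assumes the adapter was first downloaded to a local directory):
# strip_unknown_lora_keys("./cabrita-lora-v0-1")
```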
22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.4575265405956153
  },
  "result_metrics_average": 0.32141956108734737,
- "result_metrics_npm": -0.03225449824402505
+ "result_metrics_npm": -0.03225449824402505,
+ "main_language": "Portuguese"
  }
22h/open-cabrita3b_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.47963247012405114
  },
  "result_metrics_average": 0.3303614816761663,
- "result_metrics_npm": -0.005341553963556416
+ "result_metrics_npm": -0.005341553963556416,
+ "main_language": "Portuguese"
  }
AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 102,
- "job_start_time": "2024-02-08T16-32-05.080295"
+ "job_start_time": "2024-02-08T16-32-05.080295",
+ "main_language": "English"
  }
AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.491745311259787
  },
  "result_metrics_average": 0.35406676272196724,
- "result_metrics_npm": 0.01835378100931048
+ "result_metrics_npm": 0.01835378100931048,
+ "main_language": "English"
  }
AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 101,
- "job_start_time": "2024-02-08T15-37-30.575084"
+ "job_start_time": "2024-02-08T15-37-30.575084",
+ "main_language": "English"
  }
AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 100,
- "job_start_time": "2024-02-08T14-27-00.255851"
+ "job_start_time": "2024-02-08T14-27-00.255851",
+ "main_language": "English"
  }
AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/finance-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:27Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/finance-LLM-13B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 13.0,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:27Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
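In these rewritten request files the old compact line stores the model-type marker as the escape sequence \ud83d\udd36 while the new pretty-printed version keeps the emoji literally. That is exactly the difference between json.dumps with its default ensure_ascii=True and with ensure_ascii=False, as the small sketch below shows (the dict is illustrative):

```python
import json

entry = {"model_type": "🔶 : fine-tuned"}

# Default: non-ASCII characters are escaped, as in the old single-line files.
print(json.dumps(entry))  # {"model_type": "\ud83d\udd36 : fine-tuned"}

# ensure_ascii=False keeps the emoji literally, as in the rewritten files.
print(json.dumps(entry, ensure_ascii=False, indent=4))
```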
AdaptLLM/finance-LLM_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/finance-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:12Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/finance-LLM",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "LLaMAForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:12Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/law-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:17Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/law-LLM-13B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 13.0,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:17Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
AdaptLLM/law-LLM_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/law-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:01Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/law-LLM",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "LLaMAForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:01Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/medicine-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:22Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/medicine-LLM-13B",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 13.0,
+ "architectures": "LlamaForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:22Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "AdaptLLM/medicine-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:07Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "AdaptLLM/medicine-LLM",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0,
+ "architectures": "LLaMAForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:37:07Z",
+ "model_type": "🔶 : fine-tuned",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.6171926929726294
  },
  "result_metrics_average": 0.6605252682234545,
- "result_metrics_npm": 0.49485266203952055
+ "result_metrics_npm": 0.49485266203952055,
+ "main_language": "?"
  }
BAAI/Aquila-7B_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 334,
- "job_start_time": "2024-04-02T09-28-19.670881"
+ "job_start_time": "2024-04-02T09-28-19.670881",
+ "main_language": "?"
  }
BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -14,5 +14,6 @@
  "job_id": 341,
  "job_start_time": "2024-04-02T07-07-26.615695",
  "error_msg": "Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 552, in http_get\n raise EnvironmentError(\nOSError: Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.\n"
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 552, in http_get\n raise EnvironmentError(\nOSError: Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.\n",
+ "main_language": "?"
  }
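
The failure above is a truncated shard download, and the error text itself names the fix: re-fetch the file with `force_download=True, resume_download=False`. A minimal retry sketch, assuming `huggingface_hub` is installed and using a placeholder repo id (the affected model is not named in this hunk):

```python
# Hypothetical retry of the shard that failed the consistency check.
# "org/model-name" is a placeholder; only the filename comes from the error above.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="org/model-name",                     # placeholder repo id
    filename="pytorch_model-00002-of-00007.bin",  # shard named in the error
    force_download=True,                          # discard the truncated cached copy
    resume_download=False,                        # restart the transfer from scratch
)
print(path)
```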
BAAI/Aquila2-7B_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 349,
- "job_start_time": "2024-04-02T09-42-02.662702"
+ "job_start_time": "2024-04-02T09-42-02.662702",
+ "main_language": "?"
  }
Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.563106045239156
  },
  "result_metrics_average": 0.353173826101491,
- "result_metrics_npm": 0.017927804420097764
+ "result_metrics_npm": 0.017927804420097764,
+ "main_language": "Portuguese"
  }
Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.35365936890599253
  },
  "result_metrics_average": 0.3172500278594119,
- "result_metrics_npm": -0.028868288338713292
+ "result_metrics_npm": -0.028868288338713292,
+ "main_language": "Portuguese"
  }
CohereForAI/aya-101_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.7292099162284759
  },
  "result_metrics_average": 0.5555649777522137,
- "result_metrics_npm": 0.35408599648006006
+ "result_metrics_npm": 0.35408599648006006,
+ "main_language": "?"
  }
DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 215,
- "job_start_time": "2024-02-16T14-47-24.223296"
+ "job_start_time": "2024-02-16T14-47-24.223296",
+ "main_language": "English"
  }
DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 342,
- "job_start_time": "2024-04-02T07-21-28.801557"
+ "job_start_time": "2024-04-02T07-21-28.801557",
+ "main_language": "English"
  }
Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json CHANGED
@@ -14,5 +14,6 @@
  "job_id": 253,
  "job_start_time": "2024-02-25T19-40-34.104437",
  "error_msg": "'DeciLMModel' object has no attribute 'causal_mask'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1421, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 805, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1047, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1167, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 985, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1064, in _update_causal_mask\n if seq_length > self.causal_mask.shape[-1]:\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute 'causal_mask'\n"
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1421, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 805, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1047, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1167, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 985, in forward\n 
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1064, in _update_causal_mask\n if seq_length > self.causal_mask.shape[-1]:\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute 'causal_mask'\n",
+ "main_language": "English"
  }
Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.6506550848022137
  },
  "result_metrics_average": 0.644463491952594,
- "result_metrics_npm": 0.4740648400029519
+ "result_metrics_npm": 0.4740648400029519,
+ "main_language": "English"
  }
EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 77,
- "job_start_time": "2024-02-07T15-56-06.233510"
+ "job_start_time": "2024-02-07T15-56-06.233510",
+ "main_language": "English"
  }
EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 201,
- "job_start_time": "2024-02-14T20-42-35.885763"
+ "job_start_time": "2024-02-14T20-42-35.885763",
+ "main_language": "English"
  }
EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 200,
- "job_start_time": "2024-02-14T20-18-41.235422"
+ "job_start_time": "2024-02-14T20-18-41.235422",
+ "main_language": "English"
  }
EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 202,
- "job_start_time": "2024-02-14T21-23-17.848186"
+ "job_start_time": "2024-02-14T21-23-17.848186",
+ "main_language": "English"
  }
EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 78,
- "job_start_time": "2024-02-07T16-49-06.259210"
+ "job_start_time": "2024-02-07T16-49-06.259210",
+ "main_language": "English"
  }
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 203,
- "job_start_time": "2024-02-14T22-19-56.698691"
+ "job_start_time": "2024-02-14T22-19-56.698691",
+ "main_language": "Other"
  }
EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 199,
- "job_start_time": "2024-02-14T18-51-32.259622"
+ "job_start_time": "2024-02-14T18-51-32.259622",
+ "main_language": "English"
  }
EleutherAI/pythia-12b_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "EleutherAI/pythia-12b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 12.0, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:39Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "EleutherAI/pythia-12b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 12.0,
+ "architectures": "GPTNeoXForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:39:39Z",
+ "model_type": "🟒 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
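
The hunk above shows the pattern for the fully rewritten files: the single-line request JSON is expanded into pretty-printed form with a new `main_language` key. A hypothetical sketch of that bulk edit follows; the glob pattern, indent width, and the `"?"` default are assumptions, and the commit itself assigns a specific language per model rather than one default.

```python
# Hypothetical bulk edit: add "main_language" to every eval request file.
# The real commit sets per-model values ("English", "Portuguese", "Other", "?");
# this sketch only shows the mechanical rewrite with "?" as a stand-in default.
import json
from pathlib import Path

for request_file in Path(".").glob("*/*_eval_request_*.json"):
    data = json.loads(request_file.read_text(encoding="utf-8"))
    data.setdefault("main_language", "?")  # keep a value if one is already present
    request_file.write_text(
        json.dumps(data, indent=1, ensure_ascii=False) + "\n",
        encoding="utf-8",
    )
```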
EleutherAI/pythia-14m_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 192,
- "job_start_time": "2024-02-14T15-03-57.922027"
+ "job_start_time": "2024-02-14T15-03-57.922027",
+ "main_language": "English"
  }
EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 194,
- "job_start_time": "2024-02-14T15-27-43.932506"
+ "job_start_time": "2024-02-14T15-27-43.932506",
+ "main_language": "English"
  }
EleutherAI/pythia-160m_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "EleutherAI/pythia-160m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.213, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:10Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "EleutherAI/pythia-160m",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0.213,
+ "architectures": "GPTNeoXForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:39:10Z",
+ "model_type": "🟒 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 196,
- "job_start_time": "2024-02-14T16-20-18.432424"
+ "job_start_time": "2024-02-14T16-20-18.432424",
+ "main_language": "English"
  }
EleutherAI/pythia-1b_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.1800598326070416
  },
  "result_metrics_average": 0.2523278334045798,
- "result_metrics_npm": -0.13319007373022923
+ "result_metrics_npm": -0.13319007373022923,
+ "main_language": "English"
  }
EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 197,
- "job_start_time": "2024-02-14T16-49-11.216028"
+ "job_start_time": "2024-02-14T16-49-11.216028",
+ "main_language": "English"
  }
EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "EleutherAI/pythia-2.8b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 2.909, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:28Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "EleutherAI/pythia-2.8b",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 2.909,
+ "architectures": "GPTNeoXForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:39:28Z",
+ "model_type": "🟒 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 195,
- "job_start_time": "2024-02-14T15-42-42.785588"
+ "job_start_time": "2024-02-14T15-42-42.785588",
+ "main_language": "English"
  }
EleutherAI/pythia-410m_eval_request_False_float16_Original.json CHANGED
@@ -1 +1,17 @@
- {"model": "EleutherAI/pythia-410m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.506, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:16Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null}
+ {
+ "model": "EleutherAI/pythia-410m",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 0.506,
+ "architectures": "GPTNeoXForCausalLM",
+ "weight_type": "Original",
+ "status": "PENDING",
+ "submitted_time": "2024-02-11T13:39:16Z",
+ "model_type": "🟒 : pretrained",
+ "source": "script",
+ "job_id": -1,
+ "job_start_time": null,
+ "main_language": "English"
+ }
EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 198,
- "job_start_time": "2024-02-14T17-44-29.767283"
+ "job_start_time": "2024-02-14T17-44-29.767283",
+ "main_language": "English"
  }
EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json CHANGED
@@ -26,5 +26,6 @@
  "tweetsentbr": 0.32668391292199067
  },
  "result_metrics_average": 0.29038032919558077,
- "result_metrics_npm": -0.06560922527243283
+ "result_metrics_npm": -0.06560922527243283,
+ "main_language": "English"
  }
EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -12,5 +12,6 @@
  "model_type": "🟒 : pretrained",
  "source": "script",
  "job_id": 193,
- "job_start_time": "2024-02-14T15-15-51.530711"
+ "job_start_time": "2024-02-14T15-15-51.530711",
+ "main_language": "English"
  }