diff --git a/01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json b/01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json index 006569874e6b9e45897ff57c5b907bfa03c9a0f0..57a9629d9679e6ad7d4822a9834b6b686249eaf7 100644 --- a/01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 339, "job_start_time": "2024-04-02T07-14-39.136107", "error_msg": "CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, 
**set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n 
^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 280.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 142.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 38.02 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.93 GiB memory in use. Of the allocated memory 37.50 GiB is allocated by PyTorch, and 12.90 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "English" } \ No newline at end of file diff --git a/01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json b/01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json index 39ab8cfc7c45aec5a01be6840807feb19012a709..6b159ca0c7973846ea9a2feed4effd07a986a98b 100644 --- a/01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-34B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6880686233555414 }, "result_metrics_average": 0.7076191361692378, - "result_metrics_npm": 0.5577893098264872 + "result_metrics_npm": 0.5577893098264872, + "main_language": "English" } \ No newline at end of file diff --git a/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json b/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json index 83b4ea23ab58adf194db8a82064f02dddfff8f87..1662d4e8cd1287e1b30aad2f28d22dee98c8c43e 100644 --- a/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 337, "job_start_time": "2024-04-02T04-04-14.271750", "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')", - "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of 
the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')\n" + "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of 
the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/3b/8a/3b8adbcd40adccd100cd9e8a6950a4219d7b8703092fc5353c97c28dbdea553b/028c29a21e851f0edc3e2f7375a00720c0fde4eaf1f71cd7fbd8bfc628f5ce52?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00001-of-00007.safetensors%3B+filename%3D%22model-00001-of-00007.safetensors%22%3B&Expires=1712289856&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4OTg1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzNiLzhhLzNiOGFkYmNkNDBhZGNjZDEwMGNkOWU4YTY5NTBhNDIxOWQ3Yjg3MDMwOTJmYzUzNTNjOTdjMjhkYmRlYTU1M2IvMDI4YzI5YTIxZTg1MWYwZWRjM2UyZjczNzVhMDA3MjBjMGZkZTRlYWYxZjcxY2Q3ZmJkOGJmYzYyOGY1Y2U1Mj9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=DDRtjhOHZfv0Vkrckn998cCNlNUiKpTg3eBB1ZO7Tx5Ss~mt5ziVVYPtywPc4Td4PW8GEkj9kBbbE3nbVeYy~1NVVaxWp5ffRccWDj67H8pNY9K1hHe6OkSuJtkUh37dAOIisgG2y02IeYMIEVQk3g4HD~hblfvOzPx~Ov3bmqGTsTCd7gsjfY45vNiOMafRPzTT7gCE9P-dR2M56T4iZ8yoE7HjZ~m96PrVDdm-f2rkXroI97M~9sgiAtP3DPjkL9Uw7Plc0GvgRlEU1lpNuu55LlPpQmGRGpV4yggn-X8FK5G6Q3o96mFjbHjPFRdX8v6goGCH5uxzGJbcOulSZA__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: 9f067574-76d1-4495-b2a0-f29cb4578d25)')\n", + "main_language": "English" } \ No newline at end of file diff --git a/01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json b/01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json index fc7fb30477ea3caf7edf09b3dab2430bc1c756e6..dc1ba7c9e2bd91f055f0a393785ac78ed07695b9 100644 --- a/01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 121, - "job_start_time": "2024-02-09T07-10-30.923872" + "job_start_time": "2024-02-09T07-10-30.923872", + "main_language": "English" } \ No newline at end of file diff --git a/01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json b/01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json index df89546a5fd538d3b9170019c76a19fd05b84022..58699dedd6bad5d6ce81aecd71d26e50e89b6603 100644 --- a/01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-6B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5864804330790114 }, "result_metrics_average": 0.600468220733431, - "result_metrics_npm": 0.40260964945878 + "result_metrics_npm": 0.40260964945878, + "main_language": "English" } \ No newline at end of file diff --git a/01-ai/Yi-6B_eval_request_False_bfloat16_Original.json b/01-ai/Yi-6B_eval_request_False_bfloat16_Original.json index 13631ef7f299810a8d70ce2098418f9e9451abba..b8746aaafbaee2407884ee9424f476fb78c615de 100644 --- a/01-ai/Yi-6B_eval_request_False_bfloat16_Original.json +++ b/01-ai/Yi-6B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5081067075683067 }, "result_metrics_average": 0.5947625326022572, - "result_metrics_npm": 0.39162578790069535 + "result_metrics_npm": 0.39162578790069535, + "main_language": "English" } \ No newline at end of file diff --git a/22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json b/22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json index 01d18202c2d4f5d0b9ad2e66fc2b31e01ef36c59..02704ba5e35c0b45fd0af1c4ecc0cd01c0c9551f 100644 --- a/22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +++ b/22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json @@ -14,5 +14,6 @@ "job_id": 336, "job_start_time": "2024-04-02T03-54-36.291839", "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n", + "main_language": "Portuguese" } \ No newline at end of file diff --git a/22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json b/22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json index 73cd48eb0f57844dcf22b8c90fa02f2b7b60677d..710bf42c8e83d1d90356cf28abc48393c7089f6a 100644 --- a/22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json +++ 
b/22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4575265405956153 }, "result_metrics_average": 0.32141956108734737, - "result_metrics_npm": -0.03225449824402505 + "result_metrics_npm": -0.03225449824402505, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/22h/open-cabrita3b_eval_request_False_float16_Original.json b/22h/open-cabrita3b_eval_request_False_float16_Original.json index e8beb240891e74d1a6d7016447ea0fc0c3fb1849..b631e8f117cbf0bd3727d61c92ec603a7e49cac2 100644 --- a/22h/open-cabrita3b_eval_request_False_float16_Original.json +++ b/22h/open-cabrita3b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.47963247012405114 }, "result_metrics_average": 0.3303614816761663, - "result_metrics_npm": -0.005341553963556416 + "result_metrics_npm": -0.005341553963556416, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json b/AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json index c69b616814abd72b1f47686682cc3b456a80d46d..ecaf926f20e85e2081f28e853d30942cf04bd4b2 100644 --- a/AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json +++ b/AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 102, - "job_start_time": "2024-02-08T16-32-05.080295" + "job_start_time": "2024-02-08T16-32-05.080295", + "main_language": "English" } \ No newline at end of file diff --git a/AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json b/AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json index 55d40549c78fc7b944c94d3182740bc814e6185e..909e41d24f1c6b3ad3e0ae701617bebd0be88dc5 100644 --- a/AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json +++ b/AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.491745311259787 }, "result_metrics_average": 0.35406676272196724, - "result_metrics_npm": 0.01835378100931048 + "result_metrics_npm": 0.01835378100931048, + "main_language": "English" } \ No newline at end of file diff --git a/AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json b/AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json index 039d44b0cc33be0766e65023386d985dee49fdbc..4bffae12a4cab95086f270e39b673f30f98c83cb 100644 --- a/AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json +++ b/AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 101, - "job_start_time": "2024-02-08T15-37-30.575084" + "job_start_time": "2024-02-08T15-37-30.575084", + "main_language": "English" } \ No newline at end of file diff --git a/AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json b/AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json index a5e1c36f5b7050424dd5a8c7e34d3df511f6267d..f4d73d8f241a384afc74bd34d78cbbf59ab526ac 100644 --- a/AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json +++ b/AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 100, - "job_start_time": "2024-02-08T14-27-00.255851" + "job_start_time": "2024-02-08T14-27-00.255851", + "main_language": "English" } \ No newline at end of file 
diff --git a/AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json b/AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json index d2db8960cb0739a7d0589169ff4849a0b338a2eb..38f2ce5c96764d201561622778bf22292cf10a95 100644 --- a/AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json +++ b/AdaptLLM/finance-LLM-13B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/finance-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:27Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/finance-LLM-13B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 13.0, + "architectures": "LlamaForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:27Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AdaptLLM/finance-LLM_eval_request_False_float16_Original.json b/AdaptLLM/finance-LLM_eval_request_False_float16_Original.json index 4ab34a6e397f007ad156445cd92a7cbb03d23431..43bf9e6e9d5c0b9b23e1b176b91e854c96dc79cc 100644 --- a/AdaptLLM/finance-LLM_eval_request_False_float16_Original.json +++ b/AdaptLLM/finance-LLM_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/finance-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:12Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/finance-LLM", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "LLaMAForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:12Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json b/AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json index 3418efcf9e92c9fe282b15b94a95f34633658d24..33a52ec3ba2574154273f36eb28925da5e9b518a 100644 --- a/AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json +++ b/AdaptLLM/law-LLM-13B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/law-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:17Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/law-LLM-13B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 13.0, + "architectures": "LlamaForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:17Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + 
"job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AdaptLLM/law-LLM_eval_request_False_float16_Original.json b/AdaptLLM/law-LLM_eval_request_False_float16_Original.json index d50619fb9876533fbae29787a9014d4751a32c60..56e5b91ba76173cf16ce44f85bd0903adb451c1f 100644 --- a/AdaptLLM/law-LLM_eval_request_False_float16_Original.json +++ b/AdaptLLM/law-LLM_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/law-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:01Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/law-LLM", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "LLaMAForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:01Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json b/AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json index 39b7dca854691958545e37662f9314142c1a6235..532b119a7639e009ad81c62d2fa11adc55d7df0d 100644 --- a/AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json +++ b/AdaptLLM/medicine-LLM-13B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/medicine-LLM-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:22Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/medicine-LLM-13B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 13.0, + "architectures": "LlamaForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:22Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json b/AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json index 8a29b4e1bf94212bfa6f3fe40350ae284369983a..0cb2ef16d6d46b3c4ee68c3b01f7e787d10e04fd 100644 --- a/AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json +++ b/AdaptLLM/medicine-LLM_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "AdaptLLM/medicine-LLM", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "LLaMAForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:07Z", "model_type": "\ud83d\udd36 : fine-tuned", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "AdaptLLM/medicine-LLM", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "LLaMAForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": 
"2024-02-11T13:37:07Z", + "model_type": "🔶 : fine-tuned", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json b/AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json index 87a36db16225f03ea59d8fdb4019451173d3b0b3..958207077d755111e29dff45ae059ec8eae43f26 100644 --- a/AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json +++ b/AetherResearch/Cerebrum-1.0-7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6171926929726294 }, "result_metrics_average": 0.6605252682234545, - "result_metrics_npm": 0.49485266203952055 + "result_metrics_npm": 0.49485266203952055, + "main_language": "?" } \ No newline at end of file diff --git a/BAAI/Aquila-7B_eval_request_False_float16_Original.json b/BAAI/Aquila-7B_eval_request_False_float16_Original.json index e7853d517ffdd496cc9e8b1106be64875d1025c5..20c4ead856bf4f0a7312fd631ca11543e35dbc84 100644 --- a/BAAI/Aquila-7B_eval_request_False_float16_Original.json +++ b/BAAI/Aquila-7B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 334, - "job_start_time": "2024-04-02T09-28-19.670881" + "job_start_time": "2024-04-02T09-28-19.670881", + "main_language": "?" } \ No newline at end of file diff --git a/BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json b/BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json index 3d36de1b660dddb7a6a519b6b76f3f51b36ab0be..93a9b5aa2efc34234eb5555797b94b12fb127300 100644 --- a/BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +++ b/BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 341, "job_start_time": "2024-04-02T07-07-26.615695", "error_msg": "Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. 
Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 552, in http_get\n raise EnvironmentError(\nOSError: Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. 
Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 552, in http_get\n raise EnvironmentError(\nOSError: Consistency check failed: file should be of size 9814920915 but has size 7945050153 (pytorch_model-00002-of-00007.bin).\nWe are sorry for the inconvenience. Please retry download and pass `force_download=True, resume_download=False` as argument.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.\n", + "main_language": "?" 
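
The Aquila2-34B request above failed a shard consistency check (an incomplete download of pytorch_model-00002-of-00007.bin), and the error text itself names the remedy. A minimal sketch of applying that advice with huggingface_hub, assuming the eval machine's cache still holds the truncated shard; repo id and filename are taken from the log:

# Minimal sketch, following the advice embedded in the error message above.
# Assumes huggingface_hub is installed; repo_id and filename come from the log.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="BAAI/Aquila2-34B",
    filename="pytorch_model-00002-of-00007.bin",
    force_download=True,     # discard the truncated cached copy
    resume_download=False,   # restart from byte zero rather than resuming
)
print(path)  # local path of the freshly downloaded shard
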
} \ No newline at end of file diff --git a/BAAI/Aquila2-7B_eval_request_False_float16_Original.json b/BAAI/Aquila2-7B_eval_request_False_float16_Original.json index bd4334b0fe7429bd37f2953132eb6351d9628375..01e0173cd408cd54b5933cb1e4d256a6e0edb085 100644 --- a/BAAI/Aquila2-7B_eval_request_False_float16_Original.json +++ b/BAAI/Aquila2-7B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 349, - "job_start_time": "2024-04-02T09-42-02.662702" + "job_start_time": "2024-04-02T09-42-02.662702", + "main_language": "?" } \ No newline at end of file diff --git a/Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json b/Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json index c2fdd1141f42869ab5030bf025fdd4302160cb7b..44e091b407dfc07a5d51f941a9f20f71ae81b1d6 100644 --- a/Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json +++ b/Bruno/Caramelinho_eval_request_False_bfloat16_Adapter.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.563106045239156 }, "result_metrics_average": 0.353173826101491, - "result_metrics_npm": 0.017927804420097764 + "result_metrics_npm": 0.017927804420097764, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json b/Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json index 3530dad35bd612c6c8b55eb1f1f37d0d6a6eb808..68a81d9545b15b47448bd25f08623006e9471297 100644 --- a/Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json +++ b/Bruno/Caramelo_7B_eval_request_False_bfloat16_Adapter.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.35365936890599253 }, "result_metrics_average": 0.3172500278594119, - "result_metrics_npm": -0.028868288338713292 + "result_metrics_npm": -0.028868288338713292, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/CohereForAI/aya-101_eval_request_False_float16_Original.json b/CohereForAI/aya-101_eval_request_False_float16_Original.json index 40f9ab570808ea0f0cc97074222c5713e22a79b8..fe3efc2ca3a68a2f36e36da8a170d03813eed9f8 100644 --- a/CohereForAI/aya-101_eval_request_False_float16_Original.json +++ b/CohereForAI/aya-101_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.7292099162284759 }, "result_metrics_average": 0.5555649777522137, - "result_metrics_npm": 0.35408599648006006 + "result_metrics_npm": 0.35408599648006006, + "main_language": "?" 
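
Every request file touched in this diff gains a "main_language" key ("English", "Portuguese", "Chinese", "Other", or "?" when unknown), and files previously stored as minified single-line JSON are re-serialized pretty-printed with literal emoji in model_type. A hedged sketch of a bulk-update script that would produce changes of this shape; this is an illustration, not the maintainers' actual tooling, and the org-to-language mapping below is invented for the example:

# Hedged sketch, not the repository's real script: add "main_language"
# to every eval request JSON, defaulting to "?" when the language is unknown.
import json
from pathlib import Path

# Illustrative mapping only; the real assignments in this diff are per-model.
ORG_LANGS = {"EleutherAI": "English", "Qwen": "Chinese", "OrionStarAI": "Chinese"}

for path in Path(".").glob("*/*_eval_request_*.json"):
    data = json.loads(path.read_text(encoding="utf-8"))
    data.setdefault("main_language", ORG_LANGS.get(path.parts[0], "?"))
    # indent plus ensure_ascii=False reproduces the pretty-printed form with
    # literal emoji (🟢/🔶) seen in the rewritten files; json.dumps emits no
    # trailing newline, matching the "\ No newline at end of file" markers.
    path.write_text(json.dumps(data, indent=4, ensure_ascii=False), encoding="utf-8")
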
} \ No newline at end of file diff --git a/DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json b/DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json index decef396afa10fdd5a792fa06fbc207ab81d8b3c..0c996b6ba56b581da9b7e937f299ee1df25a6733 100644 --- a/DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json +++ b/DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 215, - "job_start_time": "2024-02-16T14-47-24.223296" + "job_start_time": "2024-02-16T14-47-24.223296", + "main_language": "English" } \ No newline at end of file diff --git a/DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json b/DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json index f9a52970149bfdc1441bc4858cb3ed57e550f265..91126c1b3d76705a237ea9153a0af24ae4457960 100644 --- a/DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json +++ b/DAMO-NLP-MT/polylm-13b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 342, - "job_start_time": "2024-04-02T07-21-28.801557" + "job_start_time": "2024-04-02T07-21-28.801557", + "main_language": "English" } \ No newline at end of file diff --git a/Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json b/Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json index 375900f9663e0eb79b79f716d57b5b86b7aed902..7a7cd2d9f6650eabf47bb49f3a2297279c0fc347 100644 --- a/Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json +++ b/Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 253, "job_start_time": "2024-02-25T19-40-34.104437", "error_msg": "'DeciLMModel' object has no attribute 'causal_mask'", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1421, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n 
File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 805, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1047, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1167, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 985, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1064, in _update_causal_mask\n if seq_length > self.causal_mask.shape[-1]:\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute 'causal_mask'\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n 
File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1421, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 805, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1047, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1167, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 985, in forward\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1064, in _update_causal_mask\n if seq_length > self.causal_mask.shape[-1]:\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute 'causal_mask'\n", + "main_language": "English" } \ No newline at end of file diff --git a/Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json b/Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json index 4c85c2892490ee48b0aab38b187998a1519bdd3d..60091d4cbcb2fc6520d07519e62b746dbe88979b 100644 --- a/Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json +++ b/Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6506550848022137 }, "result_metrics_average": 0.644463491952594, - "result_metrics_npm": 
0.4740648400029519 + "result_metrics_npm": 0.4740648400029519, + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json b/EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json index e1e4f7db43610b0d161d52e43ecc8602c74fffe6..3a298bd16205e972c06ac3a798b35eb4a4117d34 100644 --- a/EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json +++ b/EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 77, - "job_start_time": "2024-02-07T15-56-06.233510" + "job_start_time": "2024-02-07T15-56-06.233510", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json b/EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json index ec97e5d93b7acf167434e27e7a2a678710fc6e94..ff2c383776891756dd94d85f31bb17bb8acf8beb 100644 --- a/EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +++ b/EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 201, - "job_start_time": "2024-02-14T20-42-35.885763" + "job_start_time": "2024-02-14T20-42-35.885763", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json b/EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json index 3d144451a918ed7f67ba09efe5ab05736c55d215..b9bd0bc434f0af02b0e22e1f0c351c4011d18227 100644 --- a/EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +++ b/EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 200, - "job_start_time": "2024-02-14T20-18-41.235422" + "job_start_time": "2024-02-14T20-18-41.235422", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json b/EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json index e3e8047986f36395caad6fe1c4ed8b4a7d44b41d..2f396be8c0a2a3be85666e3ba4a74bf2b2cf6d1e 100644 --- a/EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +++ b/EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 202, - "job_start_time": "2024-02-14T21-23-17.848186" + "job_start_time": "2024-02-14T21-23-17.848186", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json b/EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json index 19eb310974e3457a8a6179686e7f4f7603e605b1..8d3e6a6e3c84439f8f537382425bce0dfa8ee06f 100644 --- a/EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json +++ b/EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 78, - "job_start_time": "2024-02-07T16-49-06.259210" + "job_start_time": "2024-02-07T16-49-06.259210", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json b/EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json index b9b15286e0ae7e841f457ac45e34eff4c45af1f9..98571a794f41142b16f57f0e602ff74862fde764 100644 --- 
a/EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +++ b/EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 203, - "job_start_time": "2024-02-14T22-19-56.698691" + "job_start_time": "2024-02-14T22-19-56.698691", + "main_language": "Other" } \ No newline at end of file diff --git a/EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json index d5a0122779181f201144654d14d7989acde6857b..7882b468b2405142591f7c4327601f79873845fa 100644 --- a/EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 199, - "job_start_time": "2024-02-14T18-51-32.259622" + "job_start_time": "2024-02-14T18-51-32.259622", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-12b_eval_request_False_float16_Original.json b/EleutherAI/pythia-12b_eval_request_False_float16_Original.json index 26cd2708d672044d431c855c776326e03417d4e9..2fbaaaf583c4a9214a7f392fb8d8f384b82bfae0 100644 --- a/EleutherAI/pythia-12b_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-12b_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "EleutherAI/pythia-12b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 12.0, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:39Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "EleutherAI/pythia-12b", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 12.0, + "architectures": "GPTNeoXForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:39Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/EleutherAI/pythia-14m_eval_request_False_float16_Original.json b/EleutherAI/pythia-14m_eval_request_False_float16_Original.json index dbc107bb32bbcaa75e99448010b84aa795c7ac3e..4df785f3f4aca8d92faaaf1e7ddfd78ea1ebb0d5 100644 --- a/EleutherAI/pythia-14m_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-14m_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 192, - "job_start_time": "2024-02-14T15-03-57.922027" + "job_start_time": "2024-02-14T15-03-57.922027", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json index 7931ba7fce96d4e365d82050fd0cb52cb7609be0..9a8b3c83b5dc40e988b1df27e2097f4b336da721 100644 --- a/EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 194, - "job_start_time": "2024-02-14T15-27-43.932506" + "job_start_time": "2024-02-14T15-27-43.932506", + "main_language": "English" } \ No newline at end of file diff --git 
a/EleutherAI/pythia-160m_eval_request_False_float16_Original.json b/EleutherAI/pythia-160m_eval_request_False_float16_Original.json index 791b8cf5ed5ce1ce773d4b88ea5f6b3e9e89f9d0..73691eaac386fac6f9a9c9c9f15ecc4de2c92fdb 100644 --- a/EleutherAI/pythia-160m_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-160m_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "EleutherAI/pythia-160m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.213, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:10Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "EleutherAI/pythia-160m", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.213, + "architectures": "GPTNeoXForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:10Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json index bcfba49cffa2cbff186f2e0d41b67c522e585f55..1a5153711f0e527b5ff24a877d5d20afe1d2d80b 100644 --- a/EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 196, - "job_start_time": "2024-02-14T16-20-18.432424" + "job_start_time": "2024-02-14T16-20-18.432424", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-1b_eval_request_False_float16_Original.json b/EleutherAI/pythia-1b_eval_request_False_float16_Original.json index e18ffd7d8fb47fa424b27a8dea469ebf51b48c90..8692ad254bf296c06b8e272acf435fa8a1ac0339 100644 --- a/EleutherAI/pythia-1b_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-1b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.1800598326070416 }, "result_metrics_average": 0.2523278334045798, - "result_metrics_npm": -0.13319007373022923 + "result_metrics_npm": -0.13319007373022923, + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json index e981e74a1de38c545c3ea10d0cc5326dc0edb600..21a521a017b2671075b61dc2b93c373c8bc6d62d 100644 --- a/EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 197, - "job_start_time": "2024-02-14T16-49-11.216028" + "job_start_time": "2024-02-14T16-49-11.216028", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json b/EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json index 42a820617bf5dfc355518a8e2b881518bb52eb15..13e2b0a4f6a8744d5b42307c0e4ca173836f7885 100644 --- a/EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-2.8b_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": 
"EleutherAI/pythia-2.8b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 2.909, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:28Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "EleutherAI/pythia-2.8b", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 2.909, + "architectures": "GPTNeoXForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:28Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json index f46e286c84a4522ecba10d721849bb1c8c84f86e..0d1d27c8acfc5be2cb6b3207cb25a24a6925e17f 100644 --- a/EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 195, - "job_start_time": "2024-02-14T15-42-42.785588" + "job_start_time": "2024-02-14T15-42-42.785588", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-410m_eval_request_False_float16_Original.json b/EleutherAI/pythia-410m_eval_request_False_float16_Original.json index 4c6385ffa337824ce54d7a08341e51db8a04a897..fcf6bcf7642d2415f1ff08d72f71e6f3fd551f85 100644 --- a/EleutherAI/pythia-410m_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-410m_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "EleutherAI/pythia-410m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.506, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:16Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "EleutherAI/pythia-410m", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.506, + "architectures": "GPTNeoXForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:16Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json index 095bce1e1dfd6c18e237e14c4a4837d301373a6d..5267d75584947290aebf705fffcce15602e5dfb7 100644 --- a/EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 198, - "job_start_time": "2024-02-14T17-44-29.767283" + "job_start_time": "2024-02-14T17-44-29.767283", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json b/EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json index 
d9c2f0d25c3f24300759914f007a7d1f7ae0d26d..0ec717fe9019a0a006acfb85eb6518ba5c82398c 100644 --- a/EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-6.9b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.32668391292199067 }, "result_metrics_average": 0.29038032919558077, - "result_metrics_npm": -0.06560922527243283 + "result_metrics_npm": -0.06560922527243283, + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json b/EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json index e3cbd890e8d6436f7a450b7a071c1063f79e107a..ca4ae38942ec78dfee5cbebbcc8ea90d22901598 100644 --- a/EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 193, - "job_start_time": "2024-02-14T15-15-51.530711" + "job_start_time": "2024-02-14T15-15-51.530711", + "main_language": "English" } \ No newline at end of file diff --git a/EleutherAI/pythia-70m_eval_request_False_float16_Original.json b/EleutherAI/pythia-70m_eval_request_False_float16_Original.json index 54c66f641a69bafe30b1bbdc6972ee49606446da..3b49e60492384df85f0fd031cf50eed6fe6ce8c9 100644 --- a/EleutherAI/pythia-70m_eval_request_False_float16_Original.json +++ b/EleutherAI/pythia-70m_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "EleutherAI/pythia-70m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.096, "architectures": "GPTNeoXForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:58Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "EleutherAI/pythia-70m", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.096, + "architectures": "GPTNeoXForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:58Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json b/FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json index 064c8072abfc44c20f338035ce485cf960d7751c..68fe85f135c0e900ece60bb1dd5d0c2a0324b28c 100644 --- a/FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json +++ b/FuseAI/FuseChat-7B-VaRM_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.44067858320963216 }, "result_metrics_average": 0.6749035343197202, - "result_metrics_npm": 0.5201529644708365 + "result_metrics_npm": 0.5201529644708365, + "main_language": "?" 
} \ No newline at end of file diff --git a/FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json b/FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json index 0f05f599d94939faf4c37c969d444c7e5e9cfd9e..7a95e6afaa766039bb859572c836529285d27e0f 100644 --- a/FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json +++ b/FuseAI/OpenChat-3.5-7B-Solar_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5877550319137789 }, "result_metrics_average": 0.6904086247281405, - "result_metrics_npm": 0.5432596799854227 + "result_metrics_npm": 0.5432596799854227, + "main_language": "?" } \ No newline at end of file diff --git a/HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json b/HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json index 7acb1234a1f90f049bf2033d74137ac701a184de..838588f246486dd10de7b0d29ff060f53de4b1aa 100644 --- a/HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json +++ b/HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.1506866897702477 }, "result_metrics_average": 0.20327309752532952, - "result_metrics_npm": -0.20432934163805447 + "result_metrics_npm": -0.20432934163805447, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/HuggingFaceH4/zephyr-7b-beta_eval_request_False_bfloat16_Original.json b/HuggingFaceH4/zephyr-7b-beta_eval_request_False_bfloat16_Original.json index eea7b26e3c44f63e4a17c10c8c1a9e9e22bab792..61f5b53fcc537bccf9df71e4086a86a4dcf7bb05 100644 --- a/HuggingFaceH4/zephyr-7b-beta_eval_request_False_bfloat16_Original.json +++ b/HuggingFaceH4/zephyr-7b-beta_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.46064331884597925 }, "result_metrics_average": 0.6276835593344771, - "result_metrics_npm": 0.45237979734164724 + "result_metrics_npm": 0.45237979734164724, + "main_language": "English" } \ No newline at end of file diff --git a/HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json b/HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json index 3ec84aa3fa5cc331313318ab5a54e6a9698dd265..2d943a760de4f9abf664ce639a27871bca952199 100644 --- a/HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json +++ b/HuggingFaceH4/zephyr-7b-gemma-v0.1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6152504166981012 }, "result_metrics_average": 0.6591230054795971, - "result_metrics_npm": 0.4972532580931312 + "result_metrics_npm": 0.4972532580931312, + "main_language": "?" 
} \ No newline at end of file diff --git a/HuggingFaceTB/cosmo-1b_eval_request_False_float16_Original.json b/HuggingFaceTB/cosmo-1b_eval_request_False_float16_Original.json index e0713aa9a74a06d10d9ce8f26be4b74a56441b53..f5503e6a9cbf7c035039ae60900b70aa9f37e8fb 100644 --- a/HuggingFaceTB/cosmo-1b_eval_request_False_float16_Original.json +++ b/HuggingFaceTB/cosmo-1b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.2534405090147715 }, "result_metrics_average": 0.28355881960698925, - "result_metrics_npm": -0.08522459258817766 + "result_metrics_npm": -0.08522459258817766, + "main_language": "English" } \ No newline at end of file diff --git a/Intel/neural-chat-7b-v3-1_eval_request_False_float16_Original.json b/Intel/neural-chat-7b-v3-1_eval_request_False_float16_Original.json index e7f70f8f238a89cfdb029f9871b6bf1e7473542b..4eeb499187f35cbc2360aa79137505571c481f1d 100644 --- a/Intel/neural-chat-7b-v3-1_eval_request_False_float16_Original.json +++ b/Intel/neural-chat-7b-v3-1_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5145983702206705 }, "result_metrics_average": 0.6726525565288831, - "result_metrics_npm": 0.5225859783991329 + "result_metrics_npm": 0.5225859783991329, + "main_language": "English" } \ No newline at end of file diff --git a/Intel/neural-chat-7b-v3-3_eval_request_False_float16_Original.json b/Intel/neural-chat-7b-v3-3_eval_request_False_float16_Original.json index bfaffe5584de3a15d87ddbf691ff6da8fe6d2ba7..408bfbefd802063358b68f4a1148bbb7f1724c73 100644 --- a/Intel/neural-chat-7b-v3-3_eval_request_False_float16_Original.json +++ b/Intel/neural-chat-7b-v3-3_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4689260995001763 }, "result_metrics_average": 0.6533609527579876, - "result_metrics_npm": 0.4871606934211402 + "result_metrics_npm": 0.4871606934211402, + "main_language": "English" } \ No newline at end of file diff --git a/JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json b/JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json index 535b02752a5f13789d46d4c70a3ac615bb684187..7b26a2bedcded9ee2974bf4bca49d90a72382984 100644 --- a/JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json +++ b/JJhooww/MistralReloadBR_v2_ptbr_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6700347269707226 }, "result_metrics_average": 0.6398422053760237, - "result_metrics_npm": 0.4567270236874747 + "result_metrics_npm": 0.4567270236874747, + "main_language": "?" } \ No newline at end of file diff --git a/JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json b/JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json index 0dd1eec6186b6edb6df970a1ee5b8663bc0c6b0a..f2d9d7e71fa2556be88304897e4cdc5f81745e15 100644 --- a/JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json +++ b/JJhooww/Mistral_Relora_Step2k_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5193585604823832 }, "result_metrics_average": 0.6441765668293375, - "result_metrics_npm": 0.4715334429973541 + "result_metrics_npm": 0.4715334429973541, + "main_language": "?" 
} \ No newline at end of file diff --git a/NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json b/NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json index 997eea8a4fe5bd80b2d80ba9a28ca53e3d9473bb..e58d368fe2879a481682099cf07c46adc3348673 100644 --- a/NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json +++ b/NOVA-vision-language/GlorIA-1.3B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.0018832391713747645 }, "result_metrics_average": 0.04095462153077381, - "result_metrics_npm": -0.4996940439828647 + "result_metrics_npm": -0.4996940439828647, + "main_language": "?" } \ No newline at end of file diff --git a/Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json b/Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json index d8595026b783514053d48ce59f408b41c9f1924f..9a52b0906c9fb71ccde00dc9bb2ed65b6f9908ea 100644 --- a/Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json +++ b/Nexusflow/Starling-LM-7B-beta_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5036962690569555 }, "result_metrics_average": 0.6903083539690762, - "result_metrics_npm": 0.541225702715447 + "result_metrics_npm": 0.541225702715447, + "main_language": "?" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-13b_eval_request_False_bfloat16_Original.json b/NousResearch/Nous-Hermes-13b_eval_request_False_bfloat16_Original.json index e56e1a28e8d8bfcf9ab4a89965828cbd0198e02c..0cc352550ea6ccaeb24e99b3135302b2909f5187 100644 --- a/NousResearch/Nous-Hermes-13b_eval_request_False_bfloat16_Original.json +++ b/NousResearch/Nous-Hermes-13b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.591700154492257 }, "result_metrics_average": 0.5570174936492952, - "result_metrics_npm": 0.3440715321174036 + "result_metrics_npm": 0.3440715321174036, + "main_language": "English" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO_eval_request_False_bfloat16_Original.json b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO_eval_request_False_bfloat16_Original.json index c5f47120a7fb6d90f33d1393f39e7342939b7aa3..7b55234acec6c659374e0905c0fce09f6a02e5ee 100644 --- a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO_eval_request_False_bfloat16_Original.json +++ b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4521585213804042 }, "result_metrics_average": 0.6173183506223013, - "result_metrics_npm": 0.4163010463383218 + "result_metrics_npm": 0.4163010463383218, + "main_language": "English" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_bfloat16_Original.json b/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_bfloat16_Original.json index 0ccd11271a85ee30c28df7db4a83956d188ddccf..e6c6930f1085fa917cabfb9b5e8f67e7e3bf64e5 100644 --- a/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_bfloat16_Original.json +++ b/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6190805774400715 }, "result_metrics_average": 0.6721516064689931, - "result_metrics_npm": 0.5058743157671289 + "result_metrics_npm": 0.5058743157671289, + "main_language": "English" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_float16_Original.json 
b/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_float16_Original.json index c98714f5fa2de905bbee49ed4de8265ee7a76dec..10ab4e94e47c0dac500c039ca43df503336e811f 100644 --- a/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_float16_Original.json +++ b/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO_eval_request_False_float16_Original.json @@ -27,5 +27,6 @@ }, "result_metrics_average": 0.6717470081853019, "result_metrics_npm": 0.5055825901432658, - "hide": true + "hide": true, + "main_language": "English" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-SOLAR-10.7B_eval_request_False_bfloat16_Original.json b/NousResearch/Nous-Hermes-2-SOLAR-10.7B_eval_request_False_bfloat16_Original.json index aaf1ee4bf83f62d0d73ef89a38ba048529b13ac9..953eeaa4ca636e768d641320fed3e7300064e660 100644 --- a/NousResearch/Nous-Hermes-2-SOLAR-10.7B_eval_request_False_bfloat16_Original.json +++ b/NousResearch/Nous-Hermes-2-SOLAR-10.7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6633893741720925 }, "result_metrics_average": 0.7163312218494068, - "result_metrics_npm": 0.5786507489651551 + "result_metrics_npm": 0.5786507489651551, + "main_language": "English" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-Yi-34B_eval_request_False_bfloat16_Original.json b/NousResearch/Nous-Hermes-2-Yi-34B_eval_request_False_bfloat16_Original.json index 099fb57a1ebdbfd1245fdfdf2db6a859f847bc33..ab710303be87854fde8614c6a13ed3a2665cf4b1 100644 --- a/NousResearch/Nous-Hermes-2-Yi-34B_eval_request_False_bfloat16_Original.json +++ b/NousResearch/Nous-Hermes-2-Yi-34B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6569392825486907 }, "result_metrics_average": 0.7241873146105138, - "result_metrics_npm": 0.5795863393852321 + "result_metrics_npm": 0.5795863393852321, + "main_language": "English" } \ No newline at end of file diff --git a/NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json b/NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json index a2bd09c75fac519ad69391c2d4cd77c42f2198a4..12cb177e11112b8bc96d35ce8da29cdb7ed46d0e 100644 --- a/NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json +++ b/NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 191, - "job_start_time": "2024-02-14T13-14-20.585602" + "job_start_time": "2024-02-14T13-14-20.585602", + "main_language": "English" } \ No newline at end of file diff --git a/OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json b/OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json index 85f339baca2d47f0dd12bc4605b0fe640f2c577c..9a92e4f591dcab264ca23eedbfa5a0129f66685b 100644 --- a/OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json +++ b/OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 105, - "job_start_time": "2024-02-08T19-59-33.207703" + "job_start_time": "2024-02-08T19-59-33.207703", + "main_language": "Other" } \ No newline at end of file diff --git a/OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json b/OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json index bbb55e8740c1e611de74f809d993341cc1ebd0ae..8fe116396b976763ac349d359e077cf013cd2d6a 100644 --- 
a/OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json +++ b/OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 104, - "job_start_time": "2024-02-08T18-48-18.223603" + "job_start_time": "2024-02-08T18-48-18.223603", + "main_language": "Other" } \ No newline at end of file diff --git a/OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json b/OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json index 2907a515300010de458749230fbcafe7660683d3..b987b735c8b8e132f393914bcbe0fff6a1ee3f5e 100644 --- a/OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json +++ b/OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 342, - "job_start_time": "2024-04-03T01-15-16.610234" + "job_start_time": "2024-04-03T01-15-16.610234", + "main_language": "Chinese" } \ No newline at end of file diff --git a/PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json b/PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json index 2031c3da366b7eef85ac44b8ab76ddf73abaa9f4..7c6a3731b32c41ab2519cf18e6de461a0744e4ce 100644 --- a/PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json +++ b/PORTULAN/gervasio-7b-portuguese-ptbr-decoder_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.14208441880992456 }, "result_metrics_average": 0.39464537169007613, - "result_metrics_npm": 0.0737189789524371 + "result_metrics_npm": 0.0737189789524371, + "main_language": "?" } \ No newline at end of file diff --git a/PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json b/PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json index 57b08062bb4b3ec0369a7070e3a736e321776dae..dccad873dda58e431af4ea52f88741719c2b3238 100644 --- a/PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json +++ b/PORTULAN/gervasio-7b-portuguese-ptpt-decoder_eval_request_False_bfloat16_Original.json @@ -28,5 +28,6 @@ "result_metrics_average": 0.4684299589672376, "result_metrics_npm": 0.2072835886017701, "error_msg": "Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.", - "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 429 Client Error: Too Many Requests for url: 
https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 400, in _wrapped_lfs_upload\n lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 228, in lfs_upload\n _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action[\"href\"])\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 334, in _upload_multi_part\n hf_raise_for_status(completion_res)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 333, in hf_raise_for_status\n raise HfHubHTTPError(str(e), response=response) from e\nhuggingface_hub.utils._errors.HfHubHTTPError: 429 Client Error: Too Many Requests for url: https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 111, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 91, in upload_raw_results\n return _try_request_again(_upload_raw_results, lambda: time.sleep(1), *args)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 82, in _try_request_again\n raise e\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 75, in _try_request_again\n func(*args)\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 62, in _upload_raw_results\n api.upload_folder(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4598, in upload_folder\n commit_info = self.create_commit(\n ^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 3558, in create_commit\n self.preupload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4058, in preupload_lfs_files\n _upload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 415, in _upload_lfs_files\n thread_map(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 69, in thread_map\n return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 51, in _executor_map\n return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1182, in __iter__\n for obj in iterable:\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 619, in result_iterator\n yield _result_or_cancel(fs.pop())\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 317, in _result_or_cancel\n return fut.result(timeout)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/thread.py\", line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 402, in _wrapped_lfs_upload\n raise RuntimeError(f\"Error while uploading '{operation.path_in_repo}' to the Hub.\") from exc\nRuntimeError: Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.\n" + "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 286, in hf_raise_for_status\n response.raise_for_status()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 429 Client Error: Too Many Requests for url: 
https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 400, in _wrapped_lfs_upload\n lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 228, in lfs_upload\n _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action[\"href\"])\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/lfs.py\", line 334, in _upload_multi_part\n hf_raise_for_status(completion_res)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py\", line 333, in hf_raise_for_status\n raise HfHubHTTPError(str(e), response=response) from e\nhuggingface_hub.utils._errors.HfHubHTTPError: 429 Client Error: Too Many Requests for url: https://huggingface.co/api/complete_multipart?uploadId=xoJbpoXjQ8LOXQtDQWLZfVCuMVNtwQ070pPsPn1TvA4H4kTEHhKa1pjJipp_253GzPTjA.HE7rRohkEvCcEkm_bImFKN0P1lYGzMOrzgN.Bw703I7pUuG5G3_WcSeAd.&bucket=hf-hub-lfs-us-east-1&prefix=repos%2Faf%2F42%2Faf4283b4152e41f109733722a9330015e04433eff5043f6398de5e010e08b7ae&expiration=Sat%2C+09+Mar+2024+07%3A02%3A15+GMT&signature=26216a32adb052b978189d77079b990944b940ea648ecdcb052ea11188a7f6ad\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 111, in run_request\n upload_raw_results(request_data['model'])\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 91, in upload_raw_results\n return _try_request_again(_upload_raw_results, lambda: time.sleep(1), *args)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 82, in _try_request_again\n raise e\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 75, in _try_request_again\n func(*args)\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/hf_util.py\", line 62, in _upload_raw_results\n api.upload_folder(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4598, in upload_folder\n commit_info = self.create_commit(\n ^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 1208, in _inner\n return fn(self, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 3558, in create_commit\n self.preupload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/hf_api.py\", line 4058, in preupload_lfs_files\n _upload_lfs_files(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 415, in _upload_lfs_files\n thread_map(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 69, in thread_map\n return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/contrib/concurrent.py\", line 51, in _executor_map\n return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs))\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/tqdm/std.py\", line 1182, in __iter__\n for obj in iterable:\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 619, in result_iterator\n yield _result_or_cancel(fs.pop())\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 317, in _result_or_cancel\n return fut.result(timeout)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 449, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/root/miniconda3/envs/torch21/lib/python3.11/concurrent/futures/thread.py\", line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/_commit_api.py\", line 402, in _wrapped_lfs_upload\n raise RuntimeError(f\"Error while uploading '{operation.path_in_repo}' to the Hub.\") from exc\nRuntimeError: Error while uploading 'PORTULAN/gervasio-7b-portuguese-ptpt-decoder/raw_2024-03-08T02-58-56.846301/pretrained__PORTULAN__gervasio-7b-portuguese-ptpt-decoder,dtype__bfloat16,device__cuda:0,revision__main,trust_remote_code__True,starting_max_length__2560_bluex.jsonl' to the Hub.\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json index 314809aa7ad4e81590b3814a164682c2a46c6612..4e5c9f9d9aae8c2de2af4b0bbf64e08404d688c7 100644 --- a/Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6822692907802579 }, "result_metrics_average": 0.6786367103386248, - "result_metrics_npm": 0.5101390554584714 + "result_metrics_npm": 0.5101390554584714, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-1_8B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-1_8B-Chat_eval_request_False_bfloat16_Original.json index a7396c1a5cd01adbbe1668328b26426e63a73800..517740d9d186d71630e50af3cf0c67393df84403 100644 --- a/Qwen/Qwen-1_8B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-1_8B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.48306623914561087 }, "result_metrics_average": 0.3765299810379858, - "result_metrics_npm": 0.08348997450998025 + "result_metrics_npm": 0.08348997450998025, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json index e19e4317369de172d09b90b620f5f363a18ce2f3..2a557fa6ce2c69de009882e478a23c6e32a8956c 100644 --- a/Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.39256829515938096 }, "result_metrics_average": 0.36190921703436124, - "result_metrics_npm": 0.03342295904115142 + "result_metrics_npm": 0.03342295904115142, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-72B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-72B-Chat_eval_request_False_bfloat16_Original.json index f63641242d283281e7a29d8ef915c4522e16b11e..515fd6948164ab944b4756f3fd313b851aec70bc 100644 --- a/Qwen/Qwen-72B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-72B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.719922008738035 }, "result_metrics_average": 0.30800541944916443, - "result_metrics_npm": -0.0016944521105141603 + "result_metrics_npm": -0.0016944521105141603, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json index 3d6a26e25eb31cd09247ff6b4e109182c4a0c531..2b77dd34654bc000d85695aeb4751436dc695b02 100644 --- a/Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.7053416915294696 }, "result_metrics_average": 0.7423987445651828, - "result_metrics_npm": 0.6043756456518657 + "result_metrics_npm": 0.6043756456518657, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-7B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-7B-Chat_eval_request_False_bfloat16_Original.json index 185e6c323cc2d51909390ca37cf11c6e96c29f77..cad501b5971147aecca630a0075856e17044ab97 100644 --- a/Qwen/Qwen-7B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-7B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6299230071720173 }, "result_metrics_average": 0.5987398272440491, - "result_metrics_npm": 
0.39385370292915256 + "result_metrics_npm": 0.39385370292915256, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json index 778e340ad56a4b17c0c047629a7f5e94ac46d378..c2ae84b3f0244c4788d69337be7c39e1dfc87ef0 100644 --- a/Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6516624308828275 }, "result_metrics_average": 0.5813546855677915, - "result_metrics_npm": 0.354321247309157 + "result_metrics_npm": 0.354321247309157, + "main_language": "Chinese" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-0.5B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-0.5B-Chat_eval_request_False_bfloat16_Original.json index cb59dac02d4bccb442c17d03c26201f6226bf41d..125642f8184a97cf945c105e04ca59ffb09865bc 100644 --- a/Qwen/Qwen1.5-0.5B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-0.5B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.34340555482214197 }, "result_metrics_average": 0.2538418340223843, - "result_metrics_npm": -0.15602223674356652 + "result_metrics_npm": -0.15602223674356652, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-0.5B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-0.5B_eval_request_False_bfloat16_Original.json index 27b38ab3e849ca2561a6d5367779f468d231c93e..88457fe063d932519950a848370c82070fa2f600 100644 --- a/Qwen/Qwen1.5-0.5B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-0.5B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.15147894988110025 }, "result_metrics_average": 0.25743006751736736, - "result_metrics_npm": -0.139537678958747 + "result_metrics_npm": -0.139537678958747, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-1.8B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-1.8B-Chat_eval_request_False_bfloat16_Original.json index 4a06db4275e38097d05573e0bdbe0647846ebc2c..74106da32f356b35cf72fe92c14fd25256e4bca2 100644 --- a/Qwen/Qwen1.5-1.8B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-1.8B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.38276941328891767 }, "result_metrics_average": 0.4321643698140234, - "result_metrics_npm": 0.15965557305727524 + "result_metrics_npm": 0.15965557305727524, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-1.8B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-1.8B_eval_request_False_bfloat16_Original.json index b95db27a145c3071bcc144481cf592bf14668160..acf07436bbe203523f07058fdf59dae7b67b69be 100644 --- a/Qwen/Qwen1.5-1.8B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-1.8B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.20266417241880366 }, "result_metrics_average": 0.3013659068209853, - "result_metrics_npm": -0.084109000068956 + "result_metrics_npm": -0.084109000068956, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-14B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-14B-Chat_eval_request_False_bfloat16_Original.json index b621196dcdc361a04221177c27036b556b027313..7665ce80977a9328ad5916fc5b7689fa8d53582c 100644 --- a/Qwen/Qwen1.5-14B-Chat_eval_request_False_bfloat16_Original.json +++ 
b/Qwen/Qwen1.5-14B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6949939147805195 }, "result_metrics_average": 0.7258813752717574, - "result_metrics_npm": 0.594091639672785 + "result_metrics_npm": 0.594091639672785, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-14B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-14B_eval_request_False_bfloat16_Original.json index 347efd0807601c09d7beb9d07d97dce0df8feb6c..58b8d7c4b1512dcbffa6fffbe8042e3407d3ee48 100644 --- a/Qwen/Qwen1.5-14B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-14B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6703619121394994 }, "result_metrics_average": 0.6822413032467703, - "result_metrics_npm": 0.5361520489478075 + "result_metrics_npm": 0.5361520489478075, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-4B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-4B-Chat_eval_request_False_bfloat16_Original.json index 03fd8554ebfee8eb6a6191e3c8bd5f24659b48c8..5687f2c8f9d3755cdccca41403732e384b4655f3 100644 --- a/Qwen/Qwen1.5-4B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-4B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6318902485175268 }, "result_metrics_average": 0.6133754905699706, - "result_metrics_npm": 0.4195198779787766 + "result_metrics_npm": 0.4195198779787766, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-4B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-4B_eval_request_False_bfloat16_Original.json index 38b0712d06a1ea443fc921647a31c8d22124e2b4..68ae8200f98f5447a60821dd9a8870f81b62cee4 100644 --- a/Qwen/Qwen1.5-4B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-4B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4501661444781519 }, "result_metrics_average": 0.5139409498148056, - "result_metrics_npm": 0.2700271661161034 + "result_metrics_npm": 0.2700271661161034, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json index 9b04befadbcfcb42a44a484e2ff1583456ae0f87..dbb2b62daba70c3eec1c1d0be94a2b5118caafb0 100644 --- a/Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-72B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6898011277963295 }, "result_metrics_average": 0.7431618289326288, - "result_metrics_npm": 0.6130103855969451 + "result_metrics_npm": 0.6130103855969451, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json index 69798f01f6c166fc8293fe2b364a83fa643142af..0deb8092155de74f3a90770949d391a001ebaa4a 100644 --- a/Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-72B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.7080442610412353 }, "result_metrics_average": 0.6741558078246019, - "result_metrics_npm": 0.5339373221484036 + "result_metrics_npm": 0.5339373221484036, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-7B-Chat_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-7B-Chat_eval_request_False_bfloat16_Original.json index 
5d259c2fc4271f19b0886297183288b73e46fd16..6fedeb376076cb3c16c7615249168e8464353aa4 100644 --- a/Qwen/Qwen1.5-7B-Chat_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-7B-Chat_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6840962577146366 }, "result_metrics_average": 0.677229738417662, - "result_metrics_npm": 0.5240522910723275 + "result_metrics_npm": 0.5240522910723275, + "main_language": "English" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-7B_eval_request_False_bfloat16_Original.json b/Qwen/Qwen1.5-7B_eval_request_False_bfloat16_Original.json index 7f23530a2ef5545320bc6b25a2760136bc42f58e..d63b3c44d0738a2379f0aa21b11cb73c78a44ec8 100644 --- a/Qwen/Qwen1.5-7B_eval_request_False_bfloat16_Original.json +++ b/Qwen/Qwen1.5-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.532166164812724 }, "result_metrics_average": 0.6110901465856801, - "result_metrics_npm": 0.41977699620419967 + "result_metrics_npm": 0.41977699620419967, + "main_language": "English" } \ No newline at end of file diff --git a/Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json b/Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json index 07814cb20f730ee957c260316520b9b209007048..ceff79199536ef49563c7b9868d1daa92b7fbb9d 100644 --- a/Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json +++ b/Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6058446214806321 }, "result_metrics_average": 0.593645697168258, - "result_metrics_npm": 0.4006164591091523 + "result_metrics_npm": 0.4006164591091523, + "main_language": "?" } \ No newline at end of file diff --git a/THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json b/THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json index bccd89fe5acefe86df3c2ae8acf775717b010df1..eafab44690ffa459648f254bfa2f58fd4776f53f 100644 --- a/THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json +++ b/THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 350, - "job_start_time": "2024-04-02T09-59-02.085745" + "job_start_time": "2024-04-02T09-59-02.085745", + "main_language": "Chinese" } \ No newline at end of file diff --git a/THUDM/chatglm3-6b_eval_request_False_float16_Original.json b/THUDM/chatglm3-6b_eval_request_False_float16_Original.json index d460611cf3b40151b28674f8f51e5f097a4be974..48fb9e427d1c268d2b797eee80a6fbaeeac00d30 100644 --- a/THUDM/chatglm3-6b_eval_request_False_float16_Original.json +++ b/THUDM/chatglm3-6b_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": 253, "job_start_time": "2024-02-24T23-49-51.958720", "error_msg": "ChatGLMForConditionalGeneration._update_model_kwargs_for_generation() got an unexpected keyword argument 'model_inputs'", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = 
evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1549, in generate\n result = self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2468, in greedy_search\n model_kwargs = self._update_model_kwargs_for_generation(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTypeError: ChatGLMForConditionalGeneration._update_model_kwargs_for_generation() got an unexpected keyword argument 'model_inputs'\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1549, in generate\n result = self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2468, in greedy_search\n model_kwargs = self._update_model_kwargs_for_generation(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTypeError: ChatGLMForConditionalGeneration._update_model_kwargs_for_generation() got an unexpected keyword argument 'model_inputs'\n", + "main_language": "Chinese" } \ No newline at end of file diff --git a/THUDM/glm-10b_eval_request_False_float16_Original.json b/THUDM/glm-10b_eval_request_False_float16_Original.json index df2c7c4a51f9bc4cfa8de07979619cc5f96b6944..a24a5941941165e8cfe0703137307f718722ea0c 100644 --- a/THUDM/glm-10b_eval_request_False_float16_Original.json +++ b/THUDM/glm-10b_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": -1, "job_start_time": null, "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 206, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, 
OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 206, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.\n", + "main_language": "English" } \ No newline at end of file diff --git a/THUDM/glm-2b_eval_request_False_float16_Original.json b/THUDM/glm-2b_eval_request_False_float16_Original.json index 074bcc889cadefb97d116729c06eb497e9ee21c3..7b0aacbd921ea014c1badd2e3154f9acaaa8b469 100644 --- a/THUDM/glm-2b_eval_request_False_float16_Original.json +++ b/THUDM/glm-2b_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": -1, "job_start_time": null, "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, 
StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 206, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 206, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GemmaConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, Qwen2Config, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, StableLmConfig, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig.\n", + "main_language": "English" } \ No newline at end of file diff --git a/TencentARC/Mistral_Pro_8B_v0.1_eval_request_False_bfloat16_Original.json b/TencentARC/Mistral_Pro_8B_v0.1_eval_request_False_bfloat16_Original.json index 
6d07da9ed765f499d4c66c9d172b95593ce9af50..6c09260d29b2b1330221484daea09f4f7e19db1e 100644 --- a/TencentARC/Mistral_Pro_8B_v0.1_eval_request_False_bfloat16_Original.json +++ b/TencentARC/Mistral_Pro_8B_v0.1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6645622681681381 }, "result_metrics_average": 0.6071586786279549, - "result_metrics_npm": 0.3982908724792693 + "result_metrics_npm": 0.3982908724792693, + "main_language": "English" } \ No newline at end of file diff --git a/TinyLlama/TinyLlama-1.1B-Chat-v1.0_eval_request_False_bfloat16_Original.json b/TinyLlama/TinyLlama-1.1B-Chat-v1.0_eval_request_False_bfloat16_Original.json index b003405b85ec42d5f8f4b139cb6f8c0f6a2be0da..76df2c8dfe776f480d552a58af24a6953f4c448b 100644 --- a/TinyLlama/TinyLlama-1.1B-Chat-v1.0_eval_request_False_bfloat16_Original.json +++ b/TinyLlama/TinyLlama-1.1B-Chat-v1.0_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.2803846999928849 }, "result_metrics_average": 0.28217907782839224, - "result_metrics_npm": -0.08112375073263198 + "result_metrics_npm": -0.08112375073263198, + "main_language": "English" } \ No newline at end of file diff --git a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json index 12bddfc9fa0d2d1a98b7177a4a940b5a16a06bef..68227b4b69ae915c11498e4747cc2e19f8547633 100644 --- a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json +++ b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.31998368175826974 }, "result_metrics_average": 0.32276742786144397, - "result_metrics_npm": -0.0217747938879217 + "result_metrics_npm": -0.0217747938879217, + "main_language": "English" } \ No newline at end of file diff --git a/Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json b/Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json index 293a451e13d74ea3051451bc52432b1936d873a6..ce12ddcbfb715084a54831377f3ea3ac9c3b4f23 100644 --- a/Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json +++ b/Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.539906904071922 }, "result_metrics_average": 0.41443166245257623, - "result_metrics_npm": 0.11270490486580001 + "result_metrics_npm": 0.11270490486580001, + "main_language": "English" } \ No newline at end of file diff --git a/WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json b/WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json index 33295d78522ad12bfae0abdc654fe68f0f0c6dae..512c5a26986966b34f555692c5c17b951387a32e 100644 --- a/WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json +++ b/WizardLM/WizardLM-13B-V1.2_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": 354, "job_start_time": "2024-04-02T12-19-02.586807", "error_msg": "CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1544, in generate\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2404, in greedy_search\n model_kwargs[\"cache_position\"] = torch.arange(cur_len, device=input_ids.device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1176, in forward\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1019, 
in forward\n position_ids=position_ids,\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 740, in forward\n hidden_states=hidden_states,\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 671, in forward\n query_states,\n ^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1518, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1063, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return 
func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1544, in generate\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2404, in greedy_search\n model_kwargs[\"cache_position\"] = torch.arange(cur_len, device=input_ids.device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1176, in forward\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1019, in forward\n position_ids=position_ids,\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 740, in forward\n hidden_states=hidden_states,\n \n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 671, in forward\n query_states,\n ^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 1.53 GiB. GPU 0 has a total capacty of 79.35 GiB of which 360.19 MiB is free. Process 4074833 has 34.95 GiB memory in use. Process 209361 has 44.04 GiB memory in use. Of the allocated memory 31.73 GiB is allocated by PyTorch, and 2.71 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json b/WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json index 4037ba3e0fc2fa7392f3cf9a30c35332623539ab..04068937467349ea13b83ed63f4fe5e662351a0d 100644 --- a/WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json +++ b/WizardLM/WizardLM-70B-V1.0_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": 339, "job_start_time": "2024-04-02T03-37-29.146344", "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n 
set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in 
set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 58.19 MiB is free. Process 4070277 has 21.61 GiB memory in use. Process 4074833 has 20.61 GiB memory in use. Process 188848 has 21.62 GiB memory in use. Process 209361 has 15.45 GiB memory in use. Of the allocated memory 21.21 GiB is allocated by PyTorch, and 3.58 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "?" } \ No newline at end of file diff --git a/WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json b/WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json index 4e3a44318a42e82b78cb376b56db15223ad7db47..835b65b04a7d9c778852ddd7e05644bfd30e98ff 100644 --- a/WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json +++ b/WizardLM/WizardLM-7B-V1.0_eval_request_False_float16_Original.json @@ -14,5 +14,6 @@ "job_id": 337, "job_start_time": "2024-04-02T02-44-54.274722", "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')", - "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: 
/repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in 
get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')\n" + "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: 
/repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in 
get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/fd/38/fd3830fff0be4ae87c1afeffcd02a1df438bb0a46c48746073f175988f1c9371/f92e1d5a3549574f4266746acb0dd1585f28adc969e0a7dcb5f5c4524f788117?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00003-of-00003.bin%3B+filename%3D%22pytorch_model-00003-of-00003.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712285095&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4NTA5NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy9mZC8zOC9mZDM4MzBmZmYwYmU0YWU4N2MxYWZlZmZjZDAyYTFkZjQzOGJiMGE0NmM0ODc0NjA3M2YxNzU5ODhmMWM5MzcxL2Y5MmUxZDVhMzU0OTU3NGY0MjY2NzQ2YWNiMGRkMTU4NWYyOGFkYzk2OWUwYTdkY2I1ZjVjNDUyNGY3ODgxMTc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=GI45dJgB3hMP9qzkyS0BjXoywMCKkSJXHfth08wpFsaH8Ag7pRL88Ypg-s-tlzyzlVoGZD2ZareotfI09fGofxSkZGQN~YvEu33cJ32a-e2HMa6VfuwjJkvr39rSNGeOZioHu9bXIKsezPRsPbVV0HJkxwlv~zlaAkP-k1OHnAHUTv3ytEiXeK~difi9xgBrQfDcqjH~~X7kTCxI0BOa0kvjNsqDtCkqSDmBLDPsXupHaUBxTZvAH7SzwSLqenwZ6N-dMRtw1N9qqvjY38VmUESBpUAdZODPAWA4G2ldKJPbI6oUcTJaHEvdYe8Q2Vp4Fuhpn5vdT-JFTm~NbputVA__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: f3038b08-1a6a-4f32-9910-e6c5f4932050)')\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/abacusai/Smaug-34B-v0.1_eval_request_False_bfloat16_Original.json b/abacusai/Smaug-34B-v0.1_eval_request_False_bfloat16_Original.json index 5b7ed9839d8d0641541ae241e75561c5bb090a01..a871f8a478560a62f12e41f8ce0699e7192f48db 100644 --- a/abacusai/Smaug-34B-v0.1_eval_request_False_bfloat16_Original.json +++ b/abacusai/Smaug-34B-v0.1_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 309, "job_start_time": "2024-03-08T12-54-43.097236", "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/47/ee/47eec340ea0d2c8145b3137620af1e598bafec193311e8483be0e16edec15595/63f2b971ca374912224ac13d3c04a77d60c46e482962973b681ae2d33ebe02b5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00012-of-00015.safetensors%3B+filename%3D%22model-00012-of-00015.safetensors%22%3B&Expires=1710163227&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMDE2MzIyN319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQ3L2VlLzQ3ZWVjMzQwZWEwZDJjODE0NWIzMTM3NjIwYWYxZTU5OGJhZmVjMTkzMzExZTg0ODNiZTBlMTZlZGVjMTU1OTUvNjNmMmI5NzFjYTM3NDkxMjIyNGFjMTNkM2MwNGE3N2Q2MGM0NmU0ODI5NjI5NzNiNjgxYWUyZDMzZWJlMDJiNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=PeCghCcEdrENWKaYuj5g~sNgZhGpKTGjEwFp-5RVNoSiwphhIylZOqcfEuw3mjwY~y-k4QbWO-2dId9VHZadrN-1zoyqkdPTVkBDzcXltvhDrkHx7-UUZArSP7zspIQEf4sQ7gl7BGAI4ItxKCLznl5~LAhJ-bDJt-A5hjUrzeQSbHdAto62dOmcoAHaz6ET4tcN7Fd1HFoMrlF539HjNF6dNpEDGljtvJyHHFk09Nk2JWzaoPrkXUWFgMj26TtKfzL71SSUWqdJrc6WjHZSHeQNv1Ra3VouDc8TIrpfClhShbENF1wnu0JfqmGhk0Ryi27dtc7I0Ef6ikmHTFLeuQ__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: caf7ca96-7bf9-4dd4-8d79-e5d345b57e18)')", - "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of 
the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/47/ee/47eec340ea0d2c8145b3137620af1e598bafec193311e8483be0e16edec15595/63f2b971ca374912224ac13d3c04a77d60c46e482962973b681ae2d33ebe02b5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00012-of-00015.safetensors%3B+filename%3D%22model-00012-of-00015.safetensors%22%3B&Expires=1710163227&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMDE2MzIyN319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQ3L2VlLzQ3ZWVjMzQwZWEwZDJjODE0NWIzMTM3NjIwYWYxZTU5OGJhZmVjMTkzMzExZTg0ODNiZTBlMTZlZGVjMTU1OTUvNjNmMmI5NzFjYTM3NDkxMjIyNGFjMTNkM2MwNGE3N2Q2MGM0NmU0ODI5NjI5NzNiNjgxYWUyZDMzZWJlMDJiNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=PeCghCcEdrENWKaYuj5g~sNgZhGpKTGjEwFp-5RVNoSiwphhIylZOqcfEuw3mjwY~y-k4QbWO-2dId9VHZadrN-1zoyqkdPTVkBDzcXltvhDrkHx7-UUZArSP7zspIQEf4sQ7gl7BGAI4ItxKCLznl5~LAhJ-bDJt-A5hjUrzeQSbHdAto62dOmcoAHaz6ET4tcN7Fd1HFoMrlF539HjNF6dNpEDGljtvJyHHFk09Nk2JWzaoPrkXUWFgMj26TtKfzL71SSUWqdJrc6WjHZSHeQNv1Ra3VouDc8TIrpfClhShbENF1wnu0JfqmGhk0Ryi27dtc7I0Ef6ikmHTFLeuQ__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/47/ee/47eec340ea0d2c8145b3137620af1e598bafec193311e8483be0e16edec15595/63f2b971ca374912224ac13d3c04a77d60c46e482962973b681ae2d33ebe02b5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00012-of-00015.safetensors%3B+filename%3D%22model-00012-of-00015.safetensors%22%3B&Expires=1710163227&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMDE2MzIyN319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQ3L2VlLzQ3ZWVjMzQwZWEwZDJjODE0NWIzMTM3NjIwYWYxZTU5OGJhZmVjMTkzMzExZTg0ODNiZTBlMTZlZGVjMTU1OTUvNjNmMmI5NzFjYTM3NDkxMjIyNGFjMTNkM2MwNGE3N2Q2MGM0NmU0ODI5NjI5NzNiNjgxYWUyZDMzZWJlMDJiNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=PeCghCcEdrENWKaYuj5g~sNgZhGpKTGjEwFp-5RVNoSiwphhIylZOqcfEuw3mjwY~y-k4QbWO-2dId9VHZadrN-1zoyqkdPTVkBDzcXltvhDrkHx7-UUZArSP7zspIQEf4sQ7gl7BGAI4ItxKCLznl5~LAhJ-bDJt-A5hjUrzeQSbHdAto62dOmcoAHaz6ET4tcN7Fd1HFoMrlF539HjNF6dNpEDGljtvJyHHFk09Nk2JWzaoPrkXUWFgMj26TtKfzL71SSUWqdJrc6WjHZSHeQNv1Ra3VouDc8TIrpfClhShbENF1wnu0JfqmGhk0Ryi27dtc7I0Ef6ikmHTFLeuQ__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: caf7ca96-7bf9-4dd4-8d79-e5d345b57e18)')\n" + "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of 
the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs-us-1.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/47/ee/47eec340ea0d2c8145b3137620af1e598bafec193311e8483be0e16edec15595/63f2b971ca374912224ac13d3c04a77d60c46e482962973b681ae2d33ebe02b5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00012-of-00015.safetensors%3B+filename%3D%22model-00012-of-00015.safetensors%22%3B&Expires=1710163227&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMDE2MzIyN319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQ3L2VlLzQ3ZWVjMzQwZWEwZDJjODE0NWIzMTM3NjIwYWYxZTU5OGJhZmVjMTkzMzExZTg0ODNiZTBlMTZlZGVjMTU1OTUvNjNmMmI5NzFjYTM3NDkxMjIyNGFjMTNkM2MwNGE3N2Q2MGM0NmU0ODI5NjI5NzNiNjgxYWUyZDMzZWJlMDJiNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=PeCghCcEdrENWKaYuj5g~sNgZhGpKTGjEwFp-5RVNoSiwphhIylZOqcfEuw3mjwY~y-k4QbWO-2dId9VHZadrN-1zoyqkdPTVkBDzcXltvhDrkHx7-UUZArSP7zspIQEf4sQ7gl7BGAI4ItxKCLznl5~LAhJ-bDJt-A5hjUrzeQSbHdAto62dOmcoAHaz6ET4tcN7Fd1HFoMrlF539HjNF6dNpEDGljtvJyHHFk09Nk2JWzaoPrkXUWFgMj26TtKfzL71SSUWqdJrc6WjHZSHeQNv1Ra3VouDc8TIrpfClhShbENF1wnu0JfqmGhk0Ryi27dtc7I0Ef6ikmHTFLeuQ__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in 
request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs-us-1.huggingface.co', port=443): Max retries exceeded with url: /repos/47/ee/47eec340ea0d2c8145b3137620af1e598bafec193311e8483be0e16edec15595/63f2b971ca374912224ac13d3c04a77d60c46e482962973b681ae2d33ebe02b5?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27model-00012-of-00015.safetensors%3B+filename%3D%22model-00012-of-00015.safetensors%22%3B&Expires=1710163227&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMDE2MzIyN319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy11cy0xLmh1Z2dpbmdmYWNlLmNvL3JlcG9zLzQ3L2VlLzQ3ZWVjMzQwZWEwZDJjODE0NWIzMTM3NjIwYWYxZTU5OGJhZmVjMTkzMzExZTg0ODNiZTBlMTZlZGVjMTU1OTUvNjNmMmI5NzFjYTM3NDkxMjIyNGFjMTNkM2MwNGE3N2Q2MGM0NmU0ODI5NjI5NzNiNjgxYWUyZDMzZWJlMDJiNT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSoifV19&Signature=PeCghCcEdrENWKaYuj5g~sNgZhGpKTGjEwFp-5RVNoSiwphhIylZOqcfEuw3mjwY~y-k4QbWO-2dId9VHZadrN-1zoyqkdPTVkBDzcXltvhDrkHx7-UUZArSP7zspIQEf4sQ7gl7BGAI4ItxKCLznl5~LAhJ-bDJt-A5hjUrzeQSbHdAto62dOmcoAHaz6ET4tcN7Fd1HFoMrlF539HjNF6dNpEDGljtvJyHHFk09Nk2JWzaoPrkXUWFgMj26TtKfzL71SSUWqdJrc6WjHZSHeQNv1Ra3VouDc8TIrpfClhShbENF1wnu0JfqmGhk0Ryi27dtc7I0Ef6ikmHTFLeuQ__&Key-Pair-Id=KCD77M1F0VK2B (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs-us-1.huggingface.co timed out. 
(connect timeout=10)'))\"), '(Request ID: caf7ca96-7bf9-4dd4-8d79-e5d345b57e18)')\n", + "main_language": "English" } \ No newline at end of file diff --git a/abacusai/Smaug-72B-v0.1_eval_request_False_bfloat16_Original.json b/abacusai/Smaug-72B-v0.1_eval_request_False_bfloat16_Original.json index e9e9c9c7fc8fa881ce60d62bec557421037a6d78..48b76fe25b1b1cdbe3e04f967ab59c8a481157ff 100644 --- a/abacusai/Smaug-72B-v0.1_eval_request_False_bfloat16_Original.json +++ b/abacusai/Smaug-72B-v0.1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.7238464426088375 }, "result_metrics_average": 0.7560356017498102, - "result_metrics_npm": 0.6292037751808968 + "result_metrics_npm": 0.6292037751808968, + "main_language": "English" } \ No newline at end of file diff --git a/ai-forever/mGPT-13B_eval_request_False_float16_Original.json b/ai-forever/mGPT-13B_eval_request_False_float16_Original.json index f0fc25ff183bf10c6c5ea08922135fbaa0a56d8b..6ec48cb90fc9f9afc2abce10a6906a7216643d13 100644 --- a/ai-forever/mGPT-13B_eval_request_False_float16_Original.json +++ b/ai-forever/mGPT-13B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "ai-forever/mGPT-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "GPT2LMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:43Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "ai-forever/mGPT-13B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 13.0, + "architectures": "GPT2LMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:43Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/ai-forever/mGPT_eval_request_False_float16_Original.json b/ai-forever/mGPT_eval_request_False_float16_Original.json index 002ad7d130410c02f7f2e092f0f861e899c07948..5419d3b0a3da25ab882779733159db913fe50b4b 100644 --- a/ai-forever/mGPT_eval_request_False_float16_Original.json +++ b/ai-forever/mGPT_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "ai-forever/mGPT", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "GPT2LMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:50Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "ai-forever/mGPT", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "GPT2LMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:50Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/allenai/OLMo-1B_eval_request_False_float16_Original.json b/allenai/OLMo-1B_eval_request_False_float16_Original.json index 5030c04136bbf10c2ca0027ac838317d8bdc8977..ac551bceadd1aa9d782b6048d6f73a7c691252e7 100644 --- a/allenai/OLMo-1B_eval_request_False_float16_Original.json +++ b/allenai/OLMo-1B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 
0.3692456851026562 }, "result_metrics_average": 0.29711563228842597, - "result_metrics_npm": -0.06618215150395826 + "result_metrics_npm": -0.06618215150395826, + "main_language": "English" } \ No newline at end of file diff --git a/allenai/OLMo-7B-Twin-2T_eval_request_False_float16_Original.json b/allenai/OLMo-7B-Twin-2T_eval_request_False_float16_Original.json index d5093b8ff4940008449a6f084986e6bc311650f4..28cd0762d1d3cc713d5a4de8aba1419d0043be8a 100644 --- a/allenai/OLMo-7B-Twin-2T_eval_request_False_float16_Original.json +++ b/allenai/OLMo-7B-Twin-2T_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.48325837191287774 }, "result_metrics_average": 0.4503336858012037, - "result_metrics_npm": 0.20285603368786248 + "result_metrics_npm": 0.20285603368786248, + "main_language": "English" } \ No newline at end of file diff --git a/allenai/OLMo-7B_eval_request_False_float16_Original.json b/allenai/OLMo-7B_eval_request_False_float16_Original.json index 855dac033da8e019dabd49a726d5ccc144b5ebbc..dac07ce34caa6f0ee29bcc67667d525262b7a372 100644 --- a/allenai/OLMo-7B_eval_request_False_float16_Original.json +++ b/allenai/OLMo-7B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4763245933758418 }, "result_metrics_average": 0.34582417169603247, - "result_metrics_npm": 0.02563463954011023 + "result_metrics_npm": 0.02563463954011023, + "main_language": "English" } \ No newline at end of file diff --git a/allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json b/allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json index ada23c8e60450c43ea075b6e6f04b1cfdf72dd5d..e3f019109639880aadfd38c02e9ffa3d3977b23d 100644 --- a/allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json +++ b/allenai/tulu-2-dpo-13b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6182422810204251 }, "result_metrics_average": 0.6577595030605168, - "result_metrics_npm": 0.49102913342997134 + "result_metrics_npm": 0.49102913342997134, + "main_language": "?" } \ No newline at end of file diff --git a/allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json b/allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json index ec4f60e863790fe342b9c930e6a181f6dba369cf..d3dc3149895e5df3de567c85e707d725ca308399 100644 --- a/allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json +++ b/allenai/tulu-2-dpo-70b_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 333, "job_start_time": "2024-04-02T02-29-13.759360", "error_msg": "CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 500.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 454.19 MiB is free. Process 4070277 has 45.46 GiB memory in use. Process 4074833 has 16.47 GiB memory in use. Process 188848 has 414.00 MiB memory in use. Process 209361 has 16.56 GiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json b/allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json index 53f46fb028b16382e1bfe575591fd8e9cbd0af99..8b4bc94b436c74547d5d959f50bd9bee3c288b8d 100644 --- a/allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json +++ b/allenai/tulu-2-dpo-7b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6409551735643995 }, "result_metrics_average": 0.63949781885722, - "result_metrics_npm": 0.47383474887492505 + "result_metrics_npm": 0.47383474887492505, + "main_language": "?" } \ No newline at end of file diff --git a/argilla/CapybaraHermes-2.5-Mistral-7B_eval_request_False_float16_Original.json b/argilla/CapybaraHermes-2.5-Mistral-7B_eval_request_False_float16_Original.json index 040da7b61be43829555aee01747510ba0601564a..d3bb2cce5ae269cb129c7a3e50734cbf3c52b0ea 100644 --- a/argilla/CapybaraHermes-2.5-Mistral-7B_eval_request_False_float16_Original.json +++ b/argilla/CapybaraHermes-2.5-Mistral-7B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.42828409799858047 }, "result_metrics_average": 0.6508421325259556, - "result_metrics_npm": 0.48468978584541744 + "result_metrics_npm": 0.48468978584541744, + "main_language": "English" } \ No newline at end of file diff --git a/argilla/notus-7b-v1_eval_request_False_bfloat16_Original.json b/argilla/notus-7b-v1_eval_request_False_bfloat16_Original.json index 6e4c5c2559d1c881be2935b4d351592ea0d6d63a..127450a9cec79e1674aeac2e8680c75854a78c53 100644 --- a/argilla/notus-7b-v1_eval_request_False_bfloat16_Original.json +++ b/argilla/notus-7b-v1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5794545135101782 }, "result_metrics_average": 0.6481792797962, - "result_metrics_npm": 0.4745286256100909 + "result_metrics_npm": 0.4745286256100909, + "main_language": "English" } \ No newline at end of file diff --git a/argilla/notux-8x7b-v1_eval_request_False_bfloat16_Original.json b/argilla/notux-8x7b-v1_eval_request_False_bfloat16_Original.json index 3c73dee9e2cec79c0c6c38507c93ba3e82a16c40..f7f62e4963c59e19927ddc46b9a232b931b9e3b7 100644 --- a/argilla/notux-8x7b-v1_eval_request_False_bfloat16_Original.json +++ b/argilla/notux-8x7b-v1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.533069727855328 }, "result_metrics_average": 0.6769341256020452, - "result_metrics_npm": 0.4989549218909805 + "result_metrics_npm": 0.4989549218909805, + "main_language": "English" } \ No newline at end of file diff --git a/baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json b/baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json index 722e4e976bc03fb6936f7d5394bcd8fd8244adbf..0d8422c40fcc12c7c6b45144310dde4f101da038 100644 --- a/baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json +++ b/baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.36242611957263016 }, "result_metrics_average": 0.33495619523691605, - "result_metrics_npm": 0.013616351269721867 + "result_metrics_npm": 0.013616351269721867, + "main_language": "Chinese" } \ No newline at end of file diff --git a/baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json b/baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json index 2106a3a1081f54dd83346891d2c9208103a6c297..d188cc40ea9155e274c48f3b28bed3c435a16494 100644 --- 
a/baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json +++ b/baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 358, - "job_start_time": "2024-04-03T00-36-23.239142" + "job_start_time": "2024-04-03T00-36-23.239142", + "main_language": "Chinese" } \ No newline at end of file diff --git a/baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json b/baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json index 8f707aee08f62ca458509428ac466232d6b8f77e..a59fb1f2769899968d7bd6c844033f931ae0794f 100644 --- a/baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json +++ b/baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.47206540115849704 }, "result_metrics_average": 0.4681820328881288, - "result_metrics_npm": 0.18303534924933723 + "result_metrics_npm": 0.18303534924933723, + "main_language": "Chinese" } \ No newline at end of file diff --git a/bardsai/jaskier-7b-dpo-v5.6_eval_request_False_bfloat16_Original.json b/bardsai/jaskier-7b-dpo-v5.6_eval_request_False_bfloat16_Original.json index 1a47f1c7dd243b9cc7772dd62bd89335568b4b46..23761af70ff6b4897bfdba1435c6b28c43351ee2 100644 --- a/bardsai/jaskier-7b-dpo-v5.6_eval_request_False_bfloat16_Original.json +++ b/bardsai/jaskier-7b-dpo-v5.6_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4946921882690798 }, "result_metrics_average": 0.6757619115996536, - "result_metrics_npm": 0.5214463627134215 + "result_metrics_npm": 0.5214463627134215, + "main_language": "English" } \ No newline at end of file diff --git a/berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json b/berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json index b5297e9d798a9e0d44d280cbdaff3fc675f6285b..5b2f4ef6476487b9d80c263a69fb372ce19069ac 100644 --- a/berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json +++ b/berkeley-nest/Starling-LM-7B-alpha_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.45688146565713056 }, "result_metrics_average": 0.6789838178640438, - "result_metrics_npm": 0.5252770761531216 + "result_metrics_npm": 0.5252770761531216, + "main_language": "?" 
} \ No newline at end of file diff --git a/bigscience/bloom-1b7_eval_request_False_float16_Original.json b/bigscience/bloom-1b7_eval_request_False_float16_Original.json index 09e2bea691612e0c5b6cf3987e69e8758bf7a88b..3a4e76362143919a2a1470abcbf6c07ac4f8eeba 100644 --- a/bigscience/bloom-1b7_eval_request_False_float16_Original.json +++ b/bigscience/bloom-1b7_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.1506866897702477 }, "result_metrics_average": 0.28555133074623035, - "result_metrics_npm": -0.07299737308769665 + "result_metrics_npm": -0.07299737308769665, + "main_language": "English" } \ No newline at end of file diff --git a/bigscience/bloom-3b_eval_request_False_float16_Original.json b/bigscience/bloom-3b_eval_request_False_float16_Original.json index b6fef7b1d1654c25fed12fa6d6f416756ad59d35..37c4773a3fc1fe512fb0d5457c4d34eeeb45ea7f 100644 --- a/bigscience/bloom-3b_eval_request_False_float16_Original.json +++ b/bigscience/bloom-3b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.2450457553395994 }, "result_metrics_average": 0.3354492314740121, - "result_metrics_npm": 0.018008724388031243 + "result_metrics_npm": 0.018008724388031243, + "main_language": "English" } \ No newline at end of file diff --git a/bigscience/bloom-560m_eval_request_False_float16_Original.json b/bigscience/bloom-560m_eval_request_False_float16_Original.json index d4bbff595f53d4a2df680698cf70a28bf02a3045..b42e4775333a1bd4033e72fa73beca2151e5c050 100644 --- a/bigscience/bloom-560m_eval_request_False_float16_Original.json +++ b/bigscience/bloom-560m_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.20736168267032465 }, "result_metrics_average": 0.2543095353635786, - "result_metrics_npm": -0.13934638516229184 + "result_metrics_npm": -0.13934638516229184, + "main_language": "English" } \ No newline at end of file diff --git a/bigscience/bloom-7b1_eval_request_False_float16_Original.json b/bigscience/bloom-7b1_eval_request_False_float16_Original.json index 4e9852cbed5664fb580b3ecae61bb71117201c77..16aa00e10f59c8557cdb41342579e1db3ee20f95 100644 --- a/bigscience/bloom-7b1_eval_request_False_float16_Original.json +++ b/bigscience/bloom-7b1_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.2884430449383611 }, "result_metrics_average": 0.2887082618601528, - "result_metrics_npm": -0.08218986934474465 + "result_metrics_npm": -0.08218986934474465, + "main_language": "English" } \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-1.3B_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-1.3B_eval_request_False_float16_Original.json index 3a8aecdcf1c39b260af981e79573a8afc7bbec6c..ef51522e15fd50054b5e009a0fe1e4fa33c1422c 100644 --- a/cerebras/Cerebras-GPT-1.3B_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-1.3B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-1.3B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 1.3, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:58Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-1.3B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 1.3, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": 
"2024-02-11T13:37:58Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-111M_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-111M_eval_request_False_float16_Original.json index 6147e7445bdae1fb1820a930441158ca3aeb5bf6..d0ec75c3c0cf0fb00b1ee925fd568543a8e235d9 100644 --- a/cerebras/Cerebras-GPT-111M_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-111M_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-111M", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.111, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:36Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-111M", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.111, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:36Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-13B_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-13B_eval_request_False_float16_Original.json index be3f22768436ece739cfe7bb299a1600d2c6a543..f1b4ed6362c8f56f5ae900717578ab0b4c9591c2 100644 --- a/cerebras/Cerebras-GPT-13B_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-13B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-13B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 13.0, "architectures": "GPT2Model", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:19Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-13B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 13.0, + "architectures": "GPT2Model", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:19Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-2.7B_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-2.7B_eval_request_False_float16_Original.json index 383fe4819f54e981eb4c8fa2505d040edb0d4947..69529716a578ad302dbb8bf0578dfe01a269dd22 100644 --- a/cerebras/Cerebras-GPT-2.7B_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-2.7B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-2.7B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 2.7, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:05Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-2.7B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + 
"params": 2.7, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:05Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-256M_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-256M_eval_request_False_float16_Original.json index d6047f780274876b8216798552d17ff60f376080..cbde154efcf560d298a811151967b67cb1876c1f 100644 --- a/cerebras/Cerebras-GPT-256M_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-256M_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-256M", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.256, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:41Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-256M", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.256, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:41Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-590M_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-590M_eval_request_False_float16_Original.json index 62406beff1f1ac5ac598d6f308b1b81e4891fcf7..735d8ccff303f5957e72e7b0b570eb29b5bc599d 100644 --- a/cerebras/Cerebras-GPT-590M_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-590M_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-590M", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.59, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:37:47Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/Cerebras-GPT-590M", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.59, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:37:47Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/Cerebras-GPT-6.7B_eval_request_False_float16_Original.json b/cerebras/Cerebras-GPT-6.7B_eval_request_False_float16_Original.json index d3b3eace5382970e3d55cdb06d9af6f06a5442b8..d009d315e2c5e6b4e733df9fa8ca8e477c30b522 100644 --- a/cerebras/Cerebras-GPT-6.7B_eval_request_False_float16_Original.json +++ b/cerebras/Cerebras-GPT-6.7B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/Cerebras-GPT-6.7B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 6.7, "architectures": "?", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:13Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": 
"cerebras/Cerebras-GPT-6.7B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 6.7, + "architectures": "?", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:13Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cerebras/btlm-3b-8k-base_eval_request_False_bfloat16_Original.json b/cerebras/btlm-3b-8k-base_eval_request_False_bfloat16_Original.json index 79f2fdd21c19c7740f99de0e6ce1af556bd4040c..2d10429a09fdc962fe7833b534f48d8055282df0 100644 --- a/cerebras/btlm-3b-8k-base_eval_request_False_bfloat16_Original.json +++ b/cerebras/btlm-3b-8k-base_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "cerebras/btlm-3b-8k-base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 3.0, "architectures": "BTLMLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:38:32Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "cerebras/btlm-3b-8k-base", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 3.0, + "architectures": "BTLMLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:38:32Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json b/cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json index d188ba00abee6356a0b1d0831b8e1a813bdc3f27..33f7149b4074164d935a1a86b3ae8158da816df8 100644 --- a/cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json +++ b/cnmoro/Mistral-7B-Portuguese_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6471160335032415 }, "result_metrics_average": 0.6470422904888635, - "result_metrics_npm": 0.47240583552942517 + "result_metrics_npm": 0.47240583552942517, + "main_language": "?" } \ No newline at end of file diff --git a/croissantllm/CroissantLLMBase_eval_request_False_float16_Original.json b/croissantllm/CroissantLLMBase_eval_request_False_float16_Original.json index a9288b4349c4437bde90e3f55f1aa9a6e4063aff..f2491feadbe1bb71d872594b0283684f2f840268 100644 --- a/croissantllm/CroissantLLMBase_eval_request_False_float16_Original.json +++ b/croissantllm/CroissantLLMBase_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.1506866897702477 }, "result_metrics_average": 0.2752308423815297, - "result_metrics_npm": -0.10852874111412539 + "result_metrics_npm": -0.10852874111412539, + "main_language": "English" } \ No newline at end of file diff --git a/deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json b/deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json index a019c00724e59132ed0f1ac442bcf7ff0c762212..dc5944357667ab05e68272c2f3028c40b4da3693 100644 --- a/deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json +++ b/deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 340, "job_start_time": "2024-04-02T06-11-15.986508", "error_msg": "CUDA out of memory. Tried to allocate 344.00 MiB. 
GPU 0 has a total capacty of 79.35 GiB of which 36.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 27.00 GiB memory in use. Process 188848 has 11.95 GiB memory in use. Process 209361 has 16.51 GiB memory in use. Of the allocated memory 11.54 GiB is allocated by PyTorch, and 1.78 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 36.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 27.00 GiB memory in use. Process 188848 has 11.95 GiB memory in use. 
Process 209361 has 16.51 GiB memory in use. Of the allocated memory 11.54 GiB is allocated by PyTorch, and 1.78 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 344.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 36.19 MiB is free. Process 4070277 has 23.85 GiB memory in use. Process 4074833 has 27.00 GiB memory in use. Process 188848 has 11.95 GiB memory in use. Process 209361 has 16.51 GiB memory in use. Of the allocated memory 11.54 GiB is allocated by PyTorch, and 1.78 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "?" } \ No newline at end of file diff --git a/deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json b/deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json index b47d8db4246c794c484ff996635326cef04c814c..67deb8a01ae14131160ae52eb7f089248486d9eb 100644 --- a/deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json +++ b/deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 347, - "job_start_time": "2024-04-02T09-18-28.675982" + "job_start_time": "2024-04-02T09-18-28.675982", + "main_language": "?" } \ No newline at end of file diff --git a/deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json b/deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json index a55937730522ea54f1586f38972b188e30612fdc..872ec45668fa41863f6390b3a63dbceb6a03bbb3 100644 --- a/deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json +++ b/deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json @@ -14,5 +14,6 @@ "job_id": 348, "job_start_time": "2024-04-02T09-25-05.375938", "error_msg": "CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.19 MiB is free. Process 4070277 has 538.00 MiB memory in use. Process 4074833 has 27.62 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 22.56 GiB is allocated by PyTorch, and 4.55 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.19 MiB is free. Process 4070277 has 538.00 MiB memory in use. Process 4074833 has 27.62 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 22.56 GiB is allocated by PyTorch, and 4.55 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 556, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3502, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3926, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 805, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 10.19 MiB is free. Process 4070277 has 538.00 MiB memory in use. Process 4074833 has 27.62 GiB memory in use. Process 188848 has 32.47 GiB memory in use. Process 209361 has 18.72 GiB memory in use. Of the allocated memory 22.56 GiB is allocated by PyTorch, and 4.55 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/dominguesm/Canarim-7B-Instruct_eval_request_False_float16_Original.json b/dominguesm/Canarim-7B-Instruct_eval_request_False_float16_Original.json index 646e10ae0cff5c0565637cbd1d180f944844d0e8..2801f56235b4e3b054b0e1bd65f7af6a11f42f3a 100644 --- a/dominguesm/Canarim-7B-Instruct_eval_request_False_float16_Original.json +++ b/dominguesm/Canarim-7B-Instruct_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6599646979143782 }, "result_metrics_average": 0.4721088496402211, - "result_metrics_npm": 0.24510641688389662 + "result_metrics_npm": 0.24510641688389662, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/dominguesm/canarim-7b_eval_request_False_float16_Original.json b/dominguesm/canarim-7b_eval_request_False_float16_Original.json index 2ad4d3c8325e5b30285782720a232fe95c10d0fe..b4721f7eed97fa9c2ec529db3dfb94298ba1c3ef 100644 --- a/dominguesm/canarim-7b_eval_request_False_float16_Original.json +++ b/dominguesm/canarim-7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6238054035800533 }, "result_metrics_average": 0.47356065488549093, - "result_metrics_npm": 0.24504356404158173 + "result_metrics_npm": 0.24504356404158173, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/dynamofl/dynamo-8B-v0.1_eval_request_False_bfloat16_Original.json b/dynamofl/dynamo-8B-v0.1_eval_request_False_bfloat16_Original.json index 4145f9ff544cfba3c347caa9f68bb36c7a35f769..92a290a9f92d894b10756045543459bff88db53f 100644 --- a/dynamofl/dynamo-8B-v0.1_eval_request_False_bfloat16_Original.json +++ b/dynamofl/dynamo-8B-v0.1_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5268244812945606 }, "result_metrics_average": 0.26089027529591824, - "result_metrics_npm": -0.1486061927100144 + "result_metrics_npm": -0.1486061927100144, + "main_language": "English" } \ No newline at end of file diff --git a/eduagarcia/gemma-7b-it_no_chat_template_eval_request_False_bfloat16_Original.json b/eduagarcia/gemma-7b-it_no_chat_template_eval_request_False_bfloat16_Original.json index 7289caccc0569014b5a25373862a919cad09c800..eb488ca5659feec424c7e70955da097a61e9d383 100644 --- a/eduagarcia/gemma-7b-it_no_chat_template_eval_request_False_bfloat16_Original.json +++ b/eduagarcia/gemma-7b-it_no_chat_template_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.601954964588851 }, "result_metrics_average": 0.5516779270390081, - "result_metrics_npm": 0.32000456969789404 + "result_metrics_npm": 0.32000456969789404, + "main_language": "English" } \ No newline at end of file diff --git a/eduagarcia/gemma-7b-it_singleturn_chat_template_eval_request_False_bfloat16_Original.json b/eduagarcia/gemma-7b-it_singleturn_chat_template_eval_request_False_bfloat16_Original.json index f839ae71fe02d53edfa9ab070245ed9e9d9f07e3..6448abc2094a34e38da1c3755380a16c04115e30 100644 --- a/eduagarcia/gemma-7b-it_singleturn_chat_template_eval_request_False_bfloat16_Original.json +++ b/eduagarcia/gemma-7b-it_singleturn_chat_template_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.20849546951839723 }, "result_metrics_average": 0.1426764423224328, - "result_metrics_npm": -0.3007127051013984 + "result_metrics_npm": -0.3007127051013984, + "main_language": "English" } \ No newline at end of file diff --git a/facebook/galactica-1.3b_eval_request_False_float16_Original.json b/facebook/galactica-1.3b_eval_request_False_float16_Original.json index 
db00455df1c5d7525dd0ed99595472611cd7347f..7af6d12e74aa425dddee35a6bc627bd72266d474 100644 --- a/facebook/galactica-1.3b_eval_request_False_float16_Original.json +++ b/facebook/galactica-1.3b_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/galactica-1.3b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 1.3, "architectures": "OPTForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:49Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/galactica-1.3b", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 1.3, + "architectures": "OPTForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:49Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/galactica-125m_eval_request_False_float16_Original.json b/facebook/galactica-125m_eval_request_False_float16_Original.json index c18b2e7e00a89e174c826c8065dae2ebaed9250c..046f80f62169a88c6287330337ce7f8f1d670fa2 100644 --- a/facebook/galactica-125m_eval_request_False_float16_Original.json +++ b/facebook/galactica-125m_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/galactica-125m", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.125, "architectures": "OPTForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:44Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/galactica-125m", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.125, + "architectures": "OPTForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:44Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/galactica-30b_eval_request_False_float16_Original.json b/facebook/galactica-30b_eval_request_False_float16_Original.json index 50bfd33b8045cead6813d2cf2d1e40a22f057a9e..dda27ae9ed6a10e2639ea36341483c642cb4108c 100644 --- a/facebook/galactica-30b_eval_request_False_float16_Original.json +++ b/facebook/galactica-30b_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/galactica-30b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 30.0, "architectures": "OPTForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:59Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/galactica-30b", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 30.0, + "architectures": "OPTForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:59Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git 
a/facebook/galactica-6.7b_eval_request_False_float16_Original.json b/facebook/galactica-6.7b_eval_request_False_float16_Original.json index d9b3d7eab53dbbe08f40da7ab0a68d0875f17c42..2b2a4563b29fed56695fa221380d68ed36946305 100644 --- a/facebook/galactica-6.7b_eval_request_False_float16_Original.json +++ b/facebook/galactica-6.7b_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/galactica-6.7b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 6.7, "architectures": "OPTForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:39:54Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/galactica-6.7b", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 6.7, + "architectures": "OPTForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:39:54Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/opt-1.3b_eval_request_False_float16_Original.json b/facebook/opt-1.3b_eval_request_False_float16_Original.json index a24736343044cdba35b1ca85d0513a7074453ee3..a70f16cf0ada858d263ee531726a3bf4a464db1d 100644 --- a/facebook/opt-1.3b_eval_request_False_float16_Original.json +++ b/facebook/opt-1.3b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 81, - "job_start_time": "2024-02-07T17-55-47.448141" + "job_start_time": "2024-02-07T17-55-47.448141", + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-125m_eval_request_False_float16_Original.json b/facebook/opt-125m_eval_request_False_float16_Original.json index 0a2881abd5046f2cd17f3f6173da3f7e54cdfcfa..82904d48e30ffb43012830679dfea970a40fa027 100644 --- a/facebook/opt-125m_eval_request_False_float16_Original.json +++ b/facebook/opt-125m_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 79, - "job_start_time": "2024-02-07T17-00-47.502507" + "job_start_time": "2024-02-07T17-00-47.502507", + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-13b_eval_request_False_float16_Original.json b/facebook/opt-13b_eval_request_False_float16_Original.json index bf522001eb125e17e600910ce82877c115657568..7faf7fbd021c38234614fb5a83b873faba816916 100644 --- a/facebook/opt-13b_eval_request_False_float16_Original.json +++ b/facebook/opt-13b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 84, - "job_start_time": "2024-02-07T19-29-22.779324" + "job_start_time": "2024-02-07T19-29-22.779324", + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-2.7b_eval_request_False_float16_Original.json b/facebook/opt-2.7b_eval_request_False_float16_Original.json index edc9d542eae43276334d64787adad9297c8c3950..50456cc31ef896687b4c6250cbdaf959207cfe2a 100644 --- a/facebook/opt-2.7b_eval_request_False_float16_Original.json +++ b/facebook/opt-2.7b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 82, - "job_start_time": "2024-02-07T18-37-10.420552" + "job_start_time": "2024-02-07T18-37-10.420552", + 
"main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-30b_eval_request_False_float16_Original.json b/facebook/opt-30b_eval_request_False_float16_Original.json index a827bdf7feee2ecfb86cac21053bc2668cab413d..bcaf242aa0ba12f23914bfa42681713bf1c1c1a3 100644 --- a/facebook/opt-30b_eval_request_False_float16_Original.json +++ b/facebook/opt-30b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5327918414745878 }, "result_metrics_average": 0.3954129903259924, - "result_metrics_npm": 0.12475670235820485 + "result_metrics_npm": 0.12475670235820485, + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-350m_eval_request_False_float16_Original.json b/facebook/opt-350m_eval_request_False_float16_Original.json index 579eadd11dbac37f0789a5f573e0e40a5bb300e7..d76b87bef1479509060f759cdfa87aaf18eacef3 100644 --- a/facebook/opt-350m_eval_request_False_float16_Original.json +++ b/facebook/opt-350m_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 80, - "job_start_time": "2024-02-07T17-20-17.729922" + "job_start_time": "2024-02-07T17-20-17.729922", + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-6.7b_eval_request_False_float16_Original.json b/facebook/opt-6.7b_eval_request_False_float16_Original.json index e823b1822d1953eef63af7f9682252084f06a27f..771a3f1f105f3f689bc6191067d457f7eb2c7666 100644 --- a/facebook/opt-6.7b_eval_request_False_float16_Original.json +++ b/facebook/opt-6.7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.16163733221014545 }, "result_metrics_average": 0.28646973983686086, - "result_metrics_npm": -0.07392776762776518 + "result_metrics_npm": -0.07392776762776518, + "main_language": "English" } \ No newline at end of file diff --git a/facebook/opt-66b_eval_request_False_float16_Original.json b/facebook/opt-66b_eval_request_False_float16_Original.json index e2dafaca812b6c7fec307cfd7292f6d2ce8a2f43..8861be5a0dc65290a89c47f21673d8c0213a0247 100644 --- a/facebook/opt-66b_eval_request_False_float16_Original.json +++ b/facebook/opt-66b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.3858315033756536 }, "result_metrics_average": 0.3498824996123715, - "result_metrics_npm": 0.028371803666412985 + "result_metrics_npm": 0.028371803666412985, + "main_language": "English" } \ No newline at end of file diff --git a/facebook/xglm-1.7B_eval_request_False_float16_Original.json b/facebook/xglm-1.7B_eval_request_False_float16_Original.json index 18c1fe88dc49073998480b4b5492fa03682d4398..8ab06d4897f0cdf10e023c32bc923db2d119d15d 100644 --- a/facebook/xglm-1.7B_eval_request_False_float16_Original.json +++ b/facebook/xglm-1.7B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/xglm-1.7B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 1.7, "architectures": "XGLMForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:40:16Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/xglm-1.7B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 1.7, + "architectures": "XGLMForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:40:16Z", + "model_type": "🟢 : pretrained", + 
"source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/xglm-2.9B_eval_request_False_float16_Original.json b/facebook/xglm-2.9B_eval_request_False_float16_Original.json index 901c7cf52f77e8b7e918b07849d7e9abdc9eb884..7c3533882ef6b92f340c66bc97ebde124cf7a230 100644 --- a/facebook/xglm-2.9B_eval_request_False_float16_Original.json +++ b/facebook/xglm-2.9B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/xglm-2.9B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 2.9, "architectures": "XGLMForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:40:25Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/xglm-2.9B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 2.9, + "architectures": "XGLMForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:40:25Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/xglm-4.5B_eval_request_False_float16_Original.json b/facebook/xglm-4.5B_eval_request_False_float16_Original.json index f2565a549191122683cc876298449b5393b29fec..20ebd1ad375a539f26f17c985628797b78ab5be3 100644 --- a/facebook/xglm-4.5B_eval_request_False_float16_Original.json +++ b/facebook/xglm-4.5B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/xglm-4.5B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 5.077, "architectures": "XGLMForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:40:34Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/xglm-4.5B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 5.077, + "architectures": "XGLMForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:40:34Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/xglm-564M_eval_request_False_float16_Original.json b/facebook/xglm-564M_eval_request_False_float16_Original.json index 07e2a7cd4155018630491eb13d1f92743951af64..85c706ff96f4fcbcf8d6d9b6059146485bd1e546 100644 --- a/facebook/xglm-564M_eval_request_False_float16_Original.json +++ b/facebook/xglm-564M_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/xglm-564M", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0.564, "architectures": "XGLMForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:40:04Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/xglm-564M", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0.564, + "architectures": "XGLMForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:40:04Z", 
+ "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/facebook/xglm-7.5B_eval_request_False_float16_Original.json b/facebook/xglm-7.5B_eval_request_False_float16_Original.json index ab251fb48bd65b8889cdeed5dcaee247c22fc0f9..7655b09e6f2adeedc53892812090dd8d64c7e7bb 100644 --- a/facebook/xglm-7.5B_eval_request_False_float16_Original.json +++ b/facebook/xglm-7.5B_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "facebook/xglm-7.5B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 7.5, "architectures": "XGLMForCausalLM", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:40:46Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "facebook/xglm-7.5B", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 7.5, + "architectures": "XGLMForCausalLM", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:40:46Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json b/fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json index 3f7ff43f651223c228eeb902d95fbb7130ebde6b..b46e13f2aa33ffda691b17e4448d1f5de162d78a 100644 --- a/fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json +++ b/fernandosola/bluearara-7B-instruct_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5448226084485626 }, "result_metrics_average": 0.43231598069332267, - "result_metrics_npm": 0.1728623143276737 + "result_metrics_npm": 0.1728623143276737, + "main_language": "?" 
} \ No newline at end of file diff --git a/fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json b/fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json index f4802dd68e11a4f92462a8d7ad06801fbb533019..590e37b39d95d2dd2613255f11fcb1e2b1eb4de7 100644 --- a/fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json +++ b/fernandosola/bluearara-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4847783653404505 }, "result_metrics_average": 0.4059146139544385, - "result_metrics_npm": 0.13413365006382902 + "result_metrics_npm": 0.13413365006382902, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/fernandosola/bluearara-7B_eval_request_False_float16_Original.json b/fernandosola/bluearara-7B_eval_request_False_float16_Original.json index 6b59c8a78ab5f2f833f058e0b554a975fdad060d..7828fac0d878157073928b78c84cf0fa4de17d44 100644 --- a/fernandosola/bluearara-7B_eval_request_False_float16_Original.json +++ b/fernandosola/bluearara-7B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4674139206717111 }, "result_metrics_average": 0.3935606636087955, - "result_metrics_npm": 0.11148954880306625 + "result_metrics_npm": 0.11148954880306625, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/google/gemma-2b-it_eval_request_False_bfloat16_Original.json b/google/gemma-2b-it_eval_request_False_bfloat16_Original.json index b009150a216359831c2c6c94563962e908f8bd66..709be24af92f762d9e2304f794d66d8ae131ae3d 100644 --- a/google/gemma-2b-it_eval_request_False_bfloat16_Original.json +++ b/google/gemma-2b-it_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.38805657029537627 }, "result_metrics_average": 0.34082250795127883, - "result_metrics_npm": 0.008614141876633459 + "result_metrics_npm": 0.008614141876633459, + "main_language": "English" } \ No newline at end of file diff --git a/google/gemma-2b_eval_request_False_bfloat16_Original.json b/google/gemma-2b_eval_request_False_bfloat16_Original.json index 6b6d9b8e60d531a8b70b0c026fce250b5b75729d..d6332478cade63319e026be06f6aefe07629009e 100644 --- a/google/gemma-2b_eval_request_False_bfloat16_Original.json +++ b/google/gemma-2b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5519025982637341 }, "result_metrics_average": 0.44413621368052186, - "result_metrics_npm": 0.17161625549410653 + "result_metrics_npm": 0.17161625549410653, + "main_language": "English" } \ No newline at end of file diff --git a/google/gemma-7b-it_eval_request_False_bfloat16_Original.json b/google/gemma-7b-it_eval_request_False_bfloat16_Original.json index 7ef4ae37f621be927c416c4c807473791ae2d205..6503d949d0359b7d2ed0b0f8d5fe49a4b91bc487 100644 --- a/google/gemma-7b-it_eval_request_False_bfloat16_Original.json +++ b/google/gemma-7b-it_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.20300571934116485 }, "result_metrics_average": 0.47663784635744416, - "result_metrics_npm": 0.2285834515065909 + "result_metrics_npm": 0.2285834515065909, + "main_language": "English" } \ No newline at end of file diff --git a/google/gemma-7b-it_eval_request_False_float16_Original.json b/google/gemma-7b-it_eval_request_False_float16_Original.json index 9c24feddad778eca3a213f2a51698659f8f25591..f80f0b1b5fb8ba9292e523e0bbc7cf14353b7cd5 100644 --- a/google/gemma-7b-it_eval_request_False_float16_Original.json +++ b/google/gemma-7b-it_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ 
"tweetsentbr": 0.2376686015020967 }, "result_metrics_average": 0.4963584246845505, - "result_metrics_npm": 0.2530151466861248 + "result_metrics_npm": 0.2530151466861248, + "main_language": "English" } \ No newline at end of file diff --git a/google/gemma-7b_eval_request_False_bfloat16_Original.json b/google/gemma-7b_eval_request_False_bfloat16_Original.json index 4f90578ae3a04833c9ee1f20dea625929c737027..f3d5c332ba1c8656496ea481274eabd5508fc5f7 100644 --- a/google/gemma-7b_eval_request_False_bfloat16_Original.json +++ b/google/gemma-7b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6677004780062082 }, "result_metrics_average": 0.6387664509216472, - "result_metrics_npm": 0.4519999814306199 + "result_metrics_npm": 0.4519999814306199, + "main_language": "English" } \ No newline at end of file diff --git a/google/mt5-base_eval_request_False_bfloat16_Original.json b/google/mt5-base_eval_request_False_bfloat16_Original.json index fd99fcac523728635bf4fec0700dbb1df126611b..1a895f7d5f43188d09fcd912c88daf802a98ae9c 100644 --- a/google/mt5-base_eval_request_False_bfloat16_Original.json +++ b/google/mt5-base_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/mt5-base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "MT5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:30Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/mt5-base", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "MT5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:30Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/mt5-large_eval_request_False_bfloat16_Original.json b/google/mt5-large_eval_request_False_bfloat16_Original.json index ab2907b131a7864685e37de90a30b7ed37e6a4ad..af3a7eed7712db2b5797dca4b4c2ad09765e6c69 100644 --- a/google/mt5-large_eval_request_False_bfloat16_Original.json +++ b/google/mt5-large_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/mt5-large", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "MT5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:35Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/mt5-large", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "MT5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:35Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/mt5-small_eval_request_False_bfloat16_Original.json b/google/mt5-small_eval_request_False_bfloat16_Original.json index 088856e65f9e5a2d7b72952eea5535339debe304..12040d2b4cec508d765b25cf6e686b2571b519d5 100644 --- a/google/mt5-small_eval_request_False_bfloat16_Original.json +++ 
b/google/mt5-small_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/mt5-small", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "MT5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:25Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/mt5-small", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "MT5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:25Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/mt5-xl_eval_request_False_bfloat16_Original.json b/google/mt5-xl_eval_request_False_bfloat16_Original.json index f85802f1bc62c723a88c28f391519cfda8f81b9f..676c67239d79f3852d7e4c799f28536362a87015 100644 --- a/google/mt5-xl_eval_request_False_bfloat16_Original.json +++ b/google/mt5-xl_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/mt5-xl", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "MT5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:37Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/mt5-xl", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "MT5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:37Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/mt5-xxl_eval_request_False_bfloat16_Original.json b/google/mt5-xxl_eval_request_False_bfloat16_Original.json index 556ddc945d1a75e2d37af79e37c9a8409a4ecead..7d26bb1c325e46586a66d4e9595a0ca79bb400a9 100644 --- a/google/mt5-xxl_eval_request_False_bfloat16_Original.json +++ b/google/mt5-xxl_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/mt5-xxl", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:42Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/mt5-xxl", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:42Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/t5-v1_1-base_eval_request_False_bfloat16_Original.json b/google/t5-v1_1-base_eval_request_False_bfloat16_Original.json index 543eba3665277ad37da27c55f11e829c7ffe779d..3a49be63053ed8f8ad2c4f39a37181bd05ad8280 100644 --- 
a/google/t5-v1_1-base_eval_request_False_bfloat16_Original.json +++ b/google/t5-v1_1-base_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/t5-v1_1-base", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:14Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/t5-v1_1-base", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:14Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/t5-v1_1-large_eval_request_False_bfloat16_Original.json b/google/t5-v1_1-large_eval_request_False_bfloat16_Original.json index 97f81bd31b2ad1282bedce569bf0389c65ad6282..1db45573c6cbe64756142535a2c3570b153c3189 100644 --- a/google/t5-v1_1-large_eval_request_False_bfloat16_Original.json +++ b/google/t5-v1_1-large_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/t5-v1_1-large", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:23Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/t5-v1_1-large", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:23Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/t5-v1_1-small_eval_request_False_bfloat16_Original.json b/google/t5-v1_1-small_eval_request_False_bfloat16_Original.json index 9c72d88337e5c82a9422756a6ab6dd0c83406e82..611c0e93f81490d62cc7a30236921e2ae051018e 100644 --- a/google/t5-v1_1-small_eval_request_False_bfloat16_Original.json +++ b/google/t5-v1_1-small_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/t5-v1_1-small", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:20Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/t5-v1_1-small", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:20Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/t5-v1_1-xl_eval_request_False_bfloat16_Original.json 
b/google/t5-v1_1-xl_eval_request_False_bfloat16_Original.json index 7f9fae60dbdde8d145e52ce176e92228b4457ee9..5125dbb24342496034c2002b61b0ee105655c77a 100644 --- a/google/t5-v1_1-xl_eval_request_False_bfloat16_Original.json +++ b/google/t5-v1_1-xl_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/t5-v1_1-xl", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:27Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/t5-v1_1-xl", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:27Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/t5-v1_1-xxl_eval_request_False_bfloat16_Original.json b/google/t5-v1_1-xxl_eval_request_False_bfloat16_Original.json index 08715c7781bb8986718d8d2423216867a622a0a0..2940a0cce28c41a34bb5f269197ccb454a361052 100644 --- a/google/t5-v1_1-xxl_eval_request_False_bfloat16_Original.json +++ b/google/t5-v1_1-xxl_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/t5-v1_1-xxl", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "T5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:31Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/t5-v1_1-xxl", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "T5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:31Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/google/umt5-base_eval_request_False_bfloat16_Original.json b/google/umt5-base_eval_request_False_bfloat16_Original.json index 8a25e7b5e17ca947b3dce221f6917e2f655fdd1a..d3a034c3ccfd3ad64e9e0c9ecd2fc0cda3d35d30 100644 --- a/google/umt5-base_eval_request_False_bfloat16_Original.json +++ b/google/umt5-base_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 206, - "job_start_time": "2024-02-15T05-17-17.944550" + "job_start_time": "2024-02-15T05-17-17.944550", + "main_language": "English" } \ No newline at end of file diff --git a/google/umt5-small_eval_request_False_bfloat16_Original.json b/google/umt5-small_eval_request_False_bfloat16_Original.json index 73bbb105a53b62763d5716ca2a7eb9f2b31982a7..4640685036362b76e3fc2cc9c95091c1e1f4ddce 100644 --- a/google/umt5-small_eval_request_False_bfloat16_Original.json +++ b/google/umt5-small_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 205, - "job_start_time": "2024-02-15T02-03-26.881523" + "job_start_time": "2024-02-15T02-03-26.881523", + "main_language": "English" } \ No newline at 
end of file diff --git a/google/umt5-xxl_eval_request_False_bfloat16_Original.json b/google/umt5-xxl_eval_request_False_bfloat16_Original.json index 645eb768f080e0f682e366443447cb8e100b9e50..9e53146ca28440f0bb24e7156496cbc4a85707f8 100644 --- a/google/umt5-xxl_eval_request_False_bfloat16_Original.json +++ b/google/umt5-xxl_eval_request_False_bfloat16_Original.json @@ -1 +1,17 @@ -{"model": "google/umt5-xxl", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 0, "architectures": "UMT5ForConditionalGeneration", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:51Z", "model_type": "\ud83d\udfe2 : pretrained", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "google/umt5-xxl", + "base_model": "", + "revision": "main", + "private": false, + "precision": "bfloat16", + "params": 0, + "architectures": "UMT5ForConditionalGeneration", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:51Z", + "model_type": "🟢 : pretrained", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "English" +} \ No newline at end of file diff --git a/gpt2_eval_request_False_float16_Original.json b/gpt2_eval_request_False_float16_Original.json index 42fc188005c4a3b319f70b233c09e6d449112bae..a8a97e1085047730fb5047f48e65cafc7381d285 100644 --- a/gpt2_eval_request_False_float16_Original.json +++ b/gpt2_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.1506866897702477 }, "result_metrics_average": 0.2330603674905938, - "result_metrics_npm": -0.16921099864311392 + "result_metrics_npm": -0.16921099864311392, + "main_language": "English" } \ No newline at end of file diff --git a/huggyllama/llama-13b_eval_request_False_float16_Original.json b/huggyllama/llama-13b_eval_request_False_float16_Original.json index 719a1c8788f5eb5c5011ec3a13527971eb2ee4f8..0be1651868daa2e328b4654ecd8fdaa06fb42597 100644 --- a/huggyllama/llama-13b_eval_request_False_float16_Original.json +++ b/huggyllama/llama-13b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5685631464991553 }, "result_metrics_average": 0.5298352465569122, - "result_metrics_npm": 0.3220459477729154 + "result_metrics_npm": 0.3220459477729154, + "main_language": "English" } \ No newline at end of file diff --git a/huggyllama/llama-30b_eval_request_False_float16_Original.json b/huggyllama/llama-30b_eval_request_False_float16_Original.json index 2d821061955a2d20ef9289b0e917af28f77b1b02..ca7d11c626db14f4f73d3acc3074a72412512e5c 100644 --- a/huggyllama/llama-30b_eval_request_False_float16_Original.json +++ b/huggyllama/llama-30b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6009045227861195 }, "result_metrics_average": 0.5967608935766783, - "result_metrics_npm": 0.3848156782676072 + "result_metrics_npm": 0.3848156782676072, + "main_language": "English" } \ No newline at end of file diff --git a/huggyllama/llama-65b_eval_request_False_float16_Original.json b/huggyllama/llama-65b_eval_request_False_float16_Original.json index 4fe85336d1df075640293a6a2aafdafeae8cf035..195edaf801673f283df217a170ba1e5f53b75f9e 100644 --- a/huggyllama/llama-65b_eval_request_False_float16_Original.json +++ b/huggyllama/llama-65b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6729660354724708 }, "result_metrics_average": 0.6223684910582947, - "result_metrics_npm": 0.413676060283789 + "result_metrics_npm": 
0.413676060283789, + "main_language": "English" } \ No newline at end of file diff --git a/huggyllama/llama-7b_eval_request_False_float16_Original.json b/huggyllama/llama-7b_eval_request_False_float16_Original.json index e86ecff87a0db4531974f3fef9de94052d62a156..7572c62606346ffb33c537981321b66d2f4a429c 100644 --- a/huggyllama/llama-7b_eval_request_False_float16_Original.json +++ b/huggyllama/llama-7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5949836280294828 }, "result_metrics_average": 0.3347101298048668, - "result_metrics_npm": -0.019421871708215212 + "result_metrics_npm": -0.019421871708215212, + "main_language": "English" } \ No newline at end of file diff --git a/internlm/internlm-20b_eval_request_False_bfloat16_Original.json b/internlm/internlm-20b_eval_request_False_bfloat16_Original.json index 663f2825eb8c62da21b147932c1aeafcf8f8a493..55b3deb2e8b6856cfe2e5e024b60528ffb91a23b 100644 --- a/internlm/internlm-20b_eval_request_False_bfloat16_Original.json +++ b/internlm/internlm-20b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5348047712369928 }, "result_metrics_average": 0.679711344567989, - "result_metrics_npm": 0.5249391006784612 + "result_metrics_npm": 0.5249391006784612, + "main_language": "?" } \ No newline at end of file diff --git a/internlm/internlm-7b_eval_request_False_float16_Original.json b/internlm/internlm-7b_eval_request_False_float16_Original.json index b86270adabadc03782040947d7b0a0e40616b61a..22f8dcfa9dca9c2349604d05746a9b8b9af34c9f 100644 --- a/internlm/internlm-7b_eval_request_False_float16_Original.json +++ b/internlm/internlm-7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.578002483815162 }, "result_metrics_average": 0.47268682021850583, - "result_metrics_npm": 0.20951746086587958 + "result_metrics_npm": 0.20951746086587958, + "main_language": "?" } \ No newline at end of file diff --git a/internlm/internlm2-1_8b_eval_request_False_bfloat16_Original.json b/internlm/internlm2-1_8b_eval_request_False_bfloat16_Original.json index a0c8b88cd9a1516690dfd6981a2449b791572c2b..21214ef3240ba78a3a0c5f749b0af90f7362fff4 100644 --- a/internlm/internlm2-1_8b_eval_request_False_bfloat16_Original.json +++ b/internlm/internlm2-1_8b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4580804674594538 }, "result_metrics_average": 0.37024732502806157, - "result_metrics_npm": 0.04237054568941951 + "result_metrics_npm": 0.04237054568941951, + "main_language": "?" } \ No newline at end of file diff --git a/internlm/internlm2-20b_eval_request_False_bfloat16_Original.json b/internlm/internlm2-20b_eval_request_False_bfloat16_Original.json index 58dc6cfd6b22c118fb96111d5214045b77f3d1a7..8e99c1d69353233da06c61682d1b09272ae36eeb 100644 --- a/internlm/internlm2-20b_eval_request_False_bfloat16_Original.json +++ b/internlm/internlm2-20b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6848095629249187 }, "result_metrics_average": 0.6942095763147612, - "result_metrics_npm": 0.5338295826973051 + "result_metrics_npm": 0.5338295826973051, + "main_language": "?" 
 }
\ No newline at end of file
diff --git a/internlm/internlm2-7b_eval_request_False_bfloat16_Original.json b/internlm/internlm2-7b_eval_request_False_bfloat16_Original.json
index ec07ebc887c37bc13b9b0ec4a9622e6a9c8719d3..616788b3d58b4f69a1380cdde8421c572eb1343a 100644
--- a/internlm/internlm2-7b_eval_request_False_bfloat16_Original.json
+++ b/internlm/internlm2-7b_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.6636091915630467
     },
     "result_metrics_average": 0.634362168185937,
-    "result_metrics_npm": 0.43720596129981004
+    "result_metrics_npm": 0.43720596129981004,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/internlm/internlm2-base-20b_eval_request_False_bfloat16_Original.json b/internlm/internlm2-base-20b_eval_request_False_bfloat16_Original.json
index 3a0ef6b5dd1e5ba58955226ae733a7544d5874f0..853db3d4901b27e4cfd59eec74f1132fdda72d70 100644
--- a/internlm/internlm2-base-20b_eval_request_False_bfloat16_Original.json
+++ b/internlm/internlm2-base-20b_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.597596515284592
     },
     "result_metrics_average": 0.6470685786650505,
-    "result_metrics_npm": 0.4593771467862757
+    "result_metrics_npm": 0.4593771467862757,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/internlm/internlm2-base-7b_eval_request_False_bfloat16_Original.json b/internlm/internlm2-base-7b_eval_request_False_bfloat16_Original.json
index b663a63834753c4ed80494ed924b11f07a050e9e..bcee8c7c79e13dec452bf57f4b4ac5a7bed5f156 100644
--- a/internlm/internlm2-base-7b_eval_request_False_bfloat16_Original.json
+++ b/internlm/internlm2-base-7b_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.6007880391042911
     },
     "result_metrics_average": 0.5530807009049986,
-    "result_metrics_npm": 0.30828390314840837
+    "result_metrics_npm": 0.30828390314840837,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/internlm/internlm2-chat-1_8b_eval_request_False_float16_Original.json b/internlm/internlm2-chat-1_8b_eval_request_False_float16_Original.json
index 6cb84fcd5160c2d27fbe33e6a07629f2ab06c43f..cae74692b02749fe0e1a0acedb1ebc969c21f592 100644
--- a/internlm/internlm2-chat-1_8b_eval_request_False_float16_Original.json
+++ b/internlm/internlm2-chat-1_8b_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.4016425862508011
     },
     "result_metrics_average": 0.49214506342262265,
-    "result_metrics_npm": 0.24338722214495512
+    "result_metrics_npm": 0.24338722214495512,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/internlm/internlm2-chat-20b_eval_request_False_float16_Original.json b/internlm/internlm2-chat-20b_eval_request_False_float16_Original.json
index bb192383e0c68c19f654f6f78222e071ee02605a..eeb8de0abcd5dc46126f19ce6d972142ca0084b1 100644
--- a/internlm/internlm2-chat-20b_eval_request_False_float16_Original.json
+++ b/internlm/internlm2-chat-20b_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5573254035000713
     },
     "result_metrics_average": 0.6458686427895824,
-    "result_metrics_npm": 0.4554919111495502
+    "result_metrics_npm": 0.4554919111495502,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/internlm/internlm2-chat-7b_eval_request_False_float16_Original.json b/internlm/internlm2-chat-7b_eval_request_False_float16_Original.json
index 2f403c4d6c18e29ea168f9792768c553bff62ef6..f9b5e068f1800f2b7cf8c4572d6a71ba353e5757 100644
--- a/internlm/internlm2-chat-7b_eval_request_False_float16_Original.json
+++ b/internlm/internlm2-chat-7b_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.6153192807721571
     },
     "result_metrics_average": 0.6981461888193505,
-    "result_metrics_npm": 0.5569862407191862
+    "result_metrics_npm": 0.5569862407191862,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/josu/gpt-neo-pt-1.3B_eval_request_False_float16_Original.json b/josu/gpt-neo-pt-1.3B_eval_request_False_float16_Original.json
index df5edc4386ccc6d2584369079deef807ab270301..3f0684713e0913239e4bf126ab45ef5716b3ab71 100644
--- a/josu/gpt-neo-pt-1.3B_eval_request_False_float16_Original.json
+++ b/josu/gpt-neo-pt-1.3B_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.0
     },
     "result_metrics_average": 0.11254916606345369,
-    "result_metrics_npm": -0.39662382064506585
+    "result_metrics_npm": -0.39662382064506585,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/josu/gpt-neo-pt-br_eval_request_False_float16_Original.json b/josu/gpt-neo-pt-br_eval_request_False_float16_Original.json
index e28a9fa66c695324e84232d370fc2fdc10f81ca9..41fb187923b274cf138db74383b253a459c56b82 100644
--- a/josu/gpt-neo-pt-br_eval_request_False_float16_Original.json
+++ b/josu/gpt-neo-pt-br_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.10755827436961755
     },
     "result_metrics_average": 0.19539944967227518,
-    "result_metrics_npm": -0.24163039991147078
+    "result_metrics_npm": -0.24163039991147078,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/lmsys/vicuna-13b-v1.5_eval_request_False_float16_Original.json b/lmsys/vicuna-13b-v1.5_eval_request_False_float16_Original.json
index 8c8b70eaa34de79bce86e728bf8970d65f45a4c0..45983fd8a4bc620a2c9a5d95c5e2599cf66262db 100644
--- a/lmsys/vicuna-13b-v1.5_eval_request_False_float16_Original.json
+++ b/lmsys/vicuna-13b-v1.5_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5782348144944496
     },
     "result_metrics_average": 0.6367429534187249,
-    "result_metrics_npm": 0.46015130973622337
+    "result_metrics_npm": 0.46015130973622337,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json b/lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json
index 95d71ed5edd463ed5145be9b49a3f266055ad726..f37b9aea586639638ef90ed0db94e58c36750283 100644
--- a/lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json
+++ b/lmsys/vicuna-33b-v1.3_eval_request_False_float16_Original.json
@@ -14,5 +14,6 @@
     "job_id": 331,
    "job_start_time": "2024-04-02T03-13-40.175246",
     "error_msg": "(MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')",
-    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')\n"
+    "traceback": "Traceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 444, in _error_catcher\n yield\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 567, in read\n data = self._fp_read(amt) if not fp_closed else b\"\"\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 533, in _fp_read\n return self._fp.read(amt) if amt is not None else self._fp.read()\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/http/client.py\", line 473, in read\n s = self.fp.read(amt)\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/socket.py\", line 706, in readinto\n return self._sock.recv_into(b)\n ^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1315, in recv_into\n return self.read(nbytes, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/ssl.py\", line 1167, in read\n return self._sslobj.read(len, buffer)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTimeoutError: The read operation timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 816, in generate\n yield from self.raw.stream(chunk_size, decode_content=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 628, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 566, in read\n with self._error_catcher():\n File \"/root/miniconda3/envs/torch21/lib/python3.11/contextlib.py\", line 158, in __exit__\n self.gen.throw(typ, value, traceback)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/response.py\", line 449, in _error_catcher\n raise ReadTimeoutError(self._pool, None, \"Read timed out.\")\nurllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 524, in http_get\n for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/models.py\", line 822, in generate\n raise ConnectionError(e)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Read timed out.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 715, in urlopen\n httplib_response = self._make_request(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 404, in _make_request\n self._validate_conn(conn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1058, in _validate_conn\n conn.connect()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connection.py\", line 179, in _new_conn\n raise ConnectTimeoutError(\nurllib3.exceptions.ConnectTimeoutError: (, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)')\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 799, in urlopen\n retries = retries.increment(\n ^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3264, in from_pretrained\n resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 1038, in get_checkpoint_shard_files\n cached_filename = cached_file(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/utils/hub.py\", line 398, in cached_file\n resolved_file = hf_hub_download(\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1457, in hf_hub_download\n http_get(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 541, in http_get\n return http_get(\n ^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 451, in http_get\n r = _request_wrapper(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 408, in _request_wrapper\n response = get_session().request(method=method, url=url, **params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 67, in send\n return super().send(request, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/requests/adapters.py\", line 507, in send\n raise ConnectTimeout(e, request=request)\nrequests.exceptions.ConnectTimeout: (MaxRetryError(\"HTTPSConnectionPool(host='cdn-lfs.huggingface.co', port=443): Max retries exceeded with url: /repos/0b/d9/0bd95a8445a38bce13b7b997bf365a6244cc6b8e9d9a1fa611a4ca42bd8a322d/445c09eaaef05a199f775733a0a66a50da5dae1f68b8f484178cbd8aed5f5b67?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27pytorch_model-00004-of-00007.bin%3B+filename%3D%22pytorch_model-00004-of-00007.bin%22%3B&response-content-type=application%2Foctet-stream&Expires=1712288010&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcxMjI4ODAxMH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8wYi9kOS8wYmQ5NWE4NDQ1YTM4YmNlMTNiN2I5OTdiZjM2NWE2MjQ0Y2M2YjhlOWQ5YTFmYTYxMWE0Y2E0MmJkOGEzMjJkLzQ0NWMwOWVhYWVmMDVhMTk5Zjc3NTczM2EwYTY2YTUwZGE1ZGFlMWY2OGI4ZjQ4NDE3OGNiZDhhZWQ1ZjViNjc~cmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=r4nCdNt6B2~ptQXsAFwp6gdicyOzI9lVNu27vwen4l7MHi8bF8QkgucNoV92WMvxWwfReVsVzHOze507yP3Os1-95ecOSZGtqbGPhNqpbDYJrEnJcCt9ug0vSqCYme1JvUYmGavHYIEs9uq-biPrN9soJDNOFvDGBy8l1aRnQJjb7pWx4odVKMrjHf5hg4ys-w93hg3C14jaMGVYhhut3XSNtWEwQ3K1YEB~6hZODMGNqkmjvDIJsxi0WhTXUec~N9KFvLx8TXi3x1art~7UD4M~XLOGWmQfRkIUOYWXAm9ZUr3NywGrwxmuqJvcOhS-lJ9SrIVU3JAT1CWV1G1fxQ__&Key-Pair-Id=KVTP0A1DKRTAX (Caused by ConnectTimeoutError(, 'Connection to cdn-lfs.huggingface.co timed out. (connect timeout=10)'))\"), '(Request ID: 842d7bd4-c273-4885-aab9-e56d00d98eb6)')\n",
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/lmsys/vicuna-7b-v1.5_eval_request_False_float16_Original.json b/lmsys/vicuna-7b-v1.5_eval_request_False_float16_Original.json
index 44300986c8396b282fb0217be46394f722585447..288880d58ef804433ac4c4c35006c28022d8291a 100644
--- a/lmsys/vicuna-7b-v1.5_eval_request_False_float16_Original.json
+++ b/lmsys/vicuna-7b-v1.5_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.598923029569774
     },
     "result_metrics_average": 0.5702960057897264,
-    "result_metrics_npm": 0.36327170040276774
+    "result_metrics_npm": 0.36327170040276774,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/lrds-code/boana-7b-instruct_eval_request_False_bfloat16_Original.json b/lrds-code/boana-7b-instruct_eval_request_False_bfloat16_Original.json
index 9049aa67872e13dd35f39dfda204c1ae2fd13927..cd12d40f51a986b5d4f5af01acf826e9ed0f7232 100644
--- a/lrds-code/boana-7b-instruct_eval_request_False_bfloat16_Original.json
+++ b/lrds-code/boana-7b-instruct_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.4037922571146918
     },
     "result_metrics_average": 0.4457238657513895,
-    "result_metrics_npm": 0.18282320960894285
+    "result_metrics_npm": 0.18282320960894285,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/lrds-code/samba-1.1B_eval_request_False_bfloat16_Original.json b/lrds-code/samba-1.1B_eval_request_False_bfloat16_Original.json
index 239f2d5a5027300706d4a44502926393a5fa430b..b25e8bda0a855579737de1196e4bc033c97e6cbe 100644
--- a/lrds-code/samba-1.1B_eval_request_False_bfloat16_Original.json
+++ b/lrds-code/samba-1.1B_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.03273007518573507
     },
     "result_metrics_average": 0.16887824041133082,
-    "result_metrics_npm": -0.26600705330094676
+    "result_metrics_npm": -0.26600705330094676,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/maritaca-ai/sabia-7b_eval_request_False_float16_Original.json b/maritaca-ai/sabia-7b_eval_request_False_float16_Original.json
index da9e64fae2c27337b7e2565b3688985536f2f62b..8c6f7f2e55f201a23d991549827cbb7a93ac83e3 100644
--- a/maritaca-ai/sabia-7b_eval_request_False_float16_Original.json
+++ b/maritaca-ai/sabia-7b_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.46639312914326236
     },
     "result_metrics_average": 0.47088513412710825,
-    "result_metrics_npm": 0.21391066022971233
+    "result_metrics_npm": 0.21391066022971233,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/matsuo-lab/weblab-10b_eval_request_False_float16_Original.json b/matsuo-lab/weblab-10b_eval_request_False_float16_Original.json
index 438936ddb7b05dd990be196c104d6e1de9132d58..9f445332336bf6748ba723fbe748655567fae16c 100644
--- a/matsuo-lab/weblab-10b_eval_request_False_float16_Original.json
+++ b/matsuo-lab/weblab-10b_eval_request_False_float16_Original.json
@@ -12,5 +12,6 @@
     "model_type": "🟢 : pretrained",
     "source": "script",
     "job_id": 204,
-    "job_start_time": "2024-02-15T00-41-06.033489"
+    "job_start_time": "2024-02-15T00-41-06.033489",
+    "main_language": "Other"
 }
\ No newline at end of file
diff --git a/meta-llama/Llama-2-13b-hf_eval_request_False_float16_Original.json b/meta-llama/Llama-2-13b-hf_eval_request_False_float16_Original.json
index ed3fa28459da78d9a7a6f64a0e13cc41d885df65..3bcddee2316c57ba01895781976e85bebc56d8e6 100644
--- a/meta-llama/Llama-2-13b-hf_eval_request_False_float16_Original.json
+++ b/meta-llama/Llama-2-13b-hf_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.560251113736967
     },
     "result_metrics_average": 0.5749262910144977,
-    "result_metrics_npm": 0.3617628150284507
+    "result_metrics_npm": 0.3617628150284507,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json b/meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json
index 7fbde2044a620164a9b175dbc9ad684ed5536852..b2ff090f22bc8b359728b47e054ce0db2bc048a1 100644
--- a/meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json
+++ b/meta-llama/Llama-2-70b-chat-hf_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5394249643958294
     },
     "result_metrics_average": 0.6568819949823851,
-    "result_metrics_npm": 0.4968097494880348
+    "result_metrics_npm": 0.4968097494880348,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json b/meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json
index d0f8cc79c8e9dc6e880ff6aafaab7f9a99e17323..703b984ca5f582d05ce27117face2e4bdec0a1e6 100644
--- a/meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json
+++ b/meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.6889417684152425
     },
     "result_metrics_average": 0.6772842585120299,
-    "result_metrics_npm": 0.49752162525696636
+    "result_metrics_npm": 0.49752162525696636,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json b/meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json
index 018c642872565a65fe26a51fb698c119e4a32e4d..9dc9aaa509c12ed1fe2dcf8f1528b18a96467e40 100644
--- a/meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json
+++ b/meta-llama/Llama-2-7b-hf_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5906311367573921
     },
     "result_metrics_average": 0.48902418811379506,
-    "result_metrics_npm": 0.2489201920526417
+    "result_metrics_npm": 0.2489201920526417,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/microsoft/phi-1_5_eval_request_False_float16_Original.json b/microsoft/phi-1_5_eval_request_False_float16_Original.json
index abd7441cecf1de2fa17a0f07d3cae37a6f7ca842..5a707874f5303358281018ec6f3b664a7152f597 100644
--- a/microsoft/phi-1_5_eval_request_False_float16_Original.json
+++ b/microsoft/phi-1_5_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.3288095806256486
     },
     "result_metrics_average": 0.2840818682517694,
-    "result_metrics_npm": -0.09968724524802117
+    "result_metrics_npm": -0.09968724524802117,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/microsoft/phi-1_eval_request_False_float16_Original.json b/microsoft/phi-1_eval_request_False_float16_Original.json
index 1d26d5d6bd97c610d44c8876d4c71124be5714db..75d17ba804b1d34cab30c3664e76f05bd33e1a8c 100644
--- a/microsoft/phi-1_eval_request_False_float16_Original.json
+++ b/microsoft/phi-1_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.24545755510800893
     },
     "result_metrics_average": 0.27535656333204706,
-    "result_metrics_npm": -0.09846172362708186
+    "result_metrics_npm": -0.09846172362708186,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/microsoft/phi-2_eval_request_False_float16_Original.json b/microsoft/phi-2_eval_request_False_float16_Original.json
index 20b629e2de32018cf628b16c2e72ee8f58acfd60..3b3e1f9f55abe3b12649c513c11fe1b6c52e1312 100644
--- a/microsoft/phi-2_eval_request_False_float16_Original.json
+++ b/microsoft/phi-2_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.3637209670732187
     },
     "result_metrics_average": 0.36517919759969963,
-    "result_metrics_npm": 0.047123728910760526
+    "result_metrics_npm": 0.047123728910760526,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mistralai/Mistral-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json b/mistralai/Mistral-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json
index cbbe3dd93c81472e2d8c5e44941a2070021c7f7c..239e0ed91c1f7ccbd420e6e7f1c0eed90c77f94f 100644
--- a/mistralai/Mistral-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json
+++ b/mistralai/Mistral-7B-Instruct-v0.2_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5056730165368587
     },
     "result_metrics_average": 0.6480934189188431,
-    "result_metrics_npm": 0.47857208866539325
+    "result_metrics_npm": 0.47857208866539325,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json b/mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json
index bb92d66dfd504dce8df3ee1d81c1355ce3f710c4..f4473a18e66117f902204eb01cad79eee7e18c39 100644
--- a/mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json
+++ b/mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.5683829062715523
     },
     "result_metrics_average": 0.611272278420037,
-    "result_metrics_npm": 0.4131773387748313
+    "result_metrics_npm": 0.4131773387748313,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mistralai/Mixtral-8x7B-Instruct-v0.1_eval_request_False_bfloat16_Original.json b/mistralai/Mixtral-8x7B-Instruct-v0.1_eval_request_False_bfloat16_Original.json
index 9c4e2911fd456aba73ebcece6ec03545c982b681..604bfb497610bbb69920e95084351954ef14c16a 100644
--- a/mistralai/Mixtral-8x7B-Instruct-v0.1_eval_request_False_bfloat16_Original.json
+++ b/mistralai/Mixtral-8x7B-Instruct-v0.1_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.7046312157201152
     },
     "result_metrics_average": 0.6970627681087668,
-    "result_metrics_npm": 0.5290515708117544
+    "result_metrics_npm": 0.5290515708117544,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json b/mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json
index 875d835ca0f6731f713b86f6820e13ea1cb3aaef..21ce65a1f7ad0a34a583a805db063b696d261795 100644
--- a/mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json
+++ b/mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.7182878118620253
     },
     "result_metrics_average": 0.6787139689055947,
-    "result_metrics_npm": 0.4944446456685305
+    "result_metrics_npm": 0.4944446456685305,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mlabonne/Monarch-7B_eval_request_False_bfloat16_Original.json b/mlabonne/Monarch-7B_eval_request_False_bfloat16_Original.json
index 49e3539c34704d2e67112a96a6d4fa6723adbb5d..29eace04e545f46d19c183f3056d0bf818016f1a 100644
--- a/mlabonne/Monarch-7B_eval_request_False_bfloat16_Original.json
+++ b/mlabonne/Monarch-7B_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.4840153980116987
     },
     "result_metrics_average": 0.6700665154826869,
-    "result_metrics_npm": 0.513919089898855
+    "result_metrics_npm": 0.513919089898855,
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/monilouise/opt125M_portuguese_eval_request_False_float16_Original.json b/monilouise/opt125M_portuguese_eval_request_False_float16_Original.json
index 23bdd66b1da4841636e2f3af15ae9a581373f901..ea89e4d6cafd9a56166be414d74990b7181e221f 100644
--- a/monilouise/opt125M_portuguese_eval_request_False_float16_Original.json
+++ b/monilouise/opt125M_portuguese_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.18883547008547008
     },
     "result_metrics_average": 0.13406757911393863,
-    "result_metrics_npm": -0.3282848857129527
+    "result_metrics_npm": -0.3282848857129527,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/mosaicml/mpt-30b_eval_request_False_bfloat16_Original.json b/mosaicml/mpt-30b_eval_request_False_bfloat16_Original.json
index 2dbc7e3a744fd7e63557daddcdddc17c090d8399..02f0fdb3d983cef47ba0c7faa000b83cff3297cc 100644
--- a/mosaicml/mpt-30b_eval_request_False_bfloat16_Original.json
+++ b/mosaicml/mpt-30b_eval_request_False_bfloat16_Original.json
@@ -12,5 +12,6 @@
     "model_type": "🟢 : pretrained",
     "source": "script",
     "job_id": 119,
-    "job_start_time": "2024-02-09T05-24-45.198006"
+    "job_start_time": "2024-02-09T05-24-45.198006",
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mosaicml/mpt-7b-8k_eval_request_False_bfloat16_Original.json b/mosaicml/mpt-7b-8k_eval_request_False_bfloat16_Original.json
index 1071356691c09c7c99955f9723a6480cf158ee45..1363840d415c9c793fc790c9077ec65f3f604957 100644
--- a/mosaicml/mpt-7b-8k_eval_request_False_bfloat16_Original.json
+++ b/mosaicml/mpt-7b-8k_eval_request_False_bfloat16_Original.json
@@ -12,5 +12,6 @@
     "model_type": "🟢 : pretrained",
     "source": "script",
     "job_id": 120,
-    "job_start_time": "2024-02-09T06-08-56.199589"
+    "job_start_time": "2024-02-09T06-08-56.199589",
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/mosaicml/mpt-7b_eval_request_False_bfloat16_Original.json b/mosaicml/mpt-7b_eval_request_False_bfloat16_Original.json
index b9e623a2c1c63871277d28366d819e0e48eeeb1b..71633629b2c4b4a8b47bc5cb8d18a0dc944d94ea 100644
--- a/mosaicml/mpt-7b_eval_request_False_bfloat16_Original.json
+++ b/mosaicml/mpt-7b_eval_request_False_bfloat16_Original.json
@@ -12,5 +12,6 @@
     "model_type": "🟢 : pretrained",
     "source": "script",
     "job_id": 118,
-    "job_start_time": "2024-02-09T05-01-56.623731"
+    "job_start_time": "2024-02-09T05-01-56.623731",
+    "main_language": "English"
 }
\ No newline at end of file
diff --git a/nicholasKluge/Aira-2-portuguese-124M_eval_request_False_float16_Original.json b/nicholasKluge/Aira-2-portuguese-124M_eval_request_False_float16_Original.json
index 61570cd1063d2c8eed377efb68d622572011d349..6ab58687132c0df098061f1ba041138e2b6af830 100644
--- a/nicholasKluge/Aira-2-portuguese-124M_eval_request_False_float16_Original.json
+++ b/nicholasKluge/Aira-2-portuguese-124M_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.1520898272343359
     },
     "result_metrics_average": 0.20274277811204158,
-    "result_metrics_npm": -0.22525959354036182
+    "result_metrics_npm": -0.22525959354036182,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicholasKluge/Aira-2-portuguese-1B7_eval_request_False_float16_Original.json b/nicholasKluge/Aira-2-portuguese-1B7_eval_request_False_float16_Original.json
index 0f76345f2f776e82efd57a557f410948d6fb8b04..12abc18af5d68b3c9eb66f44471606dd0eb216c1 100644
--- a/nicholasKluge/Aira-2-portuguese-1B7_eval_request_False_float16_Original.json
+++ b/nicholasKluge/Aira-2-portuguese-1B7_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.0
     },
     "result_metrics_average": 0.038235051006132725,
-    "result_metrics_npm": -0.5029557834046139
+    "result_metrics_npm": -0.5029557834046139,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicholasKluge/Aira-2-portuguese-560M_eval_request_False_float16_Original.json b/nicholasKluge/Aira-2-portuguese-560M_eval_request_False_float16_Original.json
index b992c35064ebc8da0aac9c3824416e8b936dbc85..62200a7700c045293f27c3145f8dd9d590492bf8 100644
--- a/nicholasKluge/Aira-2-portuguese-560M_eval_request_False_float16_Original.json
+++ b/nicholasKluge/Aira-2-portuguese-560M_eval_request_False_float16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.0
     },
     "result_metrics_average": 0.0013282247765006383,
-    "result_metrics_npm": -0.5661626885288303
+    "result_metrics_npm": -0.5661626885288303,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicholasKluge/TeenyTinyLlama-160m_eval_request_False_bfloat16_Original.json b/nicholasKluge/TeenyTinyLlama-160m_eval_request_False_bfloat16_Original.json
index c97d0b916b07c0db3db7d8e8d331d69448932395..af2c860a0d37a529413a239467ac29532a734f70 100644
--- a/nicholasKluge/TeenyTinyLlama-160m_eval_request_False_bfloat16_Original.json
+++ b/nicholasKluge/TeenyTinyLlama-160m_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.11387655922153524
     },
     "result_metrics_average": 0.28201339569415895,
-    "result_metrics_npm": -0.07406339381740137
+    "result_metrics_npm": -0.07406339381740137,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicholasKluge/TeenyTinyLlama-460m-Chat_eval_request_False_bfloat16_Original.json b/nicholasKluge/TeenyTinyLlama-460m-Chat_eval_request_False_bfloat16_Original.json
index cf19baf99ca68cf05a13524d1ddb1e9474110e52..a348e0b339d8ee9705b6c7160ae2119e487c41dd 100644
--- a/nicholasKluge/TeenyTinyLlama-460m-Chat_eval_request_False_bfloat16_Original.json
+++ b/nicholasKluge/TeenyTinyLlama-460m-Chat_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.18133818528465914
     },
     "result_metrics_average": 0.2548790479303147,
-    "result_metrics_npm": -0.13935681499454955
+    "result_metrics_npm": -0.13935681499454955,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicholasKluge/TeenyTinyLlama-460m_eval_request_False_bfloat16_Original.json b/nicholasKluge/TeenyTinyLlama-460m_eval_request_False_bfloat16_Original.json
index 090fdbaa3936e39aa45a367628fcc75df7b3fced..277dbc7fae9109c5ae24501edde344ad6c0ed6dd 100644
--- a/nicholasKluge/TeenyTinyLlama-460m_eval_request_False_bfloat16_Original.json
+++ b/nicholasKluge/TeenyTinyLlama-460m_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.17282362437106502
     },
     "result_metrics_average": 0.2886364424366584,
-    "result_metrics_npm": -0.08332147197226997
+    "result_metrics_npm": -0.08332147197226997,
+    "main_language": "Portuguese"
 }
\ No newline at end of file
diff --git a/nicolasdec/CabraMistral7b-0.4_eval_request_False_float16_Original.json
b/nicolasdec/CabraMistral7b-0.4_eval_request_False_float16_Original.json index 5be2f5d07b2459b6b50a1ca29725b5bec18329fd..48b7c3696688e7a3ed4d5ab0337aabc0803f9e5a 100644 --- a/nicolasdec/CabraMistral7b-0.4_eval_request_False_float16_Original.json +++ b/nicolasdec/CabraMistral7b-0.4_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.40400326114958657 }, "result_metrics_average": 0.42637827784490756, - "result_metrics_npm": 0.1499983870394086 + "result_metrics_npm": 0.1499983870394086, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json b/nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json index c219e98ab1a18d2572ea7ef51fe2f54e7ec9a6e2..dd8a7260ad1eea1b0fbd34a70a32318242cc9ce0 100644 --- a/nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json +++ b/nicolasdec/CabraMistral7b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.647986430647758 }, "result_metrics_average": 0.6510067195144671, - "result_metrics_npm": 0.48247175601183667 + "result_metrics_npm": 0.48247175601183667, + "main_language": "?" } \ No newline at end of file diff --git a/nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json b/nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json index 32c724a56147ec5e1979ae5faa983b4e9ca2b1ff..cbe7b715c09536e77c0899a2e92e6413ea516100 100644 --- a/nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json +++ b/nicolasdec/CabraQwen14b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6264894401912291 }, "result_metrics_average": 0.6865741938302509, - "result_metrics_npm": 0.5166394028213106 + "result_metrics_npm": 0.5166394028213106, + "main_language": "?" } \ No newline at end of file diff --git a/nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json b/nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json index bc026c5d61aac1f1592e86de51fd89a451e41a55..d6ae916e5053fc5539cf7e9d98290846e624332d 100644 --- a/nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json +++ b/nicolasdec/CabraQwen14b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6265764931667666 }, "result_metrics_average": 0.6865208563559055, - "result_metrics_npm": 0.5164348805699208 + "result_metrics_npm": 0.5164348805699208, + "main_language": "?" } \ No newline at end of file diff --git a/nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json b/nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json index d2ad8b6d4e446fd7792fa25f2449b41ec153233c..16ee9b24563af165095df570dc33523f86755b3f 100644 --- a/nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json +++ b/nicolasdec/CabraQwen7b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6595932050508154 }, "result_metrics_average": 0.6699463067847506, - "result_metrics_npm": 0.49839457062901427 + "result_metrics_npm": 0.49839457062901427, + "main_language": "?" 
} \ No newline at end of file diff --git a/nicolasdec/Cabra_eval_request_False_float16_Original.json b/nicolasdec/Cabra_eval_request_False_float16_Original.json index 1176d8a1bf846beeb14c55bd16775f49cb9b96b3..3cd9926af58382fa3a0ed2f481e854129516b25e 100644 --- a/nicolasdec/Cabra_eval_request_False_float16_Original.json +++ b/nicolasdec/Cabra_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5032536119610214 }, "result_metrics_average": 0.504081906784482, - "result_metrics_npm": 0.2857583116785958 + "result_metrics_npm": 0.2857583116785958, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json b/nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json index 4540aec07ed3088cc750633be8e4f4b6c6f41fcc..48bb8f4b9fd9b120fee593425a3e21896afa944e 100644 --- a/nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json +++ b/nicolasdec/Cabramistral7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6505957541772256 }, "result_metrics_average": 0.6520268207392702, - "result_metrics_npm": 0.4839021383288866 + "result_metrics_npm": 0.4839021383288866, + "main_language": "?" } \ No newline at end of file diff --git a/nicolasdec/cabra13b_eval_request_False_float16_Original.json b/nicolasdec/cabra13b_eval_request_False_float16_Original.json index 7e86d671ee0169b8e76e6b5c06aca0991d280a83..6f3f54ada6156c5cf2618b4c9a8c07e580b240aa 100644 --- a/nicolasdec/cabra13b_eval_request_False_float16_Original.json +++ b/nicolasdec/cabra13b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5337029701391601 }, "result_metrics_average": 0.5650071262781954, - "result_metrics_npm": 0.3542760927948254 + "result_metrics_npm": 0.3542760927948254, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/openai-community/gpt2-large_eval_request_False_float16_Original.json b/openai-community/gpt2-large_eval_request_False_float16_Original.json index c54f9e7440a8569875d9289c54a113f9fb02025b..ec72a668a90df762cc00f750d0cdbc7854aedf52 100644 --- a/openai-community/gpt2-large_eval_request_False_float16_Original.json +++ b/openai-community/gpt2-large_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 113, - "job_start_time": "2024-02-09T01-12-14.059849" + "job_start_time": "2024-02-09T01-12-14.059849", + "main_language": "English" } \ No newline at end of file diff --git a/openai-community/gpt2-medium_eval_request_False_float16_Original.json b/openai-community/gpt2-medium_eval_request_False_float16_Original.json index d3bd4ea3c1bf4d8d420aa46aaf038f6a73656121..22ea374ea320bc5f6ffc8b701d168da850867006 100644 --- a/openai-community/gpt2-medium_eval_request_False_float16_Original.json +++ b/openai-community/gpt2-medium_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 112, - "job_start_time": "2024-02-09T00-38-03.057064" + "job_start_time": "2024-02-09T00-38-03.057064", + "main_language": "English" } \ No newline at end of file diff --git a/openai-community/gpt2-xl_eval_request_False_float16_Original.json b/openai-community/gpt2-xl_eval_request_False_float16_Original.json index 7495778387ba7e5359cd02479fe4a7ba5961add2..7694e10f057092acdb969b3d5128942fdb2ff51d 100644 --- a/openai-community/gpt2-xl_eval_request_False_float16_Original.json +++ 
b/openai-community/gpt2-xl_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 114, - "job_start_time": "2024-02-09T01-34-08.460486" + "job_start_time": "2024-02-09T01-34-08.460486", + "main_language": "English" } \ No newline at end of file diff --git a/openai-community/openai-gpt_eval_request_False_float16_Original.json b/openai-community/openai-gpt_eval_request_False_float16_Original.json index ba61f5d7c92a5805b1d10deb4ef145b5fa0d327a..0c4e630482c94d41f7ed5844c9262a0a44d0aed7 100644 --- a/openai-community/openai-gpt_eval_request_False_float16_Original.json +++ b/openai-community/openai-gpt_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 111, - "job_start_time": "2024-02-08T23-49-26.472276" + "job_start_time": "2024-02-08T23-49-26.472276", + "main_language": "English" } \ No newline at end of file diff --git a/openchat/openchat-3.5-0106_eval_request_False_bfloat16_Original.json b/openchat/openchat-3.5-0106_eval_request_False_bfloat16_Original.json index 36390d03f653551d7b6608b308cdd3f1b61c2c47..8c7cac25f3311fbde178db66586e7c9829ceb8a3 100644 --- a/openchat/openchat-3.5-0106_eval_request_False_bfloat16_Original.json +++ b/openchat/openchat-3.5-0106_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5000062386719876 }, "result_metrics_average": 0.6869470364405353, - "result_metrics_npm": 0.5350961910969678 + "result_metrics_npm": 0.5350961910969678, + "main_language": "English" } \ No newline at end of file diff --git a/openlm-research/open_llama_13b_eval_request_False_float16_Original.json b/openlm-research/open_llama_13b_eval_request_False_float16_Original.json index cb56a56cf7e9f4be23d899654a015be7a282d8e6..a38efde028ee2f5c54a9c006375529987ac3fda8 100644 --- a/openlm-research/open_llama_13b_eval_request_False_float16_Original.json +++ b/openlm-research/open_llama_13b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.30527096629716716 }, "result_metrics_average": 0.37011979864137023, - "result_metrics_npm": 0.052933367736282336 + "result_metrics_npm": 0.052933367736282336, + "main_language": "English" } \ No newline at end of file diff --git a/openlm-research/open_llama_3b_eval_request_False_float16_Original.json b/openlm-research/open_llama_3b_eval_request_False_float16_Original.json index 705baede7e1a4964b481a63601feaf9fc2d0479e..47e1ec64918326c52a344696be273dc741467196 100644 --- a/openlm-research/open_llama_3b_eval_request_False_float16_Original.json +++ b/openlm-research/open_llama_3b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.3732164088082346 }, "result_metrics_average": 0.3018688721827909, - "result_metrics_npm": -0.055465157217862306 + "result_metrics_npm": -0.055465157217862306, + "main_language": "English" } \ No newline at end of file diff --git a/openlm-research/open_llama_3b_v2_eval_request_False_float16_Original.json b/openlm-research/open_llama_3b_v2_eval_request_False_float16_Original.json index 18478f432e6ca009bb6f007667db9011a3aa106b..2800197d8940c1cfc53617571b1eba5efcbce52a 100644 --- a/openlm-research/open_llama_3b_v2_eval_request_False_float16_Original.json +++ b/openlm-research/open_llama_3b_v2_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4934462145814824 }, "result_metrics_average": 0.3064320183092692, - "result_metrics_npm": -0.05598303797306345 + "result_metrics_npm": -0.05598303797306345, + "main_language": 
"English" } \ No newline at end of file diff --git a/openlm-research/open_llama_7b_eval_request_False_float16_Original.json b/openlm-research/open_llama_7b_eval_request_False_float16_Original.json index de56aab9e964ee79a520548b98ce8671596e9219..b710c3b25a581366f4d7e87c2e031bce58c5c6f1 100644 --- a/openlm-research/open_llama_7b_eval_request_False_float16_Original.json +++ b/openlm-research/open_llama_7b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.368042700579295 }, "result_metrics_average": 0.3091522175209846, - "result_metrics_npm": -0.05674718342028821 + "result_metrics_npm": -0.05674718342028821, + "main_language": "English" } \ No newline at end of file diff --git a/openlm-research/open_llama_7b_v2_eval_request_False_float16_Original.json b/openlm-research/open_llama_7b_v2_eval_request_False_float16_Original.json index d0c254814999afc1d3da24ef6a2fe9b7308f8b01..1fa6c518644dc5663e5c47916aff3645667aad24 100644 --- a/openlm-research/open_llama_7b_v2_eval_request_False_float16_Original.json +++ b/openlm-research/open_llama_7b_v2_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.28404357144756615 }, "result_metrics_average": 0.3920595750412292, - "result_metrics_npm": 0.12273639741817756 + "result_metrics_npm": 0.12273639741817756, + "main_language": "English" } \ No newline at end of file diff --git a/paulml/OGNO-7B_eval_request_False_bfloat16_Original.json b/paulml/OGNO-7B_eval_request_False_bfloat16_Original.json index ad165e155e446c79633143ed65c86e465288ba0b..4f51c0d7ec6e4f6008a742279b2a085db526a545 100644 --- a/paulml/OGNO-7B_eval_request_False_bfloat16_Original.json +++ b/paulml/OGNO-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.49222624056760605 }, "result_metrics_average": 0.6763168078948447, - "result_metrics_npm": 0.52228027733777 + "result_metrics_npm": 0.52228027733777, + "main_language": "English" } \ No newline at end of file diff --git a/pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json b/pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json index 7995ffbeb0f0274f8be127ad95fe6a820d34111f..722d91e092651a06ae8b0baf4fad989c7c26afd5 100644 --- a/pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json +++ b/pedrogengo/gemma-ptbr_eval_request_False_float16_Adapter.json @@ -14,5 +14,6 @@ "job_id": 353, "job_start_time": "2024-04-02T12-16-21.488325", "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'", - "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'\n" + "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 238, in wait_download_and_run_request\n else:\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n # load the config\n ^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'layer_replication'\n", + "main_language": "?" 
} \ No newline at end of file diff --git a/pfnet/plamo-13b_eval_request_False_float16_Original.json b/pfnet/plamo-13b_eval_request_False_float16_Original.json index b82c7bd4262d560fb0ad63c5cb5e4301a7ee02b3..933e4e2c0dda57033e41c77ae5e8c53f3c9e3ed1 100644 --- a/pfnet/plamo-13b_eval_request_False_float16_Original.json +++ b/pfnet/plamo-13b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 99, - "job_start_time": "2024-02-08T14-27-00.255480" + "job_start_time": "2024-02-08T14-27-00.255480", + "main_language": "English" } \ No newline at end of file diff --git a/pierreguillou/gpt2-small-portuguese_eval_request_False_float16_Original.json b/pierreguillou/gpt2-small-portuguese_eval_request_False_float16_Original.json index 9a3182d10f7828207d04ef08e1b8baba06f8f9d8..d03357ee616d7cef8e2e74f9303e99d4d47c5152 100644 --- a/pierreguillou/gpt2-small-portuguese_eval_request_False_float16_Original.json +++ b/pierreguillou/gpt2-small-portuguese_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.13618368962787014 }, "result_metrics_average": 0.21644745411916888, - "result_metrics_npm": -0.19276206437931806 + "result_metrics_npm": -0.19276206437931806, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/projecte-aina/FLOR-1.3B_eval_request_False_float16_Original.json b/projecte-aina/FLOR-1.3B_eval_request_False_float16_Original.json index c1e21565cdf574acfbc6345e7a343bd88438b700..8bd170cd759dd7e47b5c69a20442735da179ea57 100644 --- a/projecte-aina/FLOR-1.3B_eval_request_False_float16_Original.json +++ b/projecte-aina/FLOR-1.3B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 95, - "job_start_time": "2024-02-08T03-13-08.820840" + "job_start_time": "2024-02-08T03-13-08.820840", + "main_language": "Spanish" } \ No newline at end of file diff --git a/projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json b/projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json index 6ba46983be8d3ca1ae1fd414ecdeebb3ae3ddbe6..6b7aa5c246af91c12633fc45d23660e074ff857f 100644 --- a/projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json +++ b/projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 96, - "job_start_time": "2024-02-08T03-29-01.274461" + "job_start_time": "2024-02-08T03-29-01.274461", + "main_language": "Spanish" } \ No newline at end of file diff --git a/projecte-aina/FLOR-760M_eval_request_False_float16_Original.json b/projecte-aina/FLOR-760M_eval_request_False_float16_Original.json index 9c15be57d6c95dfd27d66cb2dc2b011f8b48b17f..32d843f05c635556864523de08f7cd661bf8f427 100644 --- a/projecte-aina/FLOR-760M_eval_request_False_float16_Original.json +++ b/projecte-aina/FLOR-760M_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 94, - "job_start_time": "2024-02-08T02-51-18.675380" + "job_start_time": "2024-02-08T02-51-18.675380", + "main_language": "Spanish" } \ No newline at end of file diff --git a/projecte-aina/aguila-7b_eval_request_False_float16_Original.json b/projecte-aina/aguila-7b_eval_request_False_float16_Original.json index dc12276a3070bcfc5f3d1a54e07138baa969752d..911fdddf262fb867f411143ca20b5cf65a0ebf9c 100644 --- 
a/projecte-aina/aguila-7b_eval_request_False_float16_Original.json +++ b/projecte-aina/aguila-7b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🆎 : language adapted models (FP, FT, ...)", "source": "script", "job_id": 97, - "job_start_time": "2024-02-08T03-52-57.903345" + "job_start_time": "2024-02-08T03-52-57.903345", + "main_language": "English" } \ No newline at end of file diff --git a/pucpr/gpt2-bio-pt_eval_request_False_float16_Original.json b/pucpr/gpt2-bio-pt_eval_request_False_float16_Original.json index 7de5d425feed470fa4e74743c15a770841d696be..ba866037ff68569bcef3d57f2c07bd33df14caf2 100644 --- a/pucpr/gpt2-bio-pt_eval_request_False_float16_Original.json +++ b/pucpr/gpt2-bio-pt_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.102101506740682 }, "result_metrics_average": 0.15306949074867926, - "result_metrics_npm": -0.31887860497472553 + "result_metrics_npm": -0.31887860497472553, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json b/recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json index 0f690f4a271c5682f2bcdb06d9f52e8a35215bfc..9c7f59a794b8941db677dff3f3f88b1dc5193ee5 100644 --- a/recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json +++ b/recogna-nlp/GemBode-2b-it_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.37466822155101803 }, "result_metrics_average": 0.3607908395997714, - "result_metrics_npm": 0.04411491120774061 + "result_metrics_npm": 0.04411491120774061, + "main_language": "?" } \ No newline at end of file diff --git a/recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json b/recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json index b6b59b8847a4af3cfeab0f591052fc3a03c4c4c0..e4587ef3157ab15f91e8af239d0c312e8ae699ca 100644 --- a/recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json +++ b/recogna-nlp/Phi-Bode_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.46784528577305107 }, "result_metrics_average": 0.4359386272010365, - "result_metrics_npm": 0.16028744061991526 + "result_metrics_npm": 0.16028744061991526, + "main_language": "?" 
} \ No newline at end of file diff --git a/recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json b/recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json index 26dce03de67b5d82b328421465d8d8e88221a43f..9b8bc1d08706d487a42db821eeb62b3b7f81b150 100644 --- a/recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json +++ b/recogna-nlp/bode-13b-alpaca-pt-br_eval_request_False_float16_Adapter.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.47550189311608904 }, "result_metrics_average": 0.5254306861512226, - "result_metrics_npm": 0.3029925635037457 + "result_metrics_npm": 0.3029925635037457, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/recogna-nlp/bode-7b-alpaca-pt-br_eval_request_False_float16_Adapter.json b/recogna-nlp/bode-7b-alpaca-pt-br_eval_request_False_float16_Adapter.json index 53fbf9de2a2a3b91019995fffe606036be4a1a23..bba68ec0ff11d9fdc2673d3de00003bd6effc536 100644 --- a/recogna-nlp/bode-7b-alpaca-pt-br_eval_request_False_float16_Adapter.json +++ b/recogna-nlp/bode-7b-alpaca-pt-br_eval_request_False_float16_Adapter.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4325035290687774 }, "result_metrics_average": 0.5321362999211775, - "result_metrics_npm": 0.3302533414364077 + "result_metrics_npm": 0.3302533414364077, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/rishiraj/CatPPT-base_eval_request_False_float16_Original.json b/rishiraj/CatPPT-base_eval_request_False_float16_Original.json index c53e16ebac490d54dbdbdd3a1b21ad21dbbcaa0d..4299c7617b4799d996da3885188a1a4f5eef5a33 100644 --- a/rishiraj/CatPPT-base_eval_request_False_float16_Original.json +++ b/rishiraj/CatPPT-base_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.46780307205929983 }, "result_metrics_average": 0.6791700571775308, - "result_metrics_npm": 0.5261877414935174 + "result_metrics_npm": 0.5261877414935174, + "main_language": "English" } \ No newline at end of file diff --git a/rishiraj/CatPPT_eval_request_False_bfloat16_Original.json b/rishiraj/CatPPT_eval_request_False_bfloat16_Original.json index 2e76b72079772c453a33084269d028cdd618fc84..708cc33714e465f8abe5768bf438db2041d07992 100644 --- a/rishiraj/CatPPT_eval_request_False_bfloat16_Original.json +++ b/rishiraj/CatPPT_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.46985479631500854 }, "result_metrics_average": 0.6805757326131097, - "result_metrics_npm": 0.5283465491333306 + "result_metrics_npm": 0.5283465491333306, + "main_language": "English" } \ No newline at end of file diff --git a/semantixai/LloroV2_eval_request_False_bfloat16_Original.json b/semantixai/LloroV2_eval_request_False_bfloat16_Original.json index bfdde296a08a9af98ec5e3582bcf2eaeb1c031f5..154bcb43f6871ca7101434b39e3698cc8f3de0f8 100644 --- a/semantixai/LloroV2_eval_request_False_bfloat16_Original.json +++ b/semantixai/LloroV2_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.35208775211155663 }, "result_metrics_average": 0.396846258942384, - "result_metrics_npm": 0.09503536416762777 + "result_metrics_npm": 0.09503536416762777, + "main_language": "?" 
} \ No newline at end of file diff --git a/stabilityai/stablelm-2-1_6b_eval_request_False_bfloat16_Original.json b/stabilityai/stablelm-2-1_6b_eval_request_False_bfloat16_Original.json index a8e94ec8ca37042daca1ec6fc7c2d460dfe8a1b6..83d6e6e515082ed4477489cef2b93ff931d5b7c8 100644 --- a/stabilityai/stablelm-2-1_6b_eval_request_False_bfloat16_Original.json +++ b/stabilityai/stablelm-2-1_6b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.40798634396147043 }, "result_metrics_average": 0.3189093088468887, - "result_metrics_npm": -0.04468615135380814 + "result_metrics_npm": -0.04468615135380814, + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-2-zephyr-1_6b_eval_request_False_float16_Original.json b/stabilityai/stablelm-2-zephyr-1_6b_eval_request_False_float16_Original.json index f6b51df06d9b2755820eb76bb8036ec0ea169ec8..60c99ddcf14f922addadde1131f2d7b2881dc911 100644 --- a/stabilityai/stablelm-2-zephyr-1_6b_eval_request_False_float16_Original.json +++ b/stabilityai/stablelm-2-zephyr-1_6b_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5989683366150594 }, "result_metrics_average": 0.558147540151528, - "result_metrics_npm": 0.34316752576546294 + "result_metrics_npm": 0.34316752576546294, + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-3b-4e1t_eval_request_False_bfloat16_Original.json b/stabilityai/stablelm-3b-4e1t_eval_request_False_bfloat16_Original.json index 41d260f37b014c7a6b0c98c99e3846942f5bb143..fcf51efc641d028615d953f59d5ab777c93cc9ce 100644 --- a/stabilityai/stablelm-3b-4e1t_eval_request_False_bfloat16_Original.json +++ b/stabilityai/stablelm-3b-4e1t_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.39570451674455126 }, "result_metrics_average": 0.3978973283463708, - "result_metrics_npm": 0.10478465402570339 + "result_metrics_npm": 0.10478465402570339, + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-base-alpha-3b-v2_eval_request_False_float16_Original.json b/stabilityai/stablelm-base-alpha-3b-v2_eval_request_False_float16_Original.json index 60f7933c3078f4c4e84d3dc7bdb991ba7ca3d170..315c25a7fe4cad0475b885c39eadf6f0e50ea13f 100644 --- a/stabilityai/stablelm-base-alpha-3b-v2_eval_request_False_float16_Original.json +++ b/stabilityai/stablelm-base-alpha-3b-v2_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 107, - "job_start_time": "2024-02-08T21-44-32.013299" + "job_start_time": "2024-02-08T21-44-32.013299", + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-base-alpha-3b_eval_request_False_float16_Original.json b/stabilityai/stablelm-base-alpha-3b_eval_request_False_float16_Original.json index 1fe6ddc6702657412d4497dc0c8e24b895aaf51f..91cb50e1dc97bec83b1d1f5829c64fdd8cd945af 100644 --- a/stabilityai/stablelm-base-alpha-3b_eval_request_False_float16_Original.json +++ b/stabilityai/stablelm-base-alpha-3b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 109, - "job_start_time": "2024-02-08T23-07-43.590555" + "job_start_time": "2024-02-08T23-07-43.590555", + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-base-alpha-7b-v2_eval_request_False_float16_Original.json b/stabilityai/stablelm-base-alpha-7b-v2_eval_request_False_float16_Original.json index 
ccb25f354d9d1781266b30e6aefd8abbdecb99f1..20940ed962d400d1c8026eea714708b078476c9f 100644 --- a/stabilityai/stablelm-base-alpha-7b-v2_eval_request_False_float16_Original.json +++ b/stabilityai/stablelm-base-alpha-7b-v2_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 108, - "job_start_time": "2024-02-08T22-34-53.379516" + "job_start_time": "2024-02-08T22-34-53.379516", + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-base-alpha-7b_eval_request_False_float16_Original.json b/stabilityai/stablelm-base-alpha-7b_eval_request_False_float16_Original.json index fe272ba833ba8027f0608ea636949883797e3098..8727dc68500137ccc2acc374e523fbe28c2787b7 100644 --- a/stabilityai/stablelm-base-alpha-7b_eval_request_False_float16_Original.json +++ b/stabilityai/stablelm-base-alpha-7b_eval_request_False_float16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 110, - "job_start_time": "2024-02-08T23-40-37.936580" + "job_start_time": "2024-02-08T23-40-37.936580", + "main_language": "English" } \ No newline at end of file diff --git a/stabilityai/stablelm-zephyr-3b_eval_request_False_bfloat16_Original.json b/stabilityai/stablelm-zephyr-3b_eval_request_False_bfloat16_Original.json index fb347a89f5ece3d33464d2c50e44f07131ad2666..289ddca8f65c8b392db4c78a0fc5574e901fd6b8 100644 --- a/stabilityai/stablelm-zephyr-3b_eval_request_False_bfloat16_Original.json +++ b/stabilityai/stablelm-zephyr-3b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4075187482434461 }, "result_metrics_average": 0.5436941655889522, - "result_metrics_npm": 0.3221354141850381 + "result_metrics_npm": 0.3221354141850381, + "main_language": "English" } \ No newline at end of file diff --git a/t5-base_eval_request_False_bfloat16_Original.json b/t5-base_eval_request_False_bfloat16_Original.json index c2e32fa10a2b215234ef58515b23565614322dc1..cbecdf81563b3548c18dfdf1b32bf14414be0ad1 100644 --- a/t5-base_eval_request_False_bfloat16_Original.json +++ b/t5-base_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 189, - "job_start_time": "2024-02-14T03-48-55.037590" + "job_start_time": "2024-02-14T03-48-55.037590", + "main_language": "English" } \ No newline at end of file diff --git a/t5-large_eval_request_False_bfloat16_Original.json b/t5-large_eval_request_False_bfloat16_Original.json index 69abf9f7af5c0567c3b15bd3751cb50f1d013eac..9c75eb227c8d000836c774425fd20a8bb9fe5b39 100644 --- a/t5-large_eval_request_False_bfloat16_Original.json +++ b/t5-large_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 190, - "job_start_time": "2024-02-14T07-17-19.452890" + "job_start_time": "2024-02-14T07-17-19.452890", + "main_language": "English" } \ No newline at end of file diff --git a/t5-small_eval_request_False_bfloat16_Original.json b/t5-small_eval_request_False_bfloat16_Original.json index 552ba25adfd6d4a75976ee1774409a64aad266a2..8aeac78b7a63f4936d25c62f7b76bf8e9fbec1bd 100644 --- a/t5-small_eval_request_False_bfloat16_Original.json +++ b/t5-small_eval_request_False_bfloat16_Original.json @@ -12,5 +12,6 @@ "model_type": "🟢 : pretrained", "source": "script", "job_id": 188, - "job_start_time": "2024-02-14T01-51-06.060926" + "job_start_time": "2024-02-14T01-51-06.060926", + "main_language": "English" } \ No newline at end of file diff --git 
a/teknium/OpenHermes-2-Mistral-7B_eval_request_False_bfloat16_Original.json b/teknium/OpenHermes-2-Mistral-7B_eval_request_False_bfloat16_Original.json index 2ddfa324cb93c794afe1b1974e8f441bffd98012..dc3f8878740083e608e3d04abd44873613457fb3 100644 --- a/teknium/OpenHermes-2-Mistral-7B_eval_request_False_bfloat16_Original.json +++ b/teknium/OpenHermes-2-Mistral-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4725714806310689 }, "result_metrics_average": 0.6376100892236958, - "result_metrics_npm": 0.45837915240917326 + "result_metrics_npm": 0.45837915240917326, + "main_language": "English" } \ No newline at end of file diff --git a/teknium/OpenHermes-2.5-Mistral-7B_eval_request_False_bfloat16_Original.json b/teknium/OpenHermes-2.5-Mistral-7B_eval_request_False_bfloat16_Original.json index e4d818b04cffcc880b60efd197546293d9026135..c53a46221e3a955e919e8d147312f3c568e2dc18 100644 --- a/teknium/OpenHermes-2.5-Mistral-7B_eval_request_False_bfloat16_Original.json +++ b/teknium/OpenHermes-2.5-Mistral-7B_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4384148442756514 }, "result_metrics_average": 0.6484331776234641, - "result_metrics_npm": 0.48035915464260714 + "result_metrics_npm": 0.48035915464260714, + "main_language": "English" } \ No newline at end of file diff --git a/tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json b/tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json index a15db56c46880ec7c32d55b84fce13c61b539d66..11891f1dca3c2d60dddddbb635583efde17cc873 100644 --- a/tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json +++ b/tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6988724003524762 }, "result_metrics_average": 0.6026797554099517, - "result_metrics_npm": 0.4023441073120957 + "result_metrics_npm": 0.4023441073120957, + "main_language": "English" } \ No newline at end of file diff --git a/tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json b/tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json index f27d82211c5dd2ae49b8bd546be04293418c9321..fea090fa477ccc1728f73e88b86fe21b9b5512a8 100644 --- a/tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json +++ b/tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.4786772413746598 }, "result_metrics_average": 0.36863370236538595, - "result_metrics_npm": 0.06258366897091738 + "result_metrics_npm": 0.06258366897091738, + "main_language": "English" } \ No newline at end of file diff --git a/togethercomputer/RedPajama-INCITE-7B-Base_eval_request_False_float16_Original.json b/togethercomputer/RedPajama-INCITE-7B-Base_eval_request_False_float16_Original.json index fc83f1fd268e828c5cd52a26dd7811805dab37d4..0d339aeae4e530952ed2cf1fa05b51053d007213 100644 --- a/togethercomputer/RedPajama-INCITE-7B-Base_eval_request_False_float16_Original.json +++ b/togethercomputer/RedPajama-INCITE-7B-Base_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.26314583105132494 }, "result_metrics_average": 0.32974235243255756, - "result_metrics_npm": -0.00048136815513133424 + "result_metrics_npm": -0.00048136815513133424, + "main_language": "English" } \ No newline at end of file diff --git a/togethercomputer/RedPajama-INCITE-Base-3B-v1_eval_request_False_float16_Original.json b/togethercomputer/RedPajama-INCITE-Base-3B-v1_eval_request_False_float16_Original.json index 147f43224d2f5f7caa1429670f622bfd1440f6c1..cad2aa2531770c5c6b18869a51f12d965650f562 100644 --- 
a/togethercomputer/RedPajama-INCITE-Base-3B-v1_eval_request_False_float16_Original.json +++ b/togethercomputer/RedPajama-INCITE-Base-3B-v1_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.2072905953605302 }, "result_metrics_average": 0.29701484183881266, - "result_metrics_npm": -0.05920303709483327 + "result_metrics_npm": -0.05920303709483327, + "main_language": "English" } \ No newline at end of file diff --git a/unicamp-dl/ptt5-base-portuguese-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-base-portuguese-vocab_eval_request_False_float16_Original.json index 1bee66a0896b070036ffaa7cb8d296a6f54fc855..99833c3a18e2a956f963c5d26a403b5cde9a05f3 100644 --- a/unicamp-dl/ptt5-base-portuguese-vocab_eval_request_False_float16_Original.json +++ b/unicamp-dl/ptt5-base-portuguese-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-base-portuguese-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:46Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-base-portuguese-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:46Z", + "model_type": "🆎 : language adapted models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/unicamp-dl/ptt5-base-t5-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-base-t5-vocab_eval_request_False_float16_Original.json index a70bc8f04c97058affcc45aa925f1d19a3c8b094..a2adc029f0fe15200bb2cb7856b91054a57e64d6 100644 --- a/unicamp-dl/ptt5-base-t5-vocab_eval_request_False_float16_Original.json +++ b/unicamp-dl/ptt5-base-t5-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-base-t5-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:06Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-base-t5-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:06Z", + "model_type": "🆎 : language adapted models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/unicamp-dl/ptt5-large-portuguese-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-large-portuguese-vocab_eval_request_False_float16_Original.json index 1d339f99c7b60b63a5da57c2c6ef026b1524a641..853f3fd885ca3d4c5f3120bb2d02bd426099f445 100644 --- a/unicamp-dl/ptt5-large-portuguese-vocab_eval_request_False_float16_Original.json +++ 
b/unicamp-dl/ptt5-large-portuguese-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-large-portuguese-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:51Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-large-portuguese-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:51Z", + "model_type": "🆎 : language adapted models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/unicamp-dl/ptt5-large-t5-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-large-t5-vocab_eval_request_False_float16_Original.json index 623489e18dfa9594c278880bca96b382e67648f2..834109b0a9ddaf008c385728a1da19d0090fa5c5 100644 --- a/unicamp-dl/ptt5-large-t5-vocab_eval_request_False_float16_Original.json +++ b/unicamp-dl/ptt5-large-t5-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-large-t5-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:36:10Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-large-t5-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:36:10Z", + "model_type": "🆎 : language adapted models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/unicamp-dl/ptt5-small-portuguese-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-small-portuguese-vocab_eval_request_False_float16_Original.json index 95a2b825a98f6796e25c30a173462d078cb80d98..d21e890abc2ddb564b172623380346f264861a1b 100644 --- a/unicamp-dl/ptt5-small-portuguese-vocab_eval_request_False_float16_Original.json +++ b/unicamp-dl/ptt5-small-portuguese-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-small-portuguese-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:41Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-small-portuguese-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:41Z", + "model_type": "🆎 : language adapted 
models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/unicamp-dl/ptt5-small-t5-vocab_eval_request_False_float16_Original.json b/unicamp-dl/ptt5-small-t5-vocab_eval_request_False_float16_Original.json index becc61cc68a45fa3540179a9660dafc83beec85b..011ab2d08332a5308c3cc96f338dcf579b79686a 100644 --- a/unicamp-dl/ptt5-small-t5-vocab_eval_request_False_float16_Original.json +++ b/unicamp-dl/ptt5-small-t5-vocab_eval_request_False_float16_Original.json @@ -1 +1,17 @@ -{"model": "unicamp-dl/ptt5-small-t5-vocab", "base_model": "", "revision": "main", "private": false, "precision": "float16", "params": 0, "architectures": "T5WithLMHeadModel", "weight_type": "Original", "status": "PENDING", "submitted_time": "2024-02-11T13:35:57Z", "model_type": "\ud83c\udd8e : language adapted models (FP, FT, ...)", "source": "script", "job_id": -1, "job_start_time": null} \ No newline at end of file +{ + "model": "unicamp-dl/ptt5-small-t5-vocab", + "base_model": "", + "revision": "main", + "private": false, + "precision": "float16", + "params": 0, + "architectures": "T5WithLMHeadModel", + "weight_type": "Original", + "status": "PENDING", + "submitted_time": "2024-02-11T13:35:57Z", + "model_type": "🆎 : language adapted models (FP, FT, ...)", + "source": "script", + "job_id": -1, + "job_start_time": null, + "main_language": "Portuguese" +} \ No newline at end of file diff --git a/upstage/SOLAR-10.7B-Instruct-v1.0_eval_request_False_float16_Original.json b/upstage/SOLAR-10.7B-Instruct-v1.0_eval_request_False_float16_Original.json index 370223365d8b17055df300b752564ca6fb2cb1e8..41c18dc8cd9969c884391eac4078042c11df7694 100644 --- a/upstage/SOLAR-10.7B-Instruct-v1.0_eval_request_False_float16_Original.json +++ b/upstage/SOLAR-10.7B-Instruct-v1.0_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.5322791024834643 }, "result_metrics_average": 0.6946516532182557, - "result_metrics_npm": 0.5427884839238583 + "result_metrics_npm": 0.5427884839238583, + "main_language": "English" } \ No newline at end of file diff --git a/upstage/SOLAR-10.7B-v1.0_eval_request_False_float16_Original.json b/upstage/SOLAR-10.7B-v1.0_eval_request_False_float16_Original.json index 3498e20849126b3155cdc7e486f7024fd5374cf6..1781a362ba9f7c7d8573f443cae556c9fce6d728 100644 --- a/upstage/SOLAR-10.7B-v1.0_eval_request_False_float16_Original.json +++ b/upstage/SOLAR-10.7B-v1.0_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.6312543629368131 }, "result_metrics_average": 0.6886836571901778, - "result_metrics_npm": 0.5285867458138527 + "result_metrics_npm": 0.5285867458138527, + "main_language": "English" } \ No newline at end of file diff --git a/wandgibaut/periquito-3B_eval_request_False_float16_Original.json b/wandgibaut/periquito-3B_eval_request_False_float16_Original.json index 36d7cc5e6fba3a7fba4b1e519ea5afd295ea38ba..76b457019a1722354ebbf8f46b56f23d27cdd8d5 100644 --- a/wandgibaut/periquito-3B_eval_request_False_float16_Original.json +++ b/wandgibaut/periquito-3B_eval_request_False_float16_Original.json @@ -26,5 +26,6 @@ "tweetsentbr": 0.47963247012405114 }, "result_metrics_average": 0.3303614816761663, - "result_metrics_npm": -0.005341553963556416 + "result_metrics_npm": -0.005341553963556416, + "main_language": "Portuguese" } \ No newline at end of file diff --git a/xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json 
index e123c58f0df950ed29df1e91965a063a671e621e..89883350981424f77bc62ef5420f0e37fcc753dd 100644
--- a/xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json
+++ b/xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json
@@ -14,5 +14,6 @@
     "job_id": 282,
     "job_start_time": "2024-02-28T16-28-22.463190",
     "error_msg": "use_cache is not supported",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1549, in generate\n result = self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2420, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 715, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 603, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 311, in forward\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 249, in forward\n assert not use_cache, \"use_cache is not supported\"\nAssertionError: use_cache is not supported\n"
+    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 207, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1513, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1058, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1549, in generate\n result = self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2420, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 715, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 603, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 311, in forward\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n ^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/xverse/XVERSE-13B-256K/c7606667b6b17ed0a0f6d6a7f6f3037a42c13560/modeling_xverse.py\", line 249, in forward\n assert not use_cache, \"use_cache is not supported\"\nAssertionError: use_cache is not supported\n",
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json b/xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json
index fd3fa0d34d637b753106ea37c4b8f69d364343c9..5aa0e8e7de2cb52d7f594350a5e3177cc874e913 100644
--- a/xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json
+++ b/xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.48636217182153496
     },
     "result_metrics_average": 0.5259372195179259,
-    "result_metrics_npm": 0.28907130498283157
+    "result_metrics_npm": 0.28907130498283157,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json b/xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json
index 151bd506ea192856656436552580ffe770bfed83..9eca50df5d1f64a674e518fd1474286c05d51088 100644
--- a/xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json
+++ b/xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.46961323018027046
     },
     "result_metrics_average": 0.4436810684905202,
-    "result_metrics_npm": 0.11412657877044352
+    "result_metrics_npm": 0.11412657877044352,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json b/xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json
index 64713f16af48fab1d728d12b2eca302b3e2d020a..24b7d14b99b73ffc42c64a1735388210225fed08 100644
--- a/xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json
+++ b/xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.46996729873073895
     },
     "result_metrics_average": 0.537099653924716,
-    "result_metrics_npm": 0.30395711342849147
+    "result_metrics_npm": 0.30395711342849147,
+    "main_language": "?"
 }
\ No newline at end of file
diff --git a/xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json b/xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json
index bcecce4e68ed606489b4af3ab452ccd4ea99d418..deb1c260fac9a1ab07c1c3e8a4335ec92e18e32b 100644
--- a/xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json
+++ b/xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json
@@ -26,5 +26,6 @@
         "tweetsentbr": 0.618149563013502
     },
     "result_metrics_average": 0.47691685662162,
-    "result_metrics_npm": 0.22302493012653693
+    "result_metrics_npm": 0.22302493012653693,
+    "main_language": "?"
 }
\ No newline at end of file
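Taken together, these hunks apply one mechanical change across the request files: every eval request JSON gains a "main_language" field ("Portuguese", "English", or "?" where the language has not been classified), and files previously stored as compact single-line JSON are pretty-printed at the same time. Below is a minimal Python sketch of such a migration; it is not the leaderboard bot's actual code, and the LANGUAGE_BY_MODEL table and directory layout are illustrative assumptions.

import json
from pathlib import Path

# Hypothetical lookup table; in practice the values would come from model
# metadata or manual curation. "?" marks models whose main language is unknown.
LANGUAGE_BY_MODEL = {
    "unicamp-dl/ptt5-small-t5-vocab": "Portuguese",
    "wandgibaut/periquito-3B": "Portuguese",
    "upstage/SOLAR-10.7B-v1.0": "English",
}

def add_main_language(requests_dir: str) -> None:
    """Append a main_language key to every eval request JSON, in place."""
    for path in Path(requests_dir).glob("*/*_eval_request_*.json"):
        data = json.loads(path.read_text(encoding="utf-8"))
        data["main_language"] = LANGUAGE_BY_MODEL.get(data["model"], "?")
        # indent=4 and ensure_ascii=False match the pretty-printed style of
        # the updated files (literal emoji, no trailing newline written).
        path.write_text(json.dumps(data, indent=4, ensure_ascii=False),
                        encoding="utf-8")

if __name__ == "__main__":
    add_main_language(".")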