eduagarcia committed
Commit 1060764 • 1 Parent(s): 0dffd0a

Retry 49 FAILED models

Files changed (49)
  1. 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json +2 -4
  2. BAAI/Aquila-7B_eval_request_False_float16_Original.json +2 -4
  3. BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +2 -4
  4. BAAI/Aquila2-7B_eval_request_False_float16_Original.json +2 -4
  5. Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json +2 -4
  6. EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +2 -4
  7. EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +2 -4
  8. EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +2 -4
  9. EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +2 -4
  10. EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +2 -4
  11. EleutherAI/pythia-14m_eval_request_False_float16_Original.json +2 -4
  12. EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +2 -4
  13. EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +2 -4
  14. EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +2 -4
  15. EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +2 -4
  16. EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +2 -4
  17. EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +2 -4
  18. NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json +2 -4
  19. OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json +2 -4
  20. Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json +2 -4
  21. Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json +2 -4
  22. Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json +2 -4
  23. THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json +2 -4
  24. THUDM/glm-10b_eval_request_False_float16_Original.json +2 -4
  25. THUDM/glm-2b_eval_request_False_float16_Original.json +2 -4
  26. baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json +2 -4
  27. baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json +2 -4
  28. baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json +2 -4
  29. deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json +2 -4
  30. deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json +2 -4
  31. deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json +2 -4
  32. facebook/opt-66b_eval_request_False_float16_Original.json +2 -4
  33. google/umt5-base_eval_request_False_bfloat16_Original.json +2 -4
  34. google/umt5-small_eval_request_False_bfloat16_Original.json +2 -4
  35. gpt2_eval_request_False_float16_Original.json +2 -4
  36. huggyllama/llama-30b_eval_request_False_float16_Original.json +2 -4
  37. huggyllama/llama-65b_eval_request_False_float16_Original.json +2 -4
  38. matsuo-lab/weblab-10b_eval_request_False_float16_Original.json +2 -4
  39. meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json +2 -4
  40. mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json +2 -4
  41. t5-base_eval_request_False_bfloat16_Original.json +2 -4
  42. t5-large_eval_request_False_bfloat16_Original.json +2 -4
  43. t5-small_eval_request_False_bfloat16_Original.json +2 -4
  44. tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json +2 -4
  45. xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json +2 -4
  46. xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json +2 -4
  47. xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json +2 -4
  48. xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json +2 -4
  49. xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json +2 -4
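
Every file in this commit receives the same mechanical change: "status" is flipped from "FAILED" back to "RERUN" so the evaluation bot re-queues the model, and the stored "error_msg"/"traceback" fields from the failed run are dropped. Below is a minimal Python sketch of such a reset pass; the function name, directory layout, and glob pattern are illustrative assumptions, not the repository's actual tooling.

import glob
import json

def reset_failed_requests(requests_dir: str) -> int:
    """Flip FAILED eval requests back to RERUN and drop their recorded errors."""
    reset = 0
    # Request files follow the "<model>_eval_request_*.json" naming seen in this commit.
    for path in glob.glob(f"{requests_dir}/**/*_eval_request_*.json", recursive=True):
        with open(path, encoding="utf-8") as fp:
            request = json.load(fp)
        if request.get("status") != "FAILED":
            continue
        request["status"] = "RERUN"        # re-queue the model for evaluation
        request.pop("error_msg", None)     # discard the old failure message
        request.pop("traceback", None)     # discard the old stack trace
        with open(path, "w", encoding="utf-8") as fp:
            json.dump(request, fp, ensure_ascii=False, indent=1)
            fp.write("\n")
        reset += 1
    return reset

if __name__ == "__main__":
    print(reset_failed_requests("."), "requests reset to RERUN")
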
22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:03:11Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
  "job_id": 125,
- "job_start_time": "2024-02-09T18-04-49.511299",
- "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 633, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n return cls.from_peft_type(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\n return config_cls(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
+ "job_start_time": "2024-02-09T18-04-49.511299"
  }
BAAI/Aquila-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:00Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 145,
- "job_start_time": "2024-02-09T21-47-14.417940",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-47-14.417940"
  }
BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 34.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:10:17Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 154,
- "job_start_time": "2024-02-09T21-48-27.776519",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-48-27.776519"
  }
BAAI/Aquila2-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:09:07Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 146,
- "job_start_time": "2024-02-09T21-47-14.611039",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-47-14.611039"
  }
Deci/DeciLM-6b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 5.717,
  "architectures": "DeciLMForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:06:24Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
  "job_id": 132,
- "job_start_time": "2024-02-09T20-53-34.339418",
- "error_msg": "'DeciLMModel' object has no attribute '_use_flash_attention_2'",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1181, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1027, in forward\n if 
self._use_flash_attention_2:\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1695, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'DeciLMModel' object has no attribute '_use_flash_attention_2'\n"
+ "job_start_time": "2024-02-09T20-53-34.339418"
  }
EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 1.366,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:12:06Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.15,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:59Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 2.718,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:12:14Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 13.061,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:15:01Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 173,
- "job_start_time": "2024-02-09T22-06-50.537954",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T22-06-50.537954"
  }
EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 12.0,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:53Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 167,
- "job_start_time": "2024-02-09T21-56-45.849878",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-56-45.849878"
  }
EleutherAI/pythia-14m_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.039,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:12Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.213,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:23Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 1.079,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:36Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 2.909,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:43Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.506,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:30Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 6.9,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:48Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 166,
- "job_start_time": "2024-02-09T21-56-44.273907",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-56-44.273907"
  }
EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 0.096,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:17Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": -1,
- "job_start_time": null,
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": null
  }
NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 21.828,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:11:04Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 159,
- "job_start_time": "2024-02-09T21-49-34.849675",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-49-34.849675"
  }
OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.0,
  "architectures": "OrionForCausalLM",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 142,
- "job_start_time": "2024-02-09T21-46-08.731681",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-02-09T21-46-08.731681"
  }
Qwen/Qwen-14B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
  "params": 14.167,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-02-05T23:07:31Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
  "job_id": 134,
- "job_start_time": "2024-02-09T21-41-03.819623",
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1420, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 809, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 140, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 804, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1046, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 1043, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 891, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 610, in forward\n attn_outputs = self.attn(\n ^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 433, in forward\n key = apply_rotary_pos_emb(key, k_pos_emb)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/.cache/huggingface/modules/transformers_modules/Qwen/Qwen-14B/c4051215126d906ac22bb67fe5edb39a921cd831/modeling_qwen.py\", line 1342, in apply_rotary_pos_emb\n return apply_rotary_emb_func(t_float, cos, sin).type_as(t)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 14.167,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:07:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 134,
15
+ "job_start_time": "2024-02-09T21-41-03.819623"
 
 
16
  }
Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 72.288,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:47Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 150,
15
- "job_start_time": "2024-02-09T21-48-18.959593",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 72.288,
8
  "architectures": "QWenLMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:47Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 150,
15
+ "job_start_time": "2024-02-09T21-48-18.959593"
 
 
16
  }
Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "SkyworkForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:06Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 138,
15
- "job_start_time": "2024-02-09T21-45-02.961546",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "SkyworkForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:06Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 138,
15
+ "job_start_time": "2024-02-09T21-45-02.961546"
 
 
16
  }
THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 6.0,
8
  "architectures": "ChatGLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:16Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 147,
15
- "job_start_time": "2024-02-09T21-47-17.309598",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 6.0,
8
  "architectures": "ChatGLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:16Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 147,
15
+ "job_start_time": "2024-02-09T21-47-17.309598"
 
 
16
  }
THUDM/glm-10b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 10.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
- "job_start_time": null,
16
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, DeciLMConfig, QWenConfig, QWenConfig.",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-10b.696788d4f82ac96b90823555f547d1e754839ff4.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, DeciLMConfig, QWenConfig, QWenConfig.\n"
18
  }
 
7
  "params": 10.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
+ "job_start_time": null
 
 
16
  }
THUDM/glm-2b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 2.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
- "job_start_time": null,
16
- "error_msg": "Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, DeciLMConfig, QWenConfig, QWenConfig.",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 186, in wait_download_and_run_request\n raise Exception(f\"Failed to download and/or use the AutoModel class, trust_remote_code={TRUST_REMOTE_CODE} - Original Exception: {exception_msg}\")\nException: Failed to download and/or use the AutoModel class, trust_remote_code=True - Original Exception: Unrecognized configuration class <class 'transformers_modules.THUDM.glm-2b.774fda883d7ad028b8effc3c65afec510fce9634.configuration_glm.GLMConfig'> for this kind of AutoModel: AutoModelForCausalLM.\nModel type should be one of BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CpmAntConfig, CTRLConfig, Data2VecTextConfig, ElectraConfig, ErnieConfig, FalconConfig, FuyuConfig, GitConfig, GPT2Config, GPT2Config, GPTBigCodeConfig, GPTNeoConfig, GPTNeoXConfig, GPTNeoXJapaneseConfig, GPTJConfig, LlamaConfig, MarianConfig, MBartConfig, MegaConfig, MegatronBertConfig, MistralConfig, MixtralConfig, MptConfig, MusicgenConfig, MvpConfig, OpenLlamaConfig, OpenAIGPTConfig, OPTConfig, PegasusConfig, PersimmonConfig, PhiConfig, PLBartConfig, ProphetNetConfig, QDQBertConfig, ReformerConfig, RemBertConfig, RobertaConfig, RobertaPreLayerNormConfig, RoCBertConfig, RoFormerConfig, RwkvConfig, Speech2Text2Config, TransfoXLConfig, TrOCRConfig, WhisperConfig, XGLMConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLMRobertaXLConfig, XLNetConfig, XmodConfig, FalconConfig, DeciLMConfig, QWenConfig, QWenConfig.\n"
18
  }
 
7
  "params": 2.0,
8
  "architectures": "GLMModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:22Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": -1,
15
+ "job_start_time": null
 
 
16
  }
baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "BaiChuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:13Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 139,
15
- "job_start_time": "2024-02-09T21-45-04.133655",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "BaiChuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:13Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 139,
15
+ "job_start_time": "2024-02-09T21-45-04.133655"
 
 
16
  }
baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:33Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 141,
15
- "job_start_time": "2024-02-09T21-46-06.054028",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:33Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 141,
15
+ "job_start_time": "2024-02-09T21-46-06.054028"
 
 
16
  }
baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 140,
15
- "job_start_time": "2024-02-09T21-46-04.308969",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "BaichuanForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 140,
15
+ "job_start_time": "2024-02-09T21-46-04.308969"
 
 
16
  }
deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 67.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 153,
15
- "job_start_time": "2024-02-09T21-48-26.409232",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 67.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 153,
15
+ "job_start_time": "2024-02-09T21-48-26.409232"
 
 
16
  }
deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:46Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 143,
15
- "job_start_time": "2024-02-09T21-46-12.747281",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:46Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 143,
15
+ "job_start_time": "2024-02-09T21-46-12.747281"
 
 
16
  }
deepseek-ai/deepseek-moe-16b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 16.376,
8
  "architectures": "DeepseekForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:08:52Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 144,
15
- "job_start_time": "2024-02-09T21-46-12.976525",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 16.376,
8
  "architectures": "DeepseekForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:08:52Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 144,
15
+ "job_start_time": "2024-02-09T21-46-12.976525"
 
 
16
  }
facebook/opt-66b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 66.0,
8
  "architectures": "OPTForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:13:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 171,
15
- "job_start_time": "2024-02-09T22-06-48.916395",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 66.0,
8
  "architectures": "OPTForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:13:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 171,
15
+ "job_start_time": "2024-02-09T22-06-48.916395"
 
 
16
  }
google/umt5-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:18:55Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 176,
15
- "job_start_time": "2024-02-09T22-07-56.533776",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:18:55Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 176,
15
+ "job_start_time": "2024-02-09T22-07-56.533776"
 
 
16
  }
google/umt5-small_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:18:45Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 175,
15
- "job_start_time": "2024-02-09T22-07-53.654418",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0,
8
  "architectures": "UMT5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:18:45Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 175,
15
+ "job_start_time": "2024-02-09T22-07-53.654418"
 
 
16
  }
gpt2_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0.137,
8
  "architectures": "GPT2LMHeadModel",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 155,
15
- "job_start_time": "2024-02-09T21-49-28.540225",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0.137,
8
  "architectures": "GPT2LMHeadModel",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:25Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 155,
15
+ "job_start_time": "2024-02-09T21-49-28.540225"
 
 
16
  }
huggyllama/llama-30b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 32.529,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:30Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 129,
15
- "job_start_time": "2024-02-09T20-43-22.140086",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1512, in generate_until\n cont = self._model_generate(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1057, in _model_generate\n return self.model.generate(\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 1718, in generate\n return self.greedy_search(\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/generation/utils.py\", line 2579, in greedy_search\n outputs = self(\n ^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1181, in forward\n outputs = self.model(\n ^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 1068, in forward\n layer_outputs = decoder_layer(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return 
self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 793, in forward\n hidden_states = self.input_layernorm(hidden_states)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1518, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1527, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/llama/modeling_llama.py\", line 117, in forward\n return self.weight * hidden_states.to(input_dtype)\n ~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 32.529,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:30Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 129,
15
+ "job_start_time": "2024-02-09T20-43-22.140086"
 
 
16
  }
huggyllama/llama-65b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 65.286,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 131,
15
- "job_start_time": "2024-02-09T20-52-31.935366",
16
- "error_msg": "CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 42.19 MiB is free. Process 2566761 has 79.30 GiB memory in use. Of the allocated memory 78.78 GiB is allocated by PyTorch, and 28.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 128.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 42.19 MiB is free. Process 2566761 has 79.30 GiB memory in use. Of the allocated memory 78.78 GiB is allocated by PyTorch, and 28.25 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 65.286,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 131,
15
+ "job_start_time": "2024-02-09T20-52-31.935366"
 
 
16
  }
matsuo-lab/weblab-10b_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 10.0,
8
  "architectures": "GPTNeoXForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:15:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 174,
15
- "job_start_time": "2024-02-09T22-06-52.058466",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 10.0,
8
  "architectures": "GPTNeoXForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:15:09Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 174,
15
+ "job_start_time": "2024-02-09T22-06-52.058466"
 
 
16
  }
meta-llama/Llama-2-70b-hf_eval_request_False_float16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 68.977,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:49Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 130,
15
- "job_start_time": "2024-02-09T20-51-33.495697",
16
- "error_msg": "CUDA out of memory. Tried to allocate 448.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 258.19 MiB is free. Process 2566761 has 79.09 GiB memory in use. Of the allocated memory 78.59 GiB is allocated by PyTorch, and 4.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 448.00 MiB. GPU 0 has a total capacty of 79.35 GiB of which 258.19 MiB is free. Process 2566761 has 79.09 GiB memory in use. Of the allocated memory 78.59 GiB is allocated by PyTorch, and 4.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 68.977,
8
  "architectures": "LlamaForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:49Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 130,
15
+ "job_start_time": "2024-02-09T20-51-33.495697"
 
 
16
  }
mistralai/Mixtral-8x7B-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 46.703,
8
  "architectures": "MixtralForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:05:20Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 128,
15
- "job_start_time": "2024-02-09T20-42-13.414191",
16
- "error_msg": "CUDA out of memory. Tried to allocate 112.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 70.19 MiB is free. Process 2566761 has 79.28 GiB memory in use. Of the allocated memory 78.64 GiB is allocated by PyTorch, and 129.61 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 566, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3706, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 4116, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 778, in _load_state_dict_into_meta_model\n set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 347, in set_module_tensor_to_device\n new_value = value.to(device)\n ^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU 1 has a total capacty of 79.35 GiB of which 70.19 MiB is free. Process 2566761 has 79.28 GiB memory in use. Of the allocated memory 78.64 GiB is allocated by PyTorch, and 129.61 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n"
18
  }
 
7
  "params": 46.703,
8
  "architectures": "MixtralForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:05:20Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 128,
15
+ "job_start_time": "2024-02-09T20-42-13.414191"
 
 
16
  }
t5-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0.223,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:42Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 157,
15
- "job_start_time": "2024-02-09T21-49-33.356524",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0.223,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:42Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 157,
15
+ "job_start_time": "2024-02-09T21-49-33.356524"
 
 
16
  }
t5-large_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0.738,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:49Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 158,
15
- "job_start_time": "2024-02-09T21-49-34.849100",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0.738,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:49Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 158,
15
+ "job_start_time": "2024-02-09T21-49-34.849100"
 
 
16
  }
t5-small_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 0.061,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:36Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 156,
15
- "job_start_time": "2024-02-09T21-49-33.010820",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 0.061,
8
  "architectures": "T5ForConditionalGeneration",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:36Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 156,
15
+ "job_start_time": "2024-02-09T21-49-33.010820"
 
 
16
  }
tiiuae/falcon-40b_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 40.0,
8
  "architectures": "FalconForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:13:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 172,
15
- "job_start_time": "2024-02-09T22-06-50.537542",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 40.0,
8
  "architectures": "FalconForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:13:31Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 172,
15
+ "job_start_time": "2024-02-09T22-06-50.537542"
 
 
16
  }
xverse/XVERSE-13B-256K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:07:58Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 137,
15
- "job_start_time": "2024-02-09T21-45-02.654525",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:07:58Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 137,
15
+ "job_start_time": "2024-02-09T21-45-02.654525"
 
 
16
  }
xverse/XVERSE-13B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 13.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:07:51Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 136,
15
- "job_start_time": "2024-02-09T21-44-59.968938",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 13.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:07:51Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 136,
15
+ "job_start_time": "2024-02-09T21-44-59.968938"
 
 
16
  }
xverse/XVERSE-65B-2_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 65.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:10:02Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 152,
15
- "job_start_time": "2024-02-09T21-48-24.181053",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 65.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:10:02Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 152,
15
+ "job_start_time": "2024-02-09T21-48-24.181053"
 
 
16
  }
xverse/XVERSE-65B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 65.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:09:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 151,
15
- "job_start_time": "2024-02-09T21-48-20.197494",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 65.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:09:56Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 151,
15
+ "job_start_time": "2024-02-09T21-48-20.197494"
 
 
16
  }
xverse/XVERSE-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,12 +7,10 @@
7
  "params": 7.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
- "status": "FAILED",
11
  "submitted_time": "2024-02-05T23:07:39Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 135,
15
- "job_start_time": "2024-02-09T21-44-59.597022",
16
- "error_msg": "CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
17
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 187, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 81, in simple_evaluate\n torch.manual_seed(\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/random.py\", line 40, in manual_seed\n torch.cuda.manual_seed_all(seed)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 124, in manual_seed_all\n _lazy_call(cb, seed_all=True)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 229, in _lazy_call\n callable()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/cuda/random.py\", line 122, in cb\n default_generator.manual_seed(seed)\nRuntimeError: CUDA error: an illegal memory access was encountered\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
18
  }
 
7
  "params": 7.0,
8
  "architectures": "XverseForCausalLM",
9
  "weight_type": "Original",
10
+ "status": "RERUN",
11
  "submitted_time": "2024-02-05T23:07:39Z",
12
  "model_type": "🟒 : pretrained",
13
  "source": "script",
14
  "job_id": 135,
15
+ "job_start_time": "2024-02-09T21-44-59.597022"
 
 
16
  }
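
Every retried request file in this commit follows the same pattern: the stale `"status": "FAILED"` line is replaced with `"status": "RERUN"`, the `error_msg` and `traceback` fields from the previous run are dropped, and the remaining metadata (`job_id`, `job_start_time`, `submitted_time`, etc.) is kept unchanged. The snippet below is a minimal illustrative sketch of that reset under assumed conventions; it is not the actual leaderboard bot script, and the directory layout and glob pattern are guesses based only on the file names shown in the diffs.

```python
import json
from pathlib import Path


def reset_failed_request(path: Path) -> None:
    """Reset a FAILED eval request JSON so the eval bot retries it.

    Mirrors the pattern visible in the diffs above (assumed, not the
    actual leaderboard script): status FAILED -> RERUN, error fields dropped.
    """
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") != "FAILED":
        return  # only touch requests that actually failed
    data["status"] = "RERUN"
    data.pop("error_msg", None)
    data.pop("traceback", None)
    path.write_text(
        json.dumps(data, indent=2, ensure_ascii=False) + "\n", encoding="utf-8"
    )


# Hypothetical usage: reset every failed request file in the requests repo.
for request_file in Path(".").glob("**/*_eval_request_*.json"):
    reset_failed_request(request_file)
```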