eduagarcia committed
Commit ccd59f2 • 1 Parent(s): e517690

Retry 4 FAILED models

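For context, the four state-spaces/mamba-* diffs below all apply the same edit: "status" flips from "FAILED" to "RERUN" and the stale "error_msg"/"traceback" fields are dropped, while "job_id" and "job_start_time" are kept. A minimal sketch of that edit in Python (hypothetical helper, not part of this repo; it assumes the eval bot re-queues any request whose status is "RERUN"):

import json
from pathlib import Path

def mark_for_rerun(request_file: str) -> None:
    # Flip a FAILED eval request back to RERUN and drop the stale failure details.
    path = Path(request_file)
    request = json.loads(path.read_text())
    if request.get("status") == "FAILED":
        request["status"] = "RERUN"
        request.pop("error_msg", None)   # obsolete once the job is retried
        request.pop("traceback", None)
        path.write_text(json.dumps(request, indent=4, ensure_ascii=False) + "\n")

for name in ("mamba-1.4b-hf", "mamba-130m-hf", "mamba-370m-hf", "mamba-790m-hf"):
    mark_for_rerun(f"state-spaces/{name}_eval_request_False_float16_Original.json")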
Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged-v2_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "model": "Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged-v2",
+ "base_model": "",
+ "revision": "main",
+ "private": false,
+ "precision": "float16",
+ "params": 7.242,
+ "architectures": "MistralForCausalLM",
+ "weight_type": "Original",
+ "main_language": "Other",
+ "status": "FAILED",
+ "submitted_time": "2024-04-09T02:54:08Z",
+ "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)",
+ "source": "leaderboard",
+ "job_id": 426,
+ "job_start_time": "2024-04-12T16-20-45.540031",
+ "error_msg": "Supplied state dict for model.layers.0.mlp.down_proj.weight does not contain `bitsandbytes__*` and possibly other `quantized_stats` components.",
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 216, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 65, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 608, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 563, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3531, in from_pretrained\n ) = cls._load_pretrained_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3958, in _load_pretrained_model\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 814, in _load_state_dict_into_meta_model\n hf_quantizer.create_quantized_param(model, param, param_name, param_device, state_dict, unexpected_keys)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/quantizers/quantizer_bnb_4bit.py\", line 193, in create_quantized_param\n raise ValueError(\nValueError: Supplied state dict for model.layers.0.mlp.down_proj.weight does not contain `bitsandbytes__*` and possibly other `quantized_stats` components.\n"
+ }
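For reference, the "bitsandbytes__*" failure recorded above typically shows up when a merged checkpoint's config.json still carries a quantization_config block: from_pretrained then routes the plain float16 weights through the 4-bit quantizer, which expects serialized quantization stats the state dict does not contain. A quick way to check a repo before resubmitting (standard transformers API; the diagnosis is an assumption, not something stated in this commit):

from transformers import AutoConfig

# Inspect the checkpoint's config without downloading any weights.
config = AutoConfig.from_pretrained(
    "Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged-v2"
)
# A non-None quantization_config on a merged float16 checkpoint is the
# likely trigger for the error above; removing it from config.json would
# let the weights load as ordinary float16 tensors.
print(getattr(config, "quantization_config", None))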
state-spaces/mamba-1.4b-hf_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MambaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-07T15:43:45Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 445,
- "job_start_time": "2024-04-14T00-26-19.830668",
- "error_msg": "CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 196, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 65, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 653, in forward\n mamba_outputs = self.backbone(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 555, in forward\n hidden_states = mixer_block(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 327, in forward\n hidden_states = self.mixer(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 291, in forward\n return self.cuda_kernels_forward(hidden_states, cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 174, in cuda_kernels_forward\n hidden_states = causal_conv1d_fn(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 121, in causal_conv1d_fn\n return CausalConv1dFn.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 57, in forward\n out = causal_conv1d_cuda.causal_conv1d_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-04-14T00-26-19.830668"
  }
state-spaces/mamba-130m-hf_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MambaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-07T15:44:17Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 448,
- "job_start_time": "2024-04-14T00-58-21.841715",
- "error_msg": "CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 196, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 65, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 653, in forward\n mamba_outputs = self.backbone(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 555, in forward\n hidden_states = mixer_block(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 327, in forward\n hidden_states = self.mixer(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 291, in forward\n return self.cuda_kernels_forward(hidden_states, cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 174, in cuda_kernels_forward\n hidden_states = causal_conv1d_fn(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 121, in causal_conv1d_fn\n return CausalConv1dFn.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 57, in forward\n out = causal_conv1d_cuda.causal_conv1d_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-04-14T00-58-21.841715"
  }
state-spaces/mamba-370m-hf_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MambaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-07T15:44:10Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 447,
- "job_start_time": "2024-04-14T00-49-16.346477",
- "error_msg": "CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 196, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 65, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 653, in forward\n mamba_outputs = self.backbone(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 555, in forward\n hidden_states = mixer_block(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 327, in forward\n hidden_states = self.mixer(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 291, in forward\n return self.cuda_kernels_forward(hidden_states, cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 174, in cuda_kernels_forward\n hidden_states = causal_conv1d_fn(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 121, in causal_conv1d_fn\n return CausalConv1dFn.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 57, in forward\n out = causal_conv1d_cuda.causal_conv1d_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-04-14T00-49-16.346477"
  }
state-spaces/mamba-790m-hf_eval_request_False_float16_Original.json CHANGED
@@ -8,12 +8,10 @@
  "architectures": "MambaForCausalLM",
  "weight_type": "Original",
  "main_language": "English",
- "status": "FAILED",
+ "status": "RERUN",
  "submitted_time": "2024-04-07T15:44:00Z",
  "model_type": "🟢 : pretrained",
  "source": "leaderboard",
  "job_id": 446,
- "job_start_time": "2024-04-14T00-37-32.705895",
- "error_msg": "CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
- "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 196, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 65, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 653, in forward\n mamba_outputs = self.backbone(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 555, in forward\n hidden_states = mixer_block(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 327, in forward\n hidden_states = self.mixer(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 291, in forward\n return self.cuda_kernels_forward(hidden_states, cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/mamba/modeling_mamba.py\", line 174, in cuda_kernels_forward\n hidden_states = causal_conv1d_fn(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 121, in causal_conv1d_fn\n return CausalConv1dFn.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 57, in forward\n out = causal_conv1d_cuda.causal_conv1d_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
+ "job_start_time": "2024-04-14T00-37-32.705895"
  }
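All four mamba runs failed the same way: the compiled causal_conv1d CUDA kernel requires a newer CUDA runtime than the worker's installed NVIDIA driver supports, so this is an environment problem rather than a model problem. A quick sanity check on the worker (plain PyTorch calls; comparing against the driver's supported CUDA version reported by nvidia-smi is the usual diagnosis):

import torch

# CUDA runtime version this PyTorch build was compiled against.
print("torch built for CUDA:", torch.version.cuda)
# Returns False (or raises) when the installed driver is older than that
# runtime -- the "driver version is insufficient" case seen above.
print("cuda available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))
# nvidia-smi reports the highest CUDA version the driver supports; if it is
# lower than torch.version.cuda, upgrade the driver or install builds of
# torch/causal-conv1d targeting the older CUDA runtime.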