llm_pt_leaderboard_requests/dominguesm/mambarim-110m_eval_request_False_float16_Original.json
eduagarcia: Update status of dominguesm/mambarim-110m_eval_request_False_float16_Original to FAILED (commit 50bb214)
{
"model": "dominguesm/mambarim-110m",
"base_model": "",
"revision": "main",
"private": false,
"precision": "float16",
"params": 0.07,
"architectures": "MambaForCausalLM",
"weight_type": "Original",
"main_language": "Portuguese",
"status": "FAILED",
"submitted_time": "2024-04-15T02:53:55Z",
"model_type": "🟢 : pretrained",
"source": "leaderboard",
"job_id": 464,
"job_start_time": "2024-04-15T22-20-30.277354",
"error_msg": "CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n",
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 198, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/mamba/modeling_mamba.py\", line 677, in forward\n mamba_outputs = self.backbone(\n ^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/mamba/modeling_mamba.py\", line 579, in forward\n 
hidden_states = mixer_block(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/mamba/modeling_mamba.py\", line 344, in forward\n hidden_states = self.mixer(hidden_states, cache_params=cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/mamba/modeling_mamba.py\", line 308, in forward\n return self.cuda_kernels_forward(hidden_states, cache_params)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/transformers/src/transformers/models/mamba/modeling_mamba.py\", line 191, in cuda_kernels_forward\n hidden_states = causal_conv1d_fn(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 121, in causal_conv1d_fn\n return CausalConv1dFn.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/causal_conv1d/causal_conv1d_interface.py\", line 57, in forward\n out = causal_conv1d_cuda.causal_conv1d_fwd(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: CUDA error: CUDA driver version is insufficient for CUDA runtime version\nCUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.\nCompile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.\n\n"
}
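
Note on the failure recorded in error_msg: the traceback ends inside causal_conv1d's compiled CUDA extension, and "CUDA driver version is insufficient for CUDA runtime version" means the worker's NVIDIA driver is older than the CUDA runtime that the torch/causal-conv1d binaries were built against. Below is a minimal diagnostic sketch, assuming only that torch and nvidia-smi are present on the worker; the script is illustrative and not part of the leaderboard bot.

import subprocess

import torch

# CUDA runtime version this PyTorch build (and prebuilt extension wheels
# such as causal-conv1d) was compiled against, e.g. "12.1"; None for CPU builds.
print("torch CUDA runtime:", torch.version.cuda)

# Often False when the installed driver is too old to service that runtime.
print("cuda available:", torch.cuda.is_available())

# Driver version as reported by the NVIDIA driver itself; it must meet the
# minimum required by the runtime printed above.
try:
    out = subprocess.run(
        ["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
        capture_output=True,
        text=True,
        check=True,
    )
    print("nvidia driver:", out.stdout.strip())
except (FileNotFoundError, subprocess.CalledProcessError) as exc:
    print("could not query driver:", exc)

The usual remedies are upgrading the host driver or installing torch and causal-conv1d wheels built for an older CUDA runtime. As a workaround, the Mamba implementation in transformers falls back to a slower pure-PyTorch path when the causal-conv1d/mamba-ssm kernels are not importable; the traceback above shows the fast path (cuda_kernels_forward) being taken because those packages were installed.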