llm_pt_leaderboard_requests / Qwen /Qwen-7B_eval_request_False_float16_Original.json
Update status of Qwen/Qwen-7B_eval_request_False_float16_Original to FAILED
{
"model": "Qwen/Qwen-7B",
"base_model": "",
"revision": "main",
"private": false,
"precision": "float16",
"params": 7.721,
"architectures": "QWenLMHeadModel",
"weight_type": "Original",
"status": "FAILED",
"submitted_time": "2024-04-18T23:08:40Z",
"model_type": "🟢 : pretrained",
"source": "leaderboard",
"job_id": 495,
"job_start_time": "2024-04-19T21-28-52.030704",
"main_language": "Chinese",
"error_msg": "Triton Error [CUDA]: device kernel image is invalid",
"traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 201, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 159, in simple_evaluate\n results = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 343, in evaluate\n resps = getattr(lm, reqtype)(cloned_reqs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1426, in generate_until\n batch_size, _ = self._detect_batch_size_and_length()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 815, in _detect_batch_size_and_length\n batch_size, max_length = forward_batch()\n ^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 144, in decorator\n return function(batch_size, max_length, *args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 810, in forward_batch\n out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 1052, in _model_call\n return self.model(inps).logits\n ^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/Qwen/Qwen-7B/ef3c5c9c57b252f3149c1408daf4d649ec8b6c85/modeling_qwen.py\", line 1043, in forward\n transformer_outputs = self.transformer(\n ^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/workspace/datasets/hf_cache/modules/transformers_modules/Qwen/Qwen-7B/ef3c5c9c57b252f3149c1408daf4d649ec8b6c85/modeling_qwen.py\", line 891, in forward\n outputs = block(\n ^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/Qwen/Qwen-7B/ef3c5c9c57b252f3149c1408daf4d649ec8b6c85/modeling_qwen.py\", line 610, in forward\n attn_outputs = self.attn(\n ^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1511, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1520, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/Qwen/Qwen-7B/ef3c5c9c57b252f3149c1408daf4d649ec8b6c85/modeling_qwen.py\", line 432, in forward\n query = apply_rotary_pos_emb(query, q_pos_emb)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/datasets/hf_cache/modules/transformers_modules/Qwen/Qwen-7B/ef3c5c9c57b252f3149c1408daf4d649ec8b6c85/modeling_qwen.py\", line 1342, in apply_rotary_pos_emb\n return apply_rotary_emb_func(t_float, cos, sin).type_as(t)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/layers/rotary.py\", line 122, in apply_rotary_emb\n return ApplyRotaryEmb.apply(\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/autograd/function.py\", line 553, in apply\n return super().apply(*args, **kwargs) # type: ignore[misc]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/layers/rotary.py\", line 48, in forward\n out = apply_rotary(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/flash_attn/ops/triton/rotary.py\", line 202, in apply_rotary\n rotary_kernel[grid](\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/triton/runtime/jit.py\", line 550, in run\n bin.c_wrapper(\n ^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/triton/compiler/compiler.py\", line 692, in __getattribute__\n self._init_handles()\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/triton/compiler/compiler.py\", line 683, in _init_handles\n mod, func, n_regs, n_spills = fn_load_binary(self.metadata[\"name\"], self.asm[bin_path], self.shared, device)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nRuntimeError: Triton Error [CUDA]: device kernel image is invalid\n"
}