{ "model": "01-ai/Yi-34B-200K", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "params": 34.389, "architectures": "LlamaForCausalLM", "weight_type": "Original", "status": "FAILED", "submitted_time": "2024-02-05T23:18:19Z", "model_type": "🟢 : pretrained", "source": "script", "job_id": 253, "job_start_time": "2024-02-21T12-10-33.914064", "eval_version": "1.0.0", "result_metrics": { "enem_challenge": 0.7186843946815955, "bluex": 0.6634214186369958, "oab_exams": 0.571753986332574, "assin2_rte": 0.7858403678133732, "assin2_sts": 0.5583683246827316, "faquad_nli": 0.7800338409475465, "sparrow_pt": 0.37142261482383704 }, "result_metrics_average": 0.6356464211312362, "result_metrics_npm": 0.4967380342980293, "error_msg": "CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF", "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 191, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 55, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 604, in _create_model\n self._model = self.AUTO_MODEL_CLASS.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py\", line 561, in from_pretrained\n return model_class.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 3558, in from_pretrained\n dispatch_model(model, **device_map_kwargs)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 445, in dispatch_model\n model.to(device)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/transformers/modeling_utils.py\", line 2556, in to\n return super().to(*args, **kwargs)\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1160, in to\n return self._apply(convert)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 810, in _apply\n module._apply(fn)\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 857, in _apply\n self._buffers[key] = fn(buf)\n ^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1158, in convert\n return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\ntorch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 298.02 GiB. GPU 0 has a total capacty of 79.35 GiB of which 14.77 GiB is free. Process 580799 has 64.57 GiB memory in use. Of the allocated memory 64.06 GiB is allocated by PyTorch, and 7.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\n" }
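Note on the failure: the OOM is raised while accelerate's dispatch_model moves a buffer to GPU 0 (self._buffers[key] = fn(buf)), after the bfloat16 weights are already resident (34.389B params x 2 bytes is approximately 64 GiB, matching the 64.06 GiB PyTorch reports as allocated). A single 298.02 GiB request is therefore not the weights but some per-position buffer, plausibly sized for the model's 200K-token context. The sketch below is a hypothetical mitigation, not part of the evaluation harness; the max_split_size_mb value and the 4096-token cap are illustrative assumptions, as is the premise that the oversized buffer scales with max_position_embeddings.

# Hypothetical mitigation sketch (not the leaderboard bot's code): apply the
# allocator tweak suggested by the error message and load the model with a
# reduced context window. All concrete values here are assumptions.
import os

# Must be set before the first CUDA allocation; 128 MiB is an arbitrary choice.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("01-ai/Yi-34B-200K")
# Assumption: the ~298 GiB buffer scales with the advertised context length,
# so capping it at evaluation-sized contexts avoids the giant allocation.
config.max_position_embeddings = 4096

model = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-34B-200K",
    config=config,
    torch_dtype=torch.bfloat16,
    device_map="auto",       # let accelerate place/offload what does not fit
    low_cpu_mem_usage=True,  # stream weights instead of materializing twice
)

With device_map="auto", anything that still does not fit on the 79.35 GiB GPU would be offloaded to CPU rather than aborting the load, at the cost of slower evaluation; whether the harness tolerates that is a separate question.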