{
    "model": "22h/cabrita-lora-v0-1",
    "base_model": "huggyllama/llama-7b",
    "revision": "main",
    "private": false,
    "precision": "float16",
    "params": 0,
    "architectures": "?",
    "weight_type": "Adapter",
    "status": "FAILED",
    "submitted_time": "2024-02-05T23:03:11Z",
    "model_type": "🔶 : fine-tuned",
    "source": "script",
    "job_id": 14,
    "job_start_time": "2024-02-06T16-32-12.465705",
    "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
    "traceback": "Traceback (most recent call last):\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 183, in wait_download_and_run_request\n    del MODELS_DOWNLOADED[f\"{request['model']}_{request['revision']}\"]\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 64, in run_request\n    results = run_eval_on_model(\n              ^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 54, in run_eval_on_model\n  File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n    results = evaluator.simple_evaluate(\n              ^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 415, in _wrapper\n    return fn(*args, **kwargs)\n           ^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n    return cls(**args, **args2)\n           ^^^^^^^^^^^^^^^^^^^^\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 293, in __init__\n    self._create_model(\n  File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 633, in _create_model\n    self._model = PeftModel.from_pretrained(\n                  ^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 325, in from_pretrained\n    config = PEFT_TYPE_TO_CONFIG_MAPPING[\n             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 152, in from_pretrained\n    return cls.from_peft_type(**kwargs)\n           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n  File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 119, in from_peft_type\n    return config_cls(**kwargs)\n           ^^^^^^^^^^^^^^^^^^^^\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
}