eduagarcia committed on
Commit dcb105f
1 Parent(s): 58232ec

Update status of 22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter to FAILED

22h/cabrita-lora-v0-1_eval_request_False_float16_Adapter.json CHANGED
@@ -7,11 +7,13 @@
  "params": 0,
  "architectures": "?",
  "weight_type": "Adapter",
- "status": "RUNNING",
+ "status": "FAILED",
  "submitted_time": "2024-02-05T23:03:11Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
  "job_id": 820,
  "job_start_time": "2024-06-16T10-13-34.877976",
- "main_language": "Portuguese"
+ "main_language": "Portuguese",
+ "error_msg": "LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'",
+ "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 200, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 71, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 60, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 297, in __init__\n self._create_model(\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/huggingface.py\", line 637, in _create_model\n self._model = PeftModel.from_pretrained(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/peft_model.py\", line 327, in from_pretrained\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 151, in from_pretrained\n return cls.from_peft_type(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/peft/config.py\", line 118, in from_peft_type\n return config_cls(**kwargs)\n ^^^^^^^^^^^^^^^^^^^^\nTypeError: LoraConfig.__init__() got an unexpected keyword argument 'enable_lora'\n"
  }
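
For context, the recorded failure happens while the evaluation bot calls PeftModel.from_pretrained: adapter_config.json files saved by very old peft versions can contain a legacy `enable_lora` field that current `LoraConfig` no longer accepts, so parsing the config raises the TypeError above. Below is a minimal sketch of one possible workaround that filters unknown keys out of a locally downloaded adapter config before loading; the directory path and the helper function are illustrative assumptions, not part of the leaderboard's evaluation bot.

```python
# Sketch only: drop adapter_config.json keys that the installed peft's
# LoraConfig does not declare (e.g. the legacy "enable_lora" field).
# Assumes the adapter files have already been downloaded to a local dir.
import dataclasses
import json
from pathlib import Path

from peft import LoraConfig


def sanitize_adapter_config(adapter_dir: str) -> None:
    """Rewrite adapter_config.json keeping only keys LoraConfig accepts."""
    config_path = Path(adapter_dir) / "adapter_config.json"
    raw = json.loads(config_path.read_text())

    # Fields declared on the LoraConfig dataclass in the installed peft.
    allowed = {f.name for f in dataclasses.fields(LoraConfig)}
    cleaned = {k: v for k, v in raw.items() if k in allowed}
    dropped = sorted(set(raw) - allowed)

    if dropped:
        print(f"Dropping unsupported keys: {dropped}")
        config_path.write_text(json.dumps(cleaned, indent=2))


# Hypothetical usage with a local copy of the adapter:
# sanitize_adapter_config("./cabrita-lora-v0-1")
# model = PeftModel.from_pretrained(base_model, "./cabrita-lora-v0-1")
```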