ngxson HF staff committed on
Commit
d41bb30
1 Parent(s): 7e7faad

clarify message for non-peft repo

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -54,7 +54,7 @@ def process_model(peft_model_id: str, q_method: str, private_repo, oauth_token:
54
 
55
  adapter_config_dir = local_dir/"adapter_config.json"
56
  if not os.path.exists(adapter_config_dir):
57
- raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.")
58
 
59
  fp16_conversion = f"python llama.cpp/{CONVERSION_SCRIPT} {local_dir} --outtype {q_method.lower()} --outfile {gguf_output_name}"
60
  result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
 
54
 
55
  adapter_config_dir = local_dir/"adapter_config.json"
56
  if not os.path.exists(adapter_config_dir):
57
+ raise Exception("adapter_config.json not found. Please ensure the selected repo is a PEFT LoRA model.<br/><br/>If you are converting a model (not a LoRA adapter), please use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) instead.")
58
 
59
  fp16_conversion = f"python llama.cpp/{CONVERSION_SCRIPT} {local_dir} --outtype {q_method.lower()} --outfile {gguf_output_name}"
60
  result = subprocess.run(fp16_conversion, shell=True, capture_output=True)