Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -78,11 +78,11 @@ def process_model(model_id, q_method, hf_token, private_repo):
78
  card.text = dedent(
79
  f"""
80
  # {new_repo_id}
81
- This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via the GGML.ai's [GGUF-it](https://huggingface.co/spaces/ggml-org/GGUF-it) space.
82
  Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
83
  ## Use with llama.cpp
84
 
85
- Install Llama.cpp through brew.
86
 
87
  ```bash
88
  brew install ggerganov/ggerganov/llama.cpp
@@ -104,7 +104,10 @@ def process_model(model_id, q_method, hf_token, private_repo):
104
  Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.
105
 
106
  ```
107
- git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make && ./main -m {qtype.split("/")[-1]} -n 128
 
 
 
108
  ```
109
  """
110
  )
 
78
  card.text = dedent(
79
  f"""
80
  # {new_repo_id}
81
+ This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
82
  Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
83
  ## Use with llama.cpp
84
 
85
+ Install llama.cpp through brew.
86
 
87
  ```bash
88
  brew install ggerganov/ggerganov/llama.cpp
 
104
  Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.
105
 
106
  ```
107
+ git clone https://github.com/ggerganov/llama.cpp && \
108
+ cd llama.cpp && \
109
+ make && \
110
+ ./main -m {qtype.split("/")[-1]} -n 128
111
  ```
112
  """
113
  )