Ffftdtd5dtft committed on
Commit 5532bbf · verified · 1 Parent(s): 0d6d15f

Update app.py

Files changed (1)
  1. app.py +129 -32
app.py CHANGED
@@ -17,20 +17,24 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
  def generate_importance_matrix(model_path, train_data_path):
  imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
  os.chdir("llama.cpp")
  if not os.path.isfile(f"../{model_path}"):
  raise Exception(f"Model file not found: {model_path}")
  process = subprocess.Popen(imatrix_command, shell=True)
  try:
- process.wait(timeout=60)
  except subprocess.TimeoutExpired:
- print("Imatrix computation timed out. Sending SIGINT...")
  process.send_signal(signal.SIGINT)
  try:
- process.wait(timeout=5)
  except subprocess.TimeoutExpired:
- print("Imatrix proc still didn't term. Forecfully terming...")
  process.kill()
  os.chdir("..")

  def split_upload_model(model_path, repo_id, oauth_token: gr.OAuthToken | None, split_max_tensors=256, split_max_size=None):
  if oauth_token.token is None:
@@ -39,14 +43,20 @@ def split_upload_model(model_path, repo_id, oauth_token: gr.OAuthToken | None, s
  if split_max_size:
  split_cmd += f" --split-max-size {split_max_size}"
  split_cmd += f" {model_path} {model_path.split('.')[0]}"
  result = subprocess.run(split_cmd, shell=True, capture_output=True, text=True)
  if result.returncode != 0:
  raise Exception(f"Error splitting the model: {result.stderr}")
  sharded_model_files = [f for f in os.listdir('.') if f.startswith(model_path.split('.')[0])]
  if sharded_model_files:
  api = HfApi(token=oauth_token.token)
  for file in sharded_model_files:
  file_path = os.path.join('.', file)
  try:
  api.upload_file(
  path_or_fileobj=file_path,
@@ -57,6 +67,7 @@ def split_upload_model(model_path, repo_id, oauth_token: gr.OAuthToken | None, s
  raise Exception(f"Error uploading file {file_path}: {e}")
  else:
  raise Exception("No sharded files found.")

  def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token: gr.OAuthToken | None):
  if oauth_token.token is None:
@@ -66,29 +77,43 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  try:
  api = HfApi(token=oauth_token.token)
- api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False)
- all_files = []
- for root, _, files in os.walk(model_name):
- for file in files:
- all_files.append(os.path.join(root, file))
-
- if not all_files:
- raise FileNotFoundError("No files found in the downloaded model directory.")
-
- for file_path in all_files:
- try:
- gguf_model_file = f"{os.path.splitext(file_path)[0]}.gguf"
- conversion_command = f"python llama.cpp/convert_hf_to_gguf.py {file_path} --outfile {gguf_model_file}"
- result = subprocess.run(conversion_command, shell=True, capture_output=True)
- if result.returncode == 0:
- model_file = gguf_model_file
- break
- except Exception as e:
- print(f"Conversion attempt failed for {file_path}: {e}")
  if model_file is None:
- raise Exception("Unable to find or convert a suitable model file to GGUF format.")
  imatrix_path = "llama.cpp/imatrix.dat"
@@ -96,12 +121,16 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  if train_data_file:
  train_data_path = train_data_file.name
  else:
- train_data_path = "groups_merged.txt"
  if not os.path.isfile(train_data_path):
  raise Exception(f"Training data file not found: {train_data_path}")
- generate_importance_matrix(model_file, train_data_path)
  username = whoami(oauth_token.token)["name"]
  quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf"
@@ -109,22 +138,81 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  os.chdir("llama.cpp")
  if use_imatrix:
- quantise_ggml = f"./llama-quantize --imatrix {imatrix_path} ../{model_file} ../{quantized_gguf_path} {imatrix_q_method}"
  else:
- quantise_ggml = f"./llama-quantize ../{model_file} ../{quantized_gguf_path} {q_method}"
  result = subprocess.run(quantise_ggml, shell=True, capture_output=True)
  os.chdir("..")
  if result.returncode != 0:
  raise Exception(f"Error quantizing: {result.stderr}")
  new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF", exist_ok=True, private=private_repo)
  new_repo_id = new_repo_url.repo_id
  if split_model:
  split_upload_model(quantized_gguf_path, new_repo_id, oauth_token, split_max_tensors, split_max_size)
  else:
  try:
  api.upload_file(
  path_or_fileobj=quantized_gguf_path,
  path_in_repo=quantized_gguf_name,
@@ -135,6 +223,7 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  if use_imatrix and os.path.isfile(imatrix_path):
  try:
  api.upload_file(
  path_or_fileobj=imatrix_path,
  path_in_repo="imatrix.dat",
@@ -143,6 +232,13 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  except Exception as e:
  raise Exception(f"Error uploading imatrix.dat: {e}")
  return (
  f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>',
  "llama.png",
@@ -151,11 +247,12 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
  return (f"Error: {e}", "error.png")
  finally:
  shutil.rmtree(model_name, ignore_errors=True)

  css="""/* Custom CSS to allow scrolling */
  .gradio-container {overflow-y: auto;}
  """
- with gr.Blocks(css=css) as demo:
  gr.Markdown("You must be logged in to use GGUF-my-repo.")
  gr.LoginButton(min_width=250)
@@ -178,7 +275,7 @@ with gr.Blocks(css=css) as demo:
  ["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"],
  label="Imatrix Quantization Method",
  info="GGML imatrix quants type",
- value="IQ4_NL",
  filterable=False,
  visible=False
  )
@@ -222,7 +319,7 @@ with gr.Blocks(css=css) as demo:
  def update_visibility(use_imatrix):
  return gr.update(visible=not use_imatrix), gr.update(visible=use_imatrix), gr.update(visible=use_imatrix)
-
  use_imatrix.change(
  fn=update_visibility,
  inputs=use_imatrix,
@@ -261,7 +358,7 @@ with gr.Blocks(css=css) as demo:
  )
  def restart_space():
- HfApi().restart_space(repo_id="ggml-org/gguf-my-repo", token=HF_TOKEN, factory_reboot=True)
  scheduler = BackgroundScheduler()
  scheduler.add_job(restart_space, "interval", seconds=21600)
 
  def generate_importance_matrix(model_path, train_data_path):
  imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
  os.chdir("llama.cpp")
+ print(f"Current working directory: {os.getcwd()}")
+ print(f"Files in the current directory: {os.listdir('.')}")
  if not os.path.isfile(f"../{model_path}"):
  raise Exception(f"Model file not found: {model_path}")
+ print("Running imatrix command...")
  process = subprocess.Popen(imatrix_command, shell=True)
  try:
+ process.wait(timeout=60)
  except subprocess.TimeoutExpired:
+ print("Imatrix computation timed out. Sending SIGINT to allow graceful termination...")
  process.send_signal(signal.SIGINT)
  try:
+ process.wait(timeout=5)
  except subprocess.TimeoutExpired:
+ print("Imatrix process still didn't terminate. Forcefully terminating process...")
  process.kill()
  os.chdir("..")
+ print("Importance matrix generation completed.")
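The function above relies on a common graceful-shutdown pattern: wait with a timeout, ask the child to stop with SIGINT, and only kill it as a last resort. A minimal self-contained sketch of that pattern, with a placeholder command and illustrative timeouts rather than the app's real settings:

```python
import signal
import subprocess

# Placeholder long-running command; the real app launches ./llama-imatrix here.
process = subprocess.Popen(["sleep", "120"])
try:
    process.wait(timeout=60)              # happy path: the job finishes within the budget
except subprocess.TimeoutExpired:
    process.send_signal(signal.SIGINT)    # ask politely so the tool can flush its output
    try:
        process.wait(timeout=5)
    except subprocess.TimeoutExpired:
        process.kill()                    # last resort: force-terminate
```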
  def split_upload_model(model_path, repo_id, oauth_token: gr.OAuthToken | None, split_max_tensors=256, split_max_size=None):
  if oauth_token.token is None:

  if split_max_size:
  split_cmd += f" --split-max-size {split_max_size}"
  split_cmd += f" {model_path} {model_path.split('.')[0]}"
+ print(f"Split command: {split_cmd}")
  result = subprocess.run(split_cmd, shell=True, capture_output=True, text=True)
+ print(f"Split command stdout: {result.stdout}")
+ print(f"Split command stderr: {result.stderr}")
  if result.returncode != 0:
  raise Exception(f"Error splitting the model: {result.stderr}")
+ print("Model split successfully!")
  sharded_model_files = [f for f in os.listdir('.') if f.startswith(model_path.split('.')[0])]
  if sharded_model_files:
+ print(f"Sharded model files: {sharded_model_files}")
  api = HfApi(token=oauth_token.token)
  for file in sharded_model_files:
  file_path = os.path.join('.', file)
+ print(f"Uploading file: {file_path}")
  try:
  api.upload_file(
  path_or_fileobj=file_path,

  raise Exception(f"Error uploading file {file_path}: {e}")
  else:
  raise Exception("No sharded files found.")
+ print("Sharded model has been uploaded successfully!")
 
  def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token: gr.OAuthToken | None):
  if oauth_token.token is None:

  try:
  api = HfApi(token=oauth_token.token)

+ # Download only necessary files based on model format
+ dl_pattern = ["*.md", "*.json"]
+ pattern = (
+ "*.safetensors"
+ if any(
+ file.path.endswith(".safetensors")
+ for file in api.list_repo_tree(
+ repo_id=model_id,
+ recursive=True,
+ )
+ )
+ else "*.bin"
+ )
+ dl_pattern += [pattern]
+ api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+ print("Model downloaded successfully!")
+ print(f"Current working directory: {os.getcwd()}")
+ print(f"Model directory contents: {os.listdir(model_name)}")
+
+ # Find downloaded model file
+ for filename in os.listdir(model_name):
+ if filename.endswith((".bin", ".safetensors")):
+ model_file = os.path.join(model_name, filename)
+ break

  if model_file is None:
+ raise FileNotFoundError("No model file found in the downloaded files.")
+
+ # Convert to GGUF
+ gguf_model_file = f"{os.path.splitext(model_file)[0]}.gguf"
+ conversion_command = f"python llama.cpp/convert_hf_to_gguf.py {model_file} --outfile {gguf_model_file}"
+ result = subprocess.run(conversion_command, shell=True, capture_output=True)
+ if result.returncode != 0:
+ raise Exception(f"Error converting to GGUF: {result.stderr}")
+ print("Model converted to GGUF successfully!")
+ print(f"Converted model path: {gguf_model_file}")
 
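One detail in the download block above is worth spelling out: `allow_patterns` expects a list of glob patterns, so the chosen pattern should be appended as a one-element list; using `+=` with the bare string would splice its characters into the list instead. A quick illustration (the patterns are just examples):

```python
dl_pattern = ["*.md", "*.json"]
pattern = "*.safetensors"

# Appending as a one-element list keeps the glob intact:
print(dl_pattern + [pattern])      # ['*.md', '*.json', '*.safetensors']

# "dl_pattern += pattern" would instead extend the list character by character,
# because += on a list accepts any iterable and a string iterates over its characters:
chars = dl_pattern.copy()
chars += pattern
print(chars)                       # ['*.md', '*.json', '*', '.', 's', 'a', 'f', ...]
```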
  imatrix_path = "llama.cpp/imatrix.dat"

  if train_data_file:
  train_data_path = train_data_file.name
  else:
+ train_data_path = "groups_merged.txt" # fallback calibration dataset
+
+ print(f"Training data file path: {train_data_path}")
  if not os.path.isfile(train_data_path):
  raise Exception(f"Training data file not found: {train_data_path}")
+ generate_importance_matrix(gguf_model_file, train_data_path)
+ else:
+ print("Not using imatrix quantization.")
  username = whoami(oauth_token.token)["name"]
  quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf"
 
  os.chdir("llama.cpp")
  if use_imatrix:
+ quantise_ggml = f"./llama-quantize --imatrix {imatrix_path} ../{gguf_model_file} ../{quantized_gguf_path} {imatrix_q_method}"
  else:
+ quantise_ggml = f"./llama-quantize ../{gguf_model_file} ../{quantized_gguf_path} {q_method}"
  result = subprocess.run(quantise_ggml, shell=True, capture_output=True)
  os.chdir("..")
  if result.returncode != 0:
  raise Exception(f"Error quantizing: {result.stderr}")
+ print(f"Quantized successfully with {imatrix_q_method if use_imatrix else q_method} option!")
+ print(f"Quantized model path: {quantized_gguf_path}")
 
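To make the quantize f-strings above concrete: the `../` prefixes are there because the app chdirs into `llama.cpp` just before running the binary, so paths are resolved relative to that directory. With made-up values (model directory, method, and output name are all illustrative), the imatrix branch expands to something like:

```python
# Illustrative values only; none of these names come from the app itself.
imatrix_path = "llama.cpp/imatrix.dat"
gguf_model_file = "mymodel/mymodel.gguf"
quantized_gguf_path = "mymodel-iq4_nl-imat.gguf"
imatrix_q_method = "IQ4_NL"

print(f"./llama-quantize --imatrix {imatrix_path} ../{gguf_model_file} ../{quantized_gguf_path} {imatrix_q_method}")
# ./llama-quantize --imatrix llama.cpp/imatrix.dat ../mymodel/mymodel.gguf ../mymodel-iq4_nl-imat.gguf IQ4_NL
```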
  new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF", exist_ok=True, private=private_repo)
  new_repo_id = new_repo_url.repo_id
+ print("Repo created successfully!", new_repo_url)
+
+ try:
+ card = ModelCard.load(model_id, token=oauth_token.token)
+ except:
+ card = ModelCard("")
+ if card.data.tags is None:
+ card.data.tags = []
+ card.data.tags.append("llama-cpp")
+ card.data.tags.append("gguf-my-repo")
+ card.data.base_model = model_id
+ card.text = dedent(
+ f"""
+ # {new_repo_id}
+ This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
+ Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.
+
+ ## Use with llama.cpp
+ Install llama.cpp through brew (works on Mac and Linux).
+
+ ```bash
+ brew install llama.cpp
+ ```
+ Invoke the llama.cpp server or the CLI.
+
+ ### CLI:
+ ```bash
+ llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
+ ```
+
+ ### Server:
+ ```bash
+ llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
+ ```
+
+ Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo.
+ Step 1: Clone llama.cpp from GitHub.
+ ```
+ git clone https://github.com/ggerganov/llama.cpp
+ ```
+ Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (e.g. LLAMA_CUDA=1 for Nvidia GPUs on Linux).
+ ```
+ cd llama.cpp && LLAMA_CURL=1 make
+ ```
+ Step 3: Run inference through the main binary.
+ ```
+ ./llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
+ ```
+ or
+ ```
+ ./llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
+ ```
+ """
+ )
+ card.save("README.md")
 
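For context on the ModelCard block above: the tags and `base_model` set on `card.data` end up as YAML front matter at the top of the saved README.md. A minimal sketch (the repo id is a placeholder, and the exact key order in the generated header may differ):

```python
from huggingface_hub import ModelCard

card = ModelCard("")                          # start from an empty card if the original can't be loaded
card.data.tags = ["llama-cpp", "gguf-my-repo"]
card.data.base_model = "some-org/some-model"  # placeholder; the app uses the source model_id
card.save("README.md")

# README.md now starts with front matter along these lines:
# ---
# base_model: some-org/some-model
# tags:
# - llama-cpp
# - gguf-my-repo
# ---
```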
  if split_model:
  split_upload_model(quantized_gguf_path, new_repo_id, oauth_token, split_max_tensors, split_max_size)
  else:
  try:
+ print(f"Uploading quantized model: {quantized_gguf_path}")
  api.upload_file(
  path_or_fileobj=quantized_gguf_path,
  path_in_repo=quantized_gguf_name,
 
  if use_imatrix and os.path.isfile(imatrix_path):
  try:
+ print(f"Uploading imatrix.dat: {imatrix_path}")
  api.upload_file(
  path_or_fileobj=imatrix_path,
  path_in_repo="imatrix.dat",
 
  except Exception as e:
  raise Exception(f"Error uploading imatrix.dat: {e}")

+ api.upload_file(
+ path_or_fileobj="README.md",
+ path_in_repo="README.md",
+ repo_id=new_repo_id,
+ )
+ print(f"Uploaded successfully with {imatrix_q_method if use_imatrix else q_method} option!")
+
  return (
  f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>',
  "llama.png",
 
  return (f"Error: {e}", "error.png")
  finally:
  shutil.rmtree(model_name, ignore_errors=True)
+ print("Folder cleaned up successfully!")

  css="""/* Custom CSS to allow scrolling */
  .gradio-container {overflow-y: auto;}
  """
+ with gr.Blocks(css=css) as demo:
  gr.Markdown("You must be logged in to use GGUF-my-repo.")
  gr.LoginButton(min_width=250)
 
 
  ["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"],
  label="Imatrix Quantization Method",
  info="GGML imatrix quants type",
+ value="IQ4_NL",
  filterable=False,
  visible=False
  )
 
  def update_visibility(use_imatrix):
  return gr.update(visible=not use_imatrix), gr.update(visible=use_imatrix), gr.update(visible=use_imatrix)
+
  use_imatrix.change(
  fn=update_visibility,
  inputs=use_imatrix,
 
  )

  def restart_space():
+ HfApi().restart_space(repo_id="YOUR_SPACE_ID", token=HF_TOKEN, factory_reboot=True)

  scheduler = BackgroundScheduler()
  scheduler.add_job(restart_space, "interval", seconds=21600)
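The excerpt ends with the job being registered but not started; a BackgroundScheduler only fires jobs after `scheduler.start()` is called, which presumably happens further down in app.py. A minimal self-contained sketch of the same keep-alive pattern (the Space id is a placeholder):

```python
import os

from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi

HF_TOKEN = os.environ.get("HF_TOKEN")

def restart_space():
    # factory_reboot=True rebuilds the Space image instead of doing a plain restart
    HfApi().restart_space(repo_id="your-username/your-space", token=HF_TOKEN, factory_reboot=True)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=21600)  # every 6 hours
scheduler.start()
```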