FantasiaFoundry Lewdiculous committed on
Commit
967fe65
1 Parent(s): b6bbbe0

Fix things. (#12)

Browse files

- Fix things. (de5a40ce88944af8efe23d4e7e3b36f931634a74)


Co-authored-by: Lewdiculous <Lewdiculous@users.noreply.huggingface.co>

Files changed (1) hide show
  1. gguf-imat.py +21 -15
gguf-imat.py CHANGED
@@ -89,8 +89,11 @@ def download_model_repo():
89
  # If the model already exists, prompt the user if they want to delete the model directory
90
  delete_model_dir = input("Remove HF model folder after converting original model to GGUF? (yes/no) (default: no): ").strip().lower()
91
 
 
 
 
92
  # Convert the existing model to GGUF F16 format and generate imatrix.dat
93
- convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir)
94
 
95
  else:
96
  revision = input("Enter the revision (branch, tag, or commit) to download (default: main): ") or "main"
@@ -102,11 +105,14 @@ def download_model_repo():
102
  snapshot_download(repo_id=model_id, local_dir=model_dir, revision=revision)
103
  print("Model repository downloaded successfully.")
104
 
 
 
 
105
  # Convert the downloaded model to GGUF F16 format and generate imatrix.dat
106
- convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir)
107
 
108
  # Convert the downloaded model to GGUF F16 format
109
- def convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir):
110
  convert_script = os.path.join(base_dir, "llama.cpp", "convert.py")
111
  gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
112
  gguf_model_path = os.path.join(gguf_dir, f"{model_name}-F16.gguf")
@@ -126,21 +132,21 @@ def convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir)
126
  else:
127
  print(f"Original model directory '{model_dir}' was not deleted. You can remove it manually.")
128
 
129
- # Check if imatrix.dat exists within gguf_dir
130
- imatrix_exe = os.path.join(base_dir, "bin", "imatrix.exe")
131
- imatrix_output = os.path.join(gguf_dir, "imatrix.dat")
132
- imatrix_txt = os.path.join(base_dir, "imatrix", "imatrix.txt")
133
- if not os.path.exists(imatrix_output):
134
- # Execute the imatrix command
135
- subprocess.run([imatrix_exe, "-m", gguf_model_path, "-f", imatrix_txt, "-ngl", "13"], cwd=gguf_dir)
136
- # Move the imatrix.dat file to the GGUF folder
 
137
  shutil.move(os.path.join(gguf_dir, "imatrix.dat"), gguf_dir)
138
  print("imatrix.dat generated successfully.")
139
  else:
140
- print("Skipping imatrix generation as imatrix.dat already exists.")
141
-
142
  else:
143
- print("Skipping model conversion as F16 file already exists.")
144
 
145
  # Quantize the models
146
  quantize_models(base_dir, model_name)
@@ -177,4 +183,4 @@ def main():
177
  print("Finished preparing resources.")
178
 
179
  if __name__ == "__main__":
180
- main()
 
89
  # If the model already exists, prompt the user if they want to delete the model directory
90
  delete_model_dir = input("Remove HF model folder after converting original model to GGUF? (yes/no) (default: no): ").strip().lower()
91
 
92
+ # Ask for the name of the imatrix.txt file
93
+ imatrix_file_name = input("Enter the name of the imatrix.txt file (default: imatrix.txt): ").strip() or "imatrix.txt"
94
+
95
  # Convert the existing model to GGUF F16 format and generate imatrix.dat
96
+ convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name)
97
 
98
  else:
99
  revision = input("Enter the revision (branch, tag, or commit) to download (default: main): ") or "main"
 
105
  snapshot_download(repo_id=model_id, local_dir=model_dir, revision=revision)
106
  print("Model repository downloaded successfully.")
107
 
108
+ # Ask for the name of the imatrix.txt file
109
+ imatrix_file_name = input("Enter the name of the imatrix.txt file (default: imatrix.txt): ").strip() or "imatrix.txt"
110
+
111
  # Convert the downloaded model to GGUF F16 format and generate imatrix.dat
112
+ convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name)
113
 
114
  # Convert the downloaded model to GGUF F16 format
115
+ def convert_model_to_gguf_f16(base_dir, model_dir, model_name, delete_model_dir, imatrix_file_name):
116
  convert_script = os.path.join(base_dir, "llama.cpp", "convert.py")
117
  gguf_dir = os.path.join(base_dir, "models", f"{model_name}-GGUF")
118
  gguf_model_path = os.path.join(gguf_dir, f"{model_name}-F16.gguf")
 
132
  else:
133
  print(f"Original model directory '{model_dir}' was not deleted. You can remove it manually.")
134
 
135
+ # Generate imatrix.dat if it doesn't exist
136
+ imatrix_exe = os.path.join(base_dir, "bin", "imatrix.exe")
137
+ imatrix_output = os.path.join(gguf_dir, "imatrix.dat")
138
+ imatrix_txt = os.path.join(base_dir, "imatrix", imatrix_file_name)
139
+ if not os.path.exists(imatrix_output):
140
+ # Execute the imatrix command
141
+ subprocess.run([imatrix_exe, "-m", gguf_model_path, "-f", imatrix_txt, "-ngl", "13"], cwd=gguf_dir)
142
+ # Move the imatrix.dat file to the GGUF folder
143
+ if os.path.exists(os.path.join(gguf_dir, "imatrix.dat")):
144
  shutil.move(os.path.join(gguf_dir, "imatrix.dat"), gguf_dir)
145
  print("imatrix.dat generated successfully.")
146
  else:
147
+ print("Failed to generate imatrix.dat file.")
 
148
  else:
149
+ print("Skipping imatrix generation as imatrix.dat already exists.")
150
 
151
  # Quantize the models
152
  quantize_models(base_dir, model_name)
 
183
  print("Finished preparing resources.")
184
 
185
  if __name__ == "__main__":
186
+ main()