Spaces: Running on A10G
Download fixes (#127)

- Fix download pattern (3d7b10805e2b3ad0a3b9513ebc2fd32c067b2fd5)
- Place each download in a separate temp dir (09babdfe884712104f870a06a79e4d501e88c79e)

Co-authored-by: Pedro Cuenca <pcuenq@users.noreply.huggingface.co>

Files changed:
- app.py (+24 -26)
- downloads/.keep (+0 -0)
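The first commit fixes a classic Python pitfall: dl_pattern is a list of glob patterns, and dl_pattern += pattern with a string on the right extends the list with the string's individual characters rather than appending the pattern as a single element. Wrapping the string in a list appends it whole. A minimal sketch of the failure mode (the starting values are illustrative, not taken from app.py):

# Sketch of the bug fixed by "Fix download pattern"; values are illustrative.
dl_pattern = ["*.md", "*.json"]
pattern = "*.safetensors"

buggy = list(dl_pattern)
buggy += pattern          # list += str iterates the string character by character
print(buggy)              # ['*.md', '*.json', '*', '.', 's', 'a', 'f', 'e', ...]

fixed = list(dl_pattern)
fixed += [pattern]        # appends the whole pattern as one element
print(fixed)              # ['*.md', '*.json', '*.safetensors']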
app.py CHANGED
@@ -1,20 +1,16 @@
 import os
-import shutil
 import subprocess
 import signal
 os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 import gradio as gr
+import tempfile
 
-from huggingface_hub import HfApi
-from huggingface_hub import snapshot_download
-from huggingface_hub import whoami
-from huggingface_hub import ModelCard
-
+from huggingface_hub import HfApi, ModelCard, whoami
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
-
+from pathlib import Path
+from textwrap import dedent
 from apscheduler.schedulers.background import BackgroundScheduler
 
-from textwrap import dedent
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
@@ -110,21 +106,25 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
         else "*.bin"
     )
 
-    dl_pattern += pattern
-
-    api.snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
-    print("Model downloaded successfully!")
-    print(f"Current working directory: {os.getcwd()}")
-    print(f"Model directory contents: {os.listdir(model_name)}")
-
-    conversion_script = "convert_hf_to_gguf.py"
-    fp16_conversion = f"python llama.cpp/{conversion_script} {model_name} --outtype f16 --outfile {fp16}"
-    result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
-    print(result)
-    if result.returncode != 0:
-        raise Exception(f"Error converting to fp16: {result.stderr}")
-    print("Model converted to fp16 successfully!")
-    print(f"Converted model path: {fp16}")
+    dl_pattern += [pattern]
+
+    with tempfile.TemporaryDirectory(dir="downloads") as tmpdir:
+        # Keep the model name as the dirname so the model name metadata is populated correctly
+        local_dir = Path(tmpdir)/model_name
+        print(local_dir)
+        api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+        print("Model downloaded successfully!")
+        print(f"Current working directory: {os.getcwd()}")
+        print(f"Model directory contents: {os.listdir(local_dir)}")
+
+        conversion_script = "convert_hf_to_gguf.py"
+        fp16_conversion = f"python llama.cpp/{conversion_script} {local_dir} --outtype f16 --outfile {fp16}"
+        result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
+        print(result)
+        if result.returncode != 0:
+            raise Exception(f"Error converting to fp16: {result.stderr}")
+        print("Model converted to fp16 successfully!")
+        print(f"Converted model path: {fp16}")
 
     imatrix_path = "llama.cpp/imatrix.dat"
 
@@ -257,9 +257,7 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
         )
     except Exception as e:
         return (f"Error: {e}", "error.png")
-
-    shutil.rmtree(model_name, ignore_errors=True)
-    print("Folder cleaned up successfully!")
+
 
 css="""/* Custom CSS to allow scrolling */
 .gradio-container {overflow-y: auto;}
downloads/.keep
ADDED
File without changes
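downloads/.keep is an empty placeholder: Git does not track empty directories, so committing .keep guarantees that downloads/ exists in a fresh checkout of the Space. That matters because tempfile.TemporaryDirectory(dir="downloads") raises FileNotFoundError when its parent directory is missing, as this quick check shows (the directory name is illustrative, assumed not to exist):

import tempfile

try:
    with tempfile.TemporaryDirectory(dir="no-such-dir"):
        pass
except FileNotFoundError as e:
    print(f"Parent directory must exist first: {e}")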