Update app.py (#23)
Commit 78e33cb597be278183676af9a169ffe7b3d1432c
app.py CHANGED
@@ -108,8 +108,6 @@ iface = gr.Interface(
             lines=1,
             label="Hub Model ID",
             info="Model repo ID",
-            placeholder="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-            value="TinyLlama/TinyLlama-1.1B-Chat-v1.0"
         ),
         gr.Dropdown(
             ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
@@ -129,7 +127,7 @@ iface = gr.Interface(
         gr.Markdown(label="output"),
         gr.Image(show_label=False),
     ],
-    title="Create your own GGUF Quants
+    title="Create your own GGUF Quants, blazingly fast ⚡!",
     description="Create GGUF quants from any Hugging Face repository! You need to specify a write token obtained in https://hf.co/settings/tokens.",
     article="<p>Find your write token at <a href='https://huggingface.co/settings/tokens' target='_blank'>token settings</a></p>",
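For context, here is a minimal sketch of how the affected gr.Interface block reads after this commit: the Hub Model ID textbox no longer pre-fills a TinyLlama default, and the title uses the "blazingly fast ⚡" wording. Only the lines visible in the hunks above come from app.py; the function name process_model, the HF token textbox, the Dropdown label, and the launch() call are assumptions added to make the sketch self-contained, not part of the actual file.

import gradio as gr

def process_model(model_id, quant_method, hf_token):
    # Stand-in for the real quantization logic in app.py (assumed signature).
    return f"Would quantize {model_id} with {quant_method}", None

iface = gr.Interface(
    fn=process_model,
    inputs=[
        gr.Textbox(
            lines=1,
            label="Hub Model ID",
            info="Model repo ID",
            # placeholder= and value= defaults removed by this commit
        ),
        gr.Dropdown(
            ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M",
             "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
            label="Quantization Method",  # label assumed; not shown in the hunk
        ),
        gr.Textbox(type="password", label="HF Write Token"),  # assumed input
    ],
    outputs=[
        gr.Markdown(label="output"),
        gr.Image(show_label=False),
    ],
    title="Create your own GGUF Quants, blazingly fast ⚡!",
    description="Create GGUF quants from any Hugging Face repository! You need to specify a write token obtained in https://hf.co/settings/tokens.",
    article="<p>Find your write token at <a href='https://huggingface.co/settings/tokens' target='_blank'>token settings</a></p>",
)

iface.launch()

With the defaults gone, the textbox starts empty, so users must type a repo ID explicitly instead of accidentally quantizing the TinyLlama example model.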