Spaces:
Build error
Build error
remove revision from yml
Browse files
fix for updated yml format
make sure to copy dict before popping things so we can reuse them later
copy hub config dict too
- config.yml +0 -1
- tabbed.py +3 -2
config.yml
CHANGED
@@ -2,7 +2,6 @@
|
|
2 |
hub:
|
3 |
repo_id: TheBloke/stable-vicuna-13B-GGML
|
4 |
filename: stable-vicuna-13B.ggml.q5_1.bin
|
5 |
-
# revision: v1.1.0
|
6 |
llama_cpp:
|
7 |
n_ctx: 2048
|
8 |
n_gpu_layers: 40 # llama 13b has 40 layers
|
|
|
2 |
hub:
|
3 |
repo_id: TheBloke/stable-vicuna-13B-GGML
|
4 |
filename: stable-vicuna-13B.ggml.q5_1.bin
|
|
|
5 |
llama_cpp:
|
6 |
n_ctx: 2048
|
7 |
n_gpu_layers: 40 # llama 13b has 40 layers
|
tabbed.py
CHANGED
@@ -8,7 +8,8 @@ with open("./config.yml", "r") as f:
|
|
8 |
config = yaml.load(f, Loader=yaml.Loader)
|
9 |
while True:
|
10 |
try:
|
11 |
-
|
|
|
12 |
repo_id = hub_config.pop("repo_id")
|
13 |
filename = hub_config.pop("filename")
|
14 |
fp = hf_hub_download(
|
@@ -90,7 +91,7 @@ with gr.Blocks() as demo:
|
|
90 |
with gr.Column():
|
91 |
gr.Markdown(f"""
|
92 |
### brought to you by OpenAccess AI Collective
|
93 |
-
- This is the [{config["
|
94 |
- This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
|
95 |
- This is running on a smaller, shared GPU, so it may take a few seconds to respond.
|
96 |
- [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
|
|
|
8 |
config = yaml.load(f, Loader=yaml.Loader)
|
9 |
while True:
|
10 |
try:
|
11 |
+
load_config = config.copy()
|
12 |
+
hub_config = load_config["hub"].copy()
|
13 |
repo_id = hub_config.pop("repo_id")
|
14 |
filename = hub_config.pop("filename")
|
15 |
fp = hf_hub_download(
|
|
|
91 |
with gr.Column():
|
92 |
gr.Markdown(f"""
|
93 |
### brought to you by OpenAccess AI Collective
|
94 |
+
- This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})
|
95 |
- This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
|
96 |
- This is running on a smaller, shared GPU, so it may take a few seconds to respond.
|
97 |
- [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
|