winglian commited on
Commit
a68164e
1 Parent(s): 776e6fd

update copy

Browse files
Files changed (2) hide show
  1. chat.py +2 -1
  2. instruct.py +3 -2
chat.py CHANGED
@@ -87,7 +87,8 @@ with gr.Blocks() as blocks:
87
  - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
88
  - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
89
  - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
90
- - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)")
 
91
  - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
92
  """)
93
  with gr.Column():
 
87
  - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
88
  - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
89
  - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
90
+ - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)
91
+ - You can use instruct or chatbot mode by updating the README.md to either `app_file: instruct.py` or `app_file: chat.py`
92
  - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
93
  """)
94
  with gr.Column():
instruct.py CHANGED
@@ -24,7 +24,8 @@ description = f"""
24
  - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
25
  - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
26
  - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
27
- - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)")
 
28
  - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
29
  """
30
 
@@ -32,6 +33,6 @@ gr.Interface(
32
  fn=generate_text,
33
  inputs=input_text,
34
  outputs=output_text,
35
- title="Llama Language Model",
36
  description=description,
37
  ).queue(max_size=16, concurrency_count=1).launch()
 
24
  - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs & VRAM.
25
  - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
26
  - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
27
+ - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)
28
+ - You can use instruct or chatbot mode by updating the README.md to either `app_file: instruct.py` or `app_file: chat.py`
29
  - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
30
  """
31
 
32
  gr.Interface(
33
  fn=generate_text,
34
  inputs=input_text,
35
  outputs=output_text,
36
+ title="GGML UI Demo",
37
  description=description,
38
  ).queue(max_size=16, concurrency_count=1).launch()