winglian committed
Commit 54d2e53
1 Parent(s): dce6894

loading torch doesn't play nice, just use config for now

Files changed (3):
  1. config.yml +3 -0
  2. requirements.txt +0 -1
  3. tabbed.py +1 -14
config.yml CHANGED
@@ -9,3 +9,6 @@ chat:
   - "</s>"
   - "<unk>"
   - "### User:"
+queue:
+  max_size: 16
+  concurrency_count: 2
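
For context, the keys under queue map directly onto the keyword arguments of Gradio's Blocks.queue(), so tabbed.py can splat the section straight in. A minimal sketch of that pattern, assuming config.yml sits next to the script and eliding the UI wiring:

import gradio as gr
import yaml

# Assumed location: config.yml alongside the script, as in this repo.
with open("config.yml", "r") as f:
    config = yaml.safe_load(f)

with gr.Blocks() as demo:
    ...  # chat UI wiring elided

# Equivalent to demo.queue(max_size=16, concurrency_count=2) with the values above.
demo.queue(**config["queue"]).launch(server_name="0.0.0.0", server_port=7860)

Keeping the knobs in config.yml means queue pressure can be retuned per deployment without touching tabbed.py.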
requirements.txt CHANGED
@@ -1,7 +1,6 @@
 --extra-index-url https://pypi.ngc.nvidia.com
 nvidia-cuda-runtime
 nvidia-cublas
-torch
 llama-cpp-python @ https://github.com/OpenAccess-AI-Collective/ggml-webui/releases/download/v0.1.50-rc3/llama_cpp_python-gpu-0.1.50-cp38-cp38-linux_x86_64.whl
 pyyaml
 torch
tabbed.py CHANGED
@@ -1,7 +1,4 @@
-import math
-
 import gradio as gr
-import torch
 import yaml
 from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import LocalEntryNotFoundError
@@ -147,14 +144,4 @@ with gr.Blocks() as demo:
     )
     stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
 
-
-# figure out how much VRAM is available to see if we can increase concurrency
-concurrency_count = 1
-model_vram_size_in_gb = 11
-if torch.cuda.is_available():
-    device = torch.cuda.current_device()
-    total_memory = torch.cuda.get_device_properties(device).total_memory
-    total_memory_in_gb = total_memory / 1024**3
-    concurrency_count = int(math.floor(total_memory_in_gb / model_vram_size_in_gb))
-
-demo.queue(max_size=16, concurrency_count=1).launch(debug=True, server_name="0.0.0.0", server_port=7860)
+demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)
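
Should the VRAM-based autodetection come back later ("just use config for now" suggests it might), deferring the torch import would keep a misbehaving install from breaking startup. A sketch of that approach; the helper name, signature, and fallback behavior are assumptions, not part of this commit:

import math

def autodetect_concurrency(model_vram_size_in_gb: float = 11, default: int = 1) -> int:
    """Estimate how many model instances fit in total VRAM, falling back
    to a safe default when torch is absent or fails to import."""
    try:
        import torch  # deferred: a broken torch install no longer breaks startup
    except Exception:
        return default
    if not torch.cuda.is_available():
        return default
    device = torch.cuda.current_device()
    total_memory_in_gb = torch.cuda.get_device_properties(device).total_memory / 1024**3
    return max(default, math.floor(total_memory_in_gb / model_vram_size_in_gb))

The result could then override config["queue"]["concurrency_count"] before the demo.queue(...) call, restoring the old behavior without a module-level torch dependency.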