Nekochu committed
Commit: 778d736
Parent: ee4c03a

Update tabbed.py

Files changed (1):
  tabbed.py (+3, -3)
tabbed.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import yaml
-import spaces
 from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import LocalEntryNotFoundError
 from llama_cpp import Llama
@@ -25,13 +24,14 @@ while True:
 
 llm = Llama(model_path=fp, **config["llama_cpp"])
 
+
 def user(message, history):
     history = history or []
     # Append the user's message to the conversation history
     history.append([message, ""])
     return "", history
 
-## @spaces.GPU
+
 def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     history = history or []
 
@@ -59,7 +59,7 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_
         # stream the response
         yield history, history
 
-## @spaces.GPU
+
 def rp_chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     history = history or []
 
 
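The user/chat pair follows Gradio's usual two-step streaming pattern: user() records the turn and clears the textbox, then chat() yields the growing history back into the UI. An illustrative wiring of the functions above, assuming component names and defaults that may differ from the ones actually used in tabbed.py:

import gradio as gr

# Hypothetical layout; the real tabbed.py components and defaults may differ.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    msg = gr.Textbox(label="Message")
    system_message = gr.Textbox(label="System message")
    max_tokens = gr.Slider(16, 2048, value=256, step=1, label="max_tokens")
    temperature = gr.Slider(0.0, 2.0, value=0.7, label="temperature")
    top_p = gr.Slider(0.0, 1.0, value=0.95, label="top_p")
    top_k = gr.Slider(1, 100, value=40, step=1, label="top_k")
    repeat_penalty = gr.Slider(0.0, 2.0, value=1.1, label="repeat_penalty")

    # user() appends the new turn and clears the textbox; chat() then streams
    # the updated history into both the Chatbot and the State on every yield.
    msg.submit(user, [msg, state], [msg, state], queue=False).then(
        chat,
        [state, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty],
        [chatbot, state],
    )

demo.queue().launch()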