Omnibus committed
Commit: cd7ba0f
Parent: fb1320e

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED
@@ -17,7 +17,7 @@ InferenceClient(models[2]),
     InferenceClient(models[3]),
     ]
 
-VERBOSE=True
+VERBOSE=False
 
 def format_prompt(message, history):
     prompt = ""
@@ -59,6 +59,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
         seed=seed,
     )
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:])
+    #print("\n######### PROMPT "+str(len(formatted_prompt)))
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
@@ -70,7 +71,6 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
     if VERBOSE==True:
         print("\n######### HIST "+str(in_len))
         print("\n######### TOKENS "+str(tokens))
-        print("\n######### PROMPT "+str(len(formatted_prompt)))
 
 def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
     print(chatblock)
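
A note on the slice in the second hunk: 0-chat_mem evaluates to -chat_mem, so memory[0-chat_mem:] keeps only the last chat_mem exchanges of the conversation before the prompt is rebuilt. The sketch below illustrates that trimming together with the now-disabled VERBOSE logging; the body of format_prompt is a hypothetical stand-in based only on the two context lines visible in the diff, not the full app.py.

# Sketch of the trim-and-format step from chat_inf. Everything past the
# first two lines of format_prompt is assumed, not taken from app.py.
VERBOSE = False  # the commit flips this from True

def format_prompt(message, history):
    prompt = ""
    # hypothetical turn template; the real one lives in app.py
    for user_msg, bot_reply in history:
        prompt += f"[INST] {user_msg} [/INST] {bot_reply} "
    prompt += f"[INST] {message} [/INST]"
    return prompt

memory = [("hi", "hello"), ("2+2?", "4"), ("capital of France?", "Paris")]
chat_mem = 2

# 0-chat_mem == -chat_mem: the slice keeps the last two (user, bot) pairs
formatted_prompt = format_prompt("thanks!", memory[0-chat_mem:])

# The prompt-length print is commented out of the hot path by this commit,
# and the remaining diagnostics stay gated behind VERBOSE, now False.
if VERBOSE:
    print("\n######### PROMPT " + str(len(formatted_prompt)))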