Omnibus committed
Commit 20ed735
1 Parent(s): dd1d65b

Update app.py: add per-model prompt templates (Gemma, Mixtral, default) and route chat_inf through format_prompt_choose

Files changed (1):
  app.py +60 -8
app.py CHANGED
@@ -18,6 +18,50 @@ models=[
 ]
 
 
+def format_prompt_default(message, history, cust_p):
+    prompt = ""
+    if history:
+        #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
+        for user_prompt, bot_response in history:
+            prompt += f"{user_prompt}\n"
+            print(prompt)
+            prompt += f"{bot_response}\n"
+            print(prompt)
+    #prompt += f"{message}\n"
+    prompt+=cust_p.replace("USER_INPUT",message)
+    return prompt
+
+def format_prompt_gemma(message, history, cust_p):
+    prompt = ""
+    if history:
+        #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
+        for user_prompt, bot_response in history:
+            prompt += f"{user_prompt}\n"
+            print(prompt)
+            prompt += f"{bot_response}\n"
+            print(prompt)
+    #prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
+    prompt+=cust_p.replace("USER_INPUT",message)
+    return prompt
+
+
+def format_prompt_mixtral(message, history, cust_p):
+    prompt = "<s>"
+    if history:
+        for user_prompt, bot_response in history:
+            prompt += f"[INST] {user_prompt} [/INST]"
+            prompt += f" {bot_response}</s> "
+    #prompt += f"[INST] {message} [/INST]"
+    prompt+=cust_p.replace("USER_INPUT",message)
+    return prompt
+
+def format_prompt_choose(message, history, cust_p, model_name):
+    if "gemma" in model_name.lower():  # model_name arrives as the resolved model id string (chat_inf passes models[model_name])
+        return format_prompt_gemma(message, history, cust_p)
+    if "mixtral" in model_name.lower():
+        return format_prompt_mixtral(message, history, cust_p)
+    else:
+        return format_prompt_default(message, history, cust_p)
 
 def load_models(inp):
     print(type(inp))
@@ -25,7 +69,14 @@ def load_models(inp):
     print(models[inp])
     model_state= InferenceClient(models[inp])
     out_box=gr.update(label=models[inp])
-    return out_box, model_state
+    if "gemma" in models[inp].lower():
+        prompt_out="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
+    elif "mixtral" in models[inp].lower():  # elif, so the else branch cannot clobber the gemma template
+        prompt_out="[INST] USER_INPUT [/INST]"
+    else:
+        prompt_out="USER_INPUT\n"
+
+    return out_box,prompt_out, model_state
 
 VERBOSE=False
 
@@ -50,8 +101,9 @@ def format_prompt(message, history, cust_p):
     prompt+=cust_p.replace("USER_INPUT",message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
+def chat_inf(system_prompt,prompt,history,memory,model_state,model_name,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
     #token max=8192
+    model_n=models[model_name]
     print(model_state)
     hist_len=0
     client=model_state
@@ -79,9 +131,9 @@ def chat_inf(system_prompt,prompt,history,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
         seed=seed,
     )
     if system_prompt:
-        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:],cust_p)
+        formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", memory[0-chat_mem:],cust_p,model_n)
     else:
-        formatted_prompt = format_prompt(prompt, memory[0-chat_mem:],cust_p)
+        formatted_prompt = format_prompt_choose(prompt, memory[0-chat_mem:],cust_p,model_n)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
     for response in stream:
@@ -158,13 +210,13 @@ with gr.Blocks() as app:
     chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
 
 
-    client_choice.change(load_models,client_choice,[chat_b,model_state])
-    app.load(load_models,client_choice,[chat_b,model_state])
+    client_choice.change(load_models,client_choice,[chat_b,custom_prompt,model_state])
+    app.load(load_models,client_choice,[chat_b,custom_prompt,model_state])
 
     im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
 
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,model_state,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,model_state,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
 
     stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
     clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
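
For a quick sanity check outside Gradio, the template routing added in this commit can be exercised standalone. Below is a minimal sketch, assuming a hypothetical two-entry models list (the real list sits at the top of app.py); pick_template mirrors the if/elif/else chain in load_models, and the final replace() is the same USER_INPUT substitution the format_prompt_* helpers perform on cust_p:

    # Hypothetical stand-ins for the models list defined at the top of app.py
    models = [
        "google/gemma-7b-it",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
    ]

    def pick_template(model_id):
        # Mirrors the per-model USER_INPUT template selection in load_models
        if "gemma" in model_id.lower():
            return "<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
        elif "mixtral" in model_id.lower():
            return "[INST] USER_INPUT [/INST]"
        else:
            return "USER_INPUT\n"

    for m in models:
        cust_p = pick_template(m)
        # Same substitution format_prompt_default/_gemma/_mixtral apply
        print(m, "->", cust_p.replace("USER_INPUT", "How does the brain work?"))

For the Gemma entry this prints the <start_of_turn>/<end_of_turn> turn markers; for the Mixtral entry it prints the [INST] wrapper, matching the commented examples inside the formatter functions.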