Omnibus committed on
Commit
0c404dc
1 Parent(s): f29cff9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -40
app.py CHANGED
@@ -52,45 +52,45 @@ def format_prompt(message, history):
52
  print(prompt)
53
  return prompt
54
 
55
- def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p):
56
- #token max=8192
57
- client=clients[int(client_choice)-1]
58
- if not history:
59
- history = []
60
- hist_len=0
61
- if history:
62
- hist_len=len(history)
63
- print(hist_len)
64
- in_len=len(system_prompt+prompt)+hist_len
65
- print("\n#########"+in_len)
66
- if (in_len+tokens) > 8000:
67
- yield [(prompt,"Wait. I need to compress our Chat history...")]
68
- history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)
69
- yield [(prompt,"History has been compressed, processing request...")]
70
 
71
- generate_kwargs = dict(
72
- temperature=temp,
73
- max_new_tokens=tokens,
74
- top_p=top_p,
75
- repetition_penalty=rep_p,
76
- do_sample=True,
77
- seed=seed,
78
- )
79
- #formatted_prompt=prompt
80
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
81
-
82
-
83
-
84
-
85
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
86
- output = ""
87
 
88
- for response in stream:
89
- output += response.token.text
90
- yield [(prompt,output)]
91
- history.append((prompt,output))
92
- yield history
93
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  def clear_fn():
95
  return None,None,None
96
  rand_val=random.randint(1,1111111111111111)
@@ -120,7 +120,7 @@ with gr.Blocks() as app:
120
  with gr.Group():
121
  stop_btn=gr.Button("Stop")
122
  clear_btn=gr.Button("Clear")
123
- client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],multiselect=True,interactive=True)
124
 
125
  with gr.Column(scale=1):
126
  with gr.Group():
@@ -142,12 +142,20 @@ with gr.Blocks() as app:
142
  wait_time=gr.Number(label="Wait Time",value=3000)
143
  theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
144
  chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
145
-
 
 
 
146
  client_choice.change(load_models,client_choice,[chat_a,chat_b,chat_c,chat_d])
147
 
148
  #im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
149
  #chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p],chat_b)
150
- #go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p],chat_b)
 
 
 
 
 
151
  #stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
152
  #clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b])
153
  app.queue(default_concurrency_limit=10).launch()
 
52
  print(prompt)
53
  return prompt
54
 
55
def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
    """Stream a chat response from the model slot selected by `hid_val`.

    One of four parallel event handlers (hid1..hid4); only runs when the
    user has selected at least `hid_val` models in the dropdown.

    Args:
        system_prompt: system instruction prepended to the user prompt.
        prompt: the user's message.
        history: list of (user, bot) tuples, or falsy for a fresh chat.
        client_choice: list of selected model indices from the dropdown.
        seed, temp, tokens, top_p, rep_p: generation parameters.
        hid_val: 1-based slot number this handler serves.

    Yields:
        Partial chatbot updates as [(prompt, partial_output)], then the
        full updated history; yields None when this slot is unused.
    """
    if len(client_choice) >= hid_val:
        # token max=8192
        client = client_z[int(hid_val) - 1]
        if not history:
            history = []
            hist_len = 0
        if history:
            # NOTE(review): hist_len counts turns, not characters — the
            # 8000-token budget check below is therefore very approximate.
            hist_len = len(history)
            print(hist_len)
        in_len = len(system_prompt + prompt) + hist_len
        # BUG FIX: original was `print("\n#########"+in_len)` — str + int
        # concatenation raises TypeError; format the number instead.
        print(f"\n#########{in_len}")
        if (in_len + tokens) > 8000:
            yield [(prompt, "Wait. I need to compress our Chat history...")]
            #history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)
            yield [(prompt, "History has been compressed, processing request...")]

        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=tokens,
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=seed,
        )
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
        # Stream tokens so the UI updates incrementally.
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
        for response in stream:
            output += response.token.text
            yield [(prompt, output)]
        history.append((prompt, output))
        yield history
    else:
        # Slot not in use for the current model selection.
        yield None
94
def clear_fn():
    """Reset the input box, system-prompt box, and chat display to empty."""
    cleared = (None, None, None)
    return cleared
96
  rand_val=random.randint(1,1111111111111111)
 
120
  with gr.Group():
121
  stop_btn=gr.Button("Stop")
122
  clear_btn=gr.Button("Clear")
123
+ client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],max_choices=4,multiselect=True,interactive=True)
124
 
125
  with gr.Column(scale=1):
126
  with gr.Group():
 
142
  wait_time=gr.Number(label="Wait Time",value=3000)
143
  theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
144
  chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
145
+ hid1=gr.Number(value=1)
146
+ hid2=gr.Number(value=2)
147
+ hid3=gr.Number(value=3)
148
+ hid4=gr.Number(value=4)
149
  client_choice.change(load_models,client_choice,[chat_a,chat_b,chat_c,chat_d])
150
 
151
  #im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
152
  #chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p],chat_b)
153
+
154
+ go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid1],chat_b)
155
+ go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid2],chat_b)
156
+ go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid3],chat_b)
157
+ go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid4],chat_b)
158
+
159
  #stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
160
  #clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b])
161
  app.queue(default_concurrency_limit=10).launch()