Omnibus committed
Commit 5692092 (parent: 6b9f2b7)

Update app.py

Files changed (1): app.py (+86 -35)
app.py CHANGED
@@ -51,29 +51,16 @@ def format_prompt(message, history):
    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
    print(prompt)
    return prompt
-mega_hist=[[],[],[],[]]


-def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
+mega_hist=[[],[],[],[]]
+def chat_inf_tree(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
    if len(client_choice)>=hid_val:
-
-        #token max=8192
        client=client_z[int(hid_val)-1]
-
        if history:
            mega_hist[hid_val-1]=history
            #history = []
        hist_len=0
-        #if mega_hist[hid_val-1]:
-        #    hist_len=len(mega_hist[hid_val-1])
-        #    print(hist_len)
-        #in_len=len(system_prompt+prompt)+hist_len
-        #print("\n#########"+str(in_len))
-        #if (in_len+tokens) > 8000:
-        #    yield [(prompt,"Wait. I need to compress our Chat history...")]
-        #    #history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)
-        #    yield [(prompt,"History has been compressed, processing request...")]
-
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=tokens,
@@ -86,7 +73,6 @@ def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", mega_hist[hid_val-1])
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
-
        for response in stream:
            output += response.token.text
            yield [(prompt,output)]
@@ -98,25 +84,65 @@ def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):



-def chat_inf_og(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
+def chat_inf_a(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
+    if len(client_choice)>=hid_val:
+        client=client_z[int(hid_val)-1]
+        if not history:
+            history = []
+        hist_len=0
+        generate_kwargs = dict(
+            temperature=temp,
+            max_new_tokens=tokens,
+            top_p=top_p,
+            repetition_penalty=rep_p,
+            do_sample=True,
+            seed=seed,
+        )
+        #formatted_prompt=prompt
+        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+        output = ""
+        for response in stream:
+            output += response.token.text
+            yield [(prompt,output)]
+        history.append((prompt,output))
+        yield history
+    else:
+        yield None
+
+
+def chat_inf_b(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
+    if len(client_choice)>=hid_val:
+        client=client_z[int(hid_val)-1]
+        if not history:
+            history = []
+        hist_len=0
+        generate_kwargs = dict(
+            temperature=temp,
+            max_new_tokens=tokens,
+            top_p=top_p,
+            repetition_penalty=rep_p,
+            do_sample=True,
+            seed=seed,
+        )
+        #formatted_prompt=prompt
+        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+        output = ""
+        for response in stream:
+            output += response.token.text
+            yield [(prompt,output)]
+        history.append((prompt,output))
+        yield history
+    else:
+        yield None
+
+def chat_inf_c(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
    if len(client_choice)>=hid_val:
-
-        #token max=8192
        client=client_z[int(hid_val)-1]
-
        if not history:
            history = []
        hist_len=0
-        if history:
-            hist_len=len(history)
-            print(hist_len)
-        in_len=len(system_prompt+prompt)+hist_len
-        print("\n#########"+str(in_len))
-        if (in_len+tokens) > 8000:
-            yield [(prompt,"Wait. I need to compress our Chat history...")]
-            #history=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p)
-            yield [(prompt,"History has been compressed, processing request...")]
-
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=tokens,
@@ -129,7 +155,6 @@ def chat_inf_og(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
-
        for response in stream:
            output += response.token.text
            yield [(prompt,output)]
@@ -137,6 +162,32 @@ def chat_inf_og(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
        yield history
    else:
        yield None
+
+def chat_inf_d(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
+    if len(client_choice)>=hid_val:
+        client=client_z[int(hid_val)-1]
+        if not history:
+            history = []
+        hist_len=0
+        generate_kwargs = dict(
+            temperature=temp,
+            max_new_tokens=tokens,
+            top_p=top_p,
+            repetition_penalty=rep_p,
+            do_sample=True,
+            seed=seed,
+        )
+        #formatted_prompt=prompt
+        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+        output = ""
+        for response in stream:
+            output += response.token.text
+            yield [(prompt,output)]
+        history.append((prompt,output))
+        yield history
+    else:
+        yield None
def clear_fn():
    return None,None,None
rand_val=random.randint(1,1111111111111111)
@@ -197,10 +248,10 @@ with gr.Blocks() as app:
    #im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
    #chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p],chat_b)

-    go1=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid1],chat_a,batch=True, max_batch_size=16)
-    go2=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid2],chat_b,batch=True, max_batch_size=16)
-    go3=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid3],chat_c,batch=True, max_batch_size=16)
-    go4=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid4],chat_d,batch=True, max_batch_size=16)
+    go1=btn.click(check_rand,[rand,seed],seed).then(chat_inf_a,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid1],chat_a)
+    go2=btn.click(check_rand,[rand,seed],seed).then(chat_inf_b,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid2],chat_b)
+    go3=btn.click(check_rand,[rand,seed],seed).then(chat_inf_c,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid3],chat_c)
+    go4=btn.click(check_rand,[rand,seed],seed).then(chat_inf_d,[sys_inp,inp,chat_b,client_choice,seed,temp,tokens,top_p,rep_p,hid4],chat_d)

    stop_btn.click(None,None,None,cancels=[go1,go2,go3,go4])
    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_a,chat_b,chat_c,chat_d])
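
Note on the resulting structure: chat_inf_a through chat_inf_d are line-for-line identical; only the click event that registers them and the Chatbot pane they update differ. Below is a minimal sketch, not part of the commit, of how the same four callbacks could come from one factory. DummyClient and the simplified format_prompt are hypothetical stand-ins so the sketch runs on its own; the real client_z list and format_prompt live elsewhere in app.py.

class DummyClient:
    """Hypothetical stand-in for one entry of the real client_z list."""
    def text_generation(self, prompt, stream=True, details=True,
                        return_full_text=False, **generate_kwargs):
        # Streams one object per "token"; response.token.text mirrors
        # the shape the loops in the diff expect.
        class Response:
            def __init__(self, text):
                self.token = type("Token", (), {"text": text})()
        for piece in ("Echo: ", prompt):
            yield Response(piece)

client_z = [DummyClient() for _ in range(4)]  # assumed: one client per pane

def format_prompt(message, history):
    # Simplified stand-in for the real format_prompt defined above.
    return message

def make_chat_inf():
    """Factory returning one chat_inf_* callback; all four are identical."""
    def chat_inf(system_prompt, prompt, history, client_choice, seed, temp,
                 tokens, top_p, rep_p, hid_val):
        if len(client_choice) >= hid_val:
            client = client_z[int(hid_val) - 1]
            history = history or []
            generate_kwargs = dict(temperature=temp, max_new_tokens=tokens,
                                   top_p=top_p, repetition_penalty=rep_p,
                                   do_sample=True, seed=seed)
            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
            stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                            stream=True, details=True,
                                            return_full_text=False)
            output = ""
            for response in stream:
                output += response.token.text
                yield [(prompt, output)]
            history.append((prompt, output))
            yield history
        else:
            yield None
    return chat_inf

chat_inf_a, chat_inf_b, chat_inf_c, chat_inf_d = [make_chat_inf() for _ in range(4)]

if __name__ == "__main__":
    for update in chat_inf_a("sys", "hello", None, ["model-1"], 1, 0.9, 64, 0.9, 1.0, 1):
        print(update)

Since cancels=[go1,go2,go3,go4] cancels the registered events rather than the functions, a single shared callback should work just as well as four copies; the factory here is only a deduplication sketch.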