MrOvkill committed
Commit 68a609b
Parent: 7859444

Update app.py

Files changed (1): app.py (+158 −15)
app.py CHANGED
@@ -11,6 +11,10 @@ import os
 import base64
 from together import Together
 import pathlib
+import gradio_client as grc
+
+global shrd
+shrd = gr.JSON(visible=False)
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using {device}" if device != "cpu" else "Using CPU")
@@ -111,32 +115,41 @@ def simple_desc(img, prompt):
         'image_b64': base64.b64encode(bts).decode('utf-8'),
         'description': total,
     }
-    return total, res
+    cl = grc.Client("http://127.0.0.1:7860/")
+    result = cl.predict(
+        message="Here's the description of your latest image, repeat any relevant details to keep them in context. Here's the description:\n```text\n" + total + "\n```\n\nAnd what the user wanted to begin with: `" + prompt + "`.",
+        api_name="/chat"
+    )
+    print(result)
+    return total, res, {**res, 'chat': result}
 
 ifc_imgprompt2text = gr.Interface(simple_desc, inputs=[gr.Image(label="input", type="pil"), gr.Textbox(label="prompt")], outputs=[gr.Textbox(label="description"), gr.JSON(label="json")])
 
-def chat(inpt, mess):
+def chat(inpt, mess, desc):
     from together import Together
     print(inpt, mess)
     if mess is None:
         mess = []
 
     tog = Together(api_key=os.getenv("TOGETHER_KEY"))
-    messages = [
-        {
-            'role': 'system',
-            'content': SYSTEM_PROMPT
-        },
-        {
-            'role': 'user',
-            'content': inpt
-        }
-    ]
+    messages = [{
+        'role': 'system',
+        'content': SYSTEM_PROMPT
+    }]
+    if desc is not None and desc != "":
+        messages.append({
+            'role': 'system',
+            'content': 'Here is a description of what you can see at the moment:\n```text\n' + desc + '\n```\nKeep this in mind when answering User\'s questions.'
+        })
+    messages.append({
+        'role': 'user',
+        'content': inpt
+    })
     for cht in mess:
         print(cht)
     res = tog.chat.completions.create(
         messages=messages,
-        model="meta-llama/Llama-3-70b-chat-hf", stop=["<|eot_id|>"], stream=True)
+        model="meta-llama/Llama-3-70b-chat-hf", stop=["<|eot_id|>"], stream=True, safety_model="Meta-LLama/Llama-Guard-7b")
     txt = ""
     for pk in res:
         print(pk)
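In the hunk above, the loop `for cht in mess: print(cht)` only logs the Gradio history; the pairs are never appended to `messages`, so the model sees just the system prompt(s) and the latest user turn. If the history were meant to be included, a sketch along these lines would do it (assumes Gradio's `[user, assistant]` pair format; not part of this commit):

```python
# Fold ChatInterface history into the Together messages list.
for user_turn, assistant_turn in mess:
    messages.append({'role': 'user', 'content': user_turn})
    if assistant_turn:  # None while the current reply is still streaming
        messages.append({'role': 'assistant', 'content': assistant_turn})
```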
@@ -145,7 +158,9 @@ def chat(inpt, mess):
     yield txt #, json.dumps(messages)#mess#, json.dumps(messages)
 
 chatbot = gr.Chatbot(
-    [],
+    [
+        ["Hello?", "### Greetings\n\nWell, it seems I have a visitor! What can I do for you? &lt;3\n\n---"]
+    ],
     elem_id="chatbot",
     bubble_full_width=False,
     sanitize_html=False,
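`chat` streams by yielding the growing `txt` string, which is how `gr.ChatInterface` consumes generator callbacks. One caveat with the accumulation used here and in `wizard_chat` below: a streamed chunk's `delta.content` can be `None` (typically on the final chunk), which would raise a `TypeError` on `+=`. A guarded version of the same loop (a sketch, assuming the Together SDK's streaming chunk shape):

```python
txt = ""
for pk in res:
    delta = pk.choices[0].delta.content
    if delta:  # skip empty/None deltas at stream end
        txt += delta
        yield txt
```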
@@ -155,5 +170,133 @@ chatbot = gr.Chatbot(
         pathlib.Path("image2.jpeg")
     ])
 
-with gr.TabbedInterface([ifc_imgprompt2text, gr.ChatInterface(chat, chatbot=chatbot, submit_btn=gr.Button(scale=1))], ["Prompt & Image 2 Text", "Chat w/ Llama 3 70b & Moondream 2"]) as ifc:
-    ifc.launch(share=False, debug=True)
+wizard_chatbot = gr.Chatbot(
+    [
+        ["Hello?", "### Greetings\n\nWell, it seems I have a visitor! What can I do for you? &lt;3\n\n---"]
+    ],
+    elem_id="chatbot_wizard",
+    bubble_full_width=True,
+    sanitize_html=False,
+    show_copy_button=True,
+    avatar_images=[
+        pathlib.Path("image.png"),
+        pathlib.Path("image2.jpeg")
+    ]
+)
+
+def wizard_chat(inpt, mess):
+    from together import Together
+    print(inpt, mess)
+    if mess is None:
+        mess = []
+
+    tog = Together(api_key=os.getenv("TOGETHER_KEY"))
+    messages = []
+    messages.append({
+        'role': 'user',
+        'content': "English; Please reply in English. " + inpt
+    })
+    for cht in mess:
+        print(cht)
+    res = tog.chat.completions.create(
+        messages=messages,
+        model="microsoft/WizardLM-2-8x22B", stop=["</s>"], stream=True, safety_model="Meta-LLama/Llama-Guard-7b")
+    txt = ""
+    for pk in res:
+        print(pk)
+        txt += pk.choices[0].delta.content
+        #mess[-1][-2] += pk.choices[0].delta.content
+        yield txt #, json.dumps(messages)#mess#, json.dumps(messages)
+
+botroom = None
+
+def group_chat(room: str, wzn: str, lmn: str):
+    wzn = json.loads(wzn)
+    lmn = json.loads(lmn)
+    print(wzn, lmn)
+    if not "replace_token" in wzn:
+        wzn["replace_token"] = "<|wizard|>"
+    if not "replace_token" in lmn:
+        lmn["replace_token"] = "</Llama>"
+    while room.find(lmn['replace_token']) != -1 or room.find(wzn['replace_token']) != -1:
+        if not "prompt" in wzn and room.find(wzn['replace_token']) != -1:
+            wzn["prompt"] = room[0:room.find(wzn['replace_token'])]
+        if not "prompt" in lmn and room.find(lmn['replace_token']) != -1:
+            lmn["prompt"] = room[0:room.find(lmn['replace_token'])]
+        print(wzn, lmn)
+        if "prompt" in wzn:
+            print(wzn)
+            res = wizard_chat(wzn['prompt'], [])
+            tx = ""
+            for r in res:
+                yield room + r
+                tx = r
+            return room + tx
+# Let's make a more generic model-merge with a shadow config that has
+# basic sane defaults for any model:
+#   top_k 42
+#   top_p 0.842
+#   max_tokens 1536
+#   temperature 0.693
+
+shadow_config = {
+    "top_k": 42,
+    "top_p": 0.842,
+    "max_tokens": 1536,
+    "temperature": 0.693,
+    "repetition_penalty": 1.12
+}
+
+# models = {
+# }
+
+arch_room = None
+
+def wizard_complete(cdd, wzs):
+    tog = Together(api_key=os.getenv("TOGETHER_KEY"))
+    if wzs.startswith("root="):
+        wzs = wzs[5:]
+    wzs = json.loads(wzs)
+    print(wzs)
+    if not "stop" in wzs:
+        wzs["stop"] = ['###', '\n\n\n', '<|im_end|>', '<|im_start|>']
+    if not "model" in wzs:
+        wzs["model"] = "WizardLM/WizardCoder-Python-34B-V1.0"
+    if not "prompt" in wzs:
+        wzs["prompt"] = cdd
+    res = tog.completions.create(prompt=wzs["prompt"], model=wzs["model"], stop=wzs["stop"], max_tokens=1024, stream=False)
+    txt = cdd + res.choices[0].text
+    return txt, txt
+
+with gr.Blocks() as arch_room:
+    with gr.Row():
+        gr.Markdown(f"""
+## Arcanistry
+
+*POOF* -- You walk in, to a cloudy room filled with heavy smoke. In the center of the room rests a waist-height table. Upon the table, you see a... You don't understand... It's dark and light and cold and warm but... As you extend your hand, you hear the voice travel up your arm and into your ears...
+
+---
+""")
+    with gr.Row():
+        cdd = gr.Code("""### Human
+I require a Python script that serves a simple file server in Python over MongoDB.
+
+### Wizard
+Sure! Here's the script:
+```python""", language="markdown")
+    with gr.Row():
+        wzs = gr.Code(json.dumps({
+            'token': '<|wizard|>',
+            'model': 'WizardLM/WizardCoder-Python-34B-V1.0',
+            'stop': ['###', '\n\n\n', '<|im_end|>', '<|im_start|>']
+        }))
+    with gr.Row():
+        rnd = gr.Markdown("")
+    with gr.Row():
+        subm_prompt = gr.Button("Run Prompt")
+    subm_prompt.click(wizard_complete, inputs=[cdd, wzs], outputs=[cdd, rnd])
+
+with gr.TabbedInterface([ifc_imgprompt2text, c_ifc := gr.ChatInterface(chat, chatbot=chatbot, submit_btn=gr.Button(scale=1)), gr.ChatInterface(wizard_chat), arch_room], ["Prompt & Image 2 Text", "Chat w/ Llama 3 70b", "Chat w/ WizardLM 8x22B", "Arcanistry"]) as ifc:
+    shrd = gr.JSON(visible=False)
+    ifc.launch(share=False, debug=True, show_error=True)
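The reworked `simple_desc` forwards each image description to the Space's own chat endpoint through `gradio_client`. A standalone sketch of that loopback call (assumes the app is reachable on localhost:7860 and that the `ChatInterface` is exposed under the `/chat` API name; the message text is illustrative):

```python
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
reply = client.predict(
    message="Here's the description of your latest image: ...",
    api_name="/chat",
)
print(reply)
```

Note this makes an HTTP round-trip from the app to itself, so the endpoint must already be up and listening when `simple_desc` runs.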