alexkueck committed
Commit 6d807e8
1 Parent(s): a447bbc

Update app.py

Files changed (1)
  1. app.py +57 -36
app.py CHANGED
@@ -126,9 +126,16 @@ os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
 
 ##############################################
 #History - die Frage oder das File eintragen...
-def add_text(history, prompt):
-    history = history + [(prompt, None)]
-    return history, prompt, "" #gr.Textbox(value="", interactive=False)
+def add_text(history, prompt, file):
+    if (file == None):
+        history = history + [(prompt, None)]
+    else:
+        if (prompt == ""):
+            history = history + [((file.name,), "Prompt fehlt!")]
+        else:
+            history = history + [((file.name,), None), (prompt, None)]
+
+    return history, prompt, gr.Textbox(value="", interactive=False)
 
 def add_file(history, file, prompt):
     if (prompt == ""):
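As a quick orientation for this hunk, here is an illustrative sketch (not part of the commit) of the tuple-based chat history that the reworked add_text builds and that the later streaming code fills in; the example prompts are made up.

```python
# Illustrative only: how the tuple history produced by add_text maps onto a
# gr.Chatbot. Each pair is (user_turn, bot_turn); None leaves the bot slot
# empty so invoke() can stream into history[-1][1] later. A ((path,),) tuple
# makes Gradio render the uploaded file as media instead of text.
history = []
history = history + [("Was ist RAG?", None)]      # text question, answer pending
history = history + [(("bild.png",), None)]        # uploaded file shown as media

# later, the streaming loop fills the empty bot slot character by character:
history[-1] = list(history[-1])
history[-1][1] = ""
for character in "Eine Antwort ...":
    history[-1][1] += character
```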
@@ -137,7 +144,7 @@ def add_file(history, file, prompt):
         history = history + [((file.name,), None), (prompt, None)]
     return history, prompt, ""
 
-def create_picture(history, prompt):
+def create_picture_backup(history, prompt):
     client = OpenAI()
     response = client.images.generate(model="dall-e-3", prompt=prompt,size="1024x1024",quality="standard",n=1,)
     image_url = response.data[0].url
@@ -325,6 +332,22 @@ def chatbot_response(messages):
     print("Bild.............................")
     return responses
 
+def create_picture(history, prompt):
+    client = OpenAI()
+    response = client.images.generate(model="dall-e-3", prompt=prompt,size="1024x1024",quality="standard",n=1,)
+    image_url = response.data[0].url
+    return image_url
+
+# prompt describing the desired image
+#text = "batman art in red and blue color"
+# calling the custom function "generate"
+# saving the output in "url1"
+#url1 = create_picture(text)
+# using requests library to get the image in bytes
+#response = requests.get(url1)
+# using the Image module from PIL library to view the image
+#Image.open(response.raw)
+
 
 def invoke (prompt, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
     global splittet
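The commented-out usage notes added in this hunk can be read as the following self-contained sketch; it assumes OPENAI_API_KEY is set in the environment and uses io.BytesIO instead of response.raw so the download works without streaming.

```python
# Runnable sketch of the create_picture flow added above; illustrative only.
from io import BytesIO

import requests
from openai import OpenAI
from PIL import Image

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.images.generate(
    model="dall-e-3",
    prompt="batman art in red and blue color",
    size="1024x1024",
    quality="standard",
    n=1,
)
image_url = response.data[0].url              # DALL-E 3 returns a temporary URL
image_bytes = requests.get(image_url)         # download the generated picture
Image.open(BytesIO(image_bytes.content)).show()  # view it with PIL
```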
@@ -334,12 +357,11 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, k=3, top_
         history_text_und_prompt = generate_prompt_with_history(prompt, history)
     else:
         history_file_und_prompt = generate_prompt_with_history(prompt, history)
+
     #history für HuggingFace Models formatieren
     #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
-
     #history für openAi formatieren
     #history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
-
     #history für Langchain formatieren
     #history_text_und_prompt = generate_prompt_with_history_langchain(prompt, history)
 
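generate_prompt_with_history itself is not part of this diff; the following is only a hypothetical stand-in with the same call shape, to make the surrounding hunk easier to follow.

```python
# Hypothetical stand-in for generate_prompt_with_history (not in this commit):
# flatten the (user, bot) history pairs plus the new prompt into one string.
def generate_prompt_with_history_sketch(prompt, history, max_turns=10):
    lines = []
    for user_turn, bot_turn in history[-max_turns:]:
        lines.append(f"User: {user_turn}")
        if bot_turn:
            lines.append(f"Assistant: {bot_turn}")
    lines.append(f"User: {prompt}")
    lines.append("Assistant:")
    return "\n".join(lines)
```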
 
@@ -357,10 +379,10 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, k=3, top_
         ###########################
         if (model_option == "OpenAI"):
             #Anfrage an OpenAI ----------------------------
-            if (file == None):
+            if (prompt.find('Bild zeichnen')):
+                llm = ChatOpenAI(model_name = MODEL_NAME_OAI_ZEICHNEN, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
+            else:
                 llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
-            else: MODEL_NAME_OAI_ZEICHNEN
-                llm = ChatOpenAI(model_name = MODEL_NAME_OAI_ZEICHNEN, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
 
             print("openAI")
         else:
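'Bild zeichnen' is German for "draw a picture"; the branch above is meant to select the drawing model when that phrase appears in the prompt. Worth keeping in mind when reading the condition: str.find returns an index rather than a boolean, so the truthiness of this check can be surprising, as the small illustration below shows.

```python
# str.find returns -1 (truthy) when the substring is absent and 0 (falsy)
# when it sits at the very start of the string, so `if prompt.find('Bild zeichnen'):`
# is True for most prompts that do NOT start with the phrase.
prompt = "Bitte ein Bild zeichnen von einem Hund"
print(prompt.find('Bild zeichnen'))               # 10 -> truthy
print("Erkläre mir RAG".find('Bild zeichnen'))    # -1 -> also truthy
# an explicit membership test avoids the ambiguity:
print('Bild zeichnen' in prompt)                  # True
```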
@@ -385,34 +407,32 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, k=3, top_
             db = document_retrieval_mongodb(llm, history_text_und_prompt)
             result = rag_chain(llm, history_text_und_prompt, db)
         else:
-            print("LLM aufrufen via HF: ...........")
+            print("LLM aufrufen ohne RAG: ...........")
             result = llm_chain(llm, history_text_und_prompt)
             print(result)
 
     except Exception as e:
         raise gr.Error(e)
 
-    """
-    #Antwort als Stream ausgeben...
-    for i in range(len(result)):
-        time.sleep(0.05)
-        yield result[: i+1]
-    """
-
-    #Antwort als Stream ausgeben...
-    history[-1][1] = ""
-    for character in result:
-        history[-1][1] += character
-        time.sleep(0.03)
-        yield history, "Generating"
-        if shared_state.interrupted:
-            shared_state.recover()
-            try:
-                yield history, "Stop: Success"
-                return
-            except:
-                pass
-
+
+    if (prompt.find('Bild zeichnen')):
+        history[-1][1] = result
+        return history, "Stop: Success"
+    else:
+        #Antwort als Stream ausgeben... wenn Textantwort gefordert
+        history[-1][1] = ""
+        for character in result:
+            history[-1][1] += character
+            time.sleep(0.03)
+            yield history, "Generating"
+            if shared_state.interrupted:
+                shared_state.recover()
+                try:
+                    yield history, "Stop: Success"
+                    return
+                except:
+                    pass
+
 ################################################
 #GUI
 ###############################################
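The new else branch keeps the streaming pattern the old top-level code used: the callback is a generator that repeatedly yields the growing history so the chatbot animates the answer. Below is a stripped-down, self-contained version of that pattern; component names are illustrative and it assumes the tuple-style history the app already uses.

```python
# Minimal, self-contained illustration of the yield-based streaming used above.
import time
import gradio as gr

def stream_answer(history):
    result = "Das ist eine Beispielantwort."
    history[-1][1] = ""                  # empty bot slot prepared by add_text
    for character in result:
        history[-1][1] += character
        time.sleep(0.03)
        yield history                    # each yield refreshes the chatbot

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=[["Frage?", None]])
    gr.Button("Demo").click(stream_answer, chatbot, chatbot)

# demo.launch()
```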
@@ -457,7 +477,8 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                     cancelBtn = gr.Button("Stop")
                 with gr.Row():
                     emptyBtn = gr.ClearButton( [user_input, chatbot], value="🧹 Neue Session")
-                    btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+                    upload = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+                    file_display = gr.File(label=None, interactive=False, height=20)
 
         with gr.Column():
             with gr.Column(min_width=50, scale=1):
@@ -530,17 +551,17 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     )
 
     # Chatbot
-    transfer_input_args_text = dict(
-        fn=add_text, inputs=[chatbot, user_input], outputs=[chatbot, user_question, user_input], show_progress=True
+    transfer_input_args = dict(
+        fn=add_text, inputs=[ chatbot, user_input, upload], outputs=[chatbot, user_input], show_progress=True
     )
     transfer_input_args_file = dict(
         fn=add_file, inputs=[chatbot, btn, user_input], outputs=[chatbot, user_question, user_input], show_progress=True
     )
 
     predict_event1 = user_input.submit(**transfer_input_args_text, queue=False,).then(**predict_args)
-    predict_event3 = btn.upload(**transfer_input_args_file,queue=False,).then(**predict_args)
     predict_event2 = submitBtn.click(**transfer_input_args_text, queue=False,).then(**predict_args)
-
+    predict_event3 = upload.upload(file_anzeigen, [ upload], [file_display] ) #.then(**predict_args)
+
     cancelBtn.click(
         cancels=[predict_event1,predict_event2, predict_event3 ]
     )
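file_anzeigen is referenced in this hunk but not defined anywhere in the diff; the sketch below is only an assumption about its shape, based on how it is wired from the UploadButton to the gr.File display.

```python
# Hypothetical sketch of the file_anzeigen callback referenced above
# (not part of this commit): take the uploaded file from the UploadButton
# and hand its path to the gr.File component so it shows up in the UI.
def file_anzeigen(file):
    return file.name if file is not None else None

# wiring as in the hunk above:
# predict_event3 = upload.upload(file_anzeigen, [upload], [file_display])
```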
 