alexkueck committed on
Commit
a4ba6f3
1 Parent(s): fc94519

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -8
app.py CHANGED
@@ -263,8 +263,9 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
263
  splittet = False
264
 
265
  #kein Bild hochgeladen -> auf Text antworten...
 
266
  if (file == None):
267
- result = generate_text(prompt, chatbot, history, rag_option, model_option, openai_api_key, db, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
268
  history = history + [(prompt, result)]
269
  else:
270
  #Es wurde ein Bild angehängt -> wenn prompt dazu, das Bild analysieren
@@ -281,7 +282,7 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
281
  for character in result:
282
  chatbot[-1][1] += character
283
  time.sleep(0.03)
284
- yield chatbot, history, None, "Generating"
285
  if shared_state.interrupted:
286
  shared_state.recover()
287
  try:
@@ -311,7 +312,7 @@ def generate_bild(prompt, chatbot, model_option_zeichnen='HuggingFace', temperat
311
  #chatbot[-1][1]= "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
312
  chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(response.data[0].b64_json)
313
 
314
- return chatbot, "Success"
315
 
316
 
317
  ##################################################
@@ -362,6 +363,7 @@ def generate_text_zu_doc(file, prompt, k, rag_option, chatbot, db):
362
  #mit oder ohne RAG möglich
363
  def generate_text (prompt, chatbot, history, rag_option, model_option, openai_api_key, db, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
364
  global splittet
 
365
  print("Text pur..............................")
366
  if (openai_api_key == "" or openai_api_key == "sk-"):
367
  #raise gr.Error("OpenAI API Key is required.")
@@ -417,15 +419,16 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
417
  print("LLM aufrufen ohne RAG: ...........")
418
  result = llm_chain(llm, history_text_und_prompt)
419
 
420
- #Wenn keine Antwort möglich "Ich weiß es nicht", dann versuchen mit Suche im Internet.
421
- if (result == "Ich weiß es nicht."):
422
  print("Suche im Netz: ...........")
 
423
  result = create_assistant_suche(history_text_und_prompt)
424
 
425
  except Exception as e:
426
  raise gr.Error(e)
427
 
428
- return result
429
 
430
 
431
 
@@ -472,7 +475,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
472
  with gr.Tab("LI Chatbot"):
473
  #with gr.Row():
474
  #gr.HTML("LI Chatot")
475
- #status_display = gr.Markdown("Success", visible = False, elem_id="status_display")
476
  with gr.Row():
477
  with gr.Column(scale=5):
478
  with gr.Row():
@@ -546,7 +549,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
546
  with gr.Tab("LI Zeichnen"):
547
  #with gr.Row():
548
  #gr.HTML("LI Zeichnen mit KI")
549
- #status_display2 = gr.Markdown("Success", visible = False, elem_id="status_display")
550
  #gr.Markdown(description2)
551
  with gr.Row():
552
  with gr.Column(scale=5):
 
263
  splittet = False
264
 
265
  #kein Bild hochgeladen -> auf Text antworten...
266
+ status = "Antwort der KI ..."
267
  if (file == None):
268
+ result, status = generate_text(prompt, chatbot, history, rag_option, model_option, openai_api_key, db, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
269
  history = history + [(prompt, result)]
270
  else:
271
  #Es wurde ein Bild angehängt -> wenn prompt dazu, das Bild analysieren
 
282
  for character in result:
283
  chatbot[-1][1] += character
284
  time.sleep(0.03)
285
+ yield chatbot, history, None, status
286
  if shared_state.interrupted:
287
  shared_state.recover()
288
  try:
 
312
  #chatbot[-1][1]= "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
313
  chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(response.data[0].b64_json)
314
 
315
+ return chatbot, "Antwort KI: Success"
316
 
317
 
318
  ##################################################
 
363
  #mit oder ohne RAG möglich
364
  def generate_text (prompt, chatbot, history, rag_option, model_option, openai_api_key, db, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
365
  global splittet
366
+ suche_im_Netz="Antwort der KI ..."
367
  print("Text pur..............................")
368
  if (openai_api_key == "" or openai_api_key == "sk-"):
369
  #raise gr.Error("OpenAI API Key is required.")
 
419
  print("LLM aufrufen ohne RAG: ...........")
420
  result = llm_chain(llm, history_text_und_prompt)
421
 
422
+ #Wenn keine Antwort möglich "Ich weiß es nicht" etc., dann versuchen mit Suche im Internet.
423
+ if is_response_similar(result):
424
  print("Suche im Netz: ...........")
425
+ suche_im_Netz="Antwort aus dem Internet ..."
426
  result = create_assistant_suche(history_text_und_prompt)
427
 
428
  except Exception as e:
429
  raise gr.Error(e)
430
 
431
+ return result, suche_im_Netz
432
 
433
 
434
 
 
475
  with gr.Tab("LI Chatbot"):
476
  #with gr.Row():
477
  #gr.HTML("LI Chatot")
478
+ status_display = gr.Markdown("Antwort der KI ...", visible = False, elem_id="status_display")
479
  with gr.Row():
480
  with gr.Column(scale=5):
481
  with gr.Row():
 
549
  with gr.Tab("LI Zeichnen"):
550
  #with gr.Row():
551
  #gr.HTML("LI Zeichnen mit KI")
552
+ status_display2 = gr.Markdown("Success", visible = False, elem_id="status_display")
553
  #gr.Markdown(description2)
554
  with gr.Row():
555
  with gr.Column(scale=5):