alexkueck committed on
Commit
5ba7b43
1 Parent(s): 38b4a39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -8
app.py CHANGED
@@ -141,7 +141,7 @@ def add_text(history, prompt, file):
141
  else:
142
  history = history + [((file.name,), None), (prompt, None)]
143
 
144
- return history, prompt, "", gr.File( label=None, interactive=False, height=20, min_width=20, visible=False, scale=2) #gr.Textbox(value="", interactive=False)
145
 
146
  def add_file(history, file, prompt):
147
  if (prompt == ""):
@@ -323,6 +323,9 @@ def generate_prompt_with_history_openai(prompt, history):
323
  history_openai_format.append({"role": "assistant", "content":assistant})
324
 
325
  history_openai_format.append({"role": "user", "content": prompt})
 
 
 
326
  return history_openai_format
327
 
328
  #Prompt und History für Hugging Face Schnittstelle
@@ -380,7 +383,7 @@ def umwandeln_fuer_anzeige(image):
380
  image.save(buffer, format='PNG')
381
  return buffer.getvalue()
382
 
383
- def generate_auswahl(prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
384
  if (prompt.find('zeichnen') != -1):
385
  response = generate_bild(prompt)
386
  result = response.content
@@ -394,10 +397,10 @@ def generate_auswahl(prompt, file, history, rag_option, model_option, openai_api
394
  else:
395
  result = generate_text(prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
396
  #Antwort als Stream ausgeben... wenn Textantwort gefordert
397
- print("history vor Zusatz...........")
398
- print(history)
399
  history[-1][1] = result
400
- return history, "Success"
 
 
401
  """
402
  for character in result:
403
  history[-1][1] += character
@@ -417,7 +420,7 @@ def generate_bild(prompt):
417
  print("fertig Bild")
418
  return response
419
 
420
- def generate_text (prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
421
  global splittet
422
  print(splittet)
423
 
@@ -586,7 +589,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
586
  user_question,
587
  upload,
588
  chatbot,
589
- #history,
590
  rag_option,
591
  model_option,
592
  openai_key,
@@ -597,7 +600,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
597
  max_context_length_tokens,
598
  repetition_penalty
599
  ],
600
- outputs=[chatbot, status_display], #[chatbot, history, status_display]
601
  show_progress=True,
602
  postprocess=False
603
  )
 
141
  else:
142
  history = history + [((file.name,), None), (prompt, None)]
143
 
144
+ return history, history, prompt, "", gr.File( label=None, interactive=False, height=20, min_width=20, visible=False, scale=2) #gr.Textbox(value="", interactive=False)
145
 
146
  def add_file(history, file, prompt):
147
  if (prompt == ""):
 
323
  history_openai_format.append({"role": "assistant", "content":assistant})
324
 
325
  history_openai_format.append({"role": "user", "content": prompt})
326
+ print("openai history und prompt................")
327
+ print(history_openai_format)
328
+ print (history_openai_format)
329
  return history_openai_format
330
 
331
  #Prompt und History für Hugging Face Schnittstelle
 
383
  image.save(buffer, format='PNG')
384
  return buffer.getvalue()
385
 
386
+ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
387
  if (prompt.find('zeichnen') != -1):
388
  response = generate_bild(prompt)
389
  result = response.content
 
397
  else:
398
  result = generate_text(prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,)
399
  #Antwort als Stream ausgeben... wenn Textantwort gefordert
 
 
400
  history[-1][1] = result
401
+ print("history nach Zusatz und mit KI Antwort...........")
402
+ print(history)
403
+ return history, history, "Success"
404
  """
405
  for character in result:
406
  history[-1][1] += character
 
420
  print("fertig Bild")
421
  return response
422
 
423
+ def generate_text (prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
424
  global splittet
425
  print(splittet)
426
 
 
589
  user_question,
590
  upload,
591
  chatbot,
592
+ history,
593
  rag_option,
594
  model_option,
595
  openai_key,
 
600
  max_context_length_tokens,
601
  repetition_penalty
602
  ],
603
+ outputs=[chatbot, history, status_display], #[chatbot, history, status_display]
604
  show_progress=True,
605
  postprocess=False
606
  )