alexkueck committed
Commit 9b40322
1 Parent(s): 88ab182

Update app.py

Files changed (1): app.py +28 -30
app.py CHANGED
@@ -225,35 +225,26 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
 
 ##################################################
 # generate an image from a text prompt via Stable Diffusion
-def generate_bild(prompt, chatbot, temperature=0.5, max_new_tokens=4048, top_p=0.6, repetition_penalty=1.3):
-    """
-    print("Bild Erzeugung HF..............................")
-    # draw the image as instructed and display it in the history...
-    data = {"inputs": prompt}
-    response = requests.post(API_URL, headers=HEADERS, json=data)
-    print("fertig Bild")
-    result = response.content
-    # display the image
-    image = Image.open(io.BytesIO(result))
-    image_64 = umwandeln_fuer_anzeige(image)
-    chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
-    return chatbot, "Success"
-    """
-    print("Bild Erzeugung DallE..............................")
-    client = OpenAI()
-    # 'url' would also work as the format; n = number of generated images
-    response = client.images.generate(model="dall-e-3", prompt=prompt, size="1024x1024", quality="standard", n=1, response_format='b64_json')
-    print("response.........................")
-    print(response)
-    print("data........................")
-    print(response.data[0].b64_json)
-    #with open(image_path, "rb") as image_file:
-    #return base64.b64encode(image_file.read()).decode('utf-8')
+def generate_bild(prompt, chatbot, model_option_zeichnen, temperature=0.5, max_new_tokens=4048, top_p=0.6, repetition_penalty=1.3):
+    if (model_option_zeichnen == "HuggingFace"):
+        print("Bild Erzeugung HF..............................")
+        # draw the image as instructed and display it in the history...
+        data = {"inputs": prompt}
+        response = requests.post(API_URL, headers=HEADERS, json=data)
+        print("fertig Bild")
+        result = response.content
+        # display the image
+        image = Image.open(io.BytesIO(result))
+        image_64 = umwandeln_fuer_anzeige(image)
+        chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
+    else:
+        print("Bild Erzeugung DallE..............................")
+        client = OpenAI()
+        # 'url' would also work as the format; n = number of generated images
+        response = client.images.generate(model="dall-e-3", prompt=prompt, size="1024x1024", quality="standard", n=1, response_format='b64_json')
+        #chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
+        chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(response.data[0].b64_json)
 
-    #image = Image.open(io.BytesIO(response))
-    #image_64 = umwandeln_fuer_anzeige(image)
-    #chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
-    chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(response.data[0].b64_json)
     return chatbot, "Success"
 
 
@@ -484,7 +475,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
         with gr.Row():
             with gr.Column(scale=5):
                 with gr.Row():
-                    chatbot_bild = gr.Chatbot(elem_id="li-zeichnen")
+                    chatbot_bild = gr.Chatbot(elem_id="li-zeichnen", show_copy_button=True, show_share_button=True)
                 with gr.Row():
                     with gr.Column(scale=12):
                         user_input2 = gr.Textbox(
@@ -498,6 +489,12 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                 with gr.Row():
                     emptyBtn2 = gr.ClearButton([user_input, chatbot_bild], value="🧹 Neue Session", scale=10)
                 #additional_inputs_accordion = gr.Accordion(label="Weitere Eingaben...", open=False)
+                with gr.Column():
+                    with gr.Column(min_width=50, scale=1):
+                        with gr.Tab(label="Parameter Einstellung"):
+                            #gr.Markdown("# Parameters")
+                            model_option_zeichnen = gr.Radio(["OpenAI(DallE)", "HuggingFace"], label="Modellauswahl", value="HuggingFace")
+
 
         gr.Markdown(description)
 
@@ -553,9 +550,10 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
         inputs=[
             user_question2,
             chatbot_bild,
+            model_option_zeichnen,
             #additional_inputs,
         ],
-        outputs=[chatbot_bild, status_display2], #[chatbot, history, status_display]
+        outputs=[chatbot_bild, status_display2], #[chatbot, history, status_display]
         show_progress=True,
     )
     transfer_input_args2 = dict(
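
The rewritten else-branch requests the DALL·E result as base64 (response_format='b64_json') and embeds it straight into the chat history as an HTML data URI, instead of round-tripping through a URL or a PIL image. A minimal standalone sketch of that path, assuming an openai>=1.0 client with OPENAI_API_KEY set in the environment (the helper name dalle_image_tag is illustrative and not part of app.py):

from openai import OpenAI

def dalle_image_tag(prompt: str) -> str:
    # Ask DALL·E 3 for one 1024x1024 image, returned as base64 instead of a URL.
    client = OpenAI()
    response = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        quality="standard",
        n=1,
        response_format="b64_json",
    )
    b64 = response.data[0].b64_json
    # Wrap the PNG bytes in a data URI so a Gradio Chatbot cell can render it.
    return "<img src='data:image/png;base64,{0}'/>".format(b64)

Requesting b64_json avoids a second HTTP fetch of the generated image; the PNG arrives base64-encoded in the API response and can be assigned to chatbot[-1][1] as-is.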
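The new Radio component only takes effect because it is also appended to the handler's inputs list; Gradio then passes its current selection as the extra positional argument model_option_zeichnen. A stripped-down wiring sketch under that assumption (component names reused from app.py for clarity, the placeholder handler body is hypothetical):

import gradio as gr

def generate_bild(prompt, model_option_zeichnen):
    # Placeholder body: just report which backend would be called.
    return "Would draw '{0}' with {1}".format(prompt, model_option_zeichnen)

with gr.Blocks() as demo:
    user_input2 = gr.Textbox(label="Prompt")
    model_option_zeichnen = gr.Radio(["OpenAI(DallE)", "HuggingFace"],
                                     label="Modellauswahl", value="HuggingFace")
    status_display2 = gr.Markdown()
    # The Radio is listed in inputs, so its value travels with every submit.
    user_input2.submit(generate_bild,
                       inputs=[user_input2, model_option_zeichnen],
                       outputs=[status_display2])

demo.launch()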