alexkueck committed
Commit a3847a3
1 Parent(s): e455e2a

Update app.py

Files changed (1)
  1. app.py +168 -53
app.py CHANGED
@@ -20,6 +20,8 @@ from langchain.prompts import PromptTemplate
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langchain.vectorstores import Chroma
  from chromadb.errors import InvalidDimensionException
 
  #from langchain.vectorstores import MongoDBAtlasVectorSearch
  #from pymongo import MongoClient
@@ -117,15 +119,28 @@ os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
  #################################################
  #Funktionen zur Verarbeitung
  ################################################
- def add_text(history, text):
-     history = history + [(text, None)]
-     return history, gr.Textbox(value="", interactive=False)
-
-
- def add_file(history, file):
-     history = history + [((file.name,), None)]
-     return history
 
  # Funktion, um für einen best. File-typ ein directory-loader zu definieren
  def create_directory_loader(file_type, directory_path):
      #verscheidene Dokument loaders:
@@ -300,8 +315,8 @@ def chatbot_response(messages):
      print("Bild.............................")
      return responses
 
-
- def invoke (prompt, history, rag_option, model_option, openai_api_key, temperature=0.5, max_new_tokens=4048, top_p=0.6, repetition_penalty=1.3,):
      global splittet
      print(splittet)
      #Prompt an history anhängen und einen Text daraus machen
@@ -361,10 +376,26 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, temperat
      except Exception as e:
          raise gr.Error(e)
 
      #Antwort als Stream ausgeben...
      for i in range(len(result)):
          time.sleep(0.05)
          yield result[: i+1]
 
  ################################################
  #GUI
@@ -375,21 +406,137 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, temperat
  description = """<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit
  <strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> verwendet.\n\n
  """
- css = """.toast-wrap { display: none !important } """
- examples=[['Was ist ChtGPT-4?'],['schreibe ein Python Programm, dass die GPT-4 API aufruft.']]
 
  def vote(data: gr.LikeData):
      if data.liked: print("You upvoted this response: " + data.value)
      else: print("You downvoted this response: " + data.value)
 
- def read_image(image, size=512):
-     return np.array(Image.fromarray(image).resize((size, size)))
 
- def add_file(history, file):
-     history = history + [((file.name,), None)]
-     return history
 
 
  additional_inputs = [
  #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
  gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus"),
@@ -401,10 +548,6 @@ additional_inputs = [
  gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
  ]
 
-
-
-
-
  with gr.Blocks() as demo:
      reference_image = gr.Image(label="Reference Image")
 
@@ -424,7 +567,7 @@ with gr.Blocks() as demo:
  )
 
  gr.HTML(
-     """
  <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
  <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
  </a>
@@ -435,7 +578,7 @@ with gr.Blocks() as demo:
  </div>
  </div>
  </div>
-     """)
 
  with gr.Row():
      prompt = gr.Textbox(
@@ -453,33 +596,5 @@ with gr.Blocks() as demo:
  #chatbot_stream.like(print_like_dislike, None, None)
 
 
- """
- with gr.Tab("Chatbot"):
-     iface = gr.Interface(
-         fn=chatbot_response,
-         inputs=[reference_image, chat_interface_stream],
-         outputs=chat_interface_stream,
-         title="Chatbot mit Bildeingabe",
-         description="Laden Sie ein Bild hoch oder interagieren Sie über den Chat."
-     )
-
-     iface.launch()
-
-
- with gr.Row():
-     chatbot_stream.like(vote, None, None)
-     chat_interface_stream.queue().launch()
- #with gr.Row():
-     #reference_image.queue().launch()
-
-
- # when `first_frame` is updated
- reference_image.upload(
-     read_image,
-     reference_image,
-     reference_image,
-     queue=False
- )
- """
-
- demo.queue().launch()
 
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langchain.vectorstores import Chroma
  from chromadb.errors import InvalidDimensionException
+ from utils import *
+ from beschreibungen import *
 
  #from langchain.vectorstores import MongoDBAtlasVectorSearch
  #from pymongo import MongoClient
 
  #################################################
  #Funktionen zur Verarbeitung
  ################################################
 
+ ##############################################
+ #History - die Frage oder das File eintragen...
+ def add_text(history, prompt):
+     history = history + [(prompt, None)]
+     return history, prompt, "" #gr.Textbox(value="", interactive=False)
+
+ def add_file(history, file, prompt):
+     if (prompt == ""):
+         history = history + [((file.name,), None)]
+     else:
+         history = history + [((file.name,), None), (prompt, None)]
+     return history, prompt, ""
+
+ def transfer_input(inputs):
+     textbox = reset_textbox()
+     return (
+         inputs,
+         gr.update(value=""),
+         gr.Button.update(visible=True),
+     )
+ ##################################################
  # Funktion, um für einen best. File-typ ein directory-loader zu definieren
  def create_directory_loader(file_type, directory_path):
      #verscheidene Dokument loaders:
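For readers unfamiliar with the history format these new helpers build: gr.Chatbot (tuple format) renders a plain string as a text message and a one-element tuple as a file/media message, which is why an upload is appended as ((file.name,), None). A minimal illustration with made-up values (editor's sketch, not part of the commit):

history = []
history = history + [("Was ist RAG?", None)]                                 # text prompt, answer slot still empty
history = history + [(("bild.png",), None)]                                   # 1-tuple -> rendered as a file/image
history = history + [(("bild.png",), None), ("Beschreibe das Bild", None)]    # file plus prompt, as add_file does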
 
      print("Bild.............................")
      return responses
 
+
+ def invoke (prompt, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
      global splittet
      print(splittet)
      #Prompt an history anhängen und einen Text daraus machen
 
      except Exception as e:
          raise gr.Error(e)
 
+     """
      #Antwort als Stream ausgeben...
      for i in range(len(result)):
          time.sleep(0.05)
          yield result[: i+1]
+     """
+
+     #Antwort als Stream ausgeben...
+     history[-1][1] = ""
+     for character in result:
+         history[-1][1] += character
+         time.sleep(0.03)
+         yield history, "Generating"
+     if shared_state.interrupted:
+         shared_state.recover()
+         try:
+             yield history, "Stop: Success"
+             return
+         except:
+             pass
 
  ################################################
  #GUI
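The hunk above replaces the old string-slice streaming (yield result[: i+1]) with a generator that grows the last history entry character by character and yields the whole history, so the Chatbot pane updates incrementally; shared_state.interrupted is the hook the Stop button relies on. A minimal, self-contained sketch of that pattern (editor's illustration, not repository code; stream_reply and the placeholder answer are invented):

import time
import gradio as gr

def add_text(history, prompt):
    # record the user turn; the bot slot stays empty until generation starts
    return history + [[prompt, None]], ""

def stream_reply(history):
    result = "Dies ist eine Beispielantwort."    # stands in for the real LLM output
    history[-1][1] = ""
    for ch in result:
        history[-1][1] += ch                     # grow the last bot message
        time.sleep(0.03)
        yield history                            # every yield re-renders the Chatbot

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox()
    box.submit(add_text, [chatbot, box], [chatbot, box]).then(stream_reply, chatbot, chatbot)

demo.queue().launch()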
 
  description = """<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit
  <strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> verwendet.\n\n
  """
+ #css = """.toast-wrap { display: none !important } """
+ #examples=[['Was ist ChtGPT-4?'],['schreibe ein Python Programm, dass die GPT-4 API aufruft.']]
 
  def vote(data: gr.LikeData):
      if data.liked: print("You upvoted this response: " + data.value)
      else: print("You downvoted this response: " + data.value)
 
 
+ print ("Start GUI")
+ with open("custom.css", "r", encoding="utf-8") as f:
+     customCSS = f.read()
 
+ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
+     history = gr.State([])
+     user_question = gr.State("")
+     with gr.Row():
+         gr.HTML("LI Chatot")
+         status_display = gr.Markdown("Success", elem_id="status_display")
+     gr.Markdown(description_top)
+     with gr.Row():
+         with gr.Column(scale=5):
+             with gr.Row():
+                 chatbot = gr.Chatbot(elem_id="chuanhu_chatbot")
+             with gr.Row():
+                 with gr.Column(scale=12):
+                     user_input = gr.Textbox(
+                         show_label=False, placeholder="Gib hier deinen Prompt ein...",
+                         container=False
+                     )
+                 with gr.Column(min_width=70, scale=1):
+                     submitBtn = gr.Button("Senden")
+                 with gr.Column(min_width=70, scale=1):
+                     cancelBtn = gr.Button("Stop")
+             with gr.Row():
+                 emptyBtn = gr.ClearButton( [user_input, chatbot], value="🧹 Neue Session")
+                 btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+
+         with gr.Column():
+             with gr.Column(min_width=50, scale=1):
+                 with gr.Tab(label="Parameter Einstellung"):
+                     gr.Markdown("# Parameters")
+                     rag_option = gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus")
+                     model_option = gr.Radio(["HF1", "HF2"], label="Modellauswahl", value = "HF1")
+
+                     top_p = gr.Slider(
+                         minimum=-0,
+                         maximum=1.0,
+                         value=0.95,
+                         step=0.05,
+                         interactive=True,
+                         label="Top-p",
+                     )
+                     temperature = gr.Slider(
+                         minimum=0.1,
+                         maximum=2.0,
+                         value=1,
+                         step=0.1,
+                         interactive=True,
+                         label="Temperature",
+                     )
+                     max_length_tokens = gr.Slider(
+                         minimum=0,
+                         maximum=512,
+                         value=512,
+                         step=8,
+                         interactive=True,
+                         label="Max Generation Tokens",
+                     )
+                     max_context_length_tokens = gr.Slider(
+                         minimum=0,
+                         maximum=4096,
+                         value=2048,
+                         step=128,
+                         interactive=True,
+                         label="Max History Tokens",
+                     )
+                     repetition_penalty=gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
+                     anzahl_docs = gr.Slider(label="Anzahl Dokumente", value=3, minimum=1, maximum=10, step=1, interactive=True, info="wie viele Dokumententeile aus dem Vektorstore an den prompt gehängt werden", visible=True)
+                     openai_key = gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1)
+     gr.Markdown(description)
+
+     #Argumente für generate Funktion als Input
+     predict_args = dict(
+         fn=generate,
+         inputs=[
+             user_question,
+             chatbot,
+             #history,
+             rag_option,
+             model_option,
+             openai_key,
+             anzahl_docs,
+             top_p,
+             temperature,
+             max_length_tokens,
+             max_context_length_tokens,
+             repetition_penalty
+         ],
+         outputs=[ chatbot, status_display], #[ chatbot, history, status_display],
+         show_progress=True,
+     )
+
+
+     reset_args = dict(
+         fn=reset_textbox, inputs=[], outputs=[user_input, status_display]
+     )
+
+     # Chatbot
+     transfer_input_args_text = dict(
+         fn=add_text, inputs=[chatbot, user_input], outputs=[chatbot, user_question, user_input], show_progress=True
+     )
+     transfer_input_args_file = dict(
+         fn=add_file, inputs=[chatbot, btn, user_input], outputs=[chatbot, user_question, user_input], show_progress=True
+     )
+
+     predict_event1 = user_input.submit(**transfer_input_args_text, queue=False,).then(**predict_args)
+     predict_event3 = btn.upload(**transfer_input_args_file,queue=False,).then(**predict_args)
+     predict_event2 = submitBtn.click(**transfer_input_args_text, queue=False,).then(**predict_args)
+
+     cancelBtn.click(
+         cancels=[predict_event1,predict_event2, predict_event3 ]
+     )
+ demo.title = "LI-ChatBot"
+
+ demo.queue().launch(debug=True)
+
 
+
+
+
+ """
  additional_inputs = [
  #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
  gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus"),
 
  gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
  ]
 
  with gr.Blocks() as demo:
      reference_image = gr.Image(label="Reference Image")
 
 
  )
 
  gr.HTML(
+
  <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
  <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
  </a>
 
  </div>
  </div>
  </div>
+ )
 
  with gr.Row():
      prompt = gr.Textbox(
 
  #chatbot_stream.like(print_like_dislike, None, None)
 
 
+ demo.queue().launch()
+ """