|
import requests |
|
import os, sys, json |
|
import gradio as gr |
|
import openai |
|
from openai import OpenAI |
|
import time |
|
import re |
|
import io |
|
from PIL import Image, ImageDraw, ImageOps, ImageFont |
|
from base64 import b64encode |
|
|
|
from langchain.chains import LLMChain, RetrievalQA |
|
from langchain.chat_models import ChatOpenAI |
|
from langchain.document_loaders import PyPDFLoader, WebBaseLoader, UnstructuredWordDocumentLoader, DirectoryLoader |
|
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader |
|
from langchain.document_loaders.generic import GenericLoader |
|
from langchain.document_loaders.parsers import OpenAIWhisperParser |
|
from langchain.schema import AIMessage, HumanMessage |
|
from langchain.llms import HuggingFaceHub |
|
from langchain.llms import HuggingFaceTextGenInference |
|
from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings |
|
|
|
from langchain.embeddings.openai import OpenAIEmbeddings |
|
from langchain.prompts import PromptTemplate |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.vectorstores import Chroma, MongoDBAtlasVectorSearch
|
from chromadb.errors import InvalidDimensionException |
|
from utils import * |
|
from beschreibungen import * |
|
|
|
|
|
|
|
|
|
|
|
from dotenv import load_dotenv, find_dotenv |
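# The .env file is expected to provide HF_ACCESS_READ (HuggingFace read token)
# and OPENAI_API_KEY; both are read via os.getenv() further below.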
|
_ = load_dotenv(find_dotenv()) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Flag: True once the documents have been loaded, split and stored in the vector DB.
splittet = False

template = """Antworte auf Deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte einfach, dass du es nicht weißt. Versuche nicht, die Antwort zu erfinden. Halte die Antwort so kurz wie möglich, aber exakt. """
|
|
|
llm_template = "Beantworte die Frage am Ende. " + template + "Frage: {question} Hilfreiche Antwort: " |
|
rag_template = "Nutze die folgenden Kontext Teile, um die Frage zu beantworten am Ende. " + template + "{context} Frage: {question} Hilfreiche Antwort: " |
|
|
|
|
|
|
|
LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], |
|
template = llm_template) |
|
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], |
|
template = rag_template) |
|
|
|
|
|
HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ") |
|
OAI_API_KEY=os.getenv("OPENAI_API_KEY") |
|
HEADERS = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"} |
|
|
|
|
|
|
|
PATH_WORK = "." |
|
CHROMA_DIR = "/chroma" |
|
YOUTUBE_DIR = "/youtube" |
|
|
|
|
|
|
|
PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf" |
|
WEB_URL = "https://openai.com/research/gpt-4" |
|
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE" |
|
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE" |
|
|
|
|
|
|
|
|
|
|
|
MODEL_NAME = "gpt-3.5-turbo-16k" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
repo_id = "HuggingFaceH4/zephyr-7b-alpha" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
MODEL_NAME_HF = "mistralai/Mixtral-8x7B-Instruct-v0.1" |
|
MODEL_NAME_OAI_ZEICHNEN = "dall-e-3" |
|
|
|
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1" |
|
|
|
|
|
|
|
|
|
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def add_text(chatbot, history, prompt, file): |
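    """Append the user prompt and/or an uploaded file to the chatbot display.

    Returns the updated chatbot list, the (unchanged) history, the prompt,
    an empty string to clear the textbox, and a hidden gr.File component."""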
|
    if (file is None):
|
chatbot = chatbot + [(prompt, None)] |
|
else: |
|
if (prompt == ""): |
|
chatbot = chatbot + [((file.name,), "Prompt fehlt!")] |
|
else: |
|
chatbot = chatbot + [((file.name,), None), (prompt, None)] |
|
print("chatbot nach add_text............") |
|
print(chatbot) |
|
return chatbot, history, prompt, "", gr.File( label=None, interactive=False, height=20, min_width=20, visible=False, scale=2) |
|
|
|
def add_file(history, file, prompt): |
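    """Append an uploaded file (and, if present, the prompt) to the history."""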
|
if (prompt == ""): |
|
history = history + [((file.name,), None)] |
|
else: |
|
history = history + [((file.name,), None), (prompt, None)] |
|
return history, prompt, "" |
|
|
|
def file_anzeigen(file):

    return gr.File(value=file.name, visible=True)
|
|
|
def umwandeln_fuer_anzeige(image): |
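    """Serialize a PIL image to PNG bytes so it can be embedded in the chat display."""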
|
buffer = io.BytesIO() |
|
image.save(buffer, format='PNG') |
|
return buffer.getvalue() |
|
|
|
def process_image(image_path, prompt): |
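    """Read an image from disk and base64-encode it; returns a dict bundling
    the encoded image with the user prompt (app-internal format)."""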
|
|
|
with open(image_path, "rb") as image_file: |
|
encoded_string = b64encode(image_file.read()).decode() |
|
|
|
|
|
data = { |
|
'image': encoded_string, |
|
'input': prompt, |
|
} |
|
return data |
|
|
|
|
|
def transfer_input(inputs): |
|
|
return ( |
|
inputs, |
|
gr.update(value=""), |
|
gr.Button.update(visible=True), |
|
) |
|
|
|
|
|
def create_directory_loader(file_type, directory_path): |
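    """Build a DirectoryLoader for all files of the given type below directory_path."""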
|
|
|
loaders = { |
|
'.pdf': PyPDFLoader, |
|
        '.docx': UnstructuredWordDocumentLoader,
|
} |
|
return DirectoryLoader( |
|
path=directory_path, |
|
glob=f"**/*{file_type}", |
|
loader_cls=loaders[file_type], |
|
) |
|
|
|
|
|
def document_loading_splitting(): |
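    """Load all configured sources (local PDF/Word directories, a PDF URL, a web
    page and two YouTube videos transcribed via Whisper) and split them into
    overlapping chunks for the vector store. Sets the global `splittet` flag."""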
|
global splittet |
|
|
|
|
|
docs = [] |
|
|
|
|
|
pdf_loader = create_directory_loader('.pdf', './chroma/pdf') |
|
    word_loader = create_directory_loader('.docx', './chroma/word')
|
|
|
|
|
|
|
pdf_documents = pdf_loader.load() |
|
word_documents = word_loader.load() |
|
|
|
|
|
docs.extend(pdf_documents) |
|
docs.extend(word_documents) |
|
|
|
|
|
|
|
loader = PyPDFLoader(PDF_URL) |
|
docs.extend(loader.load()) |
|
|
|
loader = WebBaseLoader(WEB_URL) |
|
docs.extend(loader.load()) |
|
|
|
loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,YOUTUBE_URL_2], PATH_WORK + YOUTUBE_DIR), OpenAIWhisperParser()) |
|
docs.extend(loader.load()) |
|
|
|
|
|
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150, chunk_size = 1500) |
|
splits = text_splitter.split_documents(docs) |
|
|
|
|
|
splittet = True |
|
return splits |
|
|
|
|
|
def document_storage_chroma(splits): |
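    """Embed the splits with OpenAI embeddings and store them in the local
    Chroma vector store under PATH_WORK + CHROMA_DIR."""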
|
|
|
Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(disallowed_special = ()), persist_directory = PATH_WORK + CHROMA_DIR) |
|
|
|
|
|
|
|
|
|
|
|
def document_storage_mongodb(splits): |
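    """Alternative storage backend: MongoDB Atlas vector search. The constants
    MONGODB_COLLECTION and MONGODB_INDEX_NAME are expected to be provided
    elsewhere (e.g. by `utils`)."""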
|
MongoDBAtlasVectorSearch.from_documents(documents = splits, |
|
embedding = OpenAIEmbeddings(disallowed_special = ()), |
|
collection = MONGODB_COLLECTION, |
|
index_name = MONGODB_INDEX_NAME) |
|
|
|
|
|
def document_retrieval_chroma(llm, prompt): |
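    """Open the persisted Chroma store. `llm` and `prompt` are currently unused."""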
|
|
|
embeddings = OpenAIEmbeddings() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
db = Chroma(embedding_function = embeddings, persist_directory = PATH_WORK + CHROMA_DIR) |
|
return db |
|
|
|
|
|
def document_retrieval_mongodb(llm, prompt): |
|
db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI, |
|
MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME, |
|
OpenAIEmbeddings(disallowed_special = ()), |
|
index_name = MONGODB_INDEX_NAME) |
|
return db |
|
|
|
|
|
|
|
|
|
|
|
def llm_chain(llm, prompt): |
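    """Run the plain LLM chain (no retrieval) with the fixed German prompt template."""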
|
    chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)

    result = chain.run({"question": prompt})
|
return result |
|
|
|
|
|
def rag_chain(llm, prompt, db): |
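    """Run a RetrievalQA chain against `db`; the retriever returns the top 3
    document chunks (k is fixed here, independent of the `anzahl_docs` slider)."""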
|
    chain = RetrievalQA.from_chain_type(llm,
                                        chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                        retriever = db.as_retriever(search_kwargs = {"k": 3}),
                                        return_source_documents = True)

    result = chain({"query": prompt})
|
return result["result"] |
|
|
|
|
|
|
|
|
|
|
|
def generate_prompt_with_history(text, history, max_length=4048): |
|
|
|
|
|
prompt="" |
|
history = ["\n{}\n{}".format(x[0],x[1]) for x in history] |
|
history.append("\n{}\n".format(text)) |
|
history_text = "" |
|
flag = False |
|
for x in history[::-1]: |
|
history_text = x + history_text |
|
flag = True |
|
print ("Prompt: ..........................") |
|
print(prompt+history_text) |
|
if flag: |
|
return prompt+history_text |
|
else: |
|
return None |
|
|
|
def generate_prompt_and_file_with_history(text, history, file): |
|
|
|
|
|
prompt="" |
|
history = ["\n{}\n{}".format(x[0],x[1]) for x in history] |
|
history.append("\n{}\n".format(text)) |
|
history_text = "" |
|
flag = False |
|
for x in history[::-1]: |
|
history_text = x + history_text |
|
flag = True |
|
print ("Prompt: ..........................") |
|
print(prompt+history_text) |
|
if flag: |
|
return prompt+history_text |
|
else: |
|
return None |
|
|
|
|
|
def generate_prompt_with_history_openai(prompt, history): |
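    """Convert the (user, assistant) history pairs into the OpenAI messages
    format and append the current prompt as the final user message."""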
|
history_openai_format = [] |
|
for human, assistant in history: |
|
history_openai_format.append({"role": "user", "content": human }) |
|
history_openai_format.append({"role": "assistant", "content":assistant}) |
|
|
|
history_openai_format.append({"role": "user", "content": prompt}) |
|
print("openai history und prompt................") |
|
print(history_openai_format) |
|
return history_openai_format |
|
|
|
|
|
def generate_prompt_with_history_hf(prompt, history): |
|
history_transformer_format = history + [[prompt, ""]] |
|
|
|
|
|
messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]]) |
|
for item in history_transformer_format]) |
|
|
|
|
|
def generate_prompt_with_history_langchain(prompt, history): |
|
history_langchain_format = [] |
|
for human, ai in history: |
|
history_langchain_format.append(HumanMessage(content=human)) |
|
history_langchain_format.append(AIMessage(content=ai)) |
|
history_langchain_format.append(HumanMessage(content=prompt)) |
|
|
|
return history_langchain_format |
|
|
|
|
|
|
|
|
|
|
|
def chatbot_response(messages): |
|
print("messages.......................") |
|
print(messages) |
|
responses = [] |
|
for message in messages: |
|
if message['type'] == 'text': |
|
|
|
responses.append({'type': 'text', 'data': f"Echo: {message['data']}"}) |
|
else: |
|
print("Bild.............................") |
|
return responses |
|
|
|
def create_picture(history, prompt): |
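    """Generate an image with DALL-E 3 and return its URL. `history` is unused."""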
|
client = OpenAI() |
|
response = client.images.generate(model="dall-e-3", prompt=prompt,size="1024x1024",quality="standard",n=1,) |
|
image_url = response.data[0].url |
|
return image_url |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,): |
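    """Dispatch a request: prompts containing 'zeichnen' are routed to image
    generation, everything else to text generation. Returns the updated
    chatbot, history and a status string."""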
|
    if ('zeichnen' in prompt):
|
response = generate_bild(prompt) |
|
result = response.content |
|
|
|
image = Image.open(io.BytesIO(result)) |
|
print("result image...............") |
|
print (image) |
|
image_64 = umwandeln_fuer_anzeige(image) |
|
print("result image 64...............") |
|
print (image_64) |
|
chatbot[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode('utf-8')) |
|
history = history + [(prompt, result)] |
|
print("history zeichnen......................") |
|
print(chatbot) |
|
return chatbot, history, "Success" |
|
else: |
|
        result = generate_text(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=k, top_p=top_p, temperature=temperature, max_new_tokens=max_new_tokens, max_context_length_tokens=max_context_length_tokens, repetition_penalty=repetition_penalty)
|
|
|
chatbot[-1][1] = result |
|
        if (file is None):
|
history = history + [(prompt, result)] |
|
else: |
|
history = history + [(prompt+"hier noch die URL zum File", result)] |
|
|
|
print("history nach Zusatz und mit KI Antwort...........") |
|
print(history) |
|
print("chatbot nach Zusatz und mit KI Antwort...........") |
|
print(chatbot) |
|
return chatbot, history, "Success" |
|
""" |
|
for character in result: |
|
history[-1][1] += character |
|
time.sleep(0.03) |
|
yield history, "Generating" |
|
if shared_state.interrupted: |
|
shared_state.recover() |
|
try: |
|
yield history, "Stop: Success" |
|
except: |
|
pass |
|
""" |
|
|
|
def generate_bild(prompt): |
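    """Call the HuggingFace Inference API (Stable Diffusion 2.1); the response
    body contains the raw image bytes."""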
|
data = {"inputs": prompt} |
|
response = requests.post(API_URL, headers=HEADERS, json=data) |
|
print("fertig Bild") |
|
return response |
|
|
|
def generate_text(prompt, file, chatbot, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3):
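    """Answer a text prompt, optionally with RAG over Chroma or MongoDB and
    with either an OpenAI or a HuggingFace model. Falls back to the key from
    the environment if no OpenAI API key was entered."""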
|
global splittet |
|
print(splittet) |
|
|
|
if (openai_api_key == "" or openai_api_key == "sk-"): |
|
|
|
|
|
openai_api_key= OAI_API_KEY |
|
if (rag_option is None): |
|
raise gr.Error("Retrieval Augmented Generation ist erforderlich.") |
|
if (prompt == ""): |
|
raise gr.Error("Prompt ist erforderlich.") |
|
|
|
|
|
    if (file is None):
|
history_text_und_prompt = generate_prompt_with_history_openai(prompt, history) |
|
else: |
|
print("file bild uplad....................") |
|
print(file) |
|
|
|
|
|
prompt_neu = process_image(file, prompt) |
|
print("prompt_neu............................") |
|
print(prompt_neu) |
|
history_text_und_prompt = generate_prompt_with_history_openai(prompt_neu, history) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
|
|
if (model_option == "OpenAI"): |
|
|
|
print("OpenAI normal.......................") |
|
llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature) |
|
print("openAI") |
|
else: |
|
|
|
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128}) |
|
|
|
|
|
|
|
print("HF") |
|
|
|
|
|
if (rag_option == "An"): |
|
|
|
if not splittet: |
|
splits = document_loading_splitting() |
|
document_storage_chroma(splits) |
|
db = document_retrieval_chroma(llm, history_text_und_prompt) |
|
result = rag_chain(llm, history_text_und_prompt, db) |
|
elif (rag_option == "MongoDB"): |
|
|
|
|
|
db = document_retrieval_mongodb(llm, history_text_und_prompt) |
|
result = rag_chain(llm, history_text_und_prompt, db) |
|
else: |
|
print("LLM aufrufen ohne RAG: ...........") |
|
result = llm_chain(llm, history_text_und_prompt) |
|
|
|
|
|
except Exception as e: |
|
        raise gr.Error(str(e))
|
|
|
return result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
description = """<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit |
|
<strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> verwendet.\n\n |
|
""" |
|
|
|
|
|
|
|
def vote(data: gr.LikeData):

    if data.liked:
        print("You upvoted this response: " + data.value)
    else:
        print("You downvoted this response: " + data.value)
|
|
|
|
|
print ("Start GUIneu") |
|
with open("custom.css", "r", encoding="utf-8") as f: |
|
customCSS = f.read() |
|
|
|
with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: |
|
history = gr.State([]) |
|
user_question = gr.State("") |
|
with gr.Row(): |
|
gr.HTML("LI Chatot") |
|
status_display = gr.Markdown("Success", elem_id="status_display") |
|
gr.Markdown(description_top) |
|
with gr.Row(): |
|
with gr.Column(scale=5): |
|
with gr.Row(): |
|
chatbot = gr.Chatbot(elem_id="chuanhu_chatbot",) |
|
with gr.Row(): |
|
with gr.Column(scale=12): |
|
user_input = gr.Textbox( |
|
show_label=False, placeholder="Gib hier deinen Prompt ein...", |
|
container=False |
|
) |
|
with gr.Column(min_width=70, scale=1): |
|
submitBtn = gr.Button("Senden") |
|
with gr.Column(min_width=70, scale=1): |
|
cancelBtn = gr.Button("Stop") |
|
with gr.Row(): |
|
emptyBtn = gr.ClearButton( [user_input, chatbot], value="🧹 Neue Session", scale=3) |
|
upload = gr.UploadButton("📁", file_types=["image", "video", "audio"], scale=3) |
|
file_display = gr.File( label=None, interactive=False, height=30, min_width=30, visible=False, scale=2) |
|
|
|
with gr.Column(): |
|
with gr.Column(min_width=50, scale=1): |
|
with gr.Tab(label="Parameter Einstellung"): |
|
|
|
rag_option = gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus") |
|
model_option = gr.Radio(["OpenAI", "HuggingFace"], label="Modellauswahl", value = "OpenAI") |
|
|
|
top_p = gr.Slider( |
|
                        minimum=0.0,
|
maximum=1.0, |
|
value=0.95, |
|
step=0.05, |
|
interactive=True, |
|
label="Top-p", |
|
) |
|
temperature = gr.Slider( |
|
minimum=0.1, |
|
maximum=2.0, |
|
value=1, |
|
step=0.1, |
|
interactive=True, |
|
label="Temperature", |
|
) |
|
max_length_tokens = gr.Slider( |
|
minimum=0, |
|
maximum=512, |
|
value=512, |
|
step=8, |
|
interactive=True, |
|
label="Max Generation Tokens", |
|
) |
|
max_context_length_tokens = gr.Slider( |
|
minimum=0, |
|
maximum=4096, |
|
value=2048, |
|
step=128, |
|
interactive=True, |
|
label="Max History Tokens", |
|
) |
|
repetition_penalty=gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True) |
|
anzahl_docs = gr.Slider(label="Anzahl Dokumente", value=3, minimum=1, maximum=10, step=1, interactive=True, info="wie viele Dokumententeile aus dem Vektorstore an den prompt gehängt werden", visible=True) |
|
openai_key = gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1) |
|
gr.Markdown(description) |
|
|
|
|
|
predict_args = dict( |
|
fn=generate_auswahl, |
|
inputs=[ |
|
user_question, |
|
upload, |
|
chatbot, |
|
history, |
|
rag_option, |
|
model_option, |
|
openai_key, |
|
anzahl_docs, |
|
top_p, |
|
temperature, |
|
max_length_tokens, |
|
max_context_length_tokens, |
|
repetition_penalty |
|
], |
|
outputs=[chatbot, history, status_display], |
|
show_progress=True, |
|
postprocess=False |
|
) |
|
|
|
|
|
reset_args = dict( |
|
fn=reset_textbox, inputs=[], outputs=[user_input, status_display] |
|
) |
|
|
|
|
|
transfer_input_args = dict( |
|
fn=add_text, inputs=[chatbot, history, user_input, upload], outputs=[chatbot, history, user_question, user_input, file_display], show_progress=True |
|
) |
|
|
|
predict_event1 = user_input.submit(**transfer_input_args, queue=False,).then(**predict_args) |
|
predict_event2 = submitBtn.click(**transfer_input_args, queue=False,).then(**predict_args) |
|
    predict_event3 = upload.upload(file_anzeigen, [upload], [file_display])
|
|
|
cancelBtn.click( |
|
cancels=[predict_event1,predict_event2, predict_event3 ] |
|
) |
|
demo.title = "LI-ChatBot" |
|
|
|
demo.queue().launch(debug=True) |
|
|
|
|
|
|
|
|
|
|
|
""" |
|
additional_inputs = [ |
|
#gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"), |
|
gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus"), |
|
gr.Radio(["OpenAI", "HuggingFace"], label="Modellauswahl", value = "HuggingFace"), |
|
gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), |
|
gr.Slider(label="Temperature", value=0.65, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten", visible=True), |
|
gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens", visible=True), |
|
gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit.", visible=True), |
|
gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True) |
|
] |
|
|
|
with gr.Blocks() as demo: |
|
reference_image = gr.Image(label="Reference Image") |
|
|
|
chatbot_stream = gr.Chatbot() |
|
|
|
chat_interface_stream = gr.ChatInterface(fn=invoke, |
|
additional_inputs = additional_inputs, |
|
additional_inputs_accordion = gr.Accordion(label="Weitere Eingaben...", open=False), |
|
title = "ChatGPT vom LI", |
|
theme="soft", |
|
chatbot=chatbot_stream, |
|
retry_btn="🔄 Wiederholen", |
|
undo_btn="↩️ Letztes löschen", |
|
clear_btn="🗑️ Verlauf löschen", |
|
submit_btn = "Abschicken", |
|
description = description, |
|
) |
|
|
|
gr.HTML( |
|
|
|
<div style="display: flex; justify-content: center; align-items: center; text-align: center;"> |
|
<a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;"> |
|
</a> |
|
<div> |
|
<h1 >Chatbot des LI - hier im Test mit Image Eingabe</h1> |
|
<div style="display: flex; justify-content: center; align-items: center; text-align: center;> |
|
<a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a> |
|
</div> |
|
</div> |
|
</div> |
|
) |
|
|
|
with gr.Row(): |
|
prompt = gr.Textbox( |
|
scale=4, |
|
show_label=False, |
|
placeholder="Gib einen Text ein oder lade eine Datei (Bild, File, Audio) hoch", |
|
container=False, |
|
) |
|
btn = gr.UploadButton("📁", file_types=["image", "video", "audio"]) |
|
|
|
txt_msg = txt.submit(invoke, [chat_interface_stream, prompt], [chat_interface_stream, prompt], queue=False).then(bot, chat_interface_stream, chat_interface_stream, api_name="bot_response") |
|
txt_msg.then(lambda: gr.Textbox(interactive=True), None, [prompt], queue=False) |
|
file_msg = btn.upload(add_file, [chat_interface_stream, btn], [chat_interface_stream], queue=False).then(bot, chat_interface_stream, chat_interface_stream) |
|
|
|
#chatbot_stream.like(print_like_dislike, None, None) |
|
|
|
|
|
demo.queue().launch() |
|
""" |