import requests
import os, sys, json
import gradio as gr
import openai
from openai import OpenAI
import time
import re
import io
from PIL import Image, ImageDraw, ImageOps, ImageFont
from base64 import b64encode
from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader, WebBaseLoader, UnstructuredWordDocumentLoader, DirectoryLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.schema import AIMessage, HumanMessage
from langchain.llms import HuggingFaceHub
from langchain.llms import HuggingFaceTextGenInference
from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from chromadb.errors import InvalidDimensionException
from utils import *
from beschreibungen import *
#from langchain.vectorstores import MongoDBAtlasVectorSearch
#from pymongo import MongoClient
from dotenv import load_dotenv, find_dotenv
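# Load API keys and other secrets from a local .env file, if present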
_ = load_dotenv(find_dotenv())
###############################################
# Global variables
##############################################
# Split the documents only on the first request, to fill the vector database accordingly
splittet = False
##################################################
# For MongoDB instead of Chroma as the vector store:
#MONGODB_URI = os.environ["MONGODB_ATLAS_CLUSTER_URI"]
#client = MongoClient(MONGODB_URI)
#MONGODB_DB_NAME = "langchain_db"
#MONGODB_COLLECTION_NAME = "gpt-4"
#MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
#MONGODB_INDEX_NAME = "default"
#################################################
# Prompt additions
template = """Antworte in deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte einfach, dass du es nicht weißt. Versuche nicht, die Antwort zu erfinden oder aufzumocken. Halte die Antwort so kurz aber exakt."""
llm_template = "Beantworte die Frage am Ende. " + template + "Frage: {question} Hilfreiche Antwort: "
rag_template = "Nutze die folgenden Kontext Teile, um die Frage zu beantworten am Ende. " + template + "{context} Frage: {question} Hilfreiche Antwort: "
#################################################
# Constants
LLM_CHAIN_PROMPT = PromptTemplate(input_variables=["question"], template=llm_template)
RAG_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=rag_template)
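# Example: RAG_CHAIN_PROMPT.format(context="<retrieved splits>", question="Was ist GPT-4?")
# returns the full German instruction text with the context and question filled in.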
# Fetch the platform keys from this Space's secrets
HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_ACCESS_READ")
OAI_API_KEY = os.getenv("OPENAI_API_KEY")
HEADERS = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"}
# Path where docs/images/videos can be stored: locally, i.e. here in the HF Space (otherwise on your own machine)
PATH_WORK = "."
CHROMA_DIR = "/chroma"
YOUTUBE_DIR = "/youtube"
###############################################
# URLs to documents or other content to be included
PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf"
WEB_URL = "https://openai.com/research/gpt-4"
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
#YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
################################################
# LLM model to work with
# OpenAI -------------------------------------
MODEL_NAME = "gpt-3.5-turbo-16k"
#MODEL_NAME = "gpt-3.5-turbo-1106"
#MODEL_NAME= "gpt-4-1106-preview"
# list the available models
# HuggingFace repo ID --------------------------------
#repo_id = "meta-llama/Llama-2-13b-chat-hf"
repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
#repo_id = "TheBloke/Yi-34B-Chat-GGUF"
#repo_id = "meta-llama/Llama-2-70b-chat-hf"
#repo_id = "tiiuae/falcon-40b"
#repo_id = "Vicuna-33b"
#repo_id = "alexkueck/ChatBotLI2Klein"
#repo_id = "mistralai/Mistral-7B-v0.1"
#repo_id = "internlm/internlm-chat-7b"
#repo_id = "Qwen/Qwen-7B"
#repo_id = "Salesforce/xgen-7b-8k-base"
#repo_id = "Writer/camel-5b-hf"
#repo_id = "databricks/dolly-v2-3b"
#repo_id = "google/flan-t5-xxl"
#HuggingFace Model name--------------------------------
MODEL_NAME_HF = "mistralai/Mixtral-8x7B-Instruct-v0.1"
MODEL_NAME_OAI_ZEICHNEN = "dall-e-3"
# Alternative image generation: Stable Diffusion from HF:
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
################################################
# Enable HF Hub access
###############################################
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
#################################################
#################################################
#################################################
# Processing functions
################################################
##############################################
# History: record the question or the file...
def add_text(history, prompt, file):
    if file is None:
        history = history + [(prompt, None)]
    else:
        if prompt == "":
            history = history + [((file.name,), "Prompt fehlt!")]
        else:
            history = history + [((file.name,), None), (prompt, None)]
    return history, prompt, "", gr.File(label=None, interactive=False, height=20, min_width=20, visible=False, scale=2) #gr.Textbox(value="", interactive=False)
def add_file(history, file, prompt):
if (prompt == ""):
history = history + [((file.name,), None)]
else:
history = history + [((file.name,), None), (prompt, None)]
return history, prompt, ""
def file_anzeigen(file):
return gr.File(visible=True), file.name
def create_picture_backup(history, prompt):
client = OpenAI()
response = client.images.generate(model="dall-e-3", prompt=prompt,size="1024x1024",quality="standard",n=1,)
image_url = response.data[0].url
return image_url
def transfer_input(inputs):
    # pass the input through, clear the textbox and re-show the submit button
    return (
        inputs,
        gr.update(value=""),
        gr.Button.update(visible=True),
    )
##################################################
# Define a DirectoryLoader for a given file type
def create_directory_loader(file_type, directory_path):
    # the various document loaders:
loaders = {
'.pdf': PyPDFLoader,
'.word': UnstructuredWordDocumentLoader,
}
return DirectoryLoader(
path=directory_path,
glob=f"**/*{file_type}",
loader_cls=loaders[file_type],
)
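# Example usage (as done below): create a loader for all PDFs under ./chroma/pdf
#   pdf_loader = create_directory_loader('.pdf', './chroma/pdf')
#   pdf_docs = pdf_loader.load()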
# Split the content so it can be loaded into the vector database as splits
def document_loading_splitting():
global splittet
##############################
# Document loading
docs = []
    # create a DirectoryLoader for each file type
pdf_loader = create_directory_loader('.pdf', './chroma/pdf')
word_loader = create_directory_loader('.word', './chroma/word')
# Load the files
pdf_documents = pdf_loader.load()
word_documents = word_loader.load()
    # collect everything in docs...
docs.extend(pdf_documents)
docs.extend(word_documents)
    # other loaders...
# Load PDF
loader = PyPDFLoader(PDF_URL)
docs.extend(loader.load())
# Load Web
loader = WebBaseLoader(WEB_URL)
docs.extend(loader.load())
# Load YouTube
loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,YOUTUBE_URL_2], PATH_WORK + YOUTUBE_DIR), OpenAIWhisperParser())
docs.extend(loader.load())
################################
# Document splitting
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150, chunk_size = 1500)
splits = text_splitter.split_documents(docs)
    # splitting only happens on the first request with Chroma...
splittet = True
return splits
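# With chunk_size=1500 and chunk_overlap=150, consecutive splits share 150 characters of context.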
# Store the splits in the Chroma DB (vectorized)...
def document_storage_chroma(splits):
    #OpenAI embeddings----------------------------------
Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(disallowed_special = ()), persist_directory = PATH_WORK + CHROMA_DIR)
#HF embeddings--------------------------------------
#Chroma.from_documents(documents = splits, embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cpu"}, encode_kwargs={'normalize_embeddings': False}), persist_directory = PATH_WORK + CHROMA_DIR)
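    # Note (version-dependent assumption): some langchain/chromadb versions need an explicit
    # persist() call so the vectors survive a restart, e.g.:
    #   db = Chroma.from_documents(...); db.persist()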
# Store the splits in MongoDB (vectorized)...
def document_storage_mongodb(splits):
MongoDBAtlasVectorSearch.from_documents(documents = splits,
embedding = OpenAIEmbeddings(disallowed_special = ()),
collection = MONGODB_COLLECTION,
index_name = MONGODB_INDEX_NAME)
# Prepare the Chroma DB so vectorized documents can be retrieved from it
def document_retrieval_chroma(llm, prompt):
#OpenAI embeddings -------------------------------
embeddings = OpenAIEmbeddings()
    #HF embeddings -----------------------------------
    # Alternative embeddings for the vector store, to create similarity vectors; the ...InstructEmbeddings are very compute-intensive
    #embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
    # somewhat less compute-intensive:
    #embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cpu"}, encode_kwargs={'normalize_embeddings': False})
    # Chroma DB to store the embeddings in
db = Chroma(embedding_function = embeddings, persist_directory = PATH_WORK + CHROMA_DIR)
return db
# Prepare the MongoDB so vectorized documents can be retrieved from it
def document_retrieval_mongodb(llm, prompt):
db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI,
MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME,
OpenAIEmbeddings(disallowed_special = ()),
index_name = MONGODB_INDEX_NAME)
return db
###############################################
# Set up LangChain
# Use LangChain to route the prompt to the LLM; llm and prompt are interchangeable
def llm_chain(llm, prompt):
llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
result = llm_chain.run({"question": prompt})
return result
# Use LangChain to route the prompt to the LLM, but first search the vector DB for splits that match the prompt and add them to it
def rag_chain(llm, prompt, db):
rag_chain = RetrievalQA.from_chain_type(llm,
chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
retriever = db.as_retriever(search_kwargs = {"k": 3}),
return_source_documents = True)
result = rag_chain({"query": prompt})
return result["result"]
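# return_source_documents=True also exposes the retrieved splits under
# result["source_documents"], even though only result["result"] is returned here.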
###################################################
# Build prompts together with the history, for various models
###################################################
# Build a prompt together with the history (generic)
def generate_prompt_with_history(text, history, max_length=4048):
#prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
#prompt = "Das folgende ist eine Unterhaltung in deutsch zwischen einem Menschen und einem KI-Assistenten, der Baize genannt wird. Baize ist ein open-source KI-Assistent, der von UCSD entwickelt wurde. Der Mensch und der KI-Assistent chatten abwechselnd miteinander in deutsch. Die Antworten des KI Assistenten sind immer so ausführlich wie möglich und in Markdown Schreibweise und in deutscher Sprache. Wenn nötig übersetzt er sie ins Deutsche. Die Antworten des KI-Assistenten vermeiden Themen und Antworten zu unethischen, kontroversen oder sensiblen Themen. Die Antworten sind immer sehr höflich formuliert..\n[|Human|]Hallo!\n[|AI|]Hi!"
prompt=""
history = ["\n{}\n{}".format(x[0],x[1]) for x in history]
history.append("\n{}\n".format(text))
history_text = ""
flag = False
for x in history[::-1]:
history_text = x + history_text
flag = True
print ("Prompt: ..........................")
print(prompt+history_text)
if flag:
return prompt+history_text
else:
return None
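# Like generate_prompt_with_history, but additionally receives a file (currently unused in the body)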
def generate_prompt_and_file_with_history(text, history, file):
#prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
#prompt = "Das folgende ist eine Unterhaltung in deutsch zwischen einem Menschen und einem KI-Assistenten, der Baize genannt wird. Baize ist ein open-source KI-Assistent, der von UCSD entwickelt wurde. Der Mensch und der KI-Assistent chatten abwechselnd miteinander in deutsch. Die Antworten des KI Assistenten sind immer so ausführlich wie möglich und in Markdown Schreibweise und in deutscher Sprache. Wenn nötig übersetzt er sie ins Deutsche. Die Antworten des KI-Assistenten vermeiden Themen und Antworten zu unethischen, kontroversen oder sensiblen Themen. Die Antworten sind immer sehr höflich formuliert..\n[|Human|]Hallo!\n[|AI|]Hi!"
prompt=""
history = ["\n{}\n{}".format(x[0],x[1]) for x in history]
history.append("\n{}\n".format(text))
history_text = ""
flag = False
for x in history[::-1]:
history_text = x + history_text
flag = True
print ("Prompt: ..........................")
print(prompt+history_text)
if flag:
return prompt+history_text
else:
return None
# Prompt and history for the OpenAI interface
def generate_prompt_with_history_openai(prompt, history):
history_openai_format = []
for human, assistant in history:
history_openai_format.append({"role": "user", "content": human })
history_openai_format.append({"role": "assistant", "content":assistant})
history_openai_format.append({"role": "user", "content": prompt})
return history_openai_format
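    # Example: history=[("Hallo", "Hi!")] and prompt="Wie geht es dir?" yield
    # [{"role": "user", "content": "Hallo"},
    #  {"role": "assistant", "content": "Hi!"},
    #  {"role": "user", "content": "Wie geht es dir?"}]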
# Prompt and history for the Hugging Face interface
def generate_prompt_with_history_hf(prompt, history):
    history_transformer_format = history + [[prompt, ""]]
    #stop = StopOnTokens()
    messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]]) #curr_system_message +
                        for item in history_transformer_format])
    return messages
# Prompt and history for the LangChain interface
def generate_prompt_with_history_langchain(prompt, history):
history_langchain_format = []
for human, ai in history:
history_langchain_format.append(HumanMessage(content=human))
history_langchain_format.append(AIMessage(content=ai))
history_langchain_format.append(HumanMessage(content=prompt))
return history_langchain_format
###################################################
# Called from Gradio: takes the prompt entered there and processes it
# first check whether text or an image arrived
def chatbot_response(messages):
print("messages.......................")
print(messages)
responses = []
for message in messages:
if message['type'] == 'text':
#invoke(message['data'], history, rag_option, model_option, openai_api_key, temperature=0.5, max_new_tokens=4048, top_p=0.6, repetition_penalty=1.3,)
responses.append({'type': 'text', 'data': f"Echo: {message['data']}"})
else:
print("Bild.............................")
return responses
def create_picture(history, prompt):
client = OpenAI()
response = client.images.generate(model="dall-e-3", prompt=prompt,size="1024x1024",quality="standard",n=1,)
image_url = response.data[0].url
return image_url
# prompt describing the desired image
#text = "batman art in red and blue color"
# calling the custom function "generate"
# saving the output in "url1"
#url1 = create_picture(text)
# using requests library to get the image in bytes
#response = requests.get(url1)
# using the Image module from PIL library to view the image
#Image.open(response.raw)
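# Convert a PIL image into PNG bytes so it can be base64-embedded in the chat history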
def umwandeln_fuer_anzeige(image):
buffer = io.BytesIO()
image.save(buffer, format='PNG')
return buffer.getvalue()
def generate_auswahl(prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
    if 'zeichnen' in prompt:
response = generate_bild(prompt)
result = response.content
        # output the image
image = Image.open(io.BytesIO(result))
image_64 = umwandeln_fuer_anzeige(image)
history[-1][1] = "<img src='data:image/png;base64,{0}'/>".format(b64encode(image_64).decode('utf-8'))
print("history zeichnen......................")
print(history)
return history, "Success"
else:
        result = generate_text(prompt, file, history, rag_option, model_option, openai_api_key, k=k, top_p=top_p, temperature=temperature, max_new_tokens=max_new_tokens, max_context_length_tokens=max_context_length_tokens, repetition_penalty=repetition_penalty,)
        # output the answer as a stream... if a text answer was requested
print("history vor Zusatz...........")
print(history)
history[-1][1] = result
return history, "Success"
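    # (disabled) character-by-character streaming variant of the text answer: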
"""
for character in result:
history[-1][1] += character
time.sleep(0.03)
yield history, "Generating"
if shared_state.interrupted:
shared_state.recover()
try:
yield history, "Stop: Success"
except:
pass
"""
def generate_bild(prompt):
data = {"inputs": prompt}
response = requests.post(API_URL, headers=HEADERS, json=data)
print("fertig Bild")
return response
def generate_text(prompt, file, history, rag_option, model_option, openai_api_key, k=3, top_p=0.6, temperature=0.5, max_new_tokens=4048, max_context_length_tokens=2048, repetition_penalty=1.3,):
global splittet
print(splittet)
    if (openai_api_key == "" or openai_api_key == "sk-"):
        #raise gr.Error("OpenAI API Key is required.")
        # use our own OpenAI key instead
        openai_api_key = OAI_API_KEY
if (rag_option is None):
raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
if (prompt == ""):
raise gr.Error("Prompt ist erforderlich.")
    # Append the prompt to the history and build a single text from it
    if file is None:
        history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
    else:
        prompt_neu = prompt + file.path  #b64encode(file).decode("utf-8")
        print(prompt_neu)
        history_text_und_prompt = generate_prompt_with_history_openai(prompt_neu, history)
    # format the history for HuggingFace models
    #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
    # format the history for OpenAI
    #history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
    # format the history for LangChain
    #history_text_und_prompt = generate_prompt_with_history_langchain(prompt, history)
try:
        ###########################
        # Select the LLM (OpenAI or HF)
        ###########################
if (model_option == "OpenAI"):
            # Request to OpenAI ----------------------------
print("OpenAI normal.......................")
llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
print("openAI")
else:
            # or to Hugging Face --------------------------
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
#llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
#llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
#llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
print("HF")
        # add additional document splits from the vector DB (Chroma or MongoDB) to the prompt
if (rag_option == "An"):
            # only needs to run once...
if not splittet:
splits = document_loading_splitting()
document_storage_chroma(splits)
db = document_retrieval_chroma(llm, history_text_und_prompt)
result = rag_chain(llm, history_text_und_prompt, db)
elif (rag_option == "MongoDB"):
#splits = document_loading_splitting()
#document_storage_mongodb(splits)
db = document_retrieval_mongodb(llm, history_text_und_prompt)
result = rag_chain(llm, history_text_und_prompt, db)
else:
print("LLM aufrufen ohne RAG: ...........")
result = llm_chain(llm, history_text_und_prompt)
    except Exception as e:
        raise gr.Error(str(e))
return result
################################################
# GUI
###############################################
# Description at the top of the GUI
################################################
#title = "LLM mit RAG"
description = """<strong>Information:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit
<strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> verwendet.\n\n
"""
#css = """.toast-wrap { display: none !important } """
#examples=[['Was ist ChatGPT-4?'],['Schreibe ein Python Programm, das die GPT-4 API aufruft.']]
def vote(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: " + data.value)
    else:
        print("You downvoted this response: " + data.value)
print ("Start GUIneu")
with open("custom.css", "r", encoding="utf-8") as f:
customCSS = f.read()
with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
history = gr.State([])
user_question = gr.State("")
with gr.Row():
gr.HTML("LI Chatot")
status_display = gr.Markdown("Success", elem_id="status_display")
gr.Markdown(description_top)
with gr.Row():
with gr.Column(scale=5):
with gr.Row():
chatbot = gr.Chatbot(elem_id="chuanhu_chatbot",)
with gr.Row():
with gr.Column(scale=12):
user_input = gr.Textbox(
show_label=False, placeholder="Gib hier deinen Prompt ein...",
container=False
)
with gr.Column(min_width=70, scale=1):
submitBtn = gr.Button("Senden")
with gr.Column(min_width=70, scale=1):
cancelBtn = gr.Button("Stop")
with gr.Row():
emptyBtn = gr.ClearButton( [user_input, chatbot], value="🧹 Neue Session", scale=3)
upload = gr.UploadButton("📁", file_types=["image", "video", "audio"], scale=3)
file_display = gr.File( label=None, interactive=False, height=30, min_width=30, visible=False, scale=2)
with gr.Column():
with gr.Column(min_width=50, scale=1):
with gr.Tab(label="Parameter Einstellung"):
#gr.Markdown("# Parameters")
rag_option = gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus")
model_option = gr.Radio(["OpenAI", "HuggingFace"], label="Modellauswahl", value = "OpenAI")
top_p = gr.Slider(
                        minimum=0,
maximum=1.0,
value=0.95,
step=0.05,
interactive=True,
label="Top-p",
)
temperature = gr.Slider(
minimum=0.1,
maximum=2.0,
value=1,
step=0.1,
interactive=True,
label="Temperature",
)
max_length_tokens = gr.Slider(
minimum=0,
maximum=512,
value=512,
step=8,
interactive=True,
label="Max Generation Tokens",
)
max_context_length_tokens = gr.Slider(
minimum=0,
maximum=4096,
value=2048,
step=128,
interactive=True,
label="Max History Tokens",
)
repetition_penalty=gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
anzahl_docs = gr.Slider(label="Anzahl Dokumente", value=3, minimum=1, maximum=10, step=1, interactive=True, info="wie viele Dokumententeile aus dem Vektorstore an den prompt gehängt werden", visible=True)
openai_key = gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1)
gr.Markdown(description)
    # Arguments for the generate function as input
predict_args = dict(
fn=generate_auswahl,
inputs=[
user_question,
upload,
chatbot,
#history,
rag_option,
model_option,
openai_key,
anzahl_docs,
top_p,
temperature,
max_length_tokens,
max_context_length_tokens,
repetition_penalty
],
outputs=[chatbot, status_display], #[chatbot, history, status_display]
show_progress=True,
postprocess=False
)
reset_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input, status_display]
)
# Chatbot
transfer_input_args = dict(
fn=add_text, inputs=[chatbot, user_input, upload], outputs=[chatbot, user_question, user_input, file_display], show_progress=True
)
predict_event1 = user_input.submit(**transfer_input_args, queue=False,).then(**predict_args)
predict_event2 = submitBtn.click(**transfer_input_args, queue=False,).then(**predict_args)
predict_event3 = upload.upload(file_anzeigen, [upload], [file_display, file_display] ) #.then(**predict_args)
cancelBtn.click(
cancels=[predict_event1,predict_event2, predict_event3 ]
)
demo.title = "LI-ChatBot"
demo.queue().launch(debug=True)
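# (disabled) alternative GUI based on gr.ChatInterface with streaming and image upload: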
"""
additional_inputs = [
#gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
gr.Radio(["Aus", "An"], label="RAG - LI Erweiterungen", value = "Aus"),
gr.Radio(["OpenAI", "HuggingFace"], label="Modellauswahl", value = "HuggingFace"),
gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
gr.Slider(label="Temperature", value=0.65, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten", visible=True),
gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens", visible=True),
gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit.", visible=True),
gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens", visible=True)
]
with gr.Blocks() as demo:
reference_image = gr.Image(label="Reference Image")
chatbot_stream = gr.Chatbot()
chat_interface_stream = gr.ChatInterface(fn=invoke,
additional_inputs = additional_inputs,
additional_inputs_accordion = gr.Accordion(label="Weitere Eingaben...", open=False),
title = "ChatGPT vom LI",
theme="soft",
chatbot=chatbot_stream,
retry_btn="🔄 Wiederholen",
undo_btn="↩️ Letztes löschen",
clear_btn="🗑️ Verlauf löschen",
submit_btn = "Abschicken",
description = description,
)
gr.HTML(
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
</a>
<div>
<h1 >Chatbot des LI - hier im Test mit Image Eingabe</h1>
<div style="display: flex; justify-content: center; align-items: center; text-align: center;">
<a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
</div>
</div>
</div>
)
with gr.Row():
prompt = gr.Textbox(
scale=4,
show_label=False,
placeholder="Gib einen Text ein oder lade eine Datei (Bild, File, Audio) hoch",
container=False,
)
btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
    txt_msg = prompt.submit(invoke, [chat_interface_stream, prompt], [chat_interface_stream, prompt], queue=False).then(bot, chat_interface_stream, chat_interface_stream, api_name="bot_response")
txt_msg.then(lambda: gr.Textbox(interactive=True), None, [prompt], queue=False)
file_msg = btn.upload(add_file, [chat_interface_stream, btn], [chat_interface_stream], queue=False).then(bot, chat_interface_stream, chat_interface_stream)
#chatbot_stream.like(print_like_dislike, None, None)
demo.queue().launch()
"""