import os, sys, json

import gradio as gr
import openai
from openai import OpenAI

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
#from langchain.vectorstores import MongoDBAtlasVectorSearch
#from pymongo import MongoClient

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
# Wire up the APIs and read the OpenAI key from the secrets
#client = OpenAI(
#    api_key=os.getenv("OPENAI_API_KEY"),
#)

# Split the documents only on the first request
splittet = False

# For MongoDB instead of Chroma as the vector store
#MONGODB_URI = os.environ["MONGODB_ATLAS_CLUSTER_URI"]
#client = MongoClient(MONGODB_URI)
#MONGODB_DB_NAME = "langchain_db"
#MONGODB_COLLECTION_NAME = "gpt-4"
#MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
#MONGODB_INDEX_NAME = "default"
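# Base instructions shared by both prompts below: answer in German unless
# asked otherwise, admit when the answer is unknown, keep answers short but exact.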
template = """Antworte in deutsch, wenn es nicht explizit anders gefordert wird. Wenn du die Antwort nicht kennst, antworte einfach, dass du es nicht weißt. Versuche nicht, die Antwort zu erfinden oder aufzumocken. Halte die Antwort so kurz aber exakt.""" | |
llm_template = "Beantworte die Frage am Ende. " + template + "Frage: {question} Hilfreiche Antwort: " | |
rag_template = "Nutze die folgenden Kontext Teile, um die Frage zu beantworten am Ende. " + template + "{context} Frage: {question} Hilfreiche Antwort: " | |
LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], | |
template = llm_template) | |
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], | |
template = rag_template) | |
OAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Path where documents can be stored - locally, i.e. here in the HF Space
# (otherwise on your own machine)
PATH_WORK = "."
CHROMA_DIR = "/chroma"
YOUTUBE_DIR = "/youtube"

PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf"
WEB_URL = "https://openai.com/research/gpt-4"
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"

#MODEL_NAME = "gpt-3.5-turbo-16k"
MODEL_NAME = "gpt-4"
def document_loading_splitting():
    global splittet
    # Document loading
    docs = []
    # Load PDF
    loader = PyPDFLoader(PDF_URL)
    docs.extend(loader.load())
    # Load web page
    loader = WebBaseLoader(WEB_URL)
    docs.extend(loader.load())
    # Load YouTube videos (audio is transcribed by OpenAI Whisper)
    loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
                                               YOUTUBE_URL_2,
                                               YOUTUBE_URL_3],
                                              PATH_WORK + YOUTUBE_DIR),
                           OpenAIWhisperParser())
    docs.extend(loader.load())
    # Document splitting
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
                                                   chunk_size = 1500)
    splits = text_splitter.split_documents(docs)
    # Split only on the first request with "Chroma"...
    splittet = True
    return splits
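# Persist the embedded chunks: Chroma writes to a local directory; the
# MongoDB variant needs the commented-out Atlas setup above.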
def document_storage_chroma(splits):
    Chroma.from_documents(documents = splits,
                          embedding = OpenAIEmbeddings(disallowed_special = ()),
                          persist_directory = PATH_WORK + CHROMA_DIR)
def document_storage_mongodb(splits):
    MongoDBAtlasVectorSearch.from_documents(documents = splits,
                                            embedding = OpenAIEmbeddings(disallowed_special = ()),
                                            collection = MONGODB_COLLECTION,
                                            index_name = MONGODB_INDEX_NAME)
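# Open the vector stores for retrieval (the llm and prompt arguments are
# currently unused).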
def document_retrieval_chroma(llm, prompt):
    embeddings = OpenAIEmbeddings()
    # Alternative embedding for the vector store, to create similarity vectors
    #embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
    db = Chroma(embedding_function = embeddings,
                persist_directory = PATH_WORK + CHROMA_DIR)
    return db
def document_retrieval_mongodb(llm, prompt):
    db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI,
                                                         MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME,
                                                         OpenAIEmbeddings(disallowed_special = ()),
                                                         index_name = MONGODB_INDEX_NAME)
    return db
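# Plain LLM call without retrieval: the question is filled into
# LLM_CHAIN_PROMPT and sent directly to the model.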
def llm_chain(llm, prompt):
    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
    result = llm_chain.run({"question": prompt})
    return result
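# RAG call: fetch the top-3 most similar chunks from the vector store and
# let the LLM answer with that context via RAG_CHAIN_PROMPT.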
def rag_chain(llm, prompt, db):
    rag_chain = RetrievalQA.from_chain_type(llm,
                                            chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                            retriever = db.as_retriever(search_kwargs = {"k": 3}),
                                            return_source_documents = True)
    result = rag_chain({"query": prompt})
    return result["result"]
def invoke(openai_api_key, rag_option, prompt):
    global splittet
    if (openai_api_key == "" or openai_api_key == "sk-"):
        #raise gr.Error("OpenAI API Key is required.")
        # Fall back to the key from the secrets
        openai_api_key = OAI_API_KEY
    if (rag_option is None):
        raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
    if (prompt == ""):
        raise gr.Error("Prompt ist erforderlich.")
    try:
        llm = ChatOpenAI(model_name = MODEL_NAME,
                         openai_api_key = openai_api_key,
                         temperature = 0)
        if (rag_option == "Chroma"):
            # Only needs to run once...
            if not splittet:
                splits = document_loading_splitting()
                document_storage_chroma(splits)
            db = document_retrieval_chroma(llm, prompt)
            result = rag_chain(llm, prompt, db)
        elif (rag_option == "MongoDB"):
            #splits = document_loading_splitting()
            #document_storage_mongodb(splits)
            db = document_retrieval_mongodb(llm, prompt)
            result = rag_chain(llm, prompt, db)
        else:
            result = llm_chain(llm, prompt)
    except Exception as e:
        raise gr.Error(str(e))
    return result
description = """<strong>Überblick:</strong> Hier wird ein <strong>Large Language Model (LLM)</strong> mit | |
<strong>Retrieval Augmented Generation (RAG)</strong> auf <strong>externen Daten</strong> demonstriert.\n\n | |
<strong>Genauer:</strong> Folgende externe Daten sind als Beispiel gegeben: | |
<a href='""" + YOUTUBE_URL_1 + """'>YouTube</a>, <a href='""" + PDF_URL + """'>PDF</a>, and <a href='""" + WEB_URL + """'>Web.</a> <br> | |
Alle neueren Datums!. | |
<ul style="list-style-type:square;"> | |
<li>Setze "Retrieval Augmented Generation" auf "<strong>Off</strong>" und gib einen Prompt ein." Das entspricht <strong> ein LLM nutzen ohne RAG</strong></li> | |
<li>Setze "Retrieval Augmented Generation" to "<strong>Chroma</strong>" und gib einen Prompt ein. Das <strong>LLM mit RAG</strong> weiß auch Antworten zu aktuellen Themen aus den angefügten Datenquellen</li> | |
<li>Experimentiere mit Prompts, z.B. Antworte in deutsch, englisch, ..." oder "schreibe ein Python Programm, dass die GPT-4 API aufruft."</li> | |
</ul>\n\n | |
""" | |
gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
                              gr.Radio(["Off", "Chroma"], label = "Retrieval Augmented Generation", value = "Off"),
                              gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()