alexkueck committed on
Commit
41b540b
1 Parent(s): addd96d

Update app.py

Files changed (1)
  1. app.py +22 -23
app.py CHANGED
@@ -23,9 +23,9 @@ _ = load_dotenv(find_dotenv())
 
 
 # Bind the interfaces and get the OpenAI key from the Secrets
-client = OpenAI(
-    api_key=os.getenv("OPENAI_API_KEY"),  # this is also the default, it can be omitted
-)
+#client = OpenAI(
+#    api_key=os.getenv("OPENAI_API_KEY"),
+#)
 
 
 
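With the import-time client commented out, the key is now resolved when a request arrives (see the `OAI_API_KEY` fallback further down). A minimal sketch of the same idea as a lazy helper, assuming the `openai` v1 client; the helper name is hypothetical:

```python
import os

from openai import OpenAI


def get_client(user_key: str | None = None) -> OpenAI:
    """Hypothetical helper, not part of this commit: build the client on demand.

    Prefers a user-supplied key and falls back to the OPENAI_API_KEY secret,
    mirroring the fallback this commit adds in invoke().
    """
    key = user_key or os.getenv("OPENAI_API_KEY")
    if not key:
        raise ValueError("No OpenAI API key available")
    return OpenAI(api_key=key)
```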
@@ -41,8 +41,7 @@ client = OpenAI(
 #MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
 #MONGODB_INDEX_NAME = "default"
 
-template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say
-"🧠 Thanks for using the app - Bernd" at the end of the answer. """
+template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Answer in German if not asked otherwise. """
 
 llm_template = "Answer the question at the end. " + template + "Question: {question} Helpful Answer: "
 rag_template = "Use the following pieces of context to answer the question at the end. " + template + "{context} Question: {question} Helpful Answer: "
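Both chain prompts are plain string concatenations around the shared `template`, so rendering one prompt is a quick sanity check on the new wording. A minimal sketch, assuming the `PromptTemplate` already imported in this file:

```python
from langchain.prompts import PromptTemplate

template = ("If you don't know the answer, just say that you don't know, "
            "don't try to make up an answer. Keep the answer as concise as "
            "possible. Answer in German if not asked otherwise. ")
llm_template = "Answer the question at the end. " + template + "Question: {question} Helpful Answer: "

# Render once to spot concatenation glitches such as missing spaces.
prompt = PromptTemplate(input_variables=["question"], template=llm_template)
print(prompt.format(question="Was ist GPT-4?"))
```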
@@ -52,10 +51,11 @@ LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"],
 RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"],
                                   template = rag_template)
 
+OAI_API_KEY = os.getenv("OPENAI_API_KEY")
 # Path where docs can be stored - locally, i.e. here in the HF Space (otherwise on your own machine)
 PATH_WORK = "."
-CHROMA_DIR = "/data/chroma"
-YOUTUBE_DIR = "/data/youtube"
+CHROMA_DIR = "/chroma"
+YOUTUBE_DIR = "/youtube"
 
 PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf"
 WEB_URL = "https://openai.com/research/gpt-4"
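The directory constants are joined by plain string concatenation, so `PATH_WORK + CHROMA_DIR` yields `./chroma`. An equivalent sketch with `os.path`; the leading slashes are dropped here because `os.path.join` discards everything before an absolute component:

```python
import os

PATH_WORK = "."
CHROMA_DIR = "chroma"    # no leading slash, unlike the committed constant
YOUTUBE_DIR = "youtube"

chroma_path = os.path.join(PATH_WORK, CHROMA_DIR)     # "./chroma"
youtube_path = os.path.join(PATH_WORK, YOUTUBE_DIR)   # "./youtube"

# YoutubeAudioLoader writes the downloaded audio here, so create it up front.
os.makedirs(youtube_path, exist_ok=True)
```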
@@ -75,10 +75,10 @@ def document_loading_splitting():
     loader = WebBaseLoader(WEB_URL)
     docs.extend(loader.load())
     # Load YouTube
-    #loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
-    #                                           YOUTUBE_URL_2,
-    #                                           YOUTUBE_URL_3], YOUTUBE_DIR),
-    #                       OpenAIWhisperParser())
+    loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
+                                               YOUTUBE_URL_2,
+                                               YOUTUBE_URL_3], PATH_WORK + YOUTUBE_DIR),
+                           OpenAIWhisperParser())
     docs.extend(loader.load())
     # Document splitting
     text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
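Re-enabling the audio loader means each YouTube URL is downloaded and transcribed with Whisper before splitting. A minimal end-to-end sketch of this loading-and-splitting step, assuming classic LangChain imports; the URL is a placeholder and the `chunk_size` is an assumption, since the hunk cuts off before the real value:

```python
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.text_splitter import RecursiveCharacterTextSplitter

urls = ["<YOUTUBE_URL_1>"]  # placeholder; the real URLs are defined elsewhere in app.py

# Download the audio tracks to ./youtube and transcribe them with whisper-1.
loader = GenericLoader(YoutubeAudioLoader(urls, "./youtube"), OpenAIWhisperParser())
docs = loader.load()

# Split the transcripts into overlapping chunks.
splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)
splits = splitter.split_documents(docs)
```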
@@ -102,8 +102,7 @@ def document_retrieval_chroma(llm, prompt):
     # Alternative embedding - for the vector store, to create similarity vectors
     #embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
     db = Chroma(embedding_function = embeddings,
-                #persist_directory = CHROMA_DIR)
-                persist_directory = PATH_WORK + '/chroma')
+                persist_directory = PATH_WORK + CHROMA_DIR)
 
     return db
 
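With the persist directory now derived from the constants, the persisted store can be reopened later without re-embedding. A minimal sketch of opening and querying it, assuming classic LangChain's Chroma wrapper and OpenAIEmbeddings:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()  # reads OPENAI_API_KEY from the environment

# Reopen the store persisted under ./chroma; documents added earlier are
# available again without re-embedding.
db = Chroma(embedding_function=embeddings, persist_directory="./chroma")
for doc in db.similarity_search("What is GPT-4?", k=3):
    print(doc.metadata, doc.page_content[:80])
```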
@@ -129,7 +128,8 @@ def rag_chain(llm, prompt, db):
 
 def invoke(openai_api_key, rag_option, prompt):
     if (openai_api_key == ""):
-        raise gr.Error("OpenAI API Key is required.")
+        #raise gr.Error("OpenAI API Key is required.")
+        openai_api_key = OAI_API_KEY
     if (rag_option is None):
         raise gr.Error("Retrieval Augmented Generation is required.")
     if (prompt == ""):
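Rather than rejecting an empty key field, `invoke()` now silently falls back to the Space secret. The same guard as a small helper (the name is hypothetical), failing early if the secret is missing too, instead of passing an empty key on to the API:

```python
import os

import gradio as gr

OAI_API_KEY = os.getenv("OPENAI_API_KEY")  # the fallback this commit introduces


def resolve_api_key(user_key: str) -> str:
    """Hypothetical helper: prefer the user's key, else fall back to the secret."""
    key = user_key or OAI_API_KEY
    if not key:
        # Fail here rather than letting an empty key error out inside the API call.
        raise gr.Error("No OpenAI API key provided and no secret configured.")
    return key
```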
@@ -154,18 +154,17 @@ def invoke(openai_api_key, rag_option, prompt):
         raise gr.Error(e)
     return result
 
-description = """<strong>Overview:</strong> Reasoning application that demonstrates a <strong>Large Language Model (LLM)</strong> with
-<strong>Retrieval Augmented Generation (RAG)</strong> on <strong>external data</strong>.\n\n
-<strong>Instructions:</strong> Enter an OpenAI API key and perform LLM use cases (semantic search, summarization, translation, etc.) on
+description = """<strong>Overview:</strong> This app demonstrates a <strong>Large Language Model (LLM)</strong> with
+<strong>Retrieval Augmented Generation (RAG)</strong> on <strong>external data</strong>.\n\n
+<strong>In detail:</strong> The following external data are provided as examples:
 <a href='""" + YOUTUBE_URL_1 + """'>YouTube</a>, <a href='""" + PDF_URL + """'>PDF</a>, and <a href='""" + WEB_URL + """'>Web</a>
-data on GPT-4, published after LLM knowledge cutoff.
+All of recent date!
 <ul style="list-style-type:square;">
-<li>Set "Retrieval Augmented Generation" to "<strong>Off</strong>" and submit prompt "What is GPT-4?" The <strong>LLM without RAG</strong> does not know the answer.</li>
-<li>Set "Retrieval Augmented Generation" to "<strong>Chroma</strong>" or "<strong>MongoDB</strong>" and submit prompt "What is GPT-4?" The <strong>LLM with RAG</strong> knows the answer.</li>
-<li>Experiment with prompts, e.g. "What are GPT-4's media capabilities in 5 emojis and 1 sentence?", "List GPT-4's exam scores and benchmark results.", or "Compare GPT-4 to GPT-3.5 in markdown table format."</li>
-<li>Experiment some more, for example "What is the GPT-4 API's cost and rate limit? Answer in English, Arabic, Chinese, Hindi, and Russian in JSON format." or "Write a Python program that calls the GPT-4 API."</li>
+<li>Set "Retrieval Augmented Generation" to "<strong>Off</strong>" and enter a prompt. This corresponds to <strong>using an LLM without RAG</strong>.</li>
+<li>Set "Retrieval Augmented Generation" to "<strong>Chroma</strong>" and enter a prompt. The <strong>LLM with RAG</strong> also knows answers on current topics from the attached data sources.</li>
+<li>Experiment with prompts, e.g. "Answer in German, Arabic, Chinese, Hindi, and Russian." or "Write a Python program that calls the GPT-4 API."</li>
 </ul>\n\n
-<strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using the <a href='https://openai.com/'>OpenAI</a> API and
+<strong>Technology used:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using the <a href='https://openai.com/'>OpenAI</a> API and
 AI-native <a href='https://www.trychroma.com/'>Chroma</a> embedding database /
 <a href='https://www.mongodb.com/blog/post/introducing-atlas-vector-search-build-intelligent-applications-semantic-search-ai'>MongoDB</a> vector search.
 <strong>Speech-to-text</strong> (STT) via <a href='https://openai.com/research/whisper'>whisper-1</a> model, <strong>text embedding</strong> via
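The `description` string is rendered as HTML by the UI. A minimal sketch of how such a string plugs into a Gradio interface; `fn`, inputs, and outputs are placeholders, not the app's real ones:

```python
import gradio as gr


def echo(prompt: str) -> str:  # placeholder fn, standing in for the app's invoke()
    return prompt


demo = gr.Interface(fn=echo,
                    inputs=gr.Textbox(label="Prompt"),
                    outputs=gr.Textbox(label="Completion"),
                    description="<strong>Overview:</strong> LLM with RAG on external data.")
demo.launch()
```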
 