Update app.py
app.py
CHANGED
@@ -11,6 +11,7 @@ from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLo
 from langchain.document_loaders.generic import GenericLoader
 from langchain.document_loaders.parsers import OpenAIWhisperParser
 from langchain.schema import AIMessage, HumanMessage
+from langchain.llms import HuggingFaceHub
 
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.prompts import PromptTemplate
@@ -73,9 +74,13 @@ YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
 
 ################################################
 #LLM Model mit dem gearbeitet wird
+#openai
 MODEL_NAME = "gpt-3.5-turbo-16k"
 #MODEL_NAME ="gpt-4"
 
+#HuggingFace
+repo_id = "google/flan-t5-xxl"
+
 
 ################################################
 #HF Hub Zugriff ermöglichen
@@ -225,7 +230,7 @@ def invoke (prompt, history, openai_api_key, rag_option, temperature=0.9, max_ne
     global splittet
 
     #Prompt an history anhängen und einen Text daraus machen
-    history_text_und_prompt = generate_prompt_with_history(prompt, history)
+    #history_text_und_prompt = generate_prompt_with_history(prompt, history)
 
     #history für HuggingFace Models formatieren
     #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
@@ -234,7 +239,7 @@ def invoke (prompt, history, openai_api_key, rag_option, temperature=0.9, max_ne
     #history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
 
     #history für Langchain formatieren
-
+    history_text_und_prompt = generate_prompt_with_history_langchain(prompt, history)
 
     if (openai_api_key == "" or openai_api_key == "sk-"):
         #raise gr.Error("OpenAI API Key is required.")
@@ -245,10 +250,14 @@ def invoke (prompt, history, openai_api_key, rag_option, temperature=0.9, max_ne
     if (prompt == ""):
        raise gr.Error("Prompt ist erforderlich.")
    try:
+        ###########################
+        #LLM auswählen (OpenAI oder HF)
+        ###########################
        #Anfrage an OpenAI
-        llm = ChatOpenAI(model_name = MODEL_NAME,
-                         openai_api_key = openai_api_key,
-                         temperature = 0)
+        #llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
+        #oder an Hugging Face
+        #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 64})
+
        #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
        if (rag_option == "Chroma"):
            #muss nur einmal ausgeführt werden...