Update app.py
app.py CHANGED
@@ -79,11 +79,11 @@ YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
 
 ################################################
 #LLM model to work with
-#openai
-
-MODEL_NAME ="gpt-4"
+#openai-------------------------------------
+MODEL_NAME = "gpt-3.5-turbo-16k"
+#MODEL_NAME ="gpt-4"
 
-#HuggingFace
+#HuggingFace--------------------------------
 #repo_id = "meta-llama/Llama-2-13b-chat-hf"
 repo_id = "HuggingFaceH4/zephyr-7b-alpha"
 #repo_id = "meta-llama/Llama-2-70b-chat-hf"
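For orientation, a minimal sketch of how these two constants are typically consumed further down in app.py. The helper name pick_llm is hypothetical, and the openai_api_key parameter is borrowed from the invoke signature later in this diff; the actual wiring in this Space may differ:

from langchain.chat_models import ChatOpenAI
from langchain.llms import HuggingFaceHub

MODEL_NAME = "gpt-3.5-turbo-16k"           # OpenAI chat model with a 16k context window
repo_id = "HuggingFaceH4/zephyr-7b-alpha"  # hosted HF model, no local GPU required

def pick_llm(use_openai, openai_api_key, temperature=0.5):
    # hypothetical helper: return either an OpenAI chat model or a Hugging Face
    # Hub endpoint (HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from the environment)
    if use_openai:
        return ChatOpenAI(model_name=MODEL_NAME, openai_api_key=openai_api_key, temperature=temperature)
    return HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": temperature, "max_length": 128})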
@@ -170,10 +170,10 @@ def document_loading_splitting():
 
 #Store the splits in the Chroma DB - vectorized...
 def document_storage_chroma(splits):
-    #OpenAI embeddings
+    #OpenAI embeddings----------------------------------
     Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(disallowed_special = ()), persist_directory = PATH_WORK + CHROMA_DIR)
 
-    #HF embeddings
+    #HF embeddings--------------------------------------
     #Chroma.from_documents(documents = splits, embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cpu"}, encode_kwargs={'normalize_embeddings': False}), persist_directory = PATH_WORK + CHROMA_DIR)
 
 #Store the splits in the Mongo DB - vectorized...
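Taken together, this function amounts to the following; a minimal runnable sketch assuming the langchain imports already used in this file, with a plain persist_dir argument standing in for PATH_WORK + CHROMA_DIR:

from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings

def document_storage_chroma(splits, use_openai=True, persist_dir="chroma_db"):
    # Vectorize the splits and persist them on disk. The embedding model chosen
    # here must be the same one used later at retrieval time, otherwise the
    # similarity search operates in mismatched vector spaces.
    if use_openai:
        embedding = OpenAIEmbeddings(disallowed_special=())
    else:
        embedding = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2",
            model_kwargs={"device": "cpu"},
            encode_kwargs={"normalize_embeddings": False},
        )
    Chroma.from_documents(documents=splits, embedding=embedding, persist_directory=persist_dir)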
@@ -184,8 +184,11 @@ def document_storage_mongodb(splits):
                                           index_name = MONGODB_INDEX_NAME)
 
 #To be able to store documents vectorized in the Chroma DB - prepare the DB for that
-def document_retrieval_chroma(llm, prompt):
+def document_retrieval_chroma(llm, prompt):
+    #OpenAI embeddings -------------------------------
     embeddings = OpenAIEmbeddings()
+
+    #HF embeddings -----------------------------------
     #Alternative embedding - for the vector store, to generate similarity vectors - the ...InstructEmbedding is very compute-intensive
     #embeddings = HuggingFaceInstructEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
     #somewhat less compute-intensive:
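The diff only shows the embedding setup at the top of document_retrieval_chroma; a sketch of the likely remainder - reopening the persisted store with the same embeddings so a chain can be built around it. The persist_dir parameter and the return value are assumptions, not taken from this commit:

from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings

def document_retrieval_chroma(llm, prompt, persist_dir="chroma_db"):
    # Reopen the persisted vector store with the same embedding model that was
    # used when the splits were written (OpenAI embeddings in the active path).
    embeddings = OpenAIEmbeddings()
    db = Chroma(embedding_function=embeddings, persist_directory=persist_dir)
    # llm and prompt are kept in the signature so the caller can wire db,
    # llm, and prompt into a retrieval chain.
    return db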
@@ -300,10 +303,10 @@ def invoke (prompt, history, rag_option, openai_api_key, temperature=0.9, max_n
     ###########################
     #Select the LLM (OpenAI or HF)
     ###########################
-    #Request to OpenAI
+    #Request to OpenAI ----------------------------
     #llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
-    #or to Hugging Face
-    llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length":
+    #or to Hugging Face --------------------------
+    llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
     #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
     #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
 
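Once the llm is selected, a RAG setup typically connects it to the Chroma retriever; a sketch assuming langchain's RetrievalQA chain is used downstream - the helper name rag_answer, the chain type "stuff", and k=3 are illustrative choices, not taken from this diff:

from langchain.chains import RetrievalQA

def rag_answer(llm, db, prompt):
    # hypothetical helper: "stuff" concatenates the k retrieved chunks into a
    # single context that is sent to the llm in one call
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 3}),
    )
    return qa.run(prompt)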