Update app.py
app.py (CHANGED)
@@ -276,9 +276,15 @@ def generate(text, history, rag_option, model_option, temperature=0.5, max_new_
         db = document_retrieval_chroma()
         #with RAG:
         neu_text_mit_chunks = rag_chain(text, db)
-
+        #for a chat LLM:
+        #prompt = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
+        #as a plain prompt:
+        prompt = generate_prompt_with_history(neu_text_mit_chunks, history)
     else:
-
+        #for a chat LLM:
+        #prompt = generate_prompt_with_history_openai(text, history)
+        #as a plain prompt:
+        prompt = generate_prompt_with_history(text, history)
 
     #request to the model (with RAG: with chunks from the vector store; without: only prompt and history)
     #payload = tokenizer.apply_chat_template([{"role":"user","content":prompt}],tokenize=False)
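
For context: neither generate_prompt_with_history nor generate_prompt_with_history_openai is shown in this diff. The sketch below is only an illustration of what a plain-text, history-aware prompt builder of this kind typically looks like; the signature, the (user, assistant) history format, and the User:/Assistant: template are assumptions, not the actual implementation in app.py.

# Hypothetical sketch; the real generate_prompt_with_history in app.py is not
# part of this diff, so the signature, history format, and template here are
# assumptions.
def generate_prompt_with_history(text, history, max_turns=5):
    """Build a plain-text prompt from prior chat turns plus the new user text.

    `text` is either the raw user input (no-RAG branch) or the input already
    enriched with retrieved chunks (RAG branch); `history` is assumed to be a
    list of (user, assistant) tuples as produced by a Gradio chat component.
    """
    lines = []
    for user_turn, assistant_turn in history[-max_turns:]:
        lines.append(f"User: {user_turn}")
        lines.append(f"Assistant: {assistant_turn}")
    lines.append(f"User: {text}")
    lines.append("Assistant:")  # the model continues from here
    return "\n".join(lines)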