alexkueck committed
Commit 0ec9f22
Parent: 1d2f1c0

Update app.py

Files changed (1): app.py +8 -2
app.py CHANGED
@@ -276,9 +276,15 @@ def generate(text, history, rag_option, model_option, temperature=0.5, max_new_
         db = document_retrieval_chroma()
         #with RAG:
         neu_text_mit_chunks = rag_chain(text, db)
-        prompt = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
+        #for a chat LLM:
+        #prompt = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
+        #as a plain prompt:
+        prompt = generate_prompt_with_history(neu_text_mit_chunks, history)
     else:
-        prompt = generate_prompt_with_history_openai(text, history)
+        #for a chat LLM:
+        #prompt = generate_prompt_with_history_openai(text, history)
+        #as a plain prompt:
+        prompt = generate_prompt_with_history(text, history)
 
     #Query the model (with RAG: with chunks from the vector store; without: just prompt and history)
     #payload = tokenizer.apply_chat_template([{"role":"user","content":prompt}],tokenize=False)
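For context, the RAG branch feeds the output of rag_chain(text, db) into the prompt builder. The repo's actual rag_chain and document_retrieval_chroma are not shown in this diff; the following is a minimal sketch of what such a chain typically does, assuming a LangChain-style Chroma store:

# Minimal sketch, NOT the repo's code: fetch the top-k chunks for the query
# from a Chroma vector store and prepend them to the question as context.
def rag_chain(text, db, k=3):
    docs = db.similarity_search(text, k=k)  # assumes LangChain's Chroma API
    context = "\n\n".join(doc.page_content for doc in docs)
    return f"Context:\n{context}\n\nQuestion: {text}"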
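The substance of the commit is the switch from the OpenAI chat-message builder to a plain-text prompt builder. The implementations are not part of this diff; the sketch below is hypothetical and only illustrates the usual difference in output shape between the two functions named here:

# Hypothetical sketches of the two builders named in the diff; the actual
# implementations in app.py may differ.
def generate_prompt_with_history_openai(text, history):
    # Chat-LLM form: a list of role/content messages, as chat-completions APIs expect.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": text})
    return messages

def generate_prompt_with_history(text, history):
    # Plain-prompt form: history plus the new turn flattened into one string,
    # suitable for completion-style endpoints that take a single prompt.
    lines = [f"User: {u}\nAssistant: {a}" for u, a in history]
    lines.append(f"User: {text}\nAssistant:")
    return "\n".join(lines)

Switching to the plain-text form makes sense if the downstream model call expects a single prompt string rather than a chat-completions message list, which is what the new "as a plain prompt" comments suggest.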