alexkueck committed
Commit e417dcc
1 Parent(s): 732f75b

Update app.py

Files changed (1): app.py +4 -1
app.py CHANGED
@@ -221,6 +221,7 @@ def rag_chain(llm, prompt, db):
     return result["result"]
 
 
+
 ###################################################
 #Generate prompts with history for different models
 ###################################################
@@ -267,7 +268,9 @@ def generate(text, history):
     #prompt = f"""Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
     #{context} Question: {text}"""
 
-    prompt = generate_prompt_with_history(text, history)
+    #with RAG:
+    #??? how to also load the matching text snippets from Chroma ???
+    prompt = generate_prompt_with_history_openai(text_neu, history)
 
     #composed request to the model...
     #payload = tokenizer.apply_chat_template([{"role":"user","content":prompt}],tokenize=False)
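
The question left open in the added comment (how to pull the matching text snippets out of Chroma) would typically be answered with a similarity search against the vector store before the prompt is built. Below is a minimal sketch, not part of the commit: it assumes db is a LangChain Chroma vector store (as the rag_chain(llm, prompt, db) signature suggests) and reuses generate_prompt_with_history_openai from app.py; the helper name generate_prompt_with_context and the default k=3 are illustrative.

def generate_prompt_with_context(text, history, db, k=3):
    #retrieve the k chunks most similar to the user question from Chroma
    docs = db.similarity_search(text, k=k)
    context = "\n\n".join(doc.page_content for doc in docs)
    #text_neu: the question enriched with the retrieved context
    text_neu = f"Use the following context to answer the question.\nContext:\n{context}\nQuestion: {text}"
    return generate_prompt_with_history_openai(text_neu, history)

With such a helper, the new line in generate() could read prompt = generate_prompt_with_context(text, history, db) instead of passing text_neu in directly.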