alexkueck committed
Commit: b680eb6
Parent: 7c84eb2

Update app.py

Files changed (1)
  1. app.py +3 -7
app.py CHANGED
@@ -209,7 +209,7 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
             endpoint_url=f"https://api-inference.huggingface.co/models/{MODEL_NAME_HF}",
             api_key=hf_token,
             temperature= 0.5,
-            max_length = 512,
+            max_length = 2048,
             top_k=top_k,
             top_p=top_p,
             repetition_penalty=repetition_penalty
@@ -222,9 +222,6 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
         print("LLM aufrufen mit RAG: ...........")
         #result = rag_chain(history_text_und_prompt, vektordatenbank, ANZAHL_DOCS)
         result = rag_chain(llm, history_text_und_prompt, retriever)
-        print("result regchain.....................")
-        print(result)
-
 
     except Exception as e:
         raise gr.Error(e)
@@ -254,7 +251,6 @@ def generate_auswahl(prompt_in, file, file_history, chatbot, history, anzahl_doc
         splits = document_loading_splitting()
         if splits:
             vektordatenbank, retriever = document_storage_chroma(splits)
-            print("db done............................")
 
     #kein Bild hochgeladen -> auf Text antworten...
     status = "Antwort der KI ..."
@@ -275,8 +271,8 @@ def generate_auswahl(prompt_in, file, file_history, chatbot, history, anzahl_doc
     #summary += " ".join(['Dokument: ' + str(doc['titel']) + ' Seite: ' + str(doc['seite']) + '\nAuschnitt: ' + str(doc["content"]) for doc in results['relevant_docs']])
     summary += " ".join([
         '<b>\nDokument: </b> <span style="color: #BB70FC;">' + str(doc['titel']) + '</span> '
-        '<span style="color: red;">Seite:</span> ' + str(doc['seite']) + '<br>'
-        '<b>Auschnitt:</b> ' + str(doc["content"])
+        '<span style="color: red;"> (Seite:</span> ' + str(doc['seite']) + ')<br>'
+        '<b>Auschnitt:</b> ' + str(doc["content"]) + '\n'
         for doc in results['relevant_docs']
     ])
     history = history + [[prompt_in, summary]]
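The substantive change in generate_text is the generation budget: max_length goes from 512 to 2048 while temperature, top_k, top_p, and repetition_penalty stay the same; the other hunks in that function only strip debug prints. The diff does not show which client wrapper app.py constructs around the Inference API endpoint, so as a point of reference here is a minimal sketch of an equivalent call using huggingface_hub's InferenceClient (an assumption, not the app's actual wrapper; note that InferenceClient names the budget max_new_tokens, whereas the wrapper in the diff takes max_length):

from huggingface_hub import InferenceClient

# Sketch only: app.py's real client wrapper is not visible in this diff.
# Model name, token, and sampling values are hypothetical placeholders;
# in app.py they come from config and the Gradio UI.
MODEL_NAME_HF = "some-org/some-model"  # assumption; defined elsewhere in app.py
hf_token = "hf_..."                    # elided; read from the Space's secrets

client = InferenceClient(model=MODEL_NAME_HF, token=hf_token)

answer = client.text_generation(
    "Was steht im Handbuch zu Kapitel 3?",  # hypothetical prompt
    max_new_tokens=2048,     # the commit raises this budget from 512 to 2048
    temperature=0.5,
    top_k=35,                # placeholder
    top_p=0.95,              # placeholder
    repetition_penalty=1.0,  # placeholder
)
print(answer)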
 
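The last hunk changes how retrieved sources are rendered in the chat history: the page number is now parenthesized, and each excerpt ends with a newline. The list comprehension can be exercised on its own; the sketch below runs the new markup against hypothetical sample documents whose titel/seite/content keys match what the diff shows in results['relevant_docs']:

# Standalone rendering of the new source-summary markup from this commit.
# The sample documents are hypothetical; real ones come from results['relevant_docs'].
relevant_docs = [
    {"titel": "Handbuch.pdf", "seite": 12, "content": "Beispieltext aus dem Dokument."},
    {"titel": "FAQ.pdf", "seite": 3, "content": "Noch ein Ausschnitt."},
]

summary = " ".join([
    '<b>\nDokument: </b> <span style="color: #BB70FC;">' + str(doc['titel']) + '</span> '
    '<span style="color: red;"> (Seite:</span> ' + str(doc['seite']) + ')<br>'
    '<b>Auschnitt:</b> ' + str(doc["content"]) + '\n'  # "Auschnitt" [sic], as in app.py
    for doc in relevant_docs
])
print(summary)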