alexkueck committed on
Commit
ab7d2c0
1 Parent(s): d0b9f09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -458,11 +458,11 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
458
  #result = rag_chain(llm, history_text_und_prompt, db)
459
  else:
460
  #splittet = False
461
- if (model_option=="OpenAI"):
462
- print("LLM aufrufen ohne RAG: ...........")
463
- resulti = llm_chain(llm, history_text_und_prompt)
464
- result = resulti.strip()
465
- else:
466
  data = {"inputs": prompt, "options": {"max_new_tokens": max_new_tokens},}
467
  response = requests.post(API_URL_TEXT, headers=HEADERS, json=data)
468
  result = response.json()
@@ -476,7 +476,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
476
  print(history)
477
  print(chatbot_message)
478
  result = chatbot_message
479
-
480
 
481
 
482
 
 
458
  #result = rag_chain(llm, history_text_und_prompt, db)
459
  else:
460
  #splittet = False
461
+ print("LLM aufrufen ohne RAG: ...........")
462
+ resulti = llm_chain(llm, history_text_und_prompt)
463
+ result = resulti.strip()
464
+ """
465
+ #Alternativ mit API_URL - aber das model braucht 93 B Space!!!
466
  data = {"inputs": prompt, "options": {"max_new_tokens": max_new_tokens},}
467
  response = requests.post(API_URL_TEXT, headers=HEADERS, json=data)
468
  result = response.json()
 
476
  print(history)
477
  print(chatbot_message)
478
  result = chatbot_message
479
+ """
480
 
481
 
482