alexkueck committed on
Commit
42e7ede
1 Parent(s): 8f58f98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -507,12 +507,13 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
507
  ###########################
508
  if (model_option == "OpenAI"):
509
  #Anfrage an OpenAI ----------------------------
510
- print("OpenAI normal.......................")
511
  llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
512
  #Prompt an history anhängen und einen Text daraus machen
513
  history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
514
  else:
515
  #oder an Hugging Face --------------------------
 
516
  llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
517
  #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
518
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
@@ -523,6 +524,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
523
 
524
  #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
525
  if (rag_option == "An"):
 
526
  #muss nur einmal ausgeführt werden...
527
  if not splittet:
528
  splits = document_loading_splitting()
@@ -530,11 +532,11 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
530
  db = document_retrieval_chroma(llm, history_text_und_prompt)
531
  print("LLM aufrufen mit RAG: ...........")
532
  result = rag_chain(llm, history_text_und_prompt, db)
533
- elif (rag_option == "MongoDB"):
534
  #splits = document_loading_splitting()
535
  #document_storage_mongodb(splits)
536
- db = document_retrieval_mongodb(llm, history_text_und_prompt)
537
- result = rag_chain(llm, history_text_und_prompt, db)
538
  else:
539
  print("LLM aufrufen ohne RAG: ...........")
540
  result = llm_chain(llm, history_text_und_prompt)
 
507
  ###########################
508
  if (model_option == "OpenAI"):
509
  #Anfrage an OpenAI ----------------------------
510
+ print("OpenAI Anfrage.......................")
511
  llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
512
  #Prompt an history anhängen und einen Text daraus machen
513
  history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
514
  else:
515
  #oder an Hugging Face --------------------------
516
+ print("HF Anfrage.......................")
517
  llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
518
  #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
519
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
 
524
 
525
  #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
526
  if (rag_option == "An"):
527
+ print("RAG aktiviert.......................")
528
  #muss nur einmal ausgeführt werden...
529
  if not splittet:
530
  splits = document_loading_splitting()
 
532
  db = document_retrieval_chroma(llm, history_text_und_prompt)
533
  print("LLM aufrufen mit RAG: ...........")
534
  result = rag_chain(llm, history_text_und_prompt, db)
535
+ #elif (rag_option == "MongoDB"):
536
  #splits = document_loading_splitting()
537
  #document_storage_mongodb(splits)
538
+ #db = document_retrieval_mongodb(llm, history_text_und_prompt)
539
+ #result = rag_chain(llm, history_text_und_prompt, db)
540
  else:
541
  print("LLM aufrufen ohne RAG: ...........")
542
  result = llm_chain(llm, history_text_und_prompt)