alexkueck committed on
Commit
d2db8f4
1 Parent(s): 5175cfc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -490,9 +490,6 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
490
  raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
491
  if (prompt == ""):
492
  raise gr.Error("Prompt ist erforderlich.")
493
-
494
- #Prompt an history anhängen und einen Text daraus machen
495
- history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
496
 
497
  #history für HuggingFace Models formatieren
498
  #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
@@ -510,6 +507,8 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
510
  #Anfrage an OpenAI ----------------------------
511
  print("OpenAI normal.......................")
512
  llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
 
 
513
  else:
514
  #oder an Hugging Face --------------------------
515
  llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
@@ -517,7 +516,9 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
517
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
518
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
519
  print("HF")
520
-
 
 
521
  #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
522
  if (rag_option == "An"):
523
  #muss nur einmal ausgeführt werden...
 
490
  raise gr.Error("Retrieval Augmented Generation ist erforderlich.")
491
  if (prompt == ""):
492
  raise gr.Error("Prompt ist erforderlich.")
 
 
 
493
 
494
  #history für HuggingFace Models formatieren
495
  #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
 
507
  #Anfrage an OpenAI ----------------------------
508
  print("OpenAI normal.......................")
509
  llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature=temperature)#, top_p = top_p)
510
+ #Prompt an history anhängen und einen Text daraus machen
511
+ history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
512
  else:
513
  #oder an Hugging Face --------------------------
514
  llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
 
516
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
517
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
518
  print("HF")
519
+ #Prompt an history anhängen und einen Text daraus machen
520
+ history_text_und_prompt = generate_prompt_with_history(prompt, history)
521
+
522
  #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
523
  if (rag_option == "An"):
524
  #muss nur einmal ausgeführt werden...