alexkueck committed on
Commit
f013d30
1 Parent(s): a4d3595

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -102,7 +102,8 @@ repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
102
  #repo_id = "google/flan-t5-xxl"
103
 
104
  #HuggingFace Model name--------------------------------
105
- MODEL_NAME_HF = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
106
  MODEL_NAME_OAI_ZEICHNEN = "dall-e-3"
107
  #Alternativ zeichnen: Stabe Diffusion from HF:
108
  API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
@@ -442,8 +443,8 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
442
  #oder an Hugging Face --------------------------
443
  print("HF Anfrage.......................")
444
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
445
- llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
446
- #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
447
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
448
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
449
  print("HF")
 
102
  #repo_id = "google/flan-t5-xxl"
103
 
104
  #HuggingFace Model name--------------------------------
105
+ #MODEL_NAME_HF = "mistralai/Mixtral-8x7B-Instruct-v0.1"
106
+ MODEL_NAME_HF = "google/gemma-7b"
107
  MODEL_NAME_OAI_ZEICHNEN = "dall-e-3"
108
  #Alternativ zeichnen: Stabe Diffusion from HF:
109
  API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
 
443
  #oder an Hugging Face --------------------------
444
  print("HF Anfrage.......................")
445
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
446
+ #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
447
+ llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
448
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
449
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
450
  print("HF")