alexkueck committed on
Commit
701644a
1 Parent(s): 76669be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -11,7 +11,7 @@ from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLo
11
  from langchain.document_loaders.generic import GenericLoader
12
  from langchain.document_loaders.parsers import OpenAIWhisperParser
13
  from langchain.schema import AIMessage, HumanMessage
14
- from langchain.llms import HuggingFaceHub
15
  from langchain.llms import HuggingFaceTextGenInference
16
  from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
17
 
@@ -87,7 +87,7 @@ MODEL_NAME = "gpt-3.5-turbo-16k"
87
  #verfügbare Modelle anzeigen lassen
88
 
89
 
90
- #HuggingFace--------------------------------
91
  #repo_id = "meta-llama/Llama-2-13b-chat-hf"
92
  repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
93
  #repo_id = "TheBloke/Yi-34B-Chat-GGUF"
@@ -103,6 +103,8 @@ repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
103
  #repo_id = "databricks/dolly-v2-3b"
104
  #repo_id = "google/flan-t5-xxl"
105
 
 
 
106
 
107
  ################################################
108
  #HF Hub Zugriff ermöglichen
@@ -318,7 +320,8 @@ def invoke (prompt, history, rag_option, model_option, openai_api_key, temperat
318
  print("openAI")
319
  else:
320
  #oder an Hugging Face --------------------------
321
- llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
 
322
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
323
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
324
  print("HF")
 
11
  from langchain.document_loaders.generic import GenericLoader
12
  from langchain.document_loaders.parsers import OpenAIWhisperParser
13
  from langchain.schema import AIMessage, HumanMessage
14
+ from langchain.llms import HuggingFaceHub, HuggingFaceChain
15
  from langchain.llms import HuggingFaceTextGenInference
16
  from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
17
 
 
87
  #verfügbare Modelle anzeigen lassen
88
 
89
 
90
+ #HuggingFace Repo ID--------------------------------
91
  #repo_id = "meta-llama/Llama-2-13b-chat-hf"
92
  repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
93
  #repo_id = "TheBloke/Yi-34B-Chat-GGUF"
 
103
  #repo_id = "databricks/dolly-v2-3b"
104
  #repo_id = "google/flan-t5-xxl"
105
 
106
+ #HuggingFace Model name--------------------------------
107
+ MODEL_NAME_HF = "mistralai/Mixtral-8x7B-Instruct-v0.1"
108
 
109
  ################################################
110
  #HF Hub Zugriff ermöglichen
 
320
  print("openAI")
321
  else:
322
  #oder an Hugging Face --------------------------
323
+ #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 128})
324
+ llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
325
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
326
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
327
  print("HF")