Umama-at-Bluchip committed on
Commit
89c55f7
·
verified ·
1 Parent(s): a2b6d4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -8
app.py CHANGED
@@ -2,8 +2,6 @@ from langchain_community.vectorstores import FAISS
2
  from langchain_community.embeddings import HuggingFaceEmbeddings
3
  from langchain.prompts import PromptTemplate
4
  from langchain_together import Together
5
- from langchain_community.llms import huggingface_hub
6
- import os
7
  from langchain.retrievers.document_compressors import EmbeddingsFilter
8
  from langchain.retrievers import ContextualCompressionRetriever
9
  from langchain.memory import ConversationBufferWindowMemory
@@ -41,12 +39,12 @@ prompt = PromptTemplate(template=custom_prompt_template,
41
  input_variables=['context', 'question', 'chat_history'])
42
 
43
 
44
- llm = huggingface_hub(
45
- repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
46
- model_kwargs={"temperature": 0.7, "max_length": 512}
47
- )
48
-
49
- embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.80)
50
 
51
  qa = ConversationalRetrievalChain.from_llm(
52
  llm=llm,
 
2
  from langchain_community.embeddings import HuggingFaceEmbeddings
3
  from langchain.prompts import PromptTemplate
4
  from langchain_together import Together
 
 
5
  from langchain.retrievers.document_compressors import EmbeddingsFilter
6
  from langchain.retrievers import ContextualCompressionRetriever
7
  from langchain.memory import ConversationBufferWindowMemory
 
39
  input_variables=['context', 'question', 'chat_history'])
40
 
41
 
42
import os

# Chat LLM served via Together AI.
# SECURITY: the API key must come from the environment, never from a literal
# in source control — the previously committed key is exposed in Git history
# and must be revoked/rotated on the Together dashboard.
llm = Together(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    temperature=0.7,   # moderate sampling randomness
    max_tokens=512,    # cap on generated tokens per response
    together_api_key=os.environ["TOGETHER_API_KEY"],  # fail fast if unset
)
48
 
49
  qa = ConversationalRetrievalChain.from_llm(
50
  llm=llm,