isayahc committed on
Commit
a2318db
1 Parent(s): 0737e52

changed variables for readability; also used common variable names

Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -54,7 +54,10 @@ AWS_S3_FILE=os.getenv('AWS_S3_FILE')
 VS_DESTINATION=os.getenv('VS_DESTINATION')
 
 # initialize Model config
-model_id = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.1", model_kwargs={
+llm_model_name = "mistralai/Mistral-7B-Instruct-v0.1"
+
+# changed named to model_id to llm as is common
+llm = HuggingFaceHub(repo_id=llm_model_name, model_kwargs={
     # "temperature":0.1,
     "max_new_tokens":1024,
     "repetition_penalty":1.2,
@@ -100,10 +103,10 @@ db.get()
 retriever = db.as_retriever(search_type="mmr")#, search_kwargs={'k': 3, 'lambda_mult': 0.25})
 
 # asks LLM to create 3 alternatives baed on user query
-# multi_retriever = MultiQueryRetriever.from_llm(retriever=retriever, llm=model_id)
+# multi_retriever = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)
 
 # asks LLM to extract relevant parts from retrieved documents
-# compressor = LLMChainExtractor.from_llm(model_id)
+# compressor = LLMChainExtractor.from_llm(llm)
 # compression_retriever = ContextualCompressionRetriever(base_compressor=compressor, base_retriever=multi_retriever)
 
 global qa
@@ -135,13 +138,13 @@ logging.getLogger("langchain.chains.qa_with_sources").setLevel(logging.INFO)
 
 
 
-# qa = RetrievalQA.from_chain_type(llm=model_id, retriever=retriever, return_source_documents=True, verbose=True, chain_type_kwargs={
+# qa = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, return_source_documents=True, verbose=True, chain_type_kwargs={
 #     "verbose": True,
 #     "memory": memory,
 #     "prompt": prompt
 #     }
 # )
-qa = RetrievalQAWithSourcesChain.from_chain_type(llm=model_id, retriever=retriever, return_source_documents=True, verbose=True, chain_type_kwargs={
+qa = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, retriever=retriever, return_source_documents=True, verbose=True, chain_type_kwargs={
     "verbose": True,
     "memory": memory,
     "prompt": prompt,