ErikH committed on
Commit e4d4f1f
1 Parent(s): 875ec35

Update pages/bot.py

Files changed (1)
  1. pages/bot.py +6 -11
pages/bot.py CHANGED
@@ -79,7 +79,7 @@ def get_vectorstore():
     return vectorstoreDB
 
 ######
-
+"""
 def get_conversation_chain(vectorstore):
     llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
     conversation_chain = ConversationalRetrievalChain.from_llm(
@@ -87,7 +87,7 @@ def get_conversation_chain(vectorstore):
         retriever=vectorstore.as_retriever()
     )
     return conversation_chain
-
+"""
 
 
 #####
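Note on the two hunks above: the added """ markers turn the whole body of get_conversation_chain into a module-level string literal, so the function is disabled rather than deleted. For reference, a minimal self-contained sketch of the chain being switched off is shown below; the langchain import paths are assumptions, since the file's import block is not part of this diff.

# Sketch only (not part of this commit): the chain that the """ markers disable.
# Import paths assume the classic langchain layout that ships HuggingFaceHub.
from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationalRetrievalChain

def get_conversation_chain(vectorstore):
    # Remote FLAN-T5 XXL endpoint served through the Hugging Face Inference API.
    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl",
                         model_kwargs={"temperature": 0.5, "max_length": 512})
    # Pair the LLM with the vector store's retriever to get a chat-style QA chain.
    return ConversationalRetrievalChain.from_llm(llm, retriever=vectorstore.as_retriever())

# Example call; chat_history starts empty on the first turn:
# chain = get_conversation_chain(get_vectorstore())
# result = chain({"question": "What do the uploaded documents cover?", "chat_history": []})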
@@ -100,13 +100,8 @@ def main():
         user_question
     )
     if user_question:
-        st.text("Test1:")
-        st.text(retrieved_docs[0].page_content)
-        st.text("Test2:")
-        st.text(retrieved_docs[1].page_content)
-        st.text("Test3:")
-        st.text(retrieved_docs[2].page_content)
-        context=retrieved_docs[0].page_content+retrieved_docs[1].page_content+retrieved_docs[3].page_content
+        context=""+retrieved_docs[0].page_content+retrieved_docs[1].page_content+retrieved_docs[3].page_content
+        st.text(context)
         question=user_question
         st.text(user_question)
 
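A note on the new context line: retrieved_docs[3] asks for a fourth document, so it raises an IndexError whenever the retriever returns fewer than four results, and it skips index 2 entirely. A defensive variant, purely as a sketch using the same names, would be:

# Sketch only: join whatever was actually retrieved instead of hard-coding indices 0, 1 and 3.
context = "\n\n".join(doc.page_content for doc in retrieved_docs[:3])
st.text(context)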
@@ -132,8 +127,8 @@ def main():
 
     ######
 
-    newA = get_conversation_chain(get_vectorstore())
-    st.text(newA)
+    #newA = get_conversation_chain(get_vectorstore())
+    #st.text(newA)
 
     """
     generator = pipeline('text-generation', model = 'tiiuae/falcon-40b')
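The trailing context lines open another string literal that holds an older, already disabled experiment with a local text-generation pipeline for tiiuae/falcon-40b. For reference, the call pattern for that snippet would look roughly like the sketch below; the prompt and max_new_tokens value are placeholders, and running the 40B model locally needs far more memory than the hosted FLAN-T5 endpoint used above.

from transformers import pipeline

# Sketch only: the disabled falcon-40b generator, with placeholder arguments.
generator = pipeline('text-generation', model='tiiuae/falcon-40b')
outputs = generator("Summarize the retrieved context:", max_new_tokens=64)
print(outputs[0]['generated_text'])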
 