mikepastor11 committed · Commit c15629e · Parent(s): f72b341
Update app.py
app.py CHANGED
@@ -20,8 +20,9 @@ from langchain_community.vectorstores import FAISS
 
 from langchain.text_splitter import CharacterTextSplitter
 
-
-
+from langchain.memory import ConversationBufferMemory
+from langchain.chains import ConversationalRetrievalChain
+
 # from htmlTemplates import css, bot_template, user_template
 # from langchain.llms import HuggingFaceHub
 
@@ -74,20 +75,20 @@ def get_vectorstore(text_chunks):
 
     return vectorstore
 
-
-#
-#
-#
-
-
-
-
-
-
-
-
-
-
+def get_conversation_chain(vectorstore):
+    # llm = ChatOpenAI()
+    # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
+    # google/bigbird-roberta-base   facebook/bart-large
+    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 512})
+
+    memory = ConversationBufferMemory(
+        memory_key='chat_history', return_messages=True)
+    conversation_chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,
+        retriever=vectorstore.as_retriever(),
+        memory=memory,
+    )
+    return conversation_chain
 
 # def handle_userinput(user_question):
 
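For reference, a minimal sketch of how the new get_conversation_chain helper might be wired into the rest of app.py. It assumes a Streamlit front end (suggested by the commented-out css/bot_template/user_template imports), a hypothetical get_text_chunks helper, and that HuggingFaceHub is actually imported (its import line is still commented out in this hunk); treat the names below as assumptions rather than the committed code.

# Sketch only, not part of this commit.
import streamlit as st

def handle_userinput(user_question):
    # Hypothetical handler mirroring the commented-out stub above.
    # A ConversationalRetrievalChain built with ConversationBufferMemory
    # accepts {"question": ...} and returns "answer" and "chat_history".
    response = st.session_state.conversation({"question": user_question})
    st.session_state.chat_history = response["chat_history"]
    st.write(response["answer"])

# Typical wiring once the uploaded documents have been chunked:
# text_chunks = get_text_chunks(raw_text)     # hypothetical helper
# vectorstore = get_vectorstore(text_chunks)  # defined earlier in app.py
# st.session_state.conversation = get_conversation_chain(vectorstore)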