Spaces:
Runtime error
Runtime error
update
Browse files
app.py
CHANGED
@@ -16,6 +16,7 @@ load_dotenv()
|
|
16 |
|
17 |
openai.api_key = os.environ['OPENAI_API_KEY']
|
18 |
|
|
|
19 |
if 'messages' not in st.session_state:
|
20 |
st.session_state.messages = []
|
21 |
|
@@ -63,6 +64,7 @@ if uploaded_file is not None:
|
|
63 |
message_placeholder = st.empty()
|
64 |
full_response = ''
|
65 |
assistant_response = qr_chain({'question': prompt})['answer']
|
|
|
66 |
logger.info(f'assistant response {assistant_response}')
|
67 |
|
68 |
for chunk in assistant_response.split():
|
|
|
16 |
|
17 |
openai.api_key = os.environ['OPENAI_API_KEY']
|
18 |
|
19 |
+
|
20 |
if 'messages' not in st.session_state:
|
21 |
st.session_state.messages = []
|
22 |
|
|
|
64 |
message_placeholder = st.empty()
|
65 |
full_response = ''
|
66 |
assistant_response = qr_chain({'question': prompt})['answer']
|
67 |
+
logger.info(f'question {prompt}')
|
68 |
logger.info(f'assistant response {assistant_response}')
|
69 |
|
70 |
for chunk in assistant_response.split():
|
utils.py
CHANGED
@@ -1,7 +1,9 @@
|
|
1 |
from __future__ import annotations
|
2 |
|
|
|
3 |
import pickle
|
4 |
|
|
|
5 |
from langchain.chains import ConversationalRetrievalChain
|
6 |
from langchain.chat_models import ChatOpenAI
|
7 |
from langchain.document_loaders import UnstructuredFileLoader
|
@@ -16,19 +18,25 @@ from langchain.vectorstores.base import VectorStoreRetriever
|
|
16 |
from langchain.vectorstores.faiss import FAISS
|
17 |
|
18 |
from config import Config
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
|
21 |
def get_prompt():
|
22 |
"""
|
23 |
This function creates a prompt template that will be used to generate the prompt for the model.
|
24 |
"""
|
25 |
-
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
26 |
-
|
|
|
27 |
Question: {question}
|
28 |
Answer:"""
|
29 |
qa_prompt = PromptTemplate(
|
30 |
template=template, input_variables=[
|
31 |
-
'question', 'context',
|
32 |
],
|
33 |
)
|
34 |
return qa_prompt
|
@@ -64,7 +72,7 @@ def load_retriever():
|
|
64 |
with open(Config.vectorstore_path, 'rb') as f:
|
65 |
vectorstore = pickle.load(f)
|
66 |
retriever = VectorStoreRetriever(
|
67 |
-
vectorstore=vectorstore, search_type='
|
68 |
)
|
69 |
return retriever
|
70 |
|
@@ -73,7 +81,10 @@ def get_qa_chain():
|
|
73 |
"""
|
74 |
This function creates a question answering chain.
|
75 |
"""
|
76 |
-
llm = ChatOpenAI(
|
|
|
|
|
|
|
77 |
retriever = load_retriever()
|
78 |
prompt = get_prompt()
|
79 |
memory = ConversationBufferMemory(
|
|
|
1 |
from __future__ import annotations
|
2 |
|
3 |
+
import os
|
4 |
import pickle
|
5 |
|
6 |
+
from dotenv import load_dotenv
|
7 |
from langchain.chains import ConversationalRetrievalChain
|
8 |
from langchain.chat_models import ChatOpenAI
|
9 |
from langchain.document_loaders import UnstructuredFileLoader
|
|
|
18 |
from langchain.vectorstores.faiss import FAISS
|
19 |
|
20 |
from config import Config
|
21 |
+
# from langchain.callbacks import ContextCallbackHandler
|
22 |
+
|
23 |
+
load_dotenv()
|
24 |
+
|
25 |
+
# context_callback = ContextCallbackHandler(os.environ['CONTEXT_API_KEY'])
|
26 |
|
27 |
|
28 |
def get_prompt():
|
29 |
"""
|
30 |
This function creates a prompt template that will be used to generate the prompt for the model.
|
31 |
"""
|
32 |
+
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
33 |
+
---
|
34 |
+
Context: {context}
|
35 |
Question: {question}
|
36 |
Answer:"""
|
37 |
qa_prompt = PromptTemplate(
|
38 |
template=template, input_variables=[
|
39 |
+
'question', 'context', 'chat_history',
|
40 |
],
|
41 |
)
|
42 |
return qa_prompt
|
|
|
72 |
with open(Config.vectorstore_path, 'rb') as f:
|
73 |
vectorstore = pickle.load(f)
|
74 |
retriever = VectorStoreRetriever(
|
75 |
+
vectorstore=vectorstore, search_type='mmr', search_kwargs={'k': 10},
|
76 |
)
|
77 |
return retriever
|
78 |
|
|
|
81 |
"""
|
82 |
This function creates a question answering chain.
|
83 |
"""
|
84 |
+
llm = ChatOpenAI(
|
85 |
+
model_name=Config.chatgpt_model_name,
|
86 |
+
temperature=0,
|
87 |
+
)
|
88 |
retriever = load_retriever()
|
89 |
prompt = get_prompt()
|
90 |
memory = ConversationBufferMemory(
|