Commit 6128070 by Asankhaya Sharma
Parent(s): 1ca7761
- main.py +4 -2
- question.py +14 -14
- requirements.txt +4 -4
main.py CHANGED
@@ -7,7 +7,7 @@ from question import chat_with_doc
 from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
 from langchain.vectorstores import SupabaseVectorStore
 from supabase import Client, create_client
-from stats import
+from stats import get_usage
 
 supabase_url = st.secrets.SUPABASE_URL
 supabase_key = st.secrets.SUPABASE_KEY
@@ -71,6 +71,8 @@ if 'max_tokens' not in st.session_state:
 if 'username' not in st.session_state:
     st.session_state['username'] = username
 
-chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase)
+stats = str(get_usage(supabase))
+
+chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase, stats=stats)
 
 st.markdown("---\n\n")
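
main.py now reads a usage counter via get_usage(supabase) and threads it into chat_with_doc. stats.py itself is not part of this commit, so the following is only a plausible sketch of that helper, assuming a Supabase table named "stats" that receives one row per recorded query:

# Hypothetical sketch of stats.py's get_usage; the "stats" table name
# and schema are assumptions, since stats.py is not shown in this commit.
from supabase import Client

def get_usage(supabase: Client) -> int:
    # count="exact" asks PostgREST for the total row count
    # without transferring the rows themselves.
    response = supabase.table("stats").select("id", count="exact").execute()
    return response.count or 0

main.py wraps the result in str() because chat_with_doc concatenates it directly into the text_area label.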
question.py CHANGED
@@ -7,7 +7,7 @@ from langchain.llms import OpenAI
 from langchain.llms import HuggingFaceEndpoint
 from langchain.chat_models import ChatAnthropic
 from langchain.vectorstores import SupabaseVectorStore
-from stats import add_usage
+from stats import add_usage
 
 memory = ConversationBufferMemory(memory_key="chat_history", input_key='question', output_key='answer', return_messages=True)
 openai_api_key = st.secrets.openai_api_key
@@ -15,13 +15,12 @@ anthropic_api_key = st.secrets.anthropic_api_key
 hf_api_key = st.secrets.hf_api_key
 logger = get_logger(__name__)
 
-def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
+def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db, stats):
 
     if 'chat_history' not in st.session_state:
         st.session_state['chat_history'] = []
 
-
-    question = st.text_area("## Ask a question (" + stats + " queries answered so far)", max_chars=500)
+    query = st.text_area("## Ask a question (" + stats + " queries answered so far)", max_chars=500)
     columns = st.columns(2)
     with columns[0]:
         button = st.button("Ask")
@@ -38,7 +37,7 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
 
     if button:
        qa = None
-        add_usage(stats_db, "chat", "prompt" + question, {"model": model, "temperature": st.session_state['temperature']})
+        add_usage(stats_db, "chat", "prompt" + query, {"model": model, "temperature": st.session_state['temperature']})
        if model.startswith("gpt"):
            logger.info('Using OpenAI model %s', model)
            qa = ConversationalRetrievalChain.from_llm(
@@ -62,12 +61,14 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
                huggingfacehub_api_token=hf_api_key,
                model_kwargs=model_kwargs
            )
-            qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.
+            qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.6, "k": 4, "filter": {"user": st.session_state["username"]}}), memory=memory, verbose=True, return_source_documents=True)
 
-        st.session_state['chat_history'].append(("You", question))
+        print("Question>")
+        print(query)
+        st.session_state['chat_history'].append(("You", query))
 
        # Generate model's response and add it to chat history
-        model_response = qa({"question": question})
+        model_response = qa({"question": query})
        logger.info('Result: %s', model_response["answer"])
        sources = model_response["source_documents"]
        logger.info('Sources: %s', model_response["source_documents"])
@@ -77,9 +78,8 @@ def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
        else:
            st.session_state['chat_history'].append(("Safety Copilot", "I am sorry, I do not have enough information to provide an answer. If there is a public source of data that you would like to add, please email copilot@securade.ai."))
 
-
-
-
-
-
-        st.markdown(f"**{speaker}:** {text}")
+    # Display chat history
+    st.empty()
+    chat_history = st.session_state['chat_history']
+    for speaker, text in chat_history:
+        st.markdown(f"**{speaker}:** {text}")
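
The counterpart helper, add_usage, is now called once per question before the chain runs. Again, stats.py is not in this commit; a minimal sketch consistent with the visible call site add_usage(stats_db, "chat", "prompt" + query, {...}) might look like this, with the table and column names being assumptions:

# Hypothetical sketch of stats.py's add_usage; only the call signature
# is visible in this diff, so the table and columns are assumptions.
from supabase import Client

def add_usage(supabase: Client, action: str, prompt: str, details: dict):
    # Record one usage event per chat query; get_usage() can then
    # report the running total shown in the question label.
    supabase.table("stats").insert({
        "action": action,
        "prompt": prompt,
        "details": details,
    }).execute()

Passing stats into chat_with_doc as a preformatted string keeps question.py to a single Supabase write per question, with the count itself fetched once in main.py.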
requirements.txt CHANGED
@@ -1,9 +1,9 @@
-langchain
+langchain==0.1.0
 Markdown==3.4.3
 openai==0.27.6
 pdf2image==1.16.3
 pypdf==3.8.1
-streamlit==1.
+streamlit==1.22.0
 StrEnum==0.4.10
 supabase==1.0.3
 tiktoken==0.4.0
@@ -12,5 +12,5 @@ anthropic==0.2.8
 fastapi==0.95.2
 python-multipart==0.0.6
 uvicorn==0.22.0
-docx2txt
-st-login-form
+docx2txt==0.8
+st-login-form==0.2.1