Spaces:
Sleeping
Sleeping
saptharishi
committed on
Commit
•
159023c
1
Parent(s):
7366084
update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,7 @@ from langchain.chains import ConversationalRetrievalChain
|
|
8 |
import streamlit as st
|
9 |
import time
|
10 |
|
11 |
-
st.set_page_config(page_title="
|
12 |
col1, col2, col3 = st.columns([1,8,1])
|
13 |
with col2:
|
14 |
st.image("logo.png")
|
@@ -61,7 +61,7 @@ embeddings = HuggingFaceEmbeddings(model_name="nomic-ai/nomic-embed-text-v1",mod
|
|
61 |
db = FAISS.load_local("ipc_vector_db", embeddings, allow_dangerous_deserialization=True)
|
62 |
db_retriever = db.as_retriever(search_type="similarity",search_kwargs={"k": 4})
|
63 |
|
64 |
-
prompt_template = """<s>[INST]This is a chat template and As a legal chat
|
65 |
CONTEXT: {context}
|
66 |
CHAT HISTORY: {chat_history}
|
67 |
QUESTION: {question}
|
@@ -92,7 +92,7 @@ for message in st.session_state.messages:
|
|
92 |
role = message.get("role")
|
93 |
content = message.get("content")
|
94 |
|
95 |
-
with st.chat_message(role, avatar="user.svg" if role == "human" else "
|
96 |
st.write(content)
|
97 |
|
98 |
input_prompt = st.chat_input("message LAWGpt.....")
|
@@ -103,13 +103,13 @@ if input_prompt:
|
|
103 |
|
104 |
st.session_state.messages.append({"role":"human","content":input_prompt})
|
105 |
full_response = " "
|
106 |
-
with st.chat_message("
|
107 |
with st.spinner("Thinking..."):
|
108 |
result = qa.invoke(input=input_prompt)
|
109 |
|
110 |
message_placeholder = st.empty()
|
111 |
|
112 |
-
full_response = "
|
113 |
for chunk in result["answer"]:
|
114 |
full_response+=chunk
|
115 |
time.sleep(0.02)
|
@@ -117,5 +117,5 @@ if input_prompt:
|
|
117 |
message_placeholder.markdown(full_response+" ▌")
|
118 |
st.button('Reset All Chat 🗑️', on_click=reset_conversation)
|
119 |
|
120 |
-
st.session_state.messages.append({"role": "ai", "content": result["answer"], "avatar": "
|
121 |
|
|
|
8 |
import streamlit as st
|
9 |
import time
|
10 |
|
11 |
+
st.set_page_config(page_title="zhagaramGPT")
|
12 |
col1, col2, col3 = st.columns([1,8,1])
|
13 |
with col2:
|
14 |
st.image("logo.png")
|
|
|
61 |
db = FAISS.load_local("ipc_vector_db", embeddings, allow_dangerous_deserialization=True)
|
62 |
db_retriever = db.as_retriever(search_type="similarity",search_kwargs={"k": 4})
|
63 |
|
64 |
+
prompt_template = """<s>[INST]This is a chat template and As a legal chat ai specializing in Sericultural related Queries!!.
|
65 |
CONTEXT: {context}
|
66 |
CHAT HISTORY: {chat_history}
|
67 |
QUESTION: {question}
|
|
|
92 |
role = message.get("role")
|
93 |
content = message.get("content")
|
94 |
|
95 |
+
with st.chat_message(role, avatar="user.svg" if role == "human" else "ai"):
|
96 |
st.write(content)
|
97 |
|
98 |
input_prompt = st.chat_input("message LAWGpt.....")
|
|
|
103 |
|
104 |
st.session_state.messages.append({"role":"human","content":input_prompt})
|
105 |
full_response = " "
|
106 |
+
with st.chat_message("ai"):
|
107 |
with st.spinner("Thinking..."):
|
108 |
result = qa.invoke(input=input_prompt)
|
109 |
|
110 |
message_placeholder = st.empty()
|
111 |
|
112 |
+
full_response = " \n"
|
113 |
for chunk in result["answer"]:
|
114 |
full_response+=chunk
|
115 |
time.sleep(0.02)
|
|
|
117 |
message_placeholder.markdown(full_response+" ▌")
|
118 |
st.button('Reset All Chat 🗑️', on_click=reset_conversation)
|
119 |
|
120 |
+
st.session_state.messages.append({"role": "ai", "content": result["answer"], "avatar": "ai"})
|
121 |
|