import os

import streamlit as st
from streamlit_chat import message
from dotenv import load_dotenv

import google.generativeai as genai
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import (
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    ChatPromptTemplate,
    MessagesPlaceholder,
)

# get_pdf_text, get_text_chunks, get_vector_store, get_conversation_string,
# query_refiner and find_match are defined in utils.py.
from utils import *
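
# --- Sidebar: PDF ingestion ---
# Uploaded PDFs are read, split into chunks and embedded into the vector
# store by the utils.py helpers, so find_match() can retrieve passages later.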
with st.sidebar:
    st.title("Menu:")
    pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
    if st.button("Submit & Process"):
        with st.spinner("Processing..."):
            raw_text = get_pdf_text(pdf_docs)
            text_chunks = get_text_chunks(raw_text)
            get_vector_store(text_chunks)
            st.success("Done")
st.subheader("Conversation Chatbot with LangChain, Gemini Pro LLM, Pinecone and Streamlit")
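
# Seed the chat history: 'responses' holds assistant turns (starting with a
# greeting), 'requests' holds the matching user turns.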
if 'responses' not in st.session_state:
    st.session_state['responses'] = ["How can I assist you?"]

if 'requests' not in st.session_state:
    st.session_state['requests'] = []
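
# Load GOOGLE_API_KEY from the local .env file and configure Gemini with it.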
load_dotenv()
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0, convert_system_message_to_human=True)
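# Rolling window memory: keep only the last k=3 exchanges so the prompt sent
# to the model stays short while preserving recent context.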
if 'buffer_memory' not in st.session_state:
    st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3, return_messages=True)
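
# Prompt layout: system instructions, then the rolling chat history, then the
# current user input ({input}).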
system_msg_template = SystemMessagePromptTemplate.from_template(template="""Answer the question as truthfully as possible using the provided context,
and if the answer is not contained within the text below, say 'I don't know'""")
human_msg_template = HumanMessagePromptTemplate.from_template(template="{input}")
prompt_template = ChatPromptTemplate.from_messages([system_msg_template, MessagesPlaceholder(variable_name="history"), human_msg_template])
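# ConversationChain ties the Gemini LLM, the windowed memory and the prompt
# together; verbose=True prints each fully rendered prompt for debugging.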
conversation = ConversationChain(memory=st.session_state.buffer_memory, prompt=prompt_template, llm=llm, verbose=True)
# container for chat history
response_container = st.container()
# container for text box
textcontainer = st.container()
# Conversation UI: refine the raw query against recent history, retrieve
# matching context from the vector store, then ask the LLM.
with textcontainer:
    query = st.text_input("Query: ", key="input")
    if query:
        with st.spinner("typing..."):
            conversation_string = get_conversation_string()
            # st.code(conversation_string)
            refined_query = query_refiner(conversation_string, query)
            st.subheader("Refined Query:")
            st.write(refined_query)
            context = find_match(refined_query)
            # print(context)
            response = conversation.predict(input=f"Context:\n {context} \n\n Query:\n{query}")
        st.session_state.requests.append(query)
        st.session_state.responses.append(response)
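
# Render the transcript, interleaving each assistant response with the user
# message that followed it.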
with response_container:
    if st.session_state['responses']:
        for i in range(len(st.session_state['responses'])):
            message(st.session_state['responses'][i], key=str(i))
            if i < len(st.session_state['requests']):
                message(st.session_state["requests"][i], is_user=True, key=str(i) + '_user')