ramhemanth580 committed on
Commit
8fdd3b6
1 Parent(s): 7169c10

Upload 3 files

Added the app files

Files changed (3)
  1. app.py +94 -0
  2. requirements.txt +12 -0
  3. utils.py +99 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ import langchain
+ import streamlit as st
+ from streamlit_chat import message
+ from utils import *
+
+ from PyPDF2 import PdfReader
+ from dotenv import load_dotenv
+
+ import os
+ import google.generativeai as genai
+
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+ from langchain.chains import ConversationChain
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.prompts import (
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+     ChatPromptTemplate,
+     MessagesPlaceholder
+ )
+
+ # Sidebar: upload PDFs, extract and chunk their text, and index the chunks
+ with st.sidebar:
+     st.title("Menu:")
+     pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
+     if st.button("Submit & Process"):
+         with st.spinner("Processing..."):
+             raw_text = get_pdf_text(pdf_docs)
+             text_chunks = get_text_chunks(raw_text)
+             get_vector_store(text_chunks)
+             st.success("Done")
+ st.subheader("Conversational Chatbot with LangChain, Gemini Pro LLM, Pinecone and Streamlit")
+
+ # Initialize chat history in session state
+ if 'responses' not in st.session_state:
+     st.session_state['responses'] = ["How can I assist you?"]
+
+ if 'requests' not in st.session_state:
+     st.session_state['requests'] = []
+
+ load_dotenv()
+ genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
+ llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0, convert_system_message_to_human=True)
+
+ # Keep only the last k=3 exchanges as conversational context
+ if 'buffer_memory' not in st.session_state:
+     st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3, return_messages=True)
+
+ system_msg_template = SystemMessagePromptTemplate.from_template(template="""Answer the question as truthfully as possible using the provided context,
+ and if the answer is not contained within the text below, say 'I don't know'""")
+
+ human_msg_template = HumanMessagePromptTemplate.from_template(template="{input}")
+
+ prompt_template = ChatPromptTemplate.from_messages([system_msg_template, MessagesPlaceholder(variable_name="history"), human_msg_template])
+
+ conversation = ConversationChain(memory=st.session_state.buffer_memory, prompt=prompt_template, llm=llm, verbose=True)
+
+ # Container for chat history
+ response_container = st.container()
+ # Container for the text box
+ textcontainer = st.container()
+
+ with textcontainer:
+     query = st.text_input("Query: ", key="input")
+     if query:
+         with st.spinner("typing..."):
+             conversation_string = get_conversation_string()
+             refined_query = query_refiner(conversation_string, query)
+             st.subheader("Refined Query:")
+             st.write(refined_query)
+             context = find_match(refined_query)
+             response = conversation.predict(input=f"Context:\n {context} \n\n Query:\n{query}")
+         st.session_state.requests.append(query)
+         st.session_state.responses.append(response)
+
+ with response_container:
+     if st.session_state['responses']:
+         for i in range(len(st.session_state['responses'])):
+             message(st.session_state['responses'][i], key=str(i))
+             if i < len(st.session_state['requests']):
+                 message(st.session_state["requests"][i], is_user=True, key=str(i) + '_user')
+
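Note: app.py and utils.py read their credentials via python-dotenv, so a local .env file must define GOOGLE_API_KEY, PINECONE_API_KEY and PINECONE_API_ENV. A minimal pre-flight sketch (the check_env.py filename is illustrative; the key names come straight from the code above):

    # check_env.py - illustrative pre-flight check, not part of this commit
    import os
    from dotenv import load_dotenv

    load_dotenv()  # reads .env from the working directory by default
    for key in ("GOOGLE_API_KEY", "PINECONE_API_KEY", "PINECONE_API_ENV"):
        if not os.getenv(key):
            raise SystemExit(f"{key} is missing - add it to .env before launching")
    print("Environment OK - start the app with: streamlit run app.py")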
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ streamlit
+ streamlit_chat
+ google-generativeai
+ python-dotenv
+ langchain
+ PyPDF2
+ langchain_google_genai
+ sentence-transformers==2.2.2
+ pinecone-client==2.2.4
+ unstructured
+ unstructured[local-inference]
+ tiktoken
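Note: the pinecone-client==2.2.4 pin matters because utils.py uses the v2-style pinecone.init(...) call, which was removed in pinecone-client 3.x. For orientation only, a rough sketch of the newer v3 initialization, should the pin ever be lifted:

    # Sketch assuming pinecone-client >= 3.0 (NOT the 2.2.4 pinned above)
    import os
    from pinecone import Pinecone

    pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))  # environment is no longer passed at init
    index = pc.Index("rag-chatbot")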
utils.py ADDED
@@ -0,0 +1,99 @@
+ import os
+ import pinecone
+ import streamlit as st
+ import google.generativeai as genai
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.vectorstores import Pinecone
+
+ # Parse the uploaded documents into a single text blob
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text
+
+ # Split the text blob into overlapping chunks
+ def get_text_chunks(text):
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ # Embedding model shared by indexing and retrieval
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+
+ # Load environment variables to get the Pinecone API key and environment
+ load_dotenv()
+ PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
+ PINECONE_API_ENV = os.getenv("PINECONE_API_ENV")
+
+ # Initialize Pinecone
+ pinecone.init(
+     api_key=PINECONE_API_KEY,     # find at app.pinecone.io
+     environment=PINECONE_API_ENV  # next to the API key in the console
+ )
+ index_name = "rag-chatbot"  # the name of your Pinecone index
+
+ # Load the text chunks into the Pinecone database
+ def get_vector_store(text_chunks):
+     index = Pinecone.from_texts(text_chunks, embeddings, index_name=index_name)
+     return index
+
+ # Retrieve the chunks most similar to the (refined) query
+ def find_match(input):
+     docsearch = Pinecone.from_existing_index(index_name, embeddings)
+     docs = docsearch.similarity_search(input)
+     return docs
+
+ genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
+ model = genai.GenerativeModel('gemini-pro')
+
+ # Rewrite the user's query so it is self-contained given the conversation so far
+ def query_refiner(conversation, query):
+     prompt = f"Given the following user query and conversation log, formulate a question that would be the most relevant to provide the user with an answer from a knowledge base.\n\nCONVERSATION LOG: \n{conversation}\n\nQuery: {query}\n\nRefined Query:"
+     response = model.generate_content(prompt)
+     return response.text
+
+ # Serialize the chat history held in Streamlit session state
+ def get_conversation_string():
+     conversation_string = ""
+     for i in range(len(st.session_state['responses']) - 1):
+         conversation_string += "Human: " + st.session_state['requests'][i] + "\n"
+         conversation_string += "Bot: " + st.session_state['responses'][i + 1] + "\n"
+     return conversation_string
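For reference, a minimal sketch of the retrieval round-trip these helpers implement, runnable from a Python shell once documents have been indexed via get_vector_store (the sample history and question are illustrative only):

    from utils import query_refiner, find_match

    history = "Human: What does the document cover?\nBot: It describes a RAG pipeline.\n"
    refined = query_refiner(history, "And which embedding model does it use?")
    docs = find_match(refined)  # LangChain Documents nearest to the refined query
    print(refined)
    print(docs[0].page_content)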