tinaranathania committed
Commit 31c9ea0
Parent: 2b3aa79

Upload 3 files

Files changed (3)
  1. app.py +84 -0
  2. requirements.txt +12 -0
  3. utils.py +41 -0
app.py ADDED
@@ -0,0 +1,84 @@
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ConversationChain
+ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+ from langchain.prompts import (
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+     ChatPromptTemplate,
+     MessagesPlaceholder
+ )
+ import streamlit as st
+ from streamlit_chat import message
+
+ import os
+ from dotenv import find_dotenv, load_dotenv
+ load_dotenv(find_dotenv())
+
+ # Helper functions defined in utils.py
+ from utils import find_match, query_refiner, get_conversation_string
+
+ st.subheader("Chatbot with LangChain, ChatGPT, Pinecone, and Streamlit")
+
+ # Initialize the session state for responses and requests
+
+ if 'responses' not in st.session_state:
+     st.session_state['responses'] = ["How can I assist you?"]
+
+ if 'requests' not in st.session_state:
+     st.session_state['requests'] = []
+
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=os.environ["OPENAI_API_KEY"])
+
+ if 'buffer_memory' not in st.session_state:
+     # Save the chat history. A full buffer can exceed the token limit
+     # (ConversationSummaryMemory is one alternative), so use
+     # ConversationBufferWindowMemory to keep only the last 3 interactions.
+     st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3, return_messages=True)
+
+ # What the system should do
+ system_msg_template = SystemMessagePromptTemplate.from_template(template="""Answer the question as truthfully as possible using the provided context,
+ and if the answer is not contained within the text below, say 'I don't know'""")
+
+ # What to do with the input variable
+ human_msg_template = HumanMessagePromptTemplate.from_template(template="{input}")
+
+ # The placeholder's variable name must be "history", which is what the memory returns
+ prompt_template = ChatPromptTemplate.from_messages([system_msg_template, MessagesPlaceholder(variable_name="history"), human_msg_template])
+
+ # Chain: put the components (memory, prompt, LLM) together
+ conversation = ConversationChain(memory=st.session_state.buffer_memory, prompt=prompt_template, llm=llm, verbose=True)
+
+
+ # STREAMLIT
+
+ # Container for chat history
+ response_container = st.container()
+ # Container for text box
+ textcontainer = st.container()
+
+
+ with textcontainer:
+     query = st.text_input("Query: ", key="input")
+     if query:
+         with st.spinner("typing..."):
+             conversation_string = get_conversation_string()
+             # st.code(conversation_string)
+             refined_query = query_refiner(conversation_string, query)
+             st.subheader("Refined Query:")
+             st.write(refined_query)
+             context = find_match(refined_query)
+             # print(context)
+             response = conversation.predict(input=f"Context:\n {context} \n\n Query:\n{query}")
+
+         # Add the request and response to the session history
+         st.session_state.requests.append(query)
+         st.session_state.responses.append(response)
+ with response_container:
+     if st.session_state['responses']:
+
+         for i in range(len(st.session_state['responses'])):
+             message(st.session_state['responses'][i], key=str(i))
+             if i < len(st.session_state['requests']):
+                 message(st.session_state["requests"][i], is_user=True, key=str(i) + '_user')
+
+
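The windowed memory above is what keeps the prompt within the model's token limit: with k=3, only the last three request/response pairs are replayed into the "history" placeholder of the prompt template. A minimal standalone sketch of that behavior, using the same classic LangChain import path as app.py:

from langchain.chains.conversation.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=3, return_messages=True)

# Record four exchanges; with k=3 the oldest pair falls out of the window.
for n in range(4):
    memory.save_context({"input": f"question {n}"}, {"output": f"answer {n}"})

history = memory.load_memory_variables({})["history"]
print(len(history))  # 6 messages: the last 3 human/AI pairs; "question 0" is gone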
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ python-dotenv
+ openai
+ pinecone-client<3.0
+ sentence_transformers
+ unstructured
+ unstructured[local-inference]
+ detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.6#egg=detectron2
+ poppler-utils
+ tiktoken
+ streamlit
+ streamlit_chat
+ langchain
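Note that `os` is part of the standard library (it is not pip-installable), the package that provides the dotenv module is python-dotenv, openai is imported by utils.py, and pinecone-client is held below 3.0 because utils.py uses the 2.x pinecone.init() API. Both app.py and utils.py read credentials from the environment after load_dotenv(), so the app expects OPENAI_API_KEY and PINECONE_API_KEY in a .env file or as Space secrets. A small fail-fast check, assuming only those two variable names from the code above:

import os
from dotenv import find_dotenv, load_dotenv

load_dotenv(find_dotenv())

# Fail early with a clear message instead of a KeyError deep inside the app.
for var in ("OPENAI_API_KEY", "PINECONE_API_KEY"):
    if var not in os.environ:
        raise RuntimeError(f"Missing required environment variable: {var}")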
utils.py ADDED
@@ -0,0 +1,41 @@
+ from sentence_transformers import SentenceTransformer
+ import pinecone
+ from openai import OpenAI
+ import os
+ import streamlit as st
+
+ from dotenv import find_dotenv, load_dotenv
+ load_dotenv(find_dotenv())
+
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+ model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment='gcp-starter')
+ index = pinecone.Index('langchain-chatbot')
+
+ # Find the most relevant documents that match the user's query
+ def find_match(input):
+     input_em = model.encode(input).tolist()
+     result = index.query(vector=input_em, top_k=2, include_metadata=True)
+     return result['matches'][0]['metadata']['text'] + "\n" + result['matches'][1]['metadata']['text']
+
+ # Refine the user's query so it is optimal for retrieving a relevant answer
+ def query_refiner(conversation, query):
+     response = client.completions.create(
+         model="gpt-3.5-turbo-instruct",  # drop-in replacement for the retired text-davinci-003
+         prompt=f"Given the following user query and conversation log, formulate a question that would be the most relevant to provide the user with an answer from a knowledge base.\n\nCONVERSATION LOG: \n{conversation}\n\nQuery: {query}\n\nRefined Query:",
+         temperature=0.7,
+         max_tokens=256,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0)
+     return response.choices[0].text
+
+ # Keep track of the ongoing conversation
+ def get_conversation_string():
+     conversation_string = ""
+     for i in range(len(st.session_state['responses']) - 1):
+         conversation_string += "Human: " + st.session_state['requests'][i] + "\n"
+         conversation_string += "Bot: " + st.session_state['responses'][i + 1] + "\n"
+     return conversation_string
+
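find_match() assumes the 'langchain-chatbot' index already exists and is populated with vectors whose metadata carries a 'text' field; that ingestion step is not part of this commit. A hypothetical sketch of it, assuming the same all-MiniLM-L6-v2 encoder (384-dimensional embeddings) and the pinecone-client 2.x API that utils.py relies on; the docs list is a placeholder:

import os
import pinecone
from sentence_transformers import SentenceTransformer

pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment='gcp-starter')

# all-MiniLM-L6-v2 produces 384-dimensional embeddings
if 'langchain-chatbot' not in pinecone.list_indexes():
    pinecone.create_index('langchain-chatbot', dimension=384, metric='cosine')
index = pinecone.Index('langchain-chatbot')

model = SentenceTransformer('all-MiniLM-L6-v2')
docs = ["First document chunk...", "Second document chunk..."]  # placeholder corpus

# Store the raw text under the 'text' metadata key that find_match() reads back
vectors = [(f"doc-{i}", model.encode(d).tolist(), {"text": d}) for i, d in enumerate(docs)]
index.upsert(vectors=vectors)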