seansullivan committed
Commit 487a211 · verified · 1 Parent(s): fc6f6c1

Create app.py

Files changed (1)
  1. app.py +176 -0
app.py ADDED
@@ -0,0 +1,176 @@
+ from langchain_openai import OpenAIEmbeddings
+
+ from pinecone import Pinecone
+ from pinecone_text.sparse import SpladeEncoder
+ from langchain_community.retrievers import PineconeHybridSearchRetriever
+
+ import os
+
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.runnables import RunnableParallel, RunnablePassthrough, Runnable
+ from langchain_anthropic import ChatAnthropic
+
+ import streamlit as st
+
+ # Streamlit app configuration (page settings and model_name are defined up front, before they are needed)
+ st.set_page_config(page_title="Chat with HiPerGator Docs", page_icon="🟩")
+ st.markdown("<h1 style='text-align: center;'>How can I help you?</h1>", unsafe_allow_html=True)
+
+ st.sidebar.title("Options")
+ model_name = "claude-3-haiku-20240307"
+
+ # ========== PART 1: API keys and dense embeddings ==========
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
+ PINE_API_KEY = os.getenv("PINE_API_KEY")
+
+ embed = OpenAIEmbeddings(
+     model='text-embedding-3-small',
+     openai_api_key=OPENAI_API_KEY,
+     dimensions=768  # must match the dimension of the Pinecone index
+ )
+
+ # ========== PART 2: Pinecone index ==========
+ index_name = 'splade'
+ namespace_name = 'HiPerGator'
+ pc = Pinecone(api_key=PINE_API_KEY)
+ index = pc.Index(index_name)
+
+ # ========== PART 3: hybrid (dense + sparse) retriever ==========
+ splade_encoder = SpladeEncoder()
+ retriever = PineconeHybridSearchRetriever(
+     embeddings=embed, sparse_encoder=splade_encoder, index=index
+ )
+
+ # ========== PART 4: RAG chain ==========
+ # RAG prompt
+ template = """You are an expert in HiPerGator (the University of Florida's supercomputer) with access to its dense documentation. Use the given context from the documentation to assist the user with their question:
+ Question: {question}
+ {context}
+ """
+ prompt = ChatPromptTemplate.from_template(template)
+
+ # Claude 3 Haiku
+ model = ChatAnthropic(temperature=0, anthropic_api_key=ANTHROPIC_API_KEY, model_name=model_name)
+
+ class SourceDedup(Runnable):
+     """Keep only the first document seen from each 'source' in the context."""
+     def invoke(self, input, config=None):
+         assert isinstance(input, dict)
+         documents = input["context"]
+         unique_sources = set()
+         unique_documents = []
+
+         for doc in documents:
+             source = doc.metadata["source"]
+             if source not in unique_sources:
+                 unique_sources.add(source)
+                 unique_documents.append(doc)
+         input["context"] = unique_documents
+         return input
+
+ class PassParentContent(Runnable):
+     """Swap each chunk's text for its parent document's content when available."""
+     def invoke(self, input, config=None):
+         assert isinstance(input, dict)
+         documents = input["context"]
+
+         for doc in documents:
+             if "parent_content" in doc.metadata:
+                 doc.page_content = doc.metadata["parent_content"]
+         return input
+
+ rag_chain = (
+     RunnablePassthrough()
+     # | SourceDedup()
+     # | PassParentContent()
+     | prompt
+     | model
+     | StrOutputParser()
+ )
+
+ rag_chain_with_source = RunnableParallel(
+     {"context": retriever, "question": RunnablePassthrough()}
+ ).assign(answer=rag_chain)
+
+ def generate_response(prompt):
+     start = "Answer: "
+     st.session_state['generated'].append(start)
+     yield start
+
+     # Each streamed chunk is a dict keyed by 'question', 'context', or 'answer'.
+     for chunk in rag_chain_with_source.stream(prompt):
+         if 'answer' in chunk:
+             st.session_state['generated'][-1] += chunk['answer']
+             yield chunk['answer']
+
+         elif 'context' in chunk:
+             # Sources do not work the same with this streaming code; removed for now.
+             # sources = [doc.metadata['source'] for doc in chunk['context']]
+             # sources_txt = "\n\nSources:\n" + "\n".join(sources)
+             # yield sources_txt
+             pass
+
+ # question = "How can I do hybrid search with a pinecone database?"
+ # answer = generate_response(question)
+ # print(answer)
+
+ # ==================== THE REST OF THE STREAMLIT APP ====================
+
+ # Initialize session state variables if they don't exist
+ if 'generated' not in st.session_state:
+     st.session_state['generated'] = []
+
+ if 'past' not in st.session_state:
+     st.session_state['past'] = []
+
+ if 'messages' not in st.session_state:
+     st.session_state['messages'] = [{"role": "system", "content": "You are a helpful assistant."}]
+
+ if 'total_cost' not in st.session_state:
+     st.session_state['total_cost'] = 0.0
+
+ def refresh_text():
+     # Redraw the conversation history inside the response container.
+     with response_container:
+         for i in range(len(st.session_state['past'])):
+             try:
+                 user_message_content = st.session_state["past"][i]
+                 message = st.chat_message("user")
+                 message.write(user_message_content)
+             except IndexError:
+                 print("Past error")
+
+             try:
+                 ai_message_content = st.session_state["generated"][i]
+                 message = st.chat_message("assistant")
+                 message.write(ai_message_content)
+             except IndexError:
+                 print("Generated error")
+
+ response_container = st.container()
+ container = st.container()
+
+ if prompt := st.chat_input("Ask a question..."):
+     st.session_state['past'].append(prompt)
+     refresh_text()
+
+     st.session_state['messages'].append({"role": "user", "content": prompt})
+     with response_container:
+         my_generator = generate_response(prompt)
+         message = st.chat_message("assistant")
+         message.write_stream(my_generator)
+
+ if __name__ == "__main__":
+     # result = retriever.get_relevant_documents("foo")
+     # print(result[0].page_content)
+     pass
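
One note on the retriever as committed: namespace_name is defined in PART 2 but never used, so queries run against the index's default namespace. If the documentation vectors were upserted under the HiPerGator namespace (an assumption; the ingestion side is not part of this commit), PineconeHybridSearchRetriever can be scoped to it, and its alpha and top_k fields control the dense/sparse weighting and the number of results. A minimal sketch of that variant:

retriever = PineconeHybridSearchRetriever(
    embeddings=embed,
    sparse_encoder=splade_encoder,
    index=index,
    namespace=namespace_name,  # assumption: vectors live in the 'HiPerGator' namespace
    alpha=0.5,  # 0 = pure sparse (SPLADE), 1 = pure dense; 0.5 is the library default
    top_k=4,    # number of documents to return (library default)
)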
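
For a quick smoke test of the retrieval pieces outside Streamlit, a self-contained sketch (assuming the same environment variables and the existing 'splade' index; the question is only an illustrative placeholder):

import os
from pinecone import Pinecone
from pinecone_text.sparse import SpladeEncoder
from langchain_openai import OpenAIEmbeddings
from langchain_community.retrievers import PineconeHybridSearchRetriever

embed = OpenAIEmbeddings(
    model="text-embedding-3-small",
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    dimensions=768,  # must match the Pinecone index dimension
)
index = Pinecone(api_key=os.getenv("PINE_API_KEY")).Index("splade")
retriever = PineconeHybridSearchRetriever(
    embeddings=embed, sparse_encoder=SpladeEncoder(), index=index
)

# Retrievers are Runnables, so invoke() runs a single query.
docs = retriever.invoke("How do I submit a SLURM job?")
for doc in docs:
    print(doc.metadata.get("source"), "->", doc.page_content[:80])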
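
Because rag_chain_with_source is a RunnableParallel with answer assigned on top, a plain invoke returns question, context, and answer together (same assumptions as the sketch above):

out = rag_chain_with_source.invoke("What is HiPerGator?")
print(out["answer"])  # the model's reply
print([doc.metadata["source"] for doc in out["context"]])  # retrieved sources

The app itself runs the usual way: export OPENAI_API_KEY, ANTHROPIC_API_KEY, and PINE_API_KEY, then streamlit run app.py.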