Ashkchamp committed on
Commit
b705daa
Β·
verified Β·
1 Parent(s): 01685af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +404 -74
app.py CHANGED
@@ -1,91 +1,421 @@
1
  import os
2
- from dotenv import load_dotenv
3
  import streamlit as st
 
 
 
 
 
 
 
4
  from langchain_groq import ChatGroq
5
  from langchain.chains import LLMChain
6
  from langchain.prompts import PromptTemplate
7
- from langchain_community.utilities import WikipediaAPIWrapper
8
- from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
9
- from langchain.agents import Tool
10
- from langchain.callbacks import StreamlitCallbackHandler
11
 
12
- # Load .env
13
  load_dotenv()
14
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
15
- if not GROQ_API_KEY:
16
- st.error("GROQ_API_KEY not found in environment")
17
- st.stop()
18
 
19
- # Streamlit UI
20
- st.set_page_config(page_title="General Knowledge Assistant", page_icon="🧭")
21
- st.title("General Knowledge Assistant")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- # Initialize LLM
24
- llm = ChatGroq(
25
- model="meta-llama/llama-4-maverick-17b-128e-instruct",
26
- groq_api_key=GROQ_API_KEY
27
- )
28
 
29
- # Wikipedia tool
30
- wikipedia_wrapper = WikipediaAPIWrapper()
31
- wikipedia_tool = Tool(
32
- name="Wikipedia",
33
- func=wikipedia_wrapper.run,
34
- description="Fetch summaries from Wikipedia."
35
- )
36
 
37
- # DuckDuckGo web search tool
38
- ddg_wrapper = DuckDuckGoSearchAPIWrapper()
39
- web_search_tool = Tool(
40
- name="WebSearch",
41
- func=ddg_wrapper.run,
42
- description="Perform a live web search via DuckDuckGo."
43
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
- # Prompt template for the LLM
46
- prompt = """
47
- You are a knowledgeable assistant. Answer {question} using your internal knowledge.
48
- If you’re unsure, say "I don't know" or "Outdated".
49
- """
50
- prompt_template = PromptTemplate(input_variables=["question"], template=prompt)
51
- chain = LLMChain(llm=llm, prompt=prompt_template)
52
 
53
- # Enhanced fallback logic:
54
- def get_answer(query: str) -> str:
55
- ql = query.lower()
56
- # 1) If it's asking for recent info, always web‑search first:
57
- if any(k in ql for k in ["recent", "latest", "today", "current", "2025"]):
58
- ddg_ans = ddg_wrapper.run(query)
59
- if ddg_ans and len(ddg_ans) > 20:
60
- return ddg_ans
61
- return wikipedia_wrapper.run(query)
62
- # 2) Otherwise, use your LLM
63
- lm_ans = chain.run({"question": query}).strip()
64
- # 3) If the LLM defers, fall back to Wikipedia
65
- if any(flag in lm_ans.lower() for flag in ["i don't know", "outdated", "not sure"]):
66
- return wikipedia_wrapper.run(query)
67
- return lm_ans
68
 
69
- # Conversation history
70
- if "messages" not in st.session_state:
71
- st.session_state["messages"] = [
72
- {"role": "assistant", "content": "Hi! Ask me anythingβ€”I'll fetch the latest data for recent questions."}
73
- ]
 
 
74
 
75
- # Display history
76
- for msg in st.session_state["messages"]:
77
- st.chat_message(msg["role"]).write(msg["content"])
78
 
79
- # User input
80
- question = st.text_area("Enter your question:", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
- if st.button("Find my answer"):
83
- if question:
84
- st.session_state["messages"].append({"role": "user", "content": question})
85
- st.chat_message("user").write(question)
86
- with st.spinner("Generating response..."):
87
- answer = get_answer(question)
88
- st.session_state["messages"].append({"role": "assistant", "content": answer})
89
- st.chat_message("assistant").write(answer)
90
- else:
91
- st.warning("Please enter a question.")
 
1
  import os
 
2
  import streamlit as st
3
+ import numpy as np
4
+ import google.generativeai as genai
5
+ import uuid
6
+ import datetime
7
+ import json
8
+ from dotenv import load_dotenv
9
+ from langchain_community.tools import DuckDuckGoSearchRun
10
  from langchain_groq import ChatGroq
11
  from langchain.chains import LLMChain
12
  from langchain.prompts import PromptTemplate
 
 
 
 
13
 
14
+ # Load environment variables
15
  load_dotenv()
 
 
 
 
16
 
17
+ # Custom CSS for a modern chat interface
18
def local_css():
    """Inject the app's custom stylesheet (chat bubbles, sidebar, buttons,
    and the animated typing indicator) into the Streamlit page."""
    stylesheet = """
    <style>
    /* Main app styling */
    .main {
        background-color: #f9f9fc;
        font-family: 'Inter', sans-serif;
    }

    /* Chat container styling */
    .chat-container {
        max-width: 900px;
        margin: 0 auto;
        padding: 1rem;
        border-radius: 12px;
        background-color: white;
        box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
    }

    /* Message styling */
    .stChatMessage {
        padding: 0.5rem 0;
    }

    /* User message styling */
    [data-testid="stChatMessageContent"] {
        border-radius: 18px;
        padding: 0.8rem 1rem;
        line-height: 1.5;
    }

    /* User avatar */
    .stChatMessageAvatar {
        background-color: #1f75fe !important;
    }

    /* Assistant avatar */
    [data-testid="stChatMessageAvatar"][data-testid*="assistant"] {
        background-color: #10a37f !important;
    }

    /* Sidebar styling */
    [data-testid="stSidebar"] {
        background-color: #ffffff;
        border-right: 1px solid #e6e6e6;
        padding: 1rem;
    }

    /* Chat history item styling */
    .chat-history-item {
        padding: 10px 15px;
        margin: 5px 0;
        border-radius: 8px;
        cursor: pointer;
        transition: background-color 0.2s;
        overflow: hidden;
        text-overflow: ellipsis;
        white-space: nowrap;
    }

    .chat-history-item:hover {
        background-color: #f0f0f5;
    }

    .chat-history-active {
        background-color: #e6f0ff;
        border-left: 3px solid #1f75fe;
    }

    /* Input area styling */
    .stTextInput > div > div > input {
        border-radius: 20px;
        padding: 10px 15px;
        border: 1px solid #e0e0e0;
        background-color: #f9f9fc;
    }

    /* Button styling */
    .stButton > button {
        border-radius: 20px;
        padding: 0.3rem 1rem;
        background-color: #1f75fe;
        color: white;
        border: none;
        transition: all 0.2s;
    }

    .stButton > button:hover {
        background-color: #0056b3;
        transform: translateY(-2px);
    }

    /* Custom header */
    .custom-header {
        display: flex;
        align-items: center;
        margin-bottom: 1rem;
    }

    .custom-header h1 {
        margin: 0;
        font-size: 1.8rem;
        color: #333;
    }

    /* Typing indicator */
    .typing-indicator {
        display: flex;
        padding: 10px 15px;
        background-color: #f0f0f5;
        border-radius: 18px;
        width: fit-content;
    }

    .typing-indicator span {
        height: 8px;
        width: 8px;
        margin: 0 1px;
        background-color: #a0a0a0;
        border-radius: 50%;
        display: inline-block;
        animation: typing 1.4s infinite ease-in-out both;
    }

    .typing-indicator span:nth-child(1) {
        animation-delay: 0s;
    }

    .typing-indicator span:nth-child(2) {
        animation-delay: 0.2s;
    }

    .typing-indicator span:nth-child(3) {
        animation-delay: 0.4s;
    }

    @keyframes typing {
        0% { transform: scale(1); }
        50% { transform: scale(1.5); }
        100% { transform: scale(1); }
    }
    </style>
    """
    st.markdown(stylesheet, unsafe_allow_html=True)
161
+
162
+ # Initialize session state variables
163
def init_session_state():
    """Seed st.session_state with every key the app relies on.

    Runs on each rerun; only missing keys are created, so existing
    conversation state is never clobbered.
    """
    for key, default in (("messages", []), ("chat_sessions", {})):
        if key not in st.session_state:
            st.session_state[key] = default
    if "current_session_id" not in st.session_state:
        st.session_state["current_session_id"] = str(uuid.uuid4())
    if "session_name" not in st.session_state:
        stamp = datetime.datetime.now().strftime("%b %d, %H:%M")
        st.session_state["session_name"] = f"Chat {stamp}"
172
+
173
+ # Save and load chat sessions
174
def save_chat_session():
    """Snapshot the active conversation into st.session_state.chat_sessions.

    Fix: the original stored the live ``messages`` list by reference, so any
    later in-place mutation of ``st.session_state.messages`` silently rewrote
    the saved session as well. A shallow copy decouples the snapshot; the
    app already calls this function after every mutation, so behavior for
    normal flows is unchanged.
    """
    if st.session_state.current_session_id:
        st.session_state.chat_sessions[st.session_state.current_session_id] = {
            "name": st.session_state.session_name,
            # shallow copy: decouple the snapshot from the live message list
            "messages": list(st.session_state.messages),
            "timestamp": datetime.datetime.now().isoformat(),
        }
181
 
182
def load_chat_session(session_id):
    """Make the saved session identified by ``session_id`` the active chat.

    Unknown ids are ignored silently, matching the original membership check.
    """
    session = st.session_state.chat_sessions.get(session_id)
    if session is None:
        return
    st.session_state.current_session_id = session_id
    st.session_state.messages = session["messages"]
    st.session_state.session_name = session["name"]
187
 
188
def create_new_chat():
    """Start a fresh, empty conversation under a timestamped default name."""
    stamp = datetime.datetime.now().strftime("%b %d, %H:%M")
    st.session_state.current_session_id = str(uuid.uuid4())
    st.session_state.messages = []
    st.session_state.session_name = f"Chat {stamp}"
 
 
 
192
 
193
+ # Configure Gemini and Groq models
194
def setup_models(groq_api_key, gemini_api_key):
    """Configure the Gemini SDK and build both Groq answer chains.

    Args:
        groq_api_key: API key for the Groq-hosted Llama model.
        gemini_api_key: API key for Google Gemini (used for search routing).

    Returns:
        Tuple ``(direct_chain, search_chain)``: an LLMChain that answers
        from the model's own knowledge, and one that answers from supplied
        web-search results.
    """
    genai.configure(api_key=gemini_api_key)

    llm = ChatGroq(
        model="meta-llama/llama-4-maverick-17b-128e-instruct",
        groq_api_key=groq_api_key,
    )

    direct_prompt = PromptTemplate(
        input_variables=["question"],
        template="""
        Answer the question in detailed form.

        Question: {question}
        Answer:
        """,
    )

    search_prompt = PromptTemplate(
        input_variables=["web_results", "question"],
        template="""
        Use these web search results to give a comprehensive answer:

        Search Results:
        {web_results}

        Question: {question}
        Answer:
        """,
    )

    direct_chain = LLMChain(llm=llm, prompt=direct_prompt)
    search_chain = LLMChain(llm=llm, prompt=search_prompt)
    return direct_chain, search_chain
228
 
229
def get_gemini_model(name="gemini-1.5-pro"):
    """Return a Gemini ``GenerativeModel`` handle for the given model name."""
    return genai.GenerativeModel(name)
 
 
 
 
 
231
 
232
def gen_content(model, prompt, temperature=0.4, max_tokens=512):
    """Generate text from a Gemini model, returning "" when no text came back.

    Fix: the original returned ``res.candidates[0].content.parts[0].text``
    guarded only by ``if res.candidates`` — a blocked or empty response can
    still carry a candidate whose ``content.parts`` is empty, which raised
    IndexError. Both levels are now guarded.

    Args:
        model: object exposing ``generate_content(prompt, generation_config,
            safety_settings)`` (a genai.GenerativeModel in production).
        prompt: text prompt to send.
        temperature: sampling temperature (default 0.4).
        max_tokens: cap on output tokens (default 512).

    Returns:
        The first candidate's first text part, or "" if absent.
    """
    cfg = {
        "temperature": temperature,
        "top_p": 1,
        "top_k": 50,
        "max_output_tokens": max_tokens,
    }
    # Disable all blocking categories; the routing prompt is benign and a
    # blocked decision would break search dispatch.
    safety = [
        {"category": c, "threshold": "BLOCK_NONE"}
        for c in (
            "HARM_CATEGORY_HARASSMENT",
            "HARM_CATEGORY_HATE_SPEECH",
            "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "HARM_CATEGORY_DANGEROUS_CONTENT",
        )
    ]
    res = model.generate_content(prompt, generation_config=cfg, safety_settings=safety)
    if not getattr(res, "candidates", None):
        return ""
    parts = getattr(res.candidates[0].content, "parts", None)
    if not parts:
        return ""
    return parts[0].text
 
 
 
 
 
 
 
240
 
241
def decide_search(query: str):
    """Ask Gemini whether ``query`` needs a live web search.

    Fix: when the model echoed the ``<SEARCH>`` tag with no keywords after
    it, the original returned an empty keyword string, producing a useless
    DuckDuckGo query. We now fall back to the raw user query. The split is
    also bounded (``maxsplit=1``) so a stray second tag cannot drop text.

    Returns:
        ``(True, keywords)`` when a search is warranted, else ``(False, None)``.
    """
    model = get_gemini_model()
    decision_prompt = f"Decide if this requires web search. If yes, reply '<SEARCH> keywords'. Otherwise 'NO_SEARCH'.\nQuery: {query}"
    response = gen_content(model, decision_prompt, max_tokens=32)
    if "<SEARCH>" in response:
        keywords = response.split("<SEARCH>", 1)[1].strip()
        # Fall back to the user's own words if the model gave no keywords.
        return True, keywords or query
    return False, None
248
 
249
@st.cache_data
def perform_search(keywords: str) -> str:
    """Run a DuckDuckGo web search, cached per unique keywords string."""
    searcher = DuckDuckGoSearchRun()
    return searcher.run(keywords)
252
 
253
+ # Main application
254
def main():
    """Streamlit entry point: sidebar (API keys + chat history) and chat UI.

    Fix: the sidebar loop computed ``preview``, ``is_current`` and ``style``
    but never used any of them — the history buttons render
    ``session["name"]`` and no CSS class was ever attached. That dead code
    is removed (see NOTE below); everything else is behavior-preserving.
    """
    st.set_page_config(
        page_title="General Knowledge Assistant",
        page_icon="🧭",
        layout="wide",
        initial_sidebar_state="expanded",
    )
    local_css()
    init_session_state()

    # ---- Sidebar: API keys and saved-chat management ----
    with st.sidebar:
        st.markdown("<h2 style='text-align: center;'>🧭 Knowledge Assistant</h2>", unsafe_allow_html=True)

        st.subheader("🔑 API Keys")
        # Environment variables win; password inputs are the interactive fallback.
        groq_api_key = os.environ.get("GROQ_API_KEY") or st.text_input("Groq API Key", type="password")
        gemini_api_key = os.environ.get("GEMINI_API_KEY") or st.text_input("Gemini API Key", type="password")

        if not groq_api_key or not gemini_api_key:
            st.warning("Please provide both API keys to proceed.")
            st.stop()

        st.subheader("💬 Chat History")

        if st.button("➕ New Chat", key="new_chat"):
            create_new_chat()

        # Renaming the current chat persists immediately.
        new_name = st.text_input("Chat Name", value=st.session_state.session_name)
        if new_name != st.session_state.session_name:
            st.session_state.session_name = new_name
            save_chat_session()

        st.markdown("#### Previous Chats")

        # Newest first; sessions lacking a timestamp sort last.
        sorted_sessions = sorted(
            st.session_state.chat_sessions.items(),
            key=lambda item: item[1].get("timestamp", ""),
            reverse=True,
        )

        # NOTE(review): the .chat-history-item/.chat-history-active classes
        # defined in local_css() were never wired to these buttons; the
        # unused preview/style computation was removed as dead code.
        for session_id, session in sorted_sessions:
            col1, col2 = st.columns([0.8, 0.2])
            with col1:
                if st.button(session["name"], key=f"load_session_{session_id}"):
                    load_chat_session(session_id)
                    st.rerun()
            with col2:
                if st.button("🗑️", key=f"delete_{session_id}", help="Delete this chat"):
                    if session_id in st.session_state.chat_sessions:
                        del st.session_state.chat_sessions[session_id]
                        if session_id == st.session_state.current_session_id:
                            create_new_chat()
                        st.rerun()

    # ---- Main chat area ----
    direct_chain, search_chain = setup_models(groq_api_key, gemini_api_key)

    st.markdown("""
    <div class="custom-header">
        <h1>🧭 General Knowledge Assistant</h1>
    </div>
    """, unsafe_allow_html=True)

    chat_container = st.container()

    # st.chat_input renders pinned to the bottom regardless of call order.
    user_input = st.chat_input("Ask me anything...")

    if user_input:
        st.session_state.messages.append({"role": "user", "content": user_input})
        save_chat_session()

        # Animated typing indicator while the model works.
        with chat_container:
            typing_placeholder = st.empty()
            typing_placeholder.markdown("""
            <div class="typing-indicator">
                <span></span>
                <span></span>
                <span></span>
            </div>
            """, unsafe_allow_html=True)

        try:
            # Gemini decides whether the query needs live web data.
            needs_search, terms = decide_search(user_input)
            if needs_search:
                web_results = perform_search(terms)
                answer = search_chain.run({"web_results": web_results, "question": user_input})
            else:
                answer = direct_chain.run({"question": user_input})
            st.session_state.messages.append({"role": "assistant", "content": answer})
            save_chat_session()
        except Exception as e:
            # Surface the failure in-chat rather than crashing the app.
            error_message = f"Sorry, I encountered an error: {str(e)}"
            st.session_state.messages.append({"role": "assistant", "content": error_message})
            save_chat_session()

        typing_placeholder.empty()
        # Rerun so the freshly appended messages render below.
        st.rerun()

    # ---- Render conversation ----
    with chat_container:
        if not st.session_state.messages:
            st.markdown("""
            <div style="text-align: center; padding: 50px 20px;">
                <h3>👋 Welcome to the General Knowledge Assistant!</h3>
                <p>Ask me anything about general knowledge, facts, or concepts.</p>
                <p>I can search the web when needed to provide you with up-to-date information.</p>
            </div>
            """, unsafe_allow_html=True)
        else:
            for msg in st.session_state.messages:
                # Defensive: only dicts with role+content are renderable.
                if isinstance(msg, dict) and "role" in msg and "content" in msg:
                    with st.chat_message(msg["role"]):
                        st.write(msg["content"])
                else:
                    st.error(f"Invalid message format: {msg}")
419
 
420
+ if __name__ == "__main__":
421
+ main()