uumerrr684 committed on
Commit
8237ec2
·
verified ·
1 Parent(s): 41ac029

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +190 -73
app.py CHANGED
@@ -1,7 +1,9 @@
1
  import requests
 
2
  import json
3
  import streamlit as st
4
  from datetime import datetime
 
5
 
6
  # Page configuration
7
  st.set_page_config(
@@ -10,18 +12,31 @@ st.set_page_config(
10
  initial_sidebar_state="collapsed"
11
  )
12
 
13
- # Minimal CSS for styling
14
  st.markdown("""
15
  <style>
16
  .stApp {
17
  background: white;
 
 
 
18
  max-width: 800px;
19
- margin: 0 auto;
20
  }
 
 
 
 
 
 
21
  .model-id {
22
  color: #28a745;
23
  font-family: monospace;
24
- font-size: 0.9em;
 
 
 
 
 
25
  }
26
  </style>
27
  """, unsafe_allow_html=True)
@@ -32,10 +47,12 @@ HISTORY_FILE = "chat_history.json"
32
  def load_chat_history():
33
  """Load chat history from file"""
34
  try:
35
- with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
36
- return json.load(f)
37
- except (FileNotFoundError, json.JSONDecodeError):
38
- return []
 
 
39
 
40
  def save_chat_history(messages):
41
  """Save chat history to file"""
@@ -48,47 +65,45 @@ def save_chat_history(messages):
48
  def clear_chat_history():
49
  """Clear chat history file"""
50
  try:
51
- open(HISTORY_FILE, 'w').close()
 
52
  st.session_state.messages = []
53
  except Exception as e:
54
  st.error(f"Error clearing chat history: {e}")
55
 
56
- # Initialize session state
57
  if "messages" not in st.session_state:
58
  st.session_state.messages = load_chat_history()
59
 
60
  # Get API key
61
- OPENROUTER_API_KEY = st.secrets.get("OPENROUTER_API_KEY", None)
62
 
63
- @st.cache_data(ttl=3600)
64
- def fetch_models():
65
- """Fetch available models from OpenRouter API"""
66
  if not OPENROUTER_API_KEY:
67
- return [("GPT-3.5 Turbo", "openai/gpt-3.5-turbo")]
68
  try:
69
  url = "https://openrouter.ai/api/v1/models"
70
  headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
71
  response = requests.get(url, headers=headers, timeout=10)
72
- if response.status_code == 200:
73
- models = response.json().get("data", [])
74
- return [(model["name"], model["id"]) for model in models if "name" in model and "id" in model]
75
- return [("GPT-3.5 Turbo", "openai/gpt-3.5-turbo")]
76
- except requests.RequestException:
77
- return [("GPT-3.5 Turbo", "openai/gpt-3.5-turbo")]
78
 
79
  def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
80
- """Get streaming AI response from OpenRouter"""
81
  if not OPENROUTER_API_KEY:
82
- yield "No API key found. Please add OPENROUTER_API_KEY to secrets."
83
- return
84
 
85
  url = "https://openrouter.ai/api/v1/chat/completions"
86
  headers = {
87
  "Content-Type": "application/json",
88
- "Authorization": f"Bearer {OPENROUTER_API_KEY}"
 
 
89
  }
90
 
91
- api_messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
 
92
  api_messages.extend(messages)
93
 
94
  data = {
@@ -96,36 +111,57 @@ def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
96
  "messages": api_messages,
97
  "stream": True,
98
  "max_tokens": 2000,
99
- "temperature": 0.7
 
 
 
100
  }
101
 
102
  try:
103
  response = requests.post(url, headers=headers, json=data, stream=True, timeout=60)
 
 
104
  if response.status_code != 200:
105
- error_data = response.json().get('error', {})
106
- yield f"API Error: {error_data.get('message', response.reason)}"
 
 
 
 
 
 
107
  return
108
 
109
  full_response = ""
 
 
 
110
  for line in response.iter_lines():
111
- if line and line.startswith(b"data: "):
112
- data_str = line[len(b"data: "):].decode("utf-8")
113
- if data_str.strip() == "[DONE]":
114
- break
115
- try:
116
- data = json.loads(data_str)
117
- delta = data["choices"][0]["delta"].get("content", "")
118
- if delta:
119
- full_response += delta
120
- yield full_response
121
- except json.JSONDecodeError:
122
- continue
123
- except requests.Timeout:
124
- yield "Request timed out. Try again."
125
- except requests.ConnectionError:
126
- yield "Connection error. Check your internet."
127
- except requests.RequestException as e:
128
- yield f"Request error: {str(e)}."
 
 
 
 
 
 
 
129
 
130
  # Header
131
  st.title("AI Assistant")
@@ -136,32 +172,54 @@ with st.sidebar:
136
  st.header("Settings")
137
 
138
  # API Status
139
- status = "Connected" if OPENROUTER_API_KEY and requests.get(
140
- "https://openrouter.ai/api/v1/models",
141
- headers={"Authorization": f"Bearer {OPENROUTER_API_KEY}"},
142
- timeout=10
143
- ).status_code == 200 else "No API Key"
 
 
144
 
145
- st.success("🟢 API Connected" if status == "Connected" else "🔴 No API Key")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
 
147
- # Model selection
148
- models = fetch_models()
149
  model_names = [name for name, _ in models]
150
  model_ids = [model_id for _, model_id in models]
151
 
152
  selected_index = st.selectbox("Model", range(len(model_names)),
153
- format_func=lambda x: model_names[x], index=0)
 
154
  selected_model = model_ids[selected_index]
155
 
 
156
  st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
157
 
158
- # Chat history controls
 
 
159
  st.header("Chat History")
 
 
160
  if st.session_state.messages:
161
  st.info(f"Messages stored: {len(st.session_state.messages)}")
162
 
 
163
  auto_save = st.checkbox("Auto-save messages", value=True)
164
 
 
165
  col1, col2 = st.columns(2)
166
  with col1:
167
  if st.button("Save History", use_container_width=True):
@@ -169,43 +227,102 @@ with st.sidebar:
169
  st.success("History saved!")
170
 
171
  with col2:
172
- if st.button("Clear History", use_container_width=True):
173
- clear_chat_history()
174
- st.success("History cleared!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
- # Show welcome message
177
  if not st.session_state.messages:
178
  st.info("How can I help you today?")
179
 
180
  # Display chat messages
181
  for message in st.session_state.messages:
182
  with st.chat_message(message["role"]):
183
- st.markdown(message["content"])
 
 
 
 
 
 
 
 
 
 
 
 
184
 
185
  # Chat input
186
  if prompt := st.chat_input("Ask anything..."):
 
187
  user_message = {"role": "user", "content": prompt}
188
  st.session_state.messages.append(user_message)
189
 
 
190
  if auto_save:
191
  save_chat_history(st.session_state.messages)
192
 
 
193
  with st.chat_message("user"):
194
  st.markdown(prompt)
195
 
 
196
  with st.chat_message("assistant"):
197
  placeholder = st.empty()
198
- full_response = ""
199
- for response in get_ai_response([user_message], selected_model):
200
- full_response = response
201
- placeholder.markdown(full_response + "▌")
202
- placeholder.markdown(full_response)
203
-
204
- assistant_message = {"role": "assistant", "content": full_response}
205
- st.session_state.messages.append(assistant_message)
206
 
207
- if auto_save:
208
- save_chat_history(st.session_state.messages)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
 
210
- # Show current model
211
- st.caption(f"Currently using: {model_names[selected_index]}")
 
1
  import requests
2
+ import os
3
  import json
4
  import streamlit as st
5
  from datetime import datetime
6
+ import time
7
 
8
  # Page configuration
9
  st.set_page_config(
 
12
  initial_sidebar_state="collapsed"
13
  )
14
 
15
+ # White background
16
  st.markdown("""
17
  <style>
18
  .stApp {
19
  background: white;
20
+ }
21
+
22
+ .main .block-container {
23
  max-width: 800px;
 
24
  }
25
+
26
+ #MainMenu {visibility: hidden;}
27
+ footer {visibility: hidden;}
28
+ header {visibility: hidden;}
29
+ .stDeployButton {display: none;}
30
+
31
  .model-id {
32
  color: #28a745;
33
  font-family: monospace;
34
+ }
35
+
36
+ .model-attribution {
37
+ color: #28a745;
38
+ font-size: 0.8em;
39
+ font-style: italic;
40
  }
41
  </style>
42
  """, unsafe_allow_html=True)
 
47
  def load_chat_history():
48
  """Load chat history from file"""
49
  try:
50
+ if os.path.exists(HISTORY_FILE):
51
+ with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
52
+ return json.load(f)
53
+ except Exception as e:
54
+ st.error(f"Error loading chat history: {e}")
55
+ return []
56
 
57
  def save_chat_history(messages):
58
  """Save chat history to file"""
 
65
def clear_chat_history():
    """Delete HISTORY_FILE (if present) and reset the in-memory messages."""
    try:
        # Remove-and-ignore-missing instead of exists()+remove(), which
        # races with a concurrent deletion of the file.
        try:
            os.remove(HISTORY_FILE)
        except FileNotFoundError:
            pass
        st.session_state.messages = []
    except Exception as e:
        st.error(f"Error clearing chat history: {e}")
73
 
74
+ # Initialize session state with saved history
75
  if "messages" not in st.session_state:
76
  st.session_state.messages = load_chat_history()
77
 
78
  # Get API key
79
+ OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
80
 
81
@st.cache_data(ttl=300)
def check_api_status():
    """Probe the OpenRouter models endpoint and report connectivity.

    Returns:
        "Connected"  — the models endpoint answered HTTP 200.
        "No API Key" — OPENROUTER_API_KEY is unset/empty.
        "Error"      — non-200 response or a network failure.

    Cached for 5 minutes via st.cache_data so every Streamlit rerun does
    not re-hit the API.
    """
    if not OPENROUTER_API_KEY:
        return "No API Key"
    try:
        url = "https://openrouter.ai/api/v1/models"
        headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
        response = requests.get(url, headers=headers, timeout=10)
        return "Connected" if response.status_code == 200 else "Error"
    except requests.RequestException:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return "Error"
 
 
 
92
 
93
def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
    """Stream a chat completion from OpenRouter.

    Generator: yields the *accumulated* response text after each streamed
    delta so the caller can re-render a growing message bubble. Error
    conditions are yielded as plain text in the same way.

    Args:
        messages: list of {"role", "content"} dicts (conversation so far).
        model: OpenRouter model id to query.
    """
    if not OPENROUTER_API_KEY:
        # BUG FIX: this function is a generator, so `return "<msg>"` would
        # only end the iteration and silently discard the message — the
        # caller would render an empty response. Yield it instead.
        yield "No API key found. Please add OPENROUTER_API_KEY to environment variables."
        return

    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "HTTP-Referer": "http://localhost:8501",  # Optional: Your site URL
        "X-Title": "Streamlit AI Assistant"  # Optional: Your app name
    }

    # Prepend the system prompt to the caller-supplied conversation.
    api_messages = [{"role": "system", "content": "You are a helpful AI assistant. Provide clear and helpful responses."}]
    api_messages.extend(messages)

    data = {
        "model": model,
        "messages": api_messages,
        "stream": True,
        "max_tokens": 2000,
        "temperature": 0.7,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0
    }

    try:
        response = requests.post(url, headers=headers, json=data, stream=True, timeout=60)

        if response.status_code != 200:
            # Prefer the API's own error message; fall back to the HTTP
            # status when the body is not JSON.
            try:
                error_data = response.json()
                error_detail = error_data.get('error', {}).get('message', f"HTTP {response.status_code}")
            except ValueError:
                # Narrowed from a bare `except:` — json decoding raises
                # ValueError (JSONDecodeError) on a non-JSON body.
                error_detail = f"HTTP {response.status_code}: {response.reason}"
            yield f"API Error: {error_detail}. Please try a different model or check your API key."
            return

        full_response = ""
        # Server-sent events: each payload line looks like `data: {...}`,
        # terminated by a literal `data: [DONE]`.
        for line in response.iter_lines():
            if line and line.startswith(b"data: "):
                data_str = line[len(b"data: "):].decode("utf-8")
                if data_str.strip() == "[DONE]":
                    break
                try:
                    chunk = json.loads(data_str)
                    delta = chunk["choices"][0]["delta"].get("content", "")
                    if delta:
                        full_response += delta
                        yield full_response
                except (json.JSONDecodeError, KeyError, IndexError):
                    # Skip keep-alive / malformed chunks.
                    continue

    except requests.exceptions.Timeout:
        yield "Request timed out. Please try again with a shorter message or different model."
    except requests.exceptions.ConnectionError:
        yield "Connection error. Please check your internet connection and try again."
    except requests.exceptions.RequestException as e:
        yield f"Request error: {str(e)}. Please try again."
    except Exception as e:
        yield f"Unexpected error: {str(e)}. Please try again or contact support."
165
 
166
  # Header
167
  st.title("AI Assistant")
 
172
  st.header("Settings")
173
 
174
  # API Status
175
+ status = check_api_status()
176
+ if status == "Connected":
177
+ st.success("🟢 API Connected")
178
+ elif status == "No API Key":
179
+ st.error("No API Key")
180
+ else:
181
+ st.warning("Connection Issue")
182
 
183
+ st.divider()
184
+
185
+ # All models including new ones
186
+ models = [
187
+ ("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
188
+ ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct"),
189
+ ("LLaMA 3.1 70B", "meta-llama/llama-3.1-70b-instruct"),
190
+ ("DeepSeek Chat v3", "deepseek/deepseek-chat-v3-0324:free"),
191
+ ("DeepSeek R1", "deepseek/deepseek-r1-0528:free"),
192
+ ("Qwen3 Coder", "qwen/qwen3-coder:free"),
193
+ ("Microsoft MAI DS R1", "microsoft/mai-ds-r1:free"),
194
+ ("Gemma 3 27B", "google/gemma-3-27b-it:free"),
195
+ ("Gemma 3 4B", "google/gemma-3-4b-it:free"),
196
+ ("Auto (Best Available)", "openrouter/auto")
197
+ ]
198
 
 
 
199
  model_names = [name for name, _ in models]
200
  model_ids = [model_id for _, model_id in models]
201
 
202
  selected_index = st.selectbox("Model", range(len(model_names)),
203
+ format_func=lambda x: model_names[x],
204
+ index=0)
205
  selected_model = model_ids[selected_index]
206
 
207
+ # Show selected model ID in green
208
  st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
209
 
210
+ st.divider()
211
+
212
+ # Chat History Controls
213
  st.header("Chat History")
214
+
215
+ # Show number of messages
216
  if st.session_state.messages:
217
  st.info(f"Messages stored: {len(st.session_state.messages)}")
218
 
219
+ # Auto-save toggle
220
  auto_save = st.checkbox("Auto-save messages", value=True)
221
 
222
+ # Manual save/load buttons
223
  col1, col2 = st.columns(2)
224
  with col1:
225
  if st.button("Save History", use_container_width=True):
 
227
  st.success("History saved!")
228
 
229
  with col2:
230
+ if st.button("Load History", use_container_width=True):
231
+ st.session_state.messages = load_chat_history()
232
+ st.success("History loaded!")
233
+ st.rerun()
234
+
235
+ st.divider()
236
+
237
+ # View History
238
+ if st.button("View History File", use_container_width=True):
239
+ if os.path.exists(HISTORY_FILE):
240
+ with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
241
+ history_content = f.read()
242
+ st.text_area("Chat History (JSON)", history_content, height=200)
243
+ else:
244
+ st.warning("No history file found")
245
+
246
+ # Download History
247
+ if os.path.exists(HISTORY_FILE):
248
+ with open(HISTORY_FILE, 'rb') as f:
249
+ st.download_button(
250
+ label="Download History",
251
+ data=f.read(),
252
+ file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
253
+ mime="application/json",
254
+ use_container_width=True
255
+ )
256
+
257
+ st.divider()
258
+
259
+ # Clear controls
260
+ if st.button("Clear Chat", use_container_width=True, type="secondary"):
261
+ clear_chat_history()
262
+ st.success("Chat cleared!")
263
+ st.rerun()
264
 
265
# Show welcome message when no messages
if not st.session_state.messages:
    st.info("How can I help you today?")

# Re-render the full conversation on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        # Assistant messages are stored with a trailing attribution line
        # ("\n\n---\n*Response created by: **<model>***") appended by the
        # chat-input handler; render it as a styled footer instead of raw
        # markdown.
        if message["role"] == "assistant" and "Response created by:" in message["content"]:
            # Split content and attribution on the exact separator used
            # when the message was stored.
            parts = message["content"].split("\n\n---\n*Response created by:")
            main_content = parts[0]
            if len(parts) > 1:
                # Strip the markdown bold markers wrapping the model name.
                model_name = parts[1].replace("***", "").replace("**", "")
                st.markdown(main_content)
                st.markdown(f"<div class='model-attribution'>Response created by: <strong>{model_name}</strong></div>", unsafe_allow_html=True)
            else:
                # "Response created by:" appeared in the body itself but
                # not as the stored separator — render verbatim.
                st.markdown(message["content"])
        else:
            st.markdown(message["content"])
285
 
286
# Chat input: append the user message, stream the assistant reply, and
# persist both (when auto-save is enabled in the sidebar).
if prompt := st.chat_input("Ask anything..."):
    # Add user message
    user_message = {"role": "user", "content": prompt}
    st.session_state.messages.append(user_message)

    # Auto-save if enabled
    if auto_save:
        save_chat_history(st.session_state.messages)

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Get AI response
    with st.chat_message("assistant"):
        placeholder = st.empty()

        full_response = ""
        try:
            # get_ai_response yields the *accumulated* text each time, so
            # overwrite (not append) and redraw with a cursor glyph.
            # NOTE(review): the whole history (including messages that
            # already carry the "Response created by" attribution footer)
            # is sent back to the model — confirm that is intended.
            for response in get_ai_response(st.session_state.messages, selected_model):
                full_response = response
                placeholder.markdown(full_response + "▌")

            # Remove cursor and show final response
            placeholder.markdown(full_response)

        except Exception as e:
            error_msg = f"An error occurred: {str(e)}"
            placeholder.markdown(error_msg)
            full_response = error_msg

        # Add AI response to messages with attribution; the display loop
        # above parses this exact separator back out.
        full_response_with_attribution = full_response + f"\n\n---\n*Response created by: **{model_names[selected_index]}***"
        assistant_message = {"role": "assistant", "content": full_response_with_attribution}
        st.session_state.messages.append(assistant_message)

        # Auto-save if enabled
        if auto_save:
            save_chat_history(st.session_state.messages)
326
 
327
# Footer: echo the currently selected model name below the chat.
st.caption(f"Currently using: **{model_names[selected_index]}**")