awacke1 commited on
Commit
3aed0ab
β€’
1 Parent(s): c48c794

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +338 -0
app.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import anthropic
3
+ import openai
4
+ import base64
5
+ from datetime import datetime
6
+ import plotly.graph_objects as go
7
+ import cv2
8
+ import glob
9
+ import json
10
+ import math
11
+ import os
12
+ import pytz
13
+ import random
14
+ import re
15
+ import requests
16
+ import streamlit.components.v1 as components
17
+ import textract
18
+ import time
19
+ import zipfile
20
+ from audio_recorder_streamlit import audio_recorder
21
+ from bs4 import BeautifulSoup
22
+ from collections import deque
23
+ from dotenv import load_dotenv
24
+ from gradio_client import Client, handle_file
25
+ from huggingface_hub import InferenceClient
26
+ from io import BytesIO
27
+ from moviepy.editor import VideoFileClip
28
+ from PIL import Image
29
+ from PyPDF2 import PdfReader
30
+ from urllib.parse import quote
31
+ from xml.etree import ElementTree as ET
32
+ from openai import OpenAI
33
+
34
# ---------------------------------------------------------------------------
# Configuration and Setup
# ---------------------------------------------------------------------------
# App identity strings, reused by the Streamlit page configuration below.
Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🚲🏆'

# Streamlit requires set_page_config() to be the first st.* call of the run.
_menu_items = {
    'Get Help': helpURL,
    'Report a bug': bugURL,
    'About': title,
}
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items=_menu_items,
)
52
+
53
# ---------------------------------------------------------------------------
# Load environment variables and initialize API clients
# ---------------------------------------------------------------------------
load_dotenv()

# OpenAI setup: prefer the environment variable, fall back to Streamlit secrets.
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:  # `is None`, not `== None`
    openai.api_key = st.secrets['OPENAI_API_KEY']

# Reuse the resolved key so the client also benefits from the secrets
# fallback; the original passed os.getenv(...) directly, which is None
# whenever the key only lives in st.secrets.
openai_client = OpenAI(
    api_key=openai.api_key,
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup: same env-var-first, secrets-fallback pattern.
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
71
+
72
# ---------------------------------------------------------------------------
# Initialize session state (idempotent across Streamlit reruns)
# ---------------------------------------------------------------------------
_session_defaults = {
    'transcript_history': [],
    'chat_history': [],
    'openai_model': "gpt-4o-2024-05-13",
    'messages': [],
    'last_voice_input': "",
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
83
+
84
# Speech Recognition HTML Component
#
# Self-contained HTML/CSS/JS page embedded via a Streamlit HTML component.
# It uses the browser's webkitSpeechRecognition API (WebKit/Chrome only;
# the page alerts and stops if the API is missing) to run continuous
# speech-to-text, auto-starting one second after load and restarting itself
# in onend while the Stop button is active. Final transcripts and "clear"
# events are forwarded to the parent frame via window.parent.postMessage
# as {type: 'final_transcript', text} / {type: 'clear_transcript'}.
# NOTE(review): components.v1.html does not by itself deliver postMessage
# payloads back to Python — confirm how the caller actually receives these.
speech_recognition_html = """
<!DOCTYPE html>
<html>
<head>
    <title>Continuous Speech Demo</title>
    <style>
        body {
            font-family: sans-serif;
            padding: 20px;
            max-width: 800px;
            margin: 0 auto;
        }
        button {
            padding: 10px 20px;
            margin: 10px 5px;
            font-size: 16px;
        }
        #status {
            margin: 10px 0;
            padding: 10px;
            background: #e8f5e9;
            border-radius: 4px;
        }
        #output {
            white-space: pre-wrap;
            padding: 15px;
            background: #f5f5f5;
            border-radius: 4px;
            margin: 10px 0;
            min-height: 100px;
            max-height: 400px;
            overflow-y: auto;
        }
        .controls {
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <div class="controls">
        <button id="start">Start Listening</button>
        <button id="stop" disabled>Stop Listening</button>
        <button id="clear">Clear Text</button>
    </div>
    <div id="status">Ready</div>
    <div id="output"></div>

    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
            const startButton = document.getElementById('start');
            const stopButton = document.getElementById('stop');
            const clearButton = document.getElementById('clear');
            const status = document.getElementById('status');
            const output = document.getElementById('output');
            let fullTranscript = '';
            let lastUpdateTime = Date.now();

            // Configure recognition
            recognition.continuous = true;
            recognition.interimResults = true;

            // Function to start recognition
            const startRecognition = () => {
                try {
                    recognition.start();
                    status.textContent = 'Listening...';
                    startButton.disabled = true;
                    stopButton.disabled = false;
                } catch (e) {
                    console.error(e);
                    status.textContent = 'Error: ' + e.message;
                }
            };

            // Auto-start on load
            window.addEventListener('load', () => {
                setTimeout(startRecognition, 1000);
            });

            startButton.onclick = startRecognition;

            stopButton.onclick = () => {
                recognition.stop();
                status.textContent = 'Stopped';
                startButton.disabled = false;
                stopButton.disabled = true;
            };

            clearButton.onclick = () => {
                fullTranscript = '';
                output.textContent = '';
                window.parent.postMessage({
                    type: 'clear_transcript',
                }, '*');
            };

            recognition.onresult = (event) => {
                let interimTranscript = '';
                let finalTranscript = '';

                for (let i = event.resultIndex; i < event.results.length; i++) {
                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
                        finalTranscript += transcript + '\\n';
                    } else {
                        interimTranscript += transcript;
                    }
                }

                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                    if (finalTranscript) {
                        fullTranscript += finalTranscript;
                        // Send to Streamlit
                        window.parent.postMessage({
                            type: 'final_transcript',
                            text: finalTranscript
                        }, '*');
                    }
                    lastUpdateTime = Date.now();
                }

                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                output.scrollTop = output.scrollHeight;
            };

            recognition.onend = () => {
                if (!stopButton.disabled) {
                    try {
                        recognition.start();
                        console.log('Restarted recognition');
                    } catch (e) {
                        console.error('Failed to restart recognition:', e);
                        status.textContent = 'Error restarting: ' + e.message;
                        startButton.disabled = false;
                        stopButton.disabled = true;
                    }
                }
            };

            recognition.onerror = (event) => {
                console.error('Recognition error:', event.error);
                status.textContent = 'Error: ' + event.error;

                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            };
        }
    </script>
</body>
</html>
"""
241
+
242
# ---------------------------------------------------------------------------
# Helper Functions
# ---------------------------------------------------------------------------
def generate_filename(prompt, file_type):
    """Build a filesystem-safe filename from a prompt and an extension.

    The name is prefixed with the current US/Central time as MMDD_HHMM,
    followed by the prompt with filesystem-hostile characters replaced by
    spaces, whitespace collapsed, and truncated to 230 characters, then the
    given extension.

    Args:
        prompt: Free text used as the descriptive part of the name.
        file_type: Extension to append (without the leading dot).

    Returns:
        A string like "0412_1530_some safe prompt.md".
    """
    # zoneinfo (stdlib, Python 3.9+) replaces the third-party pytz dependency.
    from zoneinfo import ZoneInfo
    central = ZoneInfo('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    # Strip characters that are illegal or awkward in filenames, then
    # collapse whitespace runs and cap the length.
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
249
+
250
+ # [Previous helper functions remain the same]
251
+ # ... [Include all the helper functions from the second file]
252
+
253
def main():
    """Render the app: sidebar branding, tab navigation, and per-tab UI.

    Tabs: voice input (browser speech-recognition component), chat (stub),
    media gallery, ArXiv search, and a simple file editor. Relies on helpers
    defined elsewhere in this module: process_with_claude, process_with_gpt,
    perform_ai_lookup, search_arxiv, create_media_gallery,
    display_file_manager.
    """
    st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")

    # Main navigation across the app's five feature areas.
    tab_main = st.radio(
        "Choose Action:",
        ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
        horizontal=True,
    )

    if tab_main == "🎤 Voice Input":
        st.subheader("Voice Recognition")

        # Render the in-browser speech-recognition component; use the
        # module-level `components` import for consistency with the file.
        speech_component = components.html(speech_recognition_html, height=400)

        # Handle messages posted back from the embedded JS (if any).
        if speech_component:
            try:
                data = speech_component
                if isinstance(data, dict):
                    if data.get('type') == 'final_transcript':
                        text = data.get('text', '').strip()
                        if text:
                            st.session_state.last_voice_input = text

                            # Fan the transcript out to all three backends.
                            st.subheader("AI Response to Voice Input:")

                            col1, col2, col3 = st.columns(3)
                            with col2:
                                st.write("Claude-3.5 Sonnet:")
                                try:
                                    claude_response = process_with_claude(text)
                                # Was a bare `except:`, which also swallowed
                                # SystemExit / KeyboardInterrupt.
                                except Exception:
                                    st.write('Claude 3.5 Sonnet out of tokens.')
                            with col1:
                                st.write("GPT-4o Omni:")
                                try:
                                    gpt_response = process_with_gpt(text)
                                except Exception:
                                    st.write('GPT 4o out of tokens')
                            with col3:
                                st.write("Arxiv and Mistral Research:")
                                with st.spinner("Searching ArXiv..."):
                                    results = perform_ai_lookup(text)
                                    st.markdown(results)

                    elif data.get('type') == 'clear_transcript':
                        st.session_state.last_voice_input = ""
                        st.experimental_rerun()

            except Exception as e:
                st.error(f"Error processing voice input: {e}")

        # Display last voice input
        if st.session_state.last_voice_input:
            st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)

    elif tab_main == "💬 Chat":
        # [Previous chat interface code]
        pass

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        # Editor appears only after another view has populated
        # st.session_state.current_file / file_content.
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()
336
+
337
# Script entry point: run the Streamlit UI when executed directly.
if __name__ == "__main__":
    main()