awacke1 committed on
Commit
d1c1759
•
1 Parent(s): a1ed500

Update app.py

Browse files
Files changed (1)
  1. app.py +245 -862
app.py CHANGED
@@ -1,22 +1,8 @@
  import streamlit as st
- import anthropic
- import openai
- import base64
- from datetime import datetime
  import plotly.graph_objects as go
- import cv2
- import glob
- import json
- import math
- import os
- import pytz
- import random
- import re
- import requests
  import streamlit.components.v1 as components
- import textract
- import time
- import zipfile
  from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
  from collections import deque
@@ -32,937 +18,334 @@ from openai import OpenAI
  import extra_streamlit_components as stx
  from streamlit.runtime.scriptrunner import get_script_run_ctx

-
- # 1. 🚲BikeAI🏆 Configuration and Setup
- Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
- title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
- helpURL = 'https://huggingface.co/awacke1'
- bugURL = 'https://huggingface.co/spaces/awacke1'
- icons = '🚲🏆'
  st.set_page_config(
-     page_title=title,
-     page_icon=icons,
      layout="wide",
      initial_sidebar_state="auto",
      menu_items={
-         'Get Help': helpURL,
-         'Report a bug': bugURL,
-         'About': title
      }
  )
  load_dotenv()
- openai.api_key = os.getenv('OPENAI_API_KEY')
- if openai.api_key == None:
-     openai.api_key = st.secrets['OPENAI_API_KEY']
- openai_client = OpenAI(
-     api_key=os.getenv('OPENAI_API_KEY'),
-     organization=os.getenv('OPENAI_ORG_ID')
- )
- anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
- if anthropic_key == None:
-     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
- API_URL = os.getenv('API_URL')
  HF_KEY = os.getenv('HF_KEY')
- MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
- MODEL2 = "openai/whisper-small.en"
- headers = {
-     "Authorization": f"Bearer {HF_KEY}",
-     "Content-Type": "application/json"
- }
- # markdown target for viewing files in markdown (number one feature)
- markdown_target = st.empty()
-
- # 2. 🚲BikeAI🏆 Initialize session states
- if 'transcript_history' not in st.session_state:
-     st.session_state.transcript_history = []
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
- if "messages" not in st.session_state:
-     st.session_state.messages = []
- if 'last_voice_input' not in st.session_state:
-     st.session_state.last_voice_input = ""

- # 3. 🚲BikeAI🏆 Custom CSS
  st.markdown("""
  <style>
-     .main {
-         background: linear-gradient(to right, #1a1a1a, #2d2d2d);
-         color: #ffffff;
-     }
-     .stMarkdown {
-         font-family: 'Helvetica Neue', sans-serif;
-     }
-     .category-header {
-         background: linear-gradient(45deg, #2b5876, #4e4376);
-         padding: 20px;
-         border-radius: 10px;
-         margin: 10px 0;
-     }
-     .scene-card {
-         background: rgba(0,0,0,0.3);
-         padding: 15px;
-         border-radius: 8px;
-         margin: 10px 0;
-         border: 1px solid rgba(255,255,255,0.1);
-     }
-     .media-gallery {
-         display: grid;
-         gap: 1rem;
-         padding: 1rem;
-     }
-     .bike-card {
-         background: rgba(255,255,255,0.05);
-         border-radius: 10px;
-         padding: 15px;
-         transition: transform 0.3s;
-     }
-     .bike-card:hover {
-         transform: scale(1.02);
-     }
  </style>
  """, unsafe_allow_html=True)

- # create and save a file (and avoid the black hole of lost data 🕳)
- def generate_filename(prompt, file_type):
-     """Generate a safe filename using the prompt and file type."""
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     #safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:90]  # long enough to stay informative, short enough that unzip doesn't hit path-length limits
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- def create_file(filename, prompt, response, should_save=True):
-     if not should_save:
-         return
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
-
- def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
-     """Create and save file with proper handling of different types."""
-     if not should_save:
-         return None
-     filename = generate_filename(prompt if prompt else content, file_type)
-     with open(filename, "w", encoding="utf-8") as f:
-         if is_image:
-             f.write(content)
-         else:
-             f.write(prompt + "\n\n" + content if prompt else content)
-     return filename
-
- # Load a file, base64 it, return as link
- def get_download_link(file_path):
-     """Create download link for file."""
-     with open(file_path, "rb") as file:
-         contents = file.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'

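For readers skimming the hunk, a standalone sketch of what the removed sanitizer does (annotation, not part of the commit; `sanitize` is a hypothetical name and the example string is made up):

```python
import re

def sanitize(prompt: str, limit: int = 90) -> str:
    # Drop filesystem-hostile characters, collapse whitespace, cap the length.
    cleaned = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    return re.sub(r'\s+', ' ', cleaned).strip()[:limit]

print(sanitize('Compare GPT-4o vs. Claude: "who wins?"\nExplain.'))
# -> 'Compare GPT-4o vs. Claude who wins Explain.'
```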
- # Speech Synth Browser Style
  @st.cache_resource
- def SpeechSynthesis(result):
-     """HTML5 Speech Synthesis."""
-     documentHTML5 = f'''
-     <!DOCTYPE html>
-     <html>
-     <head>
-         <title>Read It Aloud</title>
-         <script type="text/javascript">
-             function readAloud() {{
-                 const text = document.getElementById("textArea").value;
-                 const speech = new SpeechSynthesisUtterance(text);
-                 window.speechSynthesis.speak(speech);
-             }}
-         </script>
-     </head>
-     <body>
-         <h1>🔊 Read It Aloud</h1>
-         <textarea id="textArea" rows="10" cols="80">{result}</textarea>
-         <br>
-         <button onclick="readAloud()">🔊 Read Aloud</button>
-     </body>
-     </html>
-     '''
-     components.html(documentHTML5, width=1280, height=300)
-
- # Media Processing Functions
- def process_image(image_input, user_prompt):
-     """Process image with GPT-4o vision."""
-     if isinstance(image_input, str):
-         with open(image_input, "rb") as image_file:
-             image_input = image_file.read()
-     base64_image = base64.b64encode(image_input).decode("utf-8")
-     response = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
-             {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
              {"role": "user", "content": [
                  {"type": "text", "text": user_prompt},
-                 {"type": "image_url", "image_url": {
-                     "url": f"data:image/png;base64,{base64_image}"
-                 }}
              ]}
          ],
          temperature=0.0,
      )
-     return response.choices[0].message.content

- def process_audio(audio_input, text_input=''):
-     """Process audio with Whisper and GPT."""
-     if isinstance(audio_input, str):
-         with open(audio_input, "rb") as file:
-             audio_input = file.read()
-     transcription = openai_client.audio.transcriptions.create(
-         model="whisper-1",
-         file=audio_input,
-     )
      st.session_state.messages.append({"role": "user", "content": transcription.text})
-     with st.chat_message("assistant"):
-         st.markdown(transcription.text)
-     SpeechSynthesis(transcription.text)
-     filename = generate_filename(transcription.text, "wav")
-     create_and_save_file(audio_input, "wav", transcription.text, True)
-
- # Modified video processing function without moviepy dependency
- def process_video(video_path, seconds_per_frame=1):
-     """Process video files for frame extraction."""
-     base64Frames = []
-     video = cv2.VideoCapture(video_path)
-     total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-     fps = video.get(cv2.CAP_PROP_FPS)
-     frames_to_skip = int(fps * seconds_per_frame)
-
-     for frame_idx in range(0, total_frames, frames_to_skip):
-         video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-         success, frame = video.read()
-         if not success:
-             break
-         _, buffer = cv2.imencode(".jpg", frame)
-         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-     video.release()
-     return base64Frames, None
-
- def process_video_with_gpt(video_input, user_prompt):
-     """Process video with GPT-4 vision."""
-     base64Frames, _ = process_video(video_input)
-     response = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
-             {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
-                   for frame in base64Frames]
              ]}
          ]
      )
-     return response.choices[0].message.content
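The sampling step above is plain arithmetic; a stdlib-only sketch with assumed numbers (30 fps, one frame per second, a 10-second clip):

```python
fps, seconds_per_frame, total_frames = 30.0, 1, 300
frames_to_skip = int(fps * seconds_per_frame)            # 30
sampled = list(range(0, total_frames, frames_to_skip))   # [0, 30, 60, ..., 270]
print(len(sampled))                                      # 10 frames for a 10 s clip
```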
- def extract_urls(text):
-     try:
-         date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
-         abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
-         pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
-         title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
-         date_matches = date_pattern.findall(text)
-         abs_link_matches = abs_link_pattern.findall(text)
-         pdf_link_matches = pdf_link_pattern.findall(text)
-         title_matches = title_pattern.findall(text)
-         # markdown with the extracted fields
-         markdown_text = ""
-         for i in range(len(date_matches)):
-             date = date_matches[i]
-             title = title_matches[i]
-             abs_link = abs_link_matches[i][1]
-             pdf_link = pdf_link_matches[i]
-             markdown_text += f"**Date:** {date}\n\n"
-             markdown_text += f"**Title:** {title}\n\n"
-             markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
-             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
-             markdown_text += "---\n\n"
-         return markdown_text
-     except:
-         st.write('.')
-         return ''
-

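A standalone check of the listing patterns against one made-up line in the Space's markdown format (the paper title and IDs are invented for illustration):

```python
import re

sample = "### 01 Jan 2024 | [Sample Paper](https://arxiv.org/abs/2401.00001) | [⬇️](https://arxiv.org/pdf/2401.00001)"
print(re.findall(r'### (\d{2} \w{3} \d{4})', sample))
# ['01 Jan 2024']
print(re.findall(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)', sample))
# [('Sample Paper', 'https://arxiv.org/abs/2401.00001')]
print(re.findall(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)', sample))
# ['https://arxiv.org/pdf/2401.00001']
```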
  def search_arxiv(query):
-     st.write("Performing AI Lookup...")
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     result1 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
-     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
-     st.markdown(result1)
-     result2 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
      st.markdown("### Mistral-7B-Instruct-v0.2 Result")
-     st.markdown(result2)
-     combined_result = f"{result1}\n\n{result2}"
-     return combined_result
-     #return responseall
-
- # Function to generate a filename based on prompt and time (because names matter 🕒)
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"

- # Function to create and save a file (and avoid the black hole of lost data 🕳)
- def create_file(filename, prompt, response):
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
-
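Both lookup paths wrap the same gradio_client call pattern; a minimal sketch, assuming the Space is deployed and the /ask_llm endpoint keeps this signature:

```python
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
answer = client.predict(
    prompt="What is mixture of experts?",
    llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
    stream_outputs=True,
    api_name="/ask_llm",
)
print(answer)
```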
- def perform_ai_lookup(query):
-     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     response1 = client.predict(
-         query,
-         20,
-         "Semantic Search",
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         api_name="/update_with_rag_md"
-     )
-     Question = '### 🔎 ' + query + '\r\n'  # Format for markdown display with links
-     References = response1[0]
-     ReferenceLinks = extract_urls(References)
-     RunSecondQuery = True
-     results = ''
-     if RunSecondQuery:
-         # Search 2 - Retrieve the Summary with Papers Context and Original Query
-         response2 = client.predict(
-             query,
-             "mistralai/Mixtral-8x7B-Instruct-v0.1",
-             True,
-             api_name="/ask_llm"
          )
-         if len(response2) > 10:
-             Answer = response2
-             SpeechSynthesis(Answer)
-             # Restructure results to follow format of Question, Answer, References, ReferenceLinks
-             results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
-             st.markdown(results)
-     st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
-     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
-     end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
-     elapsed_seconds = end_timestamp - start_timestamp
-     st.write(f"Start time: {start_time}")
-     st.write(f"Finish time: {end_time}")
-     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
-     filename = generate_filename(query, "md")
-     create_file(filename, query, results)
-     return results
-
- # Chat Processing Functions
- def process_with_gpt(text_input):
-     """Process text with GPT-4o."""
-     if text_input:
-         st.session_state.messages.append({"role": "user", "content": text_input})
-         with st.chat_message("user"):
-             st.markdown(text_input)
-         with st.chat_message("assistant"):
-             completion = openai_client.chat.completions.create(
-                 model=st.session_state["openai_model"],
-                 messages=[
-                     {"role": m["role"], "content": m["content"]}
-                     for m in st.session_state.messages
-                 ],
-                 stream=False
-             )
-             return_text = completion.choices[0].message.content
-             st.write("GPT-4o: " + return_text)
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("GPT-4o: " + return_text, "md")
-             create_file(filename, text_input, return_text)
-             st.session_state.messages.append({"role": "assistant", "content": return_text})
-         return return_text
-
- def process_with_claude(text_input):
-     """Process text with Claude."""
-     if text_input:
-         with st.chat_message("user"):
-             st.markdown(text_input)
-         with st.chat_message("assistant"):
-             response = claude_client.messages.create(
-                 model="claude-3-sonnet-20240229",
-                 max_tokens=1000,
-                 messages=[
-                     {"role": "user", "content": text_input}
-                 ]
-             )
-             response_text = response.content[0].text
-             st.write("Claude: " + response_text)
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("Claude: " + response_text, "md")
-             create_file(filename, text_input, response_text)
-             st.session_state.chat_history.append({
-                 "user": text_input,
-                 "claude": response_text
-             })
-         return response_text
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content

  def create_zip_of_files(files):
-     """Create zip archive of files."""
      zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
      return zip_name

- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else:  # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''

  def create_media_gallery():
-     """Create the media gallery interface."""
      st.header("🎬 Media Gallery")
      tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
      with tabs[0]:
-         image_files = glob.glob("*.png") + glob.glob("*.jpg")
-         if image_files:
-             num_cols = st.slider("Number of columns", 1, 5, 3)
-             cols = st.columns(num_cols)
-             for idx, image_file in enumerate(image_files):
-                 with cols[idx % num_cols]:
-                     img = Image.open(image_file)
-                     st.image(img, use_container_width=True)
-                     # Add GPT vision analysis option
-                     if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
-                             "Describe this image in detail and identify key elements.")
-                         st.markdown(analysis)
      with tabs[1]:
-         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
-         for audio_file in audio_files:
-             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
-                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
-                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
-                     with open(audio_file, "rb") as f:
-                         transcription = process_audio(f)
-                         st.write(transcription)
      with tabs[2]:
-         video_files = glob.glob("*.mp4")
-         for video_file in video_files:
-             with st.expander(f"🎥 {os.path.basename(video_file)}"):
-                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
-                 if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
-                         "Describe what's happening in this video.")
-                     st.markdown(analysis)
-
-
- def display_file_manager():
-     """Display file management sidebar with guaranteed unique button keys."""
-     st.sidebar.title("📁 File Management")
-     all_files = glob.glob("*.md")
-     all_files.sort(reverse=True)
-     if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
-         for file in all_files:
-             os.remove(file)
-         st.rerun()
-     if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-     # Create unique keys using file attributes
-     for idx, file in enumerate(all_files):
-         # Get file stats for unique identification
-         file_stat = os.stat(file)
-         unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
-         with col1:
-             if st.button("🌐", key=f"view_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
-         with col3:
-             if st.button("📂", key=f"edit_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col4:
-             if st.button("🗑", key=f"delete_{unique_id}"):
-                 os.remove(file)
-                 st.rerun()
-
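The stat-based key exists because Streamlit raises a DuplicateWidgetID error when two widgets resolve to the same key; a stdlib sketch of the scheme (`unique_key` is a hypothetical name):

```python
import os

def unique_key(idx: int, path: str) -> str:
    s = os.stat(path)
    # index + size + mtime: stable across reruns, distinct across files
    return f"{idx}_{s.st_size}_{s.st_mtime}"
```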
- # Speech Recognition HTML Component
- speech_recognition_html = """
- <!DOCTYPE html>
- <html>
- <head>
-     <title>Continuous Speech Demo</title>
-     <style>
-         body { font-family: sans-serif; padding: 20px; max-width: 800px; margin: 0 auto; }
-         button { padding: 10px 20px; margin: 10px 5px; font-size: 16px; }
-         #status { margin: 10px 0; padding: 10px; background: #e8f5e9; border-radius: 4px; }
-         #output { white-space: pre-wrap; padding: 15px; background: #f5f5f5; border-radius: 4px;
-                   margin: 10px 0; min-height: 100px; max-height: 400px; overflow-y: auto; }
-         .controls { margin: 10px 0; }
-     </style>
- </head>
- <body>
-     <div class="controls">
-         <button id="start">Start Listening</button>
-         <button id="stop" disabled>Stop Listening</button>
-         <button id="clear">Clear Text</button>
-     </div>
-     <div id="status">Ready</div>
-     <div id="output"></div>
-
-     <!-- Add the hidden input here -->
-     <input type="hidden" id="streamlit-data" value="">
-
-     <script>
-         if (!('webkitSpeechRecognition' in window)) {
-             alert('Speech recognition not supported');
-         } else {
-             const recognition = new webkitSpeechRecognition();
-             const startButton = document.getElementById('start');
-             const stopButton = document.getElementById('stop');
-             const clearButton = document.getElementById('clear');
-             const status = document.getElementById('status');
-             const output = document.getElementById('output');
-             let fullTranscript = '';
-             let lastUpdateTime = Date.now();
-
-             // Configure recognition
-             recognition.continuous = true;
-             recognition.interimResults = true;
-
-             // Function to start recognition
-             const startRecognition = () => {
-                 try {
-                     recognition.start();
-                     status.textContent = 'Listening...';
-                     startButton.disabled = true;
-                     stopButton.disabled = false;
-                 } catch (e) {
-                     console.error(e);
-                     status.textContent = 'Error: ' + e.message;
-                 }
-             };
-
-             // Auto-start on load
-             window.addEventListener('load', () => {
-                 setTimeout(startRecognition, 1000);
-             });
-
-             startButton.onclick = startRecognition;
-
-             stopButton.onclick = () => {
-                 recognition.stop();
-                 status.textContent = 'Stopped';
-                 startButton.disabled = false;
-                 stopButton.disabled = true;
-             };
-
-             clearButton.onclick = () => {
-                 fullTranscript = '';
-                 output.textContent = '';
-                 window.parent.postMessage({
-                     type: 'clear_transcript',
-                 }, '*');
-             };
-
-             recognition.onresult = (event) => {
-                 let interimTranscript = '';
-                 let finalTranscript = '';
-
-                 for (let i = event.resultIndex; i < event.results.length; i++) {
-                     const transcript = event.results[i][0].transcript;
-                     if (event.results[i].isFinal) {
-                         finalTranscript += transcript + '\\n';
-                     } else {
-                         interimTranscript += transcript;
-                     }
-                 }
-
-                 if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
-                     if (finalTranscript) {
-                         fullTranscript += finalTranscript;
-                         // Update the hidden input value
-                         document.getElementById('streamlit-data').value = fullTranscript;
-                     }
-                     lastUpdateTime = Date.now();
-                 }
-
-                 output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
-                 output.scrollTop = output.scrollHeight;
-
-                 document.getElementById('streamlit-data').value = fullTranscript;
-             };
-
-             recognition.onend = () => {
-                 if (!stopButton.disabled) {
-                     try {
-                         recognition.start();
-                         console.log('Restarted recognition');
-                     } catch (e) {
-                         console.error('Failed to restart recognition:', e);
-                         status.textContent = 'Error restarting: ' + e.message;
-                         startButton.disabled = false;
-                         stopButton.disabled = true;
-                     }
-                 }
-             };
-
-             recognition.onerror = (event) => {
-                 console.error('Recognition error:', event.error);
-                 status.textContent = 'Error: ' + event.error;
-
-                 if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
-                     startButton.disabled = false;
-                     stopButton.disabled = true;
-                 }
-             };
-         }
-     </script>
- </body>
- </html>
- """
-
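Context for this removal: `components.html` renders static HTML in an iframe and does not hand values back to Python, so the hidden-input trick above never delivered the transcript to the app; that limitation is consistent with the commit's move to the bidirectional `mycomponent` custom component. A minimal demonstration (annotation, not part of the commit):

```python
import streamlit as st
import streamlit.components.v1 as components

value = components.html("<p>static HTML; nothing flows back</p>", height=40)
st.write(value)  # no return value reaches Python from components.html
```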
- # Helper Functions
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- def get_download_link(file):
-     """Create download link for file."""
-     with open(file, "rb") as f:
-         contents = f.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}📂</a>'

  def display_file_manager():
-     """Display file management sidebar."""
      st.sidebar.title("📁 File Management")
-
-     all_files = glob.glob("*.md")
-     all_files.sort(reverse=True)
-
      if st.sidebar.button("🗑 Delete All"):
-         for file in all_files:
-             os.remove(file)
-         st.rerun()
-
      if st.sidebar.button("⬇️ Download All"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-
-     for file in all_files:
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
          with col1:
-             if st.button("🌐", key="view_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-                 st.write(file)
-                 markdown_target.markdown(st.session_state.file_content)  # view 🌐
          with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
          with col3:
-             if st.button("📂", key="edit_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
          with col4:
-             if st.button("🗑", key="delete_"+file):
-                 os.remove(file)
-                 st.rerun()
-
- def create_media_gallery():
-     """Create the media gallery interface."""
-     st.header("🎬 Media Gallery")
-
-     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
-
-     with tabs[0]:
-         image_files = glob.glob("*.png") + glob.glob("*.jpg")
-         if image_files:
-             num_cols = st.slider("Number of columns", 1, 5, 3)
-             cols = st.columns(num_cols)
-             for idx, image_file in enumerate(image_files):
-                 with cols[idx % num_cols]:
-                     img = Image.open(image_file)
-                     st.image(img, use_container_width=True)
-
-                     # Add GPT vision analysis option
-                     if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
-                             "Describe this image in detail and identify key elements.")
-                         st.markdown(analysis)
-
-     with tabs[1]:
-         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
-         for audio_file in audio_files:
-             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
-                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
-                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
-                     with open(audio_file, "rb") as f:
-                         transcription = process_audio(f)
-                         st.write(transcription)
-
-     with tabs[2]:
-         video_files = glob.glob("*.mp4")
-         for video_file in video_files:
-             with st.expander(f"🎥 {os.path.basename(video_file)}"):
-                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
-                 if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
-                         "Describe what's happening in this video.")
-                     st.markdown(analysis)
-
- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else:  # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''
-
- @st.cache_resource
- def set_transcript(text):
-     """Set transcript in session state."""
-     st.session_state.voice_transcript = text

  def main():
-     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
-
-     # Main navigation
-     tab_main = st.radio("Choose Action:",
-         ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
-         horizontal=True)
-     # Model Selection
-     model_choice = st.sidebar.radio(
-         "Choose AI Model:",
-         ["GPT+Claude+Arxiv", "GPT-4o", "Claude-3"]
-     )
-
-     # 🏆################ Component Magic ###############🏆
-     mycomponent = components.declare_component("mycomponent", path="mycomponent")  # load from __init__.py and index.html in mycomponent folder
-     from mycomponent import mycomponent
-     value = mycomponent(my_input_value="hello there")
-     st.write("Received", value)  # value is speech recognition full text result with \n dividing
-     if (value is not None):
-         user_input = value
          if model_choice == "GPT-4o":
-             gpt_response = process_with_gpt(user_input)
          elif model_choice == "Claude-3":
-             claude_response = process_with_claude(user_input)
-         else:  # All Three AIs!
-             col1, col2, col3 = st.columns(3)
-             with col2:
-                 st.subheader("Claude-3.5 Sonnet:")
-                 try:
-                     claude_response = process_with_claude(user_input)
-                 except:
-                     st.write('Claude 3.5 Sonnet out of tokens.')
              with col1:
                  st.subheader("GPT-4o Omni:")
                  try:
-                     gpt_response = process_with_gpt(user_input)
                  except:
-                     st.write('GPT 4o out of tokens')
-             with col3:
-                 st.subheader("Arxiv and Mistral Research:")
-                 with st.spinner("Searching ArXiv..."):
-                     try:
-                         results = perform_ai_lookup(user_input)
-                         st.markdown(results)
-                     except:
-                         st.write("Arxiv Mistral too busy - try again.")
-     # 🏆################ Component Magic ###############🏆
-
      if tab_main == "🎤 Voice Input":
-         st.subheader("Voice Recognition")
-
-         # Initialize session state for the transcript
-         if 'voice_transcript' not in st.session_state:
-             st.session_state.voice_transcript = ""
-
-         # Display speech recognition component and capture returned value
-         #transcript = st.components.v1.html(speech_recognition_html, height=400)
-
-         # Update session state if there's new data
-         #if transcript is not None and transcript != "":
-         #    st.session_state.voice_transcript = transcript
-
-         # Display the transcript in a Streamlit text area
-         # st.markdown("### Processed Voice Input:")
-         # st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
-
-         # Chat Interface
-         user_input = st.text_area("Message:", height=100)
-
          if st.button("Send 📨"):
-             if user_input:
                  if model_choice == "GPT-4o":
-                     gpt_response = process_with_gpt(user_input)
                  elif model_choice == "Claude-3":
-                     claude_response = process_with_claude(user_input)
-                 else:  # Both
-                     col1, col2, col3 = st.columns(3)
-                     with col2:
-                         st.subheader("Claude-3.5 Sonnet:")
-                         try:
-                             claude_response = process_with_claude(user_input)
-                         except:
-                             st.write('Claude 3.5 Sonnet out of tokens.')
                      with col1:
                          st.subheader("GPT-4o Omni:")
-                         try:
-                             gpt_response = process_with_gpt(user_input)
-                         except:
-                             st.write('GPT 4o out of tokens')
                      with col3:
-                         st.subheader("Arxiv and Mistral Research:")
-                         with st.spinner("Searching ArXiv..."):
-                             #results = search_arxiv(user_input)
-                             results = perform_ai_lookup(user_input)
-                             st.markdown(results)
-
-         # Display Chat History
-         st.subheader("Chat History 📜")
-         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
-
-         with tab1:
-             for chat in st.session_state.chat_history:
-                 st.text_area("You:", chat["user"], height=100)
-                 st.text_area("Claude:", chat["claude"], height=200)
-                 st.markdown(chat["claude"])
-
-         with tab2:
-             for message in st.session_state.messages:
-                 with st.chat_message(message["role"]):
-                     st.markdown(message["content"])
-
      elif tab_main == "📸 Media Gallery":
          create_media_gallery()
-
      elif tab_main == "🔍 Search ArXiv":
-         query = st.text_input("Enter your research query:")
-         if query:
-             with st.spinner("Searching ArXiv..."):
-                 results = search_arxiv(query)
-                 st.markdown(results)
-
      elif tab_main == "📝 File Editor":
-         if hasattr(st.session_state, 'current_file'):
              st.subheader(f"Editing: {st.session_state.current_file}")
-             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
-             if st.button("Save Changes"):
-                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
-                     file.write(new_content)
-                 st.success("File updated successfully!")
-
      # Always show file manager in sidebar
      display_file_manager()

- if __name__ == "__main__":
-     main()

  import streamlit as st
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
  import plotly.graph_objects as go
  import streamlit.components.v1 as components
+ from datetime import datetime
  from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
  from collections import deque

  import extra_streamlit_components as stx
  from streamlit.runtime.scriptrunner import get_script_run_ctx

+ # 🔧 Config & Setup
  st.set_page_config(
+     page_title="🚲BikeAI🏆 Claude/GPT Research",
+     page_icon="🚲🏆",
      layout="wide",
      initial_sidebar_state="auto",
      menu_items={
+         'Get Help': 'https://huggingface.co/awacke1',
+         'Report a bug': 'https://huggingface.co/spaces/awacke1',
+         'About': "🚲BikeAI🏆 Claude/GPT Research AI"
      }
  )
  load_dotenv()
+ openai.api_key = os.getenv('OPENAI_API_KEY') or st.secrets['OPENAI_API_KEY']
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY_3") or st.secrets["ANTHROPIC_API_KEY"]
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
+ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
  HF_KEY = os.getenv('HF_KEY')
+ API_URL = os.getenv('API_URL')
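One caveat on the `or` fallback above: if the secret is also missing, `st.secrets['...']` raises a KeyError at startup. A gentler variant, assuming the same intent (annotation, not part of the commit):

```python
key = os.getenv('OPENAI_API_KEY') or st.secrets.get('OPENAI_API_KEY')
if not key:
    st.error("OPENAI_API_KEY is not configured")
    st.stop()
```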
 
+ st.session_state.setdefault('transcript_history', [])
+ st.session_state.setdefault('chat_history', [])
+ st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
+ st.session_state.setdefault('messages', [])
+ st.session_state.setdefault('last_voice_input', "")
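`st.session_state` implements the mapping protocol, so each `setdefault` above is shorthand for one of the `if ... not in` blocks the old pane deletes:

```python
import streamlit as st

st.session_state.setdefault('messages', [])
# equivalent to:
if 'messages' not in st.session_state:
    st.session_state['messages'] = []
```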
 
+ # 🎨 Minimal Custom CSS
  st.markdown("""
  <style>
+ .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
+ .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
  </style>
  """, unsafe_allow_html=True)

+ # 🔑 Common Utilities
+ def generate_filename(prompt, file_type="md"):
+     ctz = pytz.timezone('US/Central')
+     date_str = datetime.now(ctz).strftime("%m%d_%H%M")
+     safe = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
+     safe = re.sub(r'\s+', ' ', safe).strip()[:90]
+     return f"{date_str}_{safe}.{file_type}"
+
+ def create_file(filename, prompt, response):
+     with open(filename, 'w', encoding='utf-8') as f:
+         f.write(prompt + "\n\n" + response)
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         b64 = base64.b64encode(f.read()).decode()
+     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
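`data:file/txt` is not a registered MIME type; the link still downloads because the `download` attribute forces a save, but `text/plain` or `application/octet-stream` would be the conventional label. A variant under that assumption:

```python
import base64, os

def download_link(path: str) -> str:
    with open(path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    name = os.path.basename(path)
    return f'<a href="data:application/octet-stream;base64,{b64}" download="{name}">📂 Download {name}</a>'
```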
 
 
  @st.cache_resource
+ def speech_synthesis_html(result):
+     html_code = f"""
+     <html><body>
+     <script>
+     var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
+     window.speechSynthesis.speak(msg);
+     </script>
+     </body></html>
+     """
+     components.html(html_code, height=0)
+
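The `result.replace('"', '')` guard keeps the utterance from breaking the JS string literal, but backslashes and newlines can still escape it; `json.dumps` produces a valid literal in all cases. A sketch, assuming the behavior should otherwise stay the same:

```python
import json
import streamlit.components.v1 as components

def speak(text: str):
    payload = json.dumps(text)  # quotes, backslashes, newlines all escaped
    components.html(f"<script>speechSynthesis.speak(new SpeechSynthesisUtterance({payload}));</script>", height=0)
```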
+ def process_image(image_path, user_prompt):
+     with open(image_path, "rb") as imgf:
+         image_data = imgf.read()
+     b64img = base64.b64encode(image_data).decode("utf-8")
+     resp = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
              {"role": "user", "content": [
                  {"type": "text", "text": user_prompt},
+                 {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
              ]}
          ],
          temperature=0.0,
      )
+     return resp.choices[0].message.content

+ def process_audio(audio_path):
+     with open(audio_path, "rb") as f:
+         transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
      st.session_state.messages.append({"role": "user", "content": transcription.text})
+     return transcription.text

+ def process_video(video_path, seconds_per_frame=1):
+     vid = cv2.VideoCapture(video_path)
+     total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
+     fps = vid.get(cv2.CAP_PROP_FPS)
+     skip = int(fps * seconds_per_frame)
+     frames_b64 = []
+     for i in range(0, total, skip):
+         vid.set(cv2.CAP_PROP_POS_FRAMES, i)
+         ret, frame = vid.read()
+         if not ret: break
+         _, buf = cv2.imencode(".jpg", frame)
+         frames_b64.append(base64.b64encode(buf).decode("utf-8"))
+     vid.release()
+     return frames_b64
+
+ def process_video_with_gpt(video_path, prompt):
+     frames = process_video(video_path)
+     resp = openai_client.chat.completions.create(
          model=st.session_state["openai_model"],
          messages=[
+             {"role": "system", "content": "Analyze video frames."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": prompt},
+                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}} for fr in frames]
              ]}
          ]
      )
+     return resp.choices[0].message.content
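One edge case in the rewrite: OpenCV reports `fps` as 0.0 for some containers, which makes `skip` 0 and `range(0, total, 0)` raise ValueError. A defensive line, if that matters in practice:

```python
skip = max(1, int(fps * seconds_per_frame))  # never step by zero, even when fps is unreadable
```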
 
  def search_arxiv(query):
+     st.write("🔍 Searching ArXiv...")
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
+     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
+     st.markdown(r1)
+     r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
      st.markdown("### Mistral-7B-Instruct-v0.2 Result")
+     st.markdown(r2)
+     return f"{r1}\n\n{r2}"
+ def perform_ai_lookup(q):
+     start = time.time()
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     r = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")
+     refs = r[0]
+     r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
+     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+     speech_synthesis_html(r2)
+     st.markdown(result)
+     elapsed = time.time() - start
+     st.write(f"Elapsed: {elapsed:.2f} s")
+     fn = generate_filename(q, "md")
+     create_file(fn, q, result)
+     return result
+
+ def process_with_gpt(text):
+     if not text: return
+     st.session_state.messages.append({"role": "user", "content": text})
+     with st.chat_message("user"):
+         st.markdown(text)
+     with st.chat_message("assistant"):
+         c = openai_client.chat.completions.create(
+             model=st.session_state["openai_model"],
+             messages=st.session_state.messages,
+             stream=False
          )
+         ans = c.choices[0].message.content
+         st.write("GPT-4o: " + ans)
+         create_file(generate_filename(text, "md"), text, ans)
+         st.session_state.messages.append({"role": "assistant", "content": ans})
+     return ans
+
+ def process_with_claude(text):
+     if not text: return
+     with st.chat_message("user"):
+         st.markdown(text)
+     with st.chat_message("assistant"):
+         r = claude_client.messages.create(
+             model="claude-3-sonnet-20240229",
+             max_tokens=1000,
+             messages=[{"role": "user", "content": text}]
+         )
+         ans = r.content[0].text
+         st.write("Claude: " + ans)
+         create_file(generate_filename(text, "md"), text, ans)
+         st.session_state.chat_history.append({"user": text, "claude": ans})
+     return ans

  def create_zip_of_files(files):
      zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as z:
+         for f in files: z.write(f)
      return zip_name

+ def get_media_html(p, typ="video", w="100%"):
+     d = base64.b64encode(open(p, 'rb').read()).decode()
+     if typ == "video":
+         return f'<video width="{w}" controls autoplay muted loop><source src="data:video/mp4;base64,{d}" type="video/mp4"></video>'
+     else:
+         return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'

  def create_media_gallery():
      st.header("🎬 Media Gallery")
      tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
      with tabs[0]:
+         imgs = glob.glob("*.png") + glob.glob("*.jpg")
+         if imgs:
+             c = st.slider("Cols", 1, 5, 3)
+             cols = st.columns(c)
+             for i, f in enumerate(imgs):
+                 with cols[i % c]:
+                     st.image(Image.open(f), use_container_width=True)
+                     if st.button(f"👀 Analyze {os.path.basename(f)}"):
+                         a = process_image(f, "Describe this image.")
+                         st.markdown(a)
      with tabs[1]:
+         auds = glob.glob("*.mp3") + glob.glob("*.wav")
+         for a in auds:
+             with st.expander(f"🎵 {os.path.basename(a)}"):
+                 st.markdown(get_media_html(a, "audio"), unsafe_allow_html=True)
+                 if st.button(f"Transcribe {os.path.basename(a)}"):
+                     t = process_audio(a)
+                     st.write(t)
      with tabs[2]:
+         vids = glob.glob("*.mp4")
+         for v in vids:
+             with st.expander(f"🎥 {os.path.basename(v)}"):
+                 st.markdown(get_media_html(v, "video"), unsafe_allow_html=True)
+                 if st.button(f"Analyze {os.path.basename(v)}"):
+                     a = process_video_with_gpt(v, "Describe video.")
+                     st.markdown(a)

  def display_file_manager():
      st.sidebar.title("📁 File Management")
+     files = sorted(glob.glob("*.md"), reverse=True)
      if st.sidebar.button("🗑 Delete All"):
+         for f in files: os.remove(f)
+         st.experimental_rerun()
      if st.sidebar.button("⬇️ Download All"):
+         z = create_zip_of_files(files)
+         st.sidebar.markdown(get_download_link(z), unsafe_allow_html=True)
+     for f in files:
+         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
          with col1:
+             if st.button("🌐", key="v"+f):
+                 st.session_state.current_file = f
+                 c = open(f, 'r', encoding='utf-8').read()
+                 st.write(c)
          with col2:
+             st.markdown(get_download_link(f), unsafe_allow_html=True)
          with col3:
+             if st.button("📂", key="e"+f):
+                 st.session_state.current_file = f
+                 st.session_state.file_content = open(f, 'r', encoding='utf-8').read()
          with col4:
+             if st.button("🗑", key="d"+f):
+                 os.remove(f)
+                 st.experimental_rerun()
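Small regression worth flagging: the old pane already used `st.rerun()`, while the rewrite calls `st.experimental_rerun()`, which recent Streamlit releases deprecate in favor of `st.rerun()`. A version-tolerant shim, if needed:

```python
import streamlit as st

# Prefer the non-deprecated spelling when available.
rerun = getattr(st, "rerun", None) or getattr(st, "experimental_rerun")
```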
 
  def main():
+     st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
+     tab_main = st.radio("Action:", ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"], horizontal=True)
+     model_choice = st.sidebar.radio("AI Model:", ["GPT+Claude+Arxiv", "GPT-4o", "Claude-3"])
+
+     # Speech-to-Text component placeholder (example)
+     mycomponent = components.declare_component("mycomponent", path="mycomponent")
+     val = mycomponent(my_input_value="Hello")
+     if val:
+         user_input = val
          if model_choice == "GPT-4o":
+             process_with_gpt(user_input)
          elif model_choice == "Claude-3":
+             process_with_claude(user_input)
+         else:
+             col1, col2, col3 = st.columns(3)
              with col1:
                  st.subheader("GPT-4o Omni:")
+                 try: process_with_gpt(user_input)
+                 except: st.write('GPT 4o error')
+             with col2:
+                 st.subheader("Claude-3 Sonnet:")
+                 try: process_with_claude(user_input)
+                 except: st.write('Claude error')
+             with col3:
+                 st.subheader("Arxiv + Mistral:")
                  try:
+                     r = perform_ai_lookup(user_input)
+                     st.markdown(r)
                  except:
+                     st.write("Arxiv error")

      if tab_main == "🎤 Voice Input":
+         st.subheader("🎤 Voice Recognition")
+         user_text = st.text_area("Message:", height=100)
          if st.button("Send 📨"):
+             if user_text:
                  if model_choice == "GPT-4o":
+                     process_with_gpt(user_text)
                  elif model_choice == "Claude-3":
+                     process_with_claude(user_text)
+                 else:
+                     col1, col2, col3 = st.columns(3)
                      with col1:
                          st.subheader("GPT-4o Omni:")
+                         process_with_gpt(user_text)
+                     with col2:
+                         st.subheader("Claude-3 Sonnet:")
+                         process_with_claude(user_text)
                      with col3:
+                         st.subheader("Arxiv & Mistral:")
+                         res = perform_ai_lookup(user_text)
+                         st.markdown(res)
+         st.subheader("📜 Chat History")
+         t1, t2 = st.tabs(["Claude History", "GPT-4o History"])
+         with t1:
+             for c in st.session_state.chat_history:
+                 st.write("**You:**", c["user"])
+                 st.write("**Claude:**", c["claude"])
+         with t2:
+             for m in st.session_state.messages:
+                 with st.chat_message(m["role"]):
+                     st.markdown(m["content"])
+
      elif tab_main == "📸 Media Gallery":
          create_media_gallery()
+
      elif tab_main == "🔍 Search ArXiv":
+         q = st.text_input("Research query:")
+         if q:
+             r = search_arxiv(q)
+             st.markdown(r)
+
      elif tab_main == "📝 File Editor":
+         if getattr(st.session_state, 'current_file', None):
              st.subheader(f"Editing: {st.session_state.current_file}")
+             new_text = st.text_area("Content:", st.session_state.file_content, height=300)
+             if st.button("Save"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as f:
+                     f.write(new_text)
+                 st.success("Updated!")

      display_file_manager()

+ if __name__ == "__main__":
+     main()