awacke1 commited on
Commit
efe554b
·
verified ·
1 Parent(s): 80fb6ec

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +600 -0
app.py ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import anthropic
3
+ import openai
4
+ import base64
5
+ from datetime import datetime
6
+ import plotly.graph_objects as go
7
+ import cv2
8
+ import glob
9
+ import json
10
+ import math
11
+ import os
12
+ import pytz
13
+ import random
14
+ import re
15
+ import requests
16
+ import streamlit.components.v1 as components
17
+ import textract
18
+ import time
19
+ import zipfile
20
+ from audio_recorder_streamlit import audio_recorder
21
+ from bs4 import BeautifulSoup
22
+ from collections import deque
23
+ from dotenv import load_dotenv
24
+ from gradio_client import Client, handle_file
25
+ from huggingface_hub import InferenceClient
26
+ from io import BytesIO
27
+ from moviepy.editor import VideoFileClip
28
+ from PIL import Image
29
+ from PyPDF2 import PdfReader
30
+ from urllib.parse import quote
31
+ from xml.etree import ElementTree as ET
32
+ from openai import OpenAI
33
+
34
# Configuration and Setup
Site_Name = 'πŸ€–πŸ§ Claude35πŸ“πŸ”¬'
title = "πŸ€–πŸ§ Claude35πŸ“πŸ”¬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = 'πŸ€–πŸ§ πŸ”¬πŸ“'

# Page config must run before any other Streamlit call.
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)

# Load environment variables and initialize clients
load_dotenv()

# OpenAI setup: prefer the environment variable, fall back to Streamlit secrets.
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:  # `is None` identity check per PEP 8 (was `== None`)
    openai.api_key = st.secrets['OPENAI_API_KEY']

openai_client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup: same env-then-secrets fallback pattern.
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

# HuggingFace setup
API_URL = os.getenv('API_URL')
HF_KEY = os.getenv('HF_KEY')
MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
MODEL2 = "openai/whisper-small.en"

# NOTE(review): if HF_KEY is unset this produces "Bearer None" — confirm the
# HF endpoints are only called when the key is configured.
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}

# Initialize session states (survive Streamlit reruns).
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []   # Claude conversation pairs
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []       # GPT chat transcript
90
+
91
# Custom CSS
# Injected once per run; styles the main pane plus the card/gallery classes
# referenced by HTML fragments rendered elsewhere in the app (e.g. .bike-card).
st.markdown("""
<style>
    .main {
        background: linear-gradient(to right, #1a1a1a, #2d2d2d);
        color: #ffffff;
    }
    .stMarkdown {
        font-family: 'Helvetica Neue', sans-serif;
    }
    .category-header {
        background: linear-gradient(45deg, #2b5876, #4e4376);
        padding: 20px;
        border-radius: 10px;
        margin: 10px 0;
    }
    .scene-card {
        background: rgba(0,0,0,0.3);
        padding: 15px;
        border-radius: 8px;
        margin: 10px 0;
        border: 1px solid rgba(255,255,255,0.1);
    }
    .media-gallery {
        display: grid;
        gap: 1rem;
        padding: 1rem;
    }
    .bike-card {
        background: rgba(255,255,255,0.05);
        border-radius: 10px;
        padding: 15px;
        transition: transform 0.3s;
    }
    .bike-card:hover {
        transform: scale(1.02);
    }
</style>
""", unsafe_allow_html=True)
130
+
131
# Bike Collections
# Static catalogue used by the "Scene Generator" tab: maps a collection name
# to bikes, each with an image-generation "prompt" and a display "emoji".
bike_collections = {
    "Celestial Collection 🌌": {
        "Eclipse Vaulter": {
            "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
            The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
            Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
            Camera angle: Low angle, wide shot
            Lighting: Dramatic rim lighting from eclipse
            Color palette: Deep purples, cosmic blues, corona gold""",
            "emoji": "πŸŒ‘"
        },
        "Starlight Leaper": {
            "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
            Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
            Camera angle: Wide-angle upward shot
            Lighting: Natural starlight with subtle rim lighting
            Color palette: Deep blues, silver highlights, cosmic purples""",
            "emoji": "✨"
        },
        "Moonlit Hopper": {
            "prompt": """A sleek black bike mid-hop over a moonlit meadow,
            the full moon illuminating the misty surroundings. Fireflies dance around the bike,
            and soft shadows create a serene yet dynamic atmosphere.
            Camera angle: Side profile with slight low angle
            Lighting: Soft moonlight with atmospheric fog
            Color palette: Silver blues, soft whites, deep shadows""",
            "emoji": "πŸŒ™"
        }
    },
    "Nature-Inspired Collection 🌲": {
        "Shadow Grasshopper": {
            "prompt": """A black bike jumping between forest paths,
            with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
            as it soars above mossy logs.
            Camera angle: Through-the-trees tracking shot
            Lighting: Natural forest lighting with sun rays
            Color palette: Forest greens, golden sunlight, deep shadows""",
            "emoji": "πŸ¦—"
        },
        "Onyx Leapfrog": {
            "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
            the reflection on the water broken into ripples by the leap. The surrounding forest
            is vibrant with greens and browns.
            Camera angle: Low angle from water level
            Lighting: Golden hour side lighting
            Color palette: Deep blacks, water blues, forest greens""",
            "emoji": "🐸"
        }
    }
}
182
+
183
# Helper Functions
def generate_filename(prompt, file_type):
    """Generate a filesystem-safe filename from a prompt and extension.

    The name is prefixed with the current US/Central time (MMDD_HHMM); the
    prompt is stripped of characters illegal in filenames, whitespace runs
    are collapsed, and the result is truncated to 240 characters.
    """
    from zoneinfo import ZoneInfo  # stdlib (3.9+) replacement for third-party pytz
    central = ZoneInfo('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    # Remove filesystem-reserved characters and newlines, then collapse spaces.
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:240]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
191
+
192
def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save a file, handling both text and binary content.

    Returns the generated filename, or None when should_save is False.
    When a prompt is given for text files, it is prepended to the content.
    """
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image or isinstance(content, bytes):
        # Binary payloads (images, audio bytes from process_audio) must be
        # written in binary mode; the original text-mode write raised
        # TypeError when handed bytes.
        with open(filename, "wb") as f:
            f.write(content if isinstance(content, bytes) else content.encode("utf-8"))
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename
203
+
204
def get_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* as a base64 data URI."""
    with open(file_path, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/txt;base64,{encoded}" download="{name}">Download {name}πŸ“‚</a>'
210
+
211
@st.cache_resource
def SpeechSynthesis(result):
    """HTML5 Speech Synthesis."""
    # Render a self-contained page holding *result* in a textarea, with a
    # button that speaks it via the browser's SpeechSynthesisUtterance API.
    page_markup = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>πŸ”Š Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{result}</textarea>
        <br>
        <button onclick="readAloud()">πŸ”Š Read Aloud</button>
    </body>
    </html>
    '''
    components.html(page_markup, width=1280, height=300)
236
+
237
# Media Processing Functions
def process_image(image_input, user_prompt):
    """Process image with GPT-4o vision."""
    # Accept either a path or raw bytes; paths are read into memory first.
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()

    encoded = base64.b64encode(image_input).decode("utf-8")
    user_content = [
        {"type": "text", "text": user_prompt},
        {"type": "image_url", "image_url": {
            "url": f"data:image/png;base64,{encoded}"
        }},
    ]

    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": user_content},
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content
261
+
262
def process_audio(audio_input, text_input=''):
    """Transcribe audio with Whisper, render and speak the result.

    Accepts a path or raw bytes. Returns the transcript text so callers
    (e.g. the media gallery's Transcribe button) can display it — the
    original returned None, which the gallery then rendered.
    """
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()

    # The transcription endpoint expects a named file-like object so it can
    # infer the audio format; wrap raw bytes accordingly.
    audio_file = BytesIO(audio_input)
    audio_file.name = "audio.wav"

    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )

    st.session_state.messages.append({"role": "user", "content": transcription.text})

    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)

    # Persist the raw audio under a transcript-derived name.
    create_and_save_file(audio_input, "wav", transcription.text, True)
    return transcription.text
281
+
282
def process_video(video_path, seconds_per_frame=1):
    """Extract sampled frames (base64 JPEGs) and the audio track from a video.

    Returns (base64Frames, audio_path); audio_path is None when the clip has
    no extractable audio.
    """
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # Clamp to at least 1: int(fps * seconds_per_frame) can be 0 for low-fps
    # clips or sub-second intervals, and range() raises ValueError on step 0.
    frames_to_skip = max(1, int(fps * seconds_per_frame))

    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))

    video.release()

    # Extract audio; any moviepy failure is treated as "no audio track".
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
    try:
        video_clip = VideoFileClip(video_path)
        video_clip.audio.write_audiofile(audio_path)
        video_clip.close()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        st.warning("No audio track found in video")
        audio_path = None

    return base64Frames, audio_path
312
+
313
def process_video_with_gpt(video_input, user_prompt):
    """Process video with GPT-4o vision."""
    frames, _audio_path = process_video(video_input)

    # Build the multimodal user message: prompt text followed by every frame.
    frame_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
        for frame in frames
    ]
    user_content = [{"type": "text", "text": user_prompt}] + frame_parts

    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": user_content}
        ]
    )
    return response.choices[0].message.content
330
+
331
# ArXiv Search Functions
def search_arxiv(query):
    """Search ArXiv papers using Hugging Face client."""
    # Delegate to a hosted Gradio Space that performs RAG over ArXiv.
    space = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    answer = space.predict(
        query,
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        True,
        api_name="/ask_llm"
    )
    return answer
342
+
343
# Chat Processing Functions
def process_with_gpt(text_input):
    """Send *text_input* to GPT-4o, render the exchange, and return the reply.

    Appends both sides of the exchange to st.session_state.messages and saves
    the prompt/response pair to a timestamped markdown file.
    """
    if text_input:
        st.session_state.messages.append({"role": "user", "content": text_input})

        with st.chat_message("user"):
            st.markdown(text_input)

        with st.chat_message("assistant"):
            completion = openai_client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=False
            )
            return_text = completion.choices[0].message.content
            st.write("GPT-4o: " + return_text)

            # Bug fix: the original called create_file(), which is not defined
            # anywhere in this module (NameError). create_and_save_file writes
            # prompt + response to a generated markdown filename.
            create_and_save_file(return_text, "md", text_input)
            st.session_state.messages.append({"role": "assistant", "content": return_text})
            return return_text
368
+
369
def process_with_claude(text_input):
    """Send *text_input* to Claude, render the reply, and return it.

    Appends the exchange to st.session_state.chat_history and saves the
    prompt/response pair to a timestamped markdown file.
    """
    if text_input:
        response = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[
                {"role": "user", "content": text_input}
            ]
        )

        response_text = response.content[0].text
        st.write("Claude: " + response_text)

        # Bug fix: the original called create_file(), which is not defined
        # anywhere in this module (NameError). create_and_save_file writes
        # prompt + response to a generated markdown filename.
        create_and_save_file(response_text, "md", text_input)

        st.session_state.chat_history.append({
            "user": text_input,
            "claude": response_text
        })
        return response_text
391
+
392
# File Management Functions
def load_file(file_name):
    """Read and return the UTF-8 text content of *file_name*."""
    with open(file_name, "r", encoding='utf-8') as fh:
        return fh.read()
398
+
399
def create_zip_of_files(files):
    """Bundle *files* into a single archive named all_files.zip; return its name."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as archive:
        for path in files:
            archive.write(path)
    return zip_name
406
+
407
def get_media_html(media_path, media_type="video", width="100%"):
    """Generate an HTML player for a local media file.

    The file is inlined as a base64 data URI so the player works without
    serving the file separately. media_type selects a video (mp4) or audio
    (mpeg) element.
    """
    # Use a context manager so the handle is closed promptly — the original
    # open(...).read() leaked the file descriptor.
    with open(media_path, 'rb') as media_file:
        media_data = base64.b64encode(media_file.read()).decode()
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''
424
+
425
def create_media_gallery():
    """Create the media gallery interface.

    Renders four tabs — images, audio, video, and the bike scene generator —
    over whatever media files exist in the working directory.
    """
    st.header("🎬 Media Gallery")

    tabs = st.tabs(["πŸ–ΌοΈ Images", "🎡 Audio", "πŸŽ₯ Video", "🎨 Scene Generator"])

    with tabs[0]:
        # Image grid with a user-selectable column count; each image gets a
        # GPT-4o vision "Analyze" button.
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_column_width=True)

                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                            "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)
                        SpeechSynthesis(analysis)

    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎡 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        # NOTE(review): process_audio may not return the
                        # transcript text — verify its return value before
                        # relying on what st.write shows here.
                        transcription = process_audio(f)
                        st.write(transcription)
                        SpeechSynthesis(transcription)

    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"πŸŽ₯ {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                        "Describe what's happening in this video.")
                    st.markdown(analysis)
                    SpeechSynthesis(analysis)

    with tabs[3]:
        # Scene generator: one card per bike from the module-level
        # bike_collections catalogue; the button echoes and speaks the prompt.
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))

            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    st.markdown(f"""
                    <div class='bike-card'>
                        <h3>{details['emoji']} {bike_name}</h3>
                        <p>{details['prompt']}</p>
                    </div>
                    """, unsafe_allow_html=True)

                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)
                        SpeechSynthesis(prompt)
488
+
489
def display_file_manager():
    """Display file management sidebar.

    Lists every *.md file in the working directory with per-file view,
    download, edit, and delete controls, plus bulk delete/download.
    """
    st.sidebar.title("πŸ“ File Management")

    all_files = glob.glob("*.md")
    # Filenames start with a MMDD_HHMM timestamp, so reverse lexical sort
    # puts the most recent files first.
    all_files.sort(reverse=True)

    if st.sidebar.button("πŸ—‘ Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
        with col1:
            # View: load the file and read its content aloud.
            if st.button("🌐", key="view_"+file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
                SpeechSynthesis(st.session_state.file_content)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            # Edit: stage the file for the "File Editor" tab handled in main().
            if st.button("πŸ“‚", key="edit_"+file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("πŸ—‘", key="delete_"+file):
                os.remove(file)
                st.rerun()
522
+
523
def main():
    """App entry point: top-level navigation plus the always-on file sidebar."""
    st.title("🚲 Bike Cinematic Universe & AI Assistant")

    # Main navigation
    tab_main = st.radio("Choose Action:",
                        ["πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
                        horizontal=True)

    if tab_main == "πŸ’¬ Chat":
        # Model Selection
        model_choice = st.sidebar.radio(
            "Choose AI Model:",
            ["GPT-4o", "Claude-3", "Both"]
        )

        # Chat Interface
        user_input = st.text_area("Message:", height=100)

        if st.button("Send πŸ“¨"):
            if user_input:
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                    SpeechSynthesis(gpt_response)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                    SpeechSynthesis(claude_response)
                else:  # Both: run each model in its own column, side by side.
                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("GPT-4o Response")
                        gpt_response = process_with_gpt(user_input)
                        SpeechSynthesis(gpt_response)
                    with col2:
                        st.subheader("Claude-3 Response")
                        claude_response = process_with_claude(user_input)
                        SpeechSynthesis(claude_response)

        # Display Chat History — Claude pairs and the GPT message transcript
        # are kept in separate session-state structures.
        st.subheader("Chat History πŸ“œ")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

        with tab1:
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100, disabled=True)
                st.text_area("Claude:", chat["claude"], height=200, disabled=True)
                st.markdown("---")

        with tab2:
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "πŸ“Έ Media Gallery":
        create_media_gallery()

    elif tab_main == "πŸ” Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)
                SpeechSynthesis(results)

    elif tab_main == "πŸ“ File Editor":
        # Edits whichever file the sidebar's edit button staged in session state.
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")
                SpeechSynthesis("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()

if __name__ == "__main__":
    main()