awacke1 committed
Commit e06095f • 1 Parent(s): 1771b02

Update app.py

Files changed (1)
  1. app.py +202 -298
app.py CHANGED
@@ -31,9 +31,9 @@ from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI

- # Configuration and Setup
- Site_Name = '🤖🧠Combined AI App📝🔬'
- title = "🤖🧠Combined AI App📝🔬"
+ # 1. Configuration and Setup
+ Site_Name = '🤖🧠Claude35📝🔬'
+ title = "🤖🧠Claude35📝🔬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🤖🧠🔬📝'
@@ -50,7 +50,7 @@ st.set_page_config(
    }
)

- # Load environment variables and initialize clients
+ # 2. Load environment variables and initialize clients
load_dotenv()

# OpenAI setup
@@ -69,21 +69,24 @@ if anthropic_key == None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

+ # HuggingFace setup
+ API_URL = os.getenv('API_URL')
+ HF_KEY = os.getenv('HF_KEY')
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
+ MODEL2 = "openai/whisper-small.en"
+
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "application/json"
+ }
+
# Initialize session states
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-4"
+     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []
- if "search_queries" not in st.session_state:
-     st.session_state.search_queries = []
- if 'selected_file' not in st.session_state:
-     st.session_state.selected_file = None
- if 'view_mode' not in st.session_state:
-     st.session_state.view_mode = 'view'
- if 'files' not in st.session_state:
-     st.session_state.files = []

# Custom CSS
st.markdown("""
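Note: the new HuggingFace block wires up API_URL, HF_KEY, and request headers, but no call site for them appears anywhere in this diff. A minimal sketch of how such a header set is typically used against an inference endpoint; the query_hf helper and the payload shape are assumptions, not code from this commit:

```python
# Hypothetical call site for the headers added above; the endpoint's
# payload/response shape is an assumption, not part of this commit.
import os
import requests

API_URL = os.getenv('API_URL')
headers = {
    "Authorization": f"Bearer {os.getenv('HF_KEY')}",
    "Content-Type": "application/json"
}

def query_hf(prompt):
    """POST a prompt to the configured inference endpoint and return its JSON."""
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    response.raise_for_status()
    return response.json()
```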
@@ -177,14 +180,7 @@ bike_collections = {
    }
}

- # File Operations Functions
- def create_file(filename, prompt, response, is_image=False, should_save=True):
-     """Basic file creation with prompt and response."""
-     if not should_save:
-         return None
-     with open(filename, "w", encoding="utf-8") as f:
-         f.write(prompt + "\n\n" + response)
-
+ # Helper Functions
def generate_filename(prompt, file_type):
    """Generate a safe filename using the prompt and file type."""
    central = pytz.timezone('US/Central')
@@ -212,39 +208,6 @@ def get_download_link(file_path):
    b64 = base64.b64encode(contents).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'

- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else: # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''
-
- # Speech Synthesis
@st.cache_resource
def SpeechSynthesis(result):
    """HTML5 Speech Synthesis."""
@@ -271,91 +234,16 @@ def SpeechSynthesis(result):
    '''
    components.html(documentHTML5, width=1280, height=300)

- # ArXiv Search Functions (Combined into one function)
- def search_arxiv(query, should_save=True):
-     """Search ArXiv papers using Hugging Face client."""
-     st.write("Performing AI Lookup...")
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
-
-     # First query - Get papers
-     response1 = client.predict(
-         query,
-         10,
-         "Semantic Search",
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         api_name="/update_with_rag_md"
-     )
-
-     # Second query - Get summary
-     response2 = client.predict(
-         query,
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         True,
-         api_name="/ask_llm"
-     )
-
-     Question = '### 🔎 ' + query + '\r\n'
-     References = response1[0]
-     ReferenceLinks = extract_urls(References)
-
-     results = Question + '\r\n' + response2 + '\r\n' + References + '\r\n' + ReferenceLinks
-
-     st.markdown(results)
-     SpeechSynthesis(results)
-
-     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
-     end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
-     elapsed_seconds = end_timestamp - start_timestamp
-
-     st.write(f"Start time: {start_time}")
-     st.write(f"Finish time: {end_time}")
-     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
-
-     filename = generate_filename(query, "md")
-     create_file(filename, query, results, should_save=should_save)
-     return results
-
- def extract_urls(text):
-     """Extract URLs from ArXiv search results."""
-     try:
-         date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
-         abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
-         pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
-         title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
-
-         date_matches = date_pattern.findall(text)
-         abs_link_matches = abs_link_pattern.findall(text)
-         pdf_link_matches = pdf_link_pattern.findall(text)
-         title_matches = title_pattern.findall(text)
-
-         markdown_text = ""
-         for i in range(len(date_matches)):
-             date = date_matches[i]
-             title = title_matches[i]
-             abs_link = abs_link_matches[i][1]
-             pdf_link = pdf_link_matches[i]
-             markdown_text += f"**Date:** {date}\n\n"
-             markdown_text += f"**Title:** {title}\n\n"
-             markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
-             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
-             markdown_text += "---\n\n"
-         return markdown_text
-     except:
-         st.write('Error extracting URLs')
-         return ''
-
# Media Processing Functions
def process_image(image_input, user_prompt):
-     """Process image with GPT-4 vision."""
+     """Process image with GPT-4o vision."""
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()
-
+
    base64_image = base64.b64encode(image_input).decode("utf-8")
-
-     response = openai.ChatCompletion.create(
+
+     response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
@@ -368,7 +256,7 @@ def process_image(image_input, user_prompt):
        ],
        temperature=0.0,
    )
-
+
    return response.choices[0].message.content

def process_audio(audio_input, text_input=''):
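Note: the hunks above swap the pre-1.0 openai.ChatCompletion.create call for openai_client.chat.completions.create, the openai>=1.0 client style. The construction of openai_client itself sits outside the hunks shown; a minimal sketch of what the renamed calls assume:

```python
# Assumed construction of the openai_client used above; the actual line
# is outside the hunks shown in this diff.
import os
from openai import OpenAI

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

response = openai_client.chat.completions.create(
    model="gpt-4o-2024-05-13",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```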
@@ -377,31 +265,20 @@ def process_audio(audio_input, text_input=''):
        with open(audio_input, "rb") as file:
            audio_input = file.read()

-     transcription = openai.Audio.transcribe(
+     transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_input,
    )
-
+
    st.session_state.messages.append({"role": "user", "content": transcription.text})
-
+
    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)
-
+
    filename = generate_filename(transcription.text, "wav")
    create_and_save_file(audio_input, "wav", transcription.text, True)

- def save_and_play_audio(audio_recorder):
-     """Save and play recorded audio."""
-     audio_bytes = audio_recorder()
-     if audio_bytes:
-         filename = generate_filename("Recording", "wav")
-         with open(filename, 'wb') as f:
-             f.write(audio_bytes)
-         st.audio(audio_bytes, format="audio/wav")
-         return filename
-     return None
-
def process_video(video_path, seconds_per_frame=1):
    """Process video files for frame extraction and audio."""
    base64Frames = []
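Note: process_audio now calls openai_client.audio.transcriptions.create but still passes raw bytes as file=. The v1 SDK generally expects an open file handle (or a name/bytes tuple) so the server can infer the format, so the following may be the safer pattern; "recording.wav" is a placeholder path, not a file from this commit:

```python
# Hedged sketch of the v1 transcription call the hunk switches to;
# the path is illustrative only.
from openai import OpenAI

client = OpenAI()

with open("recording.wav", "rb") as audio_file:
    transcription = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )
print(transcription.text)
```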
@@ -409,7 +286,7 @@ def process_video(video_path, seconds_per_frame=1):
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frames_to_skip = int(fps * seconds_per_frame)
-
+
    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
@@ -417,9 +294,9 @@
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-
+
    video.release()
-
+
    # Extract audio
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
@@ -430,14 +307,14 @@
    except:
        st.warning("No audio track found in video")
        audio_path = None
-
+
    return base64Frames, audio_path

def process_video_with_gpt(video_input, user_prompt):
-     """Process video with GPT-4 vision."""
+     """Process video with GPT-4o vision."""
    base64Frames, audio_path = process_video(video_input)
-
-     response = openai.ChatCompletion.create(
+
+     response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
@@ -448,16 +325,109 @@ def process_video_with_gpt(video_input, user_prompt):
            ]}
        ]
    )
-
+
    return response.choices[0].message.content

+ # ArXiv Search Functions
+ def search_arxiv(query):
+     """Search ArXiv papers using Hugging Face client."""
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     response = client.predict(
+         query,
+         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         True,
+         api_name="/ask_llm"
+     )
+     return response
+
+ # Chat Processing Functions
+ def process_with_gpt(text_input):
+     """Process text with GPT-4o."""
+     if text_input:
+         st.session_state.messages.append({"role": "user", "content": text_input})
+
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             completion = openai_client.chat.completions.create(
+                 model=st.session_state["openai_model"],
+                 messages=[
+                     {"role": m["role"], "content": m["content"]}
+                     for m in st.session_state.messages
+                 ],
+                 stream=False
+             )
+             return_text = completion.choices[0].message.content
+             st.write("GPT-4o: " + return_text)
+
+             filename = generate_filename(text_input, "md")
+             create_file(filename, text_input, return_text)
+             st.session_state.messages.append({"role": "assistant", "content": return_text})
+             return return_text
+
+ def process_with_claude(text_input):
+     """Process text with Claude."""
+     if text_input:
+         response = claude_client.messages.create(
+             model="claude-3-sonnet-20240229",
+             max_tokens=1000,
+             messages=[
+                 {"role": "user", "content": text_input}
+             ]
+         )
+         response_text = response.content[0].text
+         st.write("Claude: " + response_text)
+
+         filename = generate_filename(text_input, "md")
+         create_file(filename, text_input, response_text)
+
+         st.session_state.chat_history.append({
+             "user": text_input,
+             "claude": response_text
+         })
+         return response_text
+
+ # File Management Functions
+ def load_file(file_name):
+     """Load file content."""
+     with open(file_name, "r", encoding='utf-8') as file:
+         content = file.read()
+     return content
+
+ def create_zip_of_files(files):
+     """Create zip archive of files."""
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ def get_media_html(media_path, media_type="video", width="100%"):
+     """Generate HTML for media player."""
+     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
+     if media_type == "video":
+         return f'''
+         <video width="{width}" controls autoplay muted loop>
+             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
+             Your browser does not support the video tag.
+         </video>
+         '''
+     else: # audio
+         return f'''
+         <audio controls style="width: {width};">
+             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
+             Your browser does not support the audio element.
+         </audio>
+         '''
+
def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")
-
+
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
-
-     with tabs[0]: # Images
+
+     with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
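Note: the re-added process_with_gpt and process_with_claude above still call create_file, while the -177,14 hunk earlier deletes its definition. A minimal stand-in matching the remaining three-argument call sites, reconstructed from the deleted body minus the dropped is_image/should_save flags:

```python
# Reconstructed from the create_file definition this commit deletes;
# the simplified signature matches the call sites that remain.
def create_file(filename, prompt, response):
    """Write the prompt and response to a UTF-8 text file."""
    with open(filename, "w", encoding="utf-8") as f:
        f.write(prompt + "\n\n" + response)
```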
@@ -466,14 +436,14 @@ def create_media_gallery():
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_column_width=True)
-
+
+                     # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
+                         analysis = process_image(image_file,
                            "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)
-                         SpeechSynthesis(analysis)
-
-     with tabs[1]: # Audio
+
+     with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
@@ -482,24 +452,22 @@ def create_media_gallery():
                with open(audio_file, "rb") as f:
                    transcription = process_audio(f)
                    st.write(transcription)
-                     SpeechSynthesis(transcription)
-
-     with tabs[2]: # Video
+
+     with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
+                     analysis = process_video_with_gpt(video_file,
                        "Describe what's happening in this video.")
                    st.markdown(analysis)
-                     SpeechSynthesis(analysis)
-
-     with tabs[3]: # Scene Generator
+
+     with tabs[3]:
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))
-
+
            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    st.markdown(f"""
@@ -508,63 +476,17 @@ def create_media_gallery():
                        <p>{details['prompt']}</p>
                    </div>
                    """, unsafe_allow_html=True)
-
+
                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
+                         # Here you could integrate with image generation API
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)
-                         SpeechSynthesis(prompt)
-
- # Chat Processing Functions
- def process_with_gpt(text_input, should_save=True):
-     """Process text with GPT-4."""
-     if text_input:
-         st.session_state.messages.append({"role": "user", "content": text_input})
-
-         with st.chat_message("user"):
-             st.markdown(text_input)
-
-         with st.chat_message("assistant"):
-             completion = openai.ChatCompletion.create(
-                 model=st.session_state["openai_model"],
-                 messages=[
-                     {"role": m["role"], "content": m["content"]}
-                     for m in st.session_state.messages
-                 ],
-                 stream=False
-             )
-             return_text = completion.choices[0].message.content
-             st.write("GPT-4: " + return_text)
-
-             filename = generate_filename(text_input, "md")
-             create_file(filename, text_input, return_text, should_save=should_save)
-             st.session_state.messages.append({"role": "assistant", "content": return_text})
-             return return_text
-
- def process_with_claude(text_input, should_save=True):
-     """Process text with Claude."""
-     if text_input:
-         response = claude_client.completions.create(
-             model="claude-2",
-             max_tokens_to_sample=1000,
-             prompt=text_input
-         )
-         response_text = response.completion
-         st.write("Claude: " + response_text)
-
-         filename = generate_filename(text_input, "md")
-         create_file(filename, text_input, response_text, should_save=should_save)
-
-         st.session_state.chat_history.append({
-             "user": text_input,
-             "claude": response_text
-         })
-         return response_text

def display_file_manager():
    """Display file management sidebar."""
    st.sidebar.title("📁 File Management")
-
+
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)

@@ -581,106 +503,88 @@ def display_file_manager():
        col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
        with col1:
            if st.button("🌐", key="view_"+file):
-                 st.session_state.selected_file = file
+                 st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
-                 SpeechSynthesis(st.session_state.file_content)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="edit_"+file):
-                 st.session_state.selected_file = file
+                 st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key="delete_"+file):
                os.remove(file)
                st.rerun()

- def display_file_content(file_path):
-     """Display file content with editing capabilities."""
-     try:
-         with open(file_path, 'r', encoding='utf-8') as f:
-             content = f.read()
-
-         if st.session_state.view_mode == 'view':
-             st.markdown(content)
-         else:
-             edited_content = st.text_area(
-                 "Edit content",
-                 content,
-                 height=400,
-                 key=f"edit_{os.path.basename(file_path)}"
-             )
-
-             if st.button("Save Changes", key=f"save_{os.path.basename(file_path)}"):
-                 try:
-                     with open(file_path, 'w', encoding='utf-8') as f:
-                         f.write(edited_content)
-                     st.success(f"Successfully saved changes to {file_path}")
-                 except Exception as e:
-                     st.error(f"Error saving changes: {e}")
-     except Exception as e:
-         st.error(f"Error reading file: {e}")
-
def main():
-     st.title("🚀 Combined AI Assistant App")
-
-     # Main navigation with radio buttons
-     ai_options = ["💬 Chat with GPT-4", "💬 Chat with Claude", "🔍 Search ArXiv", "📸 Media Gallery", "📝 File Editor"]
-     tab_main = st.radio("Choose Action:", ai_options, horizontal=True)
-
-     if tab_main == "💬 Chat with GPT-4":
-         user_input = st.text_area("Message:", height=100)
-         if st.button("Send 📨"):
-             if user_input:
-                 gpt_response = process_with_gpt(user_input)
-                 SpeechSynthesis(gpt_response)
-
-         # Display Chat History
-         st.subheader("Chat History 📜")
-         for message in st.session_state.messages:
-             with st.chat_message(message["role"]):
-                 st.markdown(message["content"])
-
-     elif tab_main == "💬 Chat with Claude":
+     st.title("🚲 Bike Cinematic Universe & AI Assistant")
+
+     # Main navigation
+     tab_main = st.radio("Choose Action:",
+                         ["💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+                         horizontal=True)
+
+     if tab_main == "💬 Chat":
+         # Model Selection
+         model_choice = st.sidebar.radio(
+             "Choose AI Model:",
+             ["GPT-4o", "Claude-3", "Both"]
+         )
+
+         # Chat Interface
        user_input = st.text_area("Message:", height=100)
+
        if st.button("Send 📨"):
            if user_input:
-                 claude_response = process_with_claude(user_input)
-                 SpeechSynthesis(claude_response)
-
+                 if model_choice == "GPT-4o":
+                     gpt_response = process_with_gpt(user_input)
+                 elif model_choice == "Claude-3":
+                     claude_response = process_with_claude(user_input)
+                 else: # Both
+                     col1, col2 = st.columns(2)
+                     with col1:
+                         st.subheader("GPT-4o Response")
+                         gpt_response = process_with_gpt(user_input)
+                     with col2:
+                         st.subheader("Claude-3 Response")
+                         claude_response = process_with_claude(user_input)
+
        # Display Chat History
        st.subheader("Chat History 📜")
-         for chat in st.session_state.chat_history:
-             st.text_area("You:", chat["user"], height=100, disabled=True)
-             st.text_area("Claude:", chat["claude"], height=200, disabled=True)
-             st.markdown("---")
-
+         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
+
+         with tab1:
+             for chat in st.session_state.chat_history:
+                 st.text_area("You:", chat["user"], height=100, disabled=True)
+                 st.text_area("Claude:", chat["claude"], height=200, disabled=True)
+                 st.markdown("---")
+
+         with tab2:
+             for message in st.session_state.messages:
+                 with st.chat_message(message["role"]):
+                     st.markdown(message["content"])
+
+     elif tab_main == "📸 Media Gallery":
+         create_media_gallery()
+
    elif tab_main == "🔍 Search ArXiv":
-         query_params = st.experimental_get_query_params()
-         query = query_params.get('q', [''])[0]
-         query = st.text_input("Enter your research query:", value=query)
+         query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
-                 # Save the query and results
-                 filename = generate_filename(query, "md")
-                 create_file(filename, query, results)
-                 st.session_state.selected_file = filename
-                 st.session_state.file_content = results
-                 SpeechSynthesis(results)
-
-     elif tab_main == "📸 Media Gallery":
-         create_media_gallery()
-
+                 st.markdown(results)
+
    elif tab_main == "📝 File Editor":
-         if st.session_state.selected_file:
-             st.subheader(f"Editing: {st.session_state.selected_file}")
-             display_file_content(st.session_state.selected_file)
-         else:
-             st.write("No file selected.")
+         if hasattr(st.session_state, 'current_file'):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+             if st.button("Save Changes"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                     file.write(new_content)
+                 st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()

if __name__ == "__main__":
-     main()
+     main()
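Note: the new File Editor branch tests hasattr(st.session_state, 'current_file'); Streamlit's session state supports attribute access, so this works, though the dict-style membership test is the documented alternative. A small sketch, with the explicit initialization guard as an assumption:

```python
# Equivalent membership-style check for the session key used above;
# the initialization guard is an assumption, not code from this commit.
import streamlit as st

if 'current_file' not in st.session_state:
    st.session_state.current_file = None

if st.session_state.current_file:
    st.subheader(f"Editing: {st.session_state.current_file}")
```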