# Stdlib imports
import base64
import glob
import json
import math
import os
import random
import re
import time
import zipfile
from collections import deque
from datetime import datetime
from io import BytesIO
from urllib.parse import quote
from xml.etree import ElementTree as ET

# Third-party imports
import anthropic
import cv2
import openai
import plotly.graph_objects as go
import pytz
import requests
import streamlit as st
import streamlit.components.v1 as components
import textract
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient
from moviepy.editor import VideoFileClip
from openai import OpenAI
from PIL import Image
from PyPDF2 import PdfReader

# 1. Configuration and Setup
# NOTE(review): emoji constants below were mojibake (UTF-8 read as cp1252) in the
# damaged source; restored to the intended characters.
Site_Name = '🤖🧠Claude35📝🔬'
title = "🤖🧠Claude35📝🔬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🤖🧠🔬📝'

# Must be the first Streamlit call in the script.
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
# 2. Load environment variables and initialize clients
load_dotenv()

# OpenAI setup — prefer the environment variable, fall back to Streamlit secrets.
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    # BUG FIX: use the resolved key; the original re-read os.getenv here, so the
    # st.secrets fallback never reached the client.
    api_key=openai.api_key,
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup — same env-then-secrets fallback pattern.
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

# HuggingFace setup
API_URL = os.getenv('API_URL')
HF_KEY = os.getenv('HF_KEY')
MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
MODEL2 = "openai/whisper-small.en"
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}

# Initialize session states so reruns keep conversation context.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []

# Custom CSS
# NOTE(review): the original <style> rules were stripped in transit; the empty
# markdown block is kept as a placeholder — restore the rules from the source repo.
st.markdown("""
""", unsafe_allow_html=True)

# Bike Collections — scene-generation prompt library keyed by collection name,
# then by scene name; each scene carries a text prompt and a display emoji.
bike_collections = {
    "Celestial Collection 🌌": {
        "Eclipse Vaulter": {
            "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
Camera angle: Low angle, wide shot
Lighting: Dramatic rim lighting from eclipse
Color palette: Deep purples, cosmic blues, corona gold""",
            "emoji": "🌑"
        },
        "Starlight Leaper": {
            "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
Camera angle: Wide-angle upward shot
Lighting: Natural starlight with subtle rim lighting
Color palette: Deep blues, silver highlights, cosmic purples""",
            "emoji": "✨"
        },
        "Moonlit Hopper": {
            "prompt": """A sleek black bike mid-hop over a moonlit meadow,
the full moon illuminating the misty surroundings.
Fireflies dance around the bike, and soft shadows create a serene yet dynamic atmosphere.
Camera angle: Side profile with slight low angle
Lighting: Soft moonlight with atmospheric fog
Color palette: Silver blues, soft whites, deep shadows""",
            "emoji": "🌙"
        }
    },
    "Nature-Inspired Collection 🌲": {
        "Shadow Grasshopper": {
            "prompt": """A black bike jumping between forest paths,
with dappled sunlight streaming through the canopy.
Shadows dance on the bike's frame as it soars above mossy logs.
Camera angle: Through-the-trees tracking shot
Lighting: Natural forest lighting with sun rays
Color palette: Forest greens, golden sunlight, deep shadows""",
            "emoji": "🦗"
        },
        "Onyx Leapfrog": {
            "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
the reflection on the water broken into ripples by the leap.
The surrounding forest is vibrant with greens and browns.
Camera angle: Low angle from water level
Lighting: Golden hour side lighting
Color palette: Deep blacks, water blues, forest greens""",
            "emoji": "🐸"
        }
    }
}

# Helper Functions

def generate_filename(prompt, file_type):
    """Generate a safe filename using the prompt and file type.

    The name is `MMDD_HHMM_<sanitized prompt>.<ext>` in US Central time;
    filesystem-hostile characters are replaced and the prompt capped at 240 chars.
    """
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:240]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"


def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save a file with proper handling of different content types.

    Text content is prefixed with the prompt when one is given; binary payloads
    (is_image=True) are written verbatim.  Returns the filename, or None when
    saving is disabled.
    """
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image:
        # BUG FIX: binary payloads (images/audio bytes) were previously written
        # through a text-mode handle, which raises TypeError for bytes.
        with open(filename, "wb") as f:
            f.write(content if isinstance(content, bytes) else content.encode("utf-8"))
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename


def get_download_link(file_path):
    """Create an HTML download link (base64 data URI) for a local file."""
    with open(file_path, "rb") as file:
        contents = file.read()
    b64 = base64.b64encode(contents).decode()
    # NOTE(review): the anchor markup was stripped from the damaged source and
    # has been reconstructed; verify against the original layout.
    return (f'<a href="data:file/txt;base64,{b64}" '
            f'download="{os.path.basename(file_path)}">'
            f'Download {os.path.basename(file_path)}📂</a>')


@st.cache_resource
def SpeechSynthesis(result):
    """Render an HTML5 SpeechSynthesis widget that reads `result` aloud."""
    # NOTE(review): the HTML/JS below was stripped from the damaged source and
    # has been reconstructed from the surviving "Read It Aloud" fragments.
    documentHTML5 = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{result}</textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)


# Media Processing Functions

def process_image(image_input, user_prompt):
    """Process an image (path or raw bytes) with GPT-4o vision; returns Markdown."""
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()
    base64_image = base64.b64encode(image_input).decode("utf-8")
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system",
             "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {
                    "url": f"data:image/png;base64,{base64_image}"
                }}
            ]}
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content


def process_audio(audio_input, text_input=''):
    """Transcribe audio (path, bytes, or file-like) with Whisper and display it.

    Returns the transcription text so callers can show or reuse it.
    """
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()
    elif hasattr(audio_input, "read"):
        audio_input = audio_input.read()
    # BUG FIX: the transcription endpoint needs a *named* file-like object;
    # raw bytes are rejected by the SDK.
    audio_file = BytesIO(audio_input)
    audio_file.name = "audio.wav"
    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)
    # Persist the raw audio alongside its transcription as the prompt.
    create_and_save_file(audio_input, "wav", transcription.text, True)
    return transcription.text


def process_video(video_path, seconds_per_frame=1):
    """Extract base64 JPEG frames (one per `seconds_per_frame`) and the audio track.

    Returns (frames, audio_path); audio_path is None when extraction fails.
    """
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # BUG FIX: guard against a zero step (fps unavailable or sub-second sampling),
    # which would make range() raise ValueError.
    frames_to_skip = max(1, int(fps * seconds_per_frame))
    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
    video.release()
    # Extract audio next to the video file.
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
    try:
        video_clip = VideoFileClip(video_path)
        video_clip.audio.write_audiofile(audio_path)
        video_clip.close()
    except Exception:  # narrowed from bare except; moviepy raises various types here
        st.warning("No audio track found in video")
        audio_path = None
    return base64Frames, audio_path


def process_video_with_gpt(video_input, user_prompt):
    """Send sampled video frames to GPT-4o vision and return its description."""
    base64Frames, audio_path = process_video(video_input)
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system",
             "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                *[{"type": "image_url",
                   "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
                  for frame in base64Frames]
            ]}
        ]
    )
    return response.choices[0].message.content


# ArXiv Search Functions

def search_arxiv(query):
    """Search ArXiv papers via the hosted RAG Space; returns the LLM answer."""
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    response = client.predict(
        query,
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        True,
        api_name="/ask_llm"
    )
    return response


# Chat Processing Functions

def process_with_gpt(text_input):
    """Run one chat turn through GPT-4o, persist it, and return the reply."""
    if text_input:
        st.session_state.messages.append({"role": "user", "content": text_input})
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            completion = openai_client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=False
            )
            return_text = completion.choices[0].message.content
            st.write("GPT-4o: " + return_text)
            # BUG FIX: the original called undefined create_file() -> NameError;
            # create_and_save_file covers the same save-with-prompt behavior.
            create_and_save_file(return_text, "md", text_input)
            st.session_state.messages.append(
                {"role": "assistant", "content": return_text})
        return return_text


def process_with_claude(text_input):
    """Run one chat turn through Claude, persist it, and return the reply."""
    if text_input:
        response = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[
                {"role": "user", "content": text_input}
            ]
        )
        response_text = response.content[0].text
        st.write("Claude: " + response_text)
        # BUG FIX: the original called undefined create_file() -> NameError.
        create_and_save_file(response_text, "md", text_input)
        st.session_state.chat_history.append({
            "user": text_input,
            "claude": response_text
        })
        return response_text


# File Management Functions

def load_file(file_name):
    """Load and return a UTF-8 text file's content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content


def create_zip_of_files(files):
    """Create a zip archive of the given files; returns the archive name."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name


def get_media_html(media_path, media_type="video", width="100%"):
    """Generate an inline HTML player (base64 data URI) for a video or audio file."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    # NOTE(review): the player markup was stripped from the damaged source and
    # has been reconstructed; verify attributes against the original.
    if media_type == "video":
        return f'''
        <video width="{width}" controls>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''


def create_media_gallery():
    """Create the media gallery interface (images, audio, video, scene prompts)."""
    st.header("🎬 Media Gallery")
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])

    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_column_width=True)
                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(
                            image_file,
                            "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)

    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"),
                            unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                    st.write(transcription)

    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"),
                            unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(
                        video_file, "Describe what's happening in this video.")
                    st.markdown(analysis)

    with tabs[3]:
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))
            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    # NOTE(review): card markup reconstructed; original HTML was
                    # stripped from the damaged source.
                    st.markdown(f"""
                    <div style='border: 1px solid #ddd; border-radius: 10px; padding: 10px; margin: 5px;'>
                        <h3>{details['emoji']} {bike_name}</h3>
                        <p style='font-size: 0.9em;'>{details['prompt']}</p>
                    </div>
                    """, unsafe_allow_html=True)
                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
                        # Here you could integrate with image generation API
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)


def display_file_manager():
    """Display the file management sidebar (view/download/edit/delete .md files)."""
    st.sidebar.title("📁 File Management")
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)

    if st.sidebar.button("🗑 Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key="view_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="edit_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key="delete_" + file):
                os.remove(file)
                st.rerun()


def main():
    """App entry point: navigation between chat, gallery, ArXiv search and editor."""
    st.title("🚲 Bike Cinematic Universe & AI Assistant")

    # Main navigation
    tab_main = st.radio(
        "Choose Action:",
        ["💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
        horizontal=True)

    if tab_main == "💬 Chat":
        # Model Selection
        model_choice = st.sidebar.radio(
            "Choose AI Model:",
            ["GPT-4o", "Claude-3", "Both"]
        )
        # Chat Interface
        user_input = st.text_area("Message:", height=100)
        if st.button("Send 📨"):
            if user_input:
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                else:  # Both
                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("GPT-4o Response")
                        gpt_response = process_with_gpt(user_input)
                    with col2:
                        st.subheader("Claude-3 Response")
                        claude_response = process_with_claude(user_input)

        # Display Chat History
        st.subheader("Chat History 📜")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
        with tab1:
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100, disabled=True)
                st.text_area("Claude:", chat["claude"], height=200, disabled=True)
                st.markdown("---")
        with tab2:
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:",
                                       st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w',
                          encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()


if __name__ == "__main__":
    main()