import anthropic
import base64
import cv2
import glob
import json
import math
import openai
import os
import plotly.graph_objects as go
import pytz
import random
import re
import requests
import streamlit as st
import streamlit.components.v1 as components
import textract
import time
import zipfile
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from datetime import datetime
from dotenv import load_dotenv
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient
from io import BytesIO
from moviepy.editor import VideoFileClip
from openai import OpenAI
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
# Configuration and Setup
Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🚲🏆'
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
# Load environment variables and initialize clients
load_dotenv()

# OpenAI setup: prefer the environment variable, fall back to Streamlit secrets
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    api_key=openai.api_key,
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup: environment variable with the same secrets fallback
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
# Initialize session states
if 'transcript_history' not in st.session_state:
    st.session_state.transcript_history = []
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []
if 'last_voice_input' not in st.session_state:
    st.session_state.last_voice_input = ""
# Speech Recognition HTML Component
speech_recognition_html = """
<!DOCTYPE html>
<html>
<head>
    <title>Continuous Speech Demo</title>
    <style>
        body {
            font-family: sans-serif;
            padding: 20px;
            max-width: 800px;
            margin: 0 auto;
        }
        button {
            padding: 10px 20px;
            margin: 10px 5px;
            font-size: 16px;
        }
        #status {
            margin: 10px 0;
            padding: 10px;
            background: #e8f5e9;
            border-radius: 4px;
        }
        #output {
            white-space: pre-wrap;
            padding: 15px;
            background: #f5f5f5;
            border-radius: 4px;
            margin: 10px 0;
            min-height: 100px;
            max-height: 400px;
            overflow-y: auto;
        }
        .controls {
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <div class="controls">
        <button id="start">Start Listening</button>
        <button id="stop" disabled>Stop Listening</button>
        <button id="clear">Clear Text</button>
    </div>
    <div id="status">Ready</div>
    <div id="output"></div>

    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
            const startButton = document.getElementById('start');
            const stopButton = document.getElementById('stop');
            const clearButton = document.getElementById('clear');
            const status = document.getElementById('status');
            const output = document.getElementById('output');
            let fullTranscript = '';
            let lastUpdateTime = Date.now();

            // Configure recognition for continuous dictation with interim results
            recognition.continuous = true;
            recognition.interimResults = true;

            // Start recognition and toggle button state
            const startRecognition = () => {
                try {
                    recognition.start();
                    status.textContent = 'Listening...';
                    startButton.disabled = true;
                    stopButton.disabled = false;
                } catch (e) {
                    console.error(e);
                    status.textContent = 'Error: ' + e.message;
                }
            };

            // Auto-start on load
            window.addEventListener('load', () => {
                setTimeout(startRecognition, 1000);
            });

            startButton.onclick = startRecognition;

            stopButton.onclick = () => {
                recognition.stop();
                status.textContent = 'Stopped';
                startButton.disabled = false;
                stopButton.disabled = true;
            };

            clearButton.onclick = () => {
                fullTranscript = '';
                output.textContent = '';
                window.parent.postMessage({
                    type: 'clear_transcript',
                }, '*');
            };

            recognition.onresult = (event) => {
                let interimTranscript = '';
                let finalTranscript = '';

                for (let i = event.resultIndex; i < event.results.length; i++) {
                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
                        finalTranscript += transcript + '\\n';
                    } else {
                        interimTranscript += transcript;
                    }
                }

                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                    if (finalTranscript) {
                        fullTranscript += finalTranscript;
                        // Send the finalized text up to the parent page
                        window.parent.postMessage({
                            type: 'final_transcript',
                            text: finalTranscript
                        }, '*');
                    }
                    lastUpdateTime = Date.now();
                }

                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                output.scrollTop = output.scrollHeight;
            };

            // The browser ends recognition periodically; restart unless the user pressed Stop
            recognition.onend = () => {
                if (!stopButton.disabled) {
                    try {
                        recognition.start();
                        console.log('Restarted recognition');
                    } catch (e) {
                        console.error('Failed to restart recognition:', e);
                        status.textContent = 'Error restarting: ' + e.message;
                        startButton.disabled = false;
                        stopButton.disabled = true;
                    }
                }
            };

            recognition.onerror = (event) => {
                console.error('Recognition error:', event.error);
                status.textContent = 'Error: ' + event.error;
                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            };
        }
    </script>
</body>
</html>
"""
# Helper Functions
def generate_filename(prompt, file_type):
    """Create a timestamped, filesystem-safe filename from a prompt."""
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
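
# Example (assumed output; the timestamp prefix varies):
#   generate_filename("What is quantum computing?", "md")
#   -> "1105_1432_What is quantum computing.md"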
# [Previous helper functions remain the same]
# ... [Include all the helper functions from the second file]
# Hedged sketches of those helpers follow so this file runs standalone.
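
# ---------------------------------------------------------------------------
# Hedged sketches of the helpers main() depends on. These are assumptions
# reconstructed from the call sites below, NOT the original implementations
# (which live in the second file); swap in the real versions if available.
# ---------------------------------------------------------------------------

def process_with_gpt(text):
    # Sketch: single-turn call to the configured GPT model; the original
    # likely also appends to st.session_state.messages and saves a file.
    completion = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[{"role": "user", "content": text}],
    )
    reply = completion.choices[0].message.content
    st.write(reply)
    return reply

def process_with_claude(text):
    # Sketch: single-turn call to Claude 3.5 Sonnet (model name assumed).
    message = claude_client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        messages=[{"role": "user", "content": text}],
    )
    reply = message.content[0].text
    st.write(reply)
    return reply

def search_arxiv(query):
    # Sketch: query the public arXiv Atom API and format the top hits as
    # markdown. The original may route through a Mistral-backed HF Space.
    url = f"http://export.arxiv.org/api/query?search_query=all:{quote(query)}&max_results=5"
    ns = {'atom': 'http://www.w3.org/2005/Atom'}
    feed = ET.fromstring(requests.get(url, timeout=30).text)
    lines = []
    for entry in feed.findall('atom:entry', ns):
        entry_title = ' '.join(entry.find('atom:title', ns).text.split())
        link = entry.find('atom:id', ns).text.strip()
        summary = ' '.join(entry.find('atom:summary', ns).text.split())[:200]
        lines.append(f"- [{entry_title}]({link}) - {summary}...")
    return "\n".join(lines) or "No results found."

def perform_ai_lookup(text):
    # Sketch: the original pairs arXiv search with a Mistral research agent;
    # this stub returns the raw search results only.
    return search_arxiv(text)

def create_media_gallery():
    # Sketch: display local images in a three-column grid.
    files = sorted(glob.glob("*.png") + glob.glob("*.jpg"))
    if not files:
        st.write("No media files found.")
        return
    cols = st.columns(3)
    for idx, path in enumerate(files):
        with cols[idx % 3]:
            st.image(Image.open(path), caption=os.path.basename(path))

def display_file_manager():
    # Sketch: sidebar list of saved markdown files; clicking one loads it
    # into the File Editor tab via session state.
    st.sidebar.markdown("### 📁 Files")
    for path in sorted(glob.glob("*.md"), reverse=True):
        if st.sidebar.button(f"📄 {os.path.basename(path)}", key=f"open_{path}"):
            st.session_state.current_file = path
            with open(path, 'r', encoding='utf-8') as f:
                st.session_state.file_content = f.read()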
def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")

    # Main navigation
    tab_main = st.radio("Choose Action:",
                        ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
                        horizontal=True)

    if tab_main == "🎤 Voice Input":
        st.subheader("Voice Recognition")

        # Display speech recognition component (render-only; see the note
        # after speech_recognition_html above)
        speech_component = components.html(speech_recognition_html, height=400)

        # Handle speech recognition output; this branch only fires if the
        # HTML block is replaced by a bidirectional component that returns data
        if speech_component:
            try:
                data = speech_component
                if isinstance(data, dict):
                    if data.get('type') == 'final_transcript':
                        text = data.get('text', '').strip()
                        if text:
                            st.session_state.last_voice_input = text

                            # Process voice input with all three agents
                            st.subheader("AI Response to Voice Input:")
                            col1, col2, col3 = st.columns(3)
                            with col1:
                                st.write("GPT-4o Omni:")
                                try:
                                    gpt_response = process_with_gpt(text)
                                except Exception:
                                    st.write('GPT-4o out of tokens.')
                            with col2:
                                st.write("Claude-3.5 Sonnet:")
                                try:
                                    claude_response = process_with_claude(text)
                                except Exception:
                                    st.write('Claude 3.5 Sonnet out of tokens.')
                            with col3:
                                st.write("Arxiv and Mistral Research:")
                                with st.spinner("Searching ArXiv..."):
                                    results = perform_ai_lookup(text)
                                    st.markdown(results)
                    elif data.get('type') == 'clear_transcript':
                        st.session_state.last_voice_input = ""
                        st.experimental_rerun()  # st.rerun() on newer Streamlit
            except Exception as e:
                st.error(f"Error processing voice input: {e}")

        # Display last voice input
        if st.session_state.last_voice_input:
            st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)

    # [Rest of the main function remains the same]
    elif tab_main == "💬 Chat":
        # [Previous chat interface code]
        pass

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()
if __name__ == "__main__":
    main()