awacke1 committed on
Commit e31baf6 • 1 Parent(s): 100d7d9

Update app.py

Files changed (1)
  1. app.py +74 -101
app.py CHANGED
@@ -55,14 +55,8 @@ st.set_page_config(
 
 
 
-def get_cookie_manager():
-    """Create and return a cookie manager."""
-    return stx.CookieManager()
-
-
-
 def create_speech_component():
-    """Create speech recognition component with reliable Streamlit communication."""
+    """Create speech recognition component with direct value monitoring."""
 
     speech_recognition_html = """
     <div style="padding: 20px;">
@@ -73,30 +67,30 @@ def create_speech_component():
     </div>
     <div id="status" style="margin: 10px 0; padding: 10px; background: #e8f5e9;">Ready</div>
     <div id="output" style="white-space: pre-wrap; padding: 15px; background: #f5f5f5; min-height: 100px; max-height: 400px; overflow-y: auto;"></div>
-    <div id="debug" style="margin-top: 10px; color: #666;"></div>
+
+    <!-- Hidden input for Streamlit communication -->
+    <input type="hidden" id="transcript_value" name="transcript_value" value="">
 
     <script>
-        // Initialize Streamlit component communication
-        const Streamlit = window.Streamlit || {};
-
-        // Set up base recognition
         const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
         const startButton = document.getElementById('start');
         const stopButton = document.getElementById('stop');
         const clearButton = document.getElementById('clear');
         const status = document.getElementById('status');
         const output = document.getElementById('output');
-        const debug = document.getElementById('debug');
+        const transcriptInput = document.getElementById('transcript_value');
         let fullTranscript = '';
 
+        // Set up recognition
         recognition.continuous = true;
         recognition.interimResults = true;
 
-        function updateStreamlit(text) {
-            if (window.Streamlit) {
-                window.Streamlit.setComponentValue(text);
-                debug.textContent = `Last update: ${new Date().toLocaleTimeString()} - Length: ${text.length}`;
-            }
+        // Function to update transcript value
+        function updateTranscript(text) {
+            transcriptInput.value = text;
+            // Trigger a change event
+            const event = new Event('change', { bubbles: true });
+            transcriptInput.dispatchEvent(event);
         }
 
         startButton.onclick = () => {
@@ -104,7 +98,6 @@ def create_speech_component():
             status.textContent = '🎤 Listening...';
             startButton.disabled = true;
             stopButton.disabled = false;
-            debug.textContent = 'Started listening...';
         };
 
         stopButton.onclick = () => {
@@ -112,15 +105,13 @@ def create_speech_component():
             status.textContent = 'Stopped';
             startButton.disabled = false;
             stopButton.disabled = true;
-            updateStreamlit(fullTranscript);
-            debug.textContent = 'Stopped listening...';
+            updateTranscript(fullTranscript);
         };
 
         clearButton.onclick = () => {
            fullTranscript = '';
            output.textContent = '';
-            updateStreamlit('');
-            debug.textContent = 'Cleared transcript...';
+            updateTranscript('');
         };
 
         recognition.onresult = (event) => {
@@ -132,7 +123,7 @@ def create_speech_component():
                 if (event.results[i].isFinal) {
                     finalTranscript += transcript + ' ';
                     fullTranscript += transcript + ' ';
-                    updateStreamlit(fullTranscript);
+                    updateTranscript(fullTranscript);
                 } else {
                     interimTranscript += transcript;
                 }
@@ -145,56 +136,41 @@ def create_speech_component():
         recognition.onend = () => {
             if (!stopButton.disabled) {
                 recognition.start();
-                debug.textContent = 'Restarted recognition...';
-            }
-        };
-
-        recognition.onerror = (event) => {
-            debug.textContent = `Error: ${event.error} at ${new Date().toLocaleTimeString()}`;
-            if (event.error === 'not-allowed') {
-                startButton.disabled = false;
-                stopButton.disabled = true;
             }
         };
 
-        // Auto-start after component is ready
-        if (window.Streamlit) {
-            window.Streamlit.setComponentReady();
+        // Auto-start
+        window.addEventListener('load', () => {
             setTimeout(() => startButton.click(), 1000);
-        }
+        });
     </script>
     </div>
     """
 
-    # Create the component
-    component_value = components.html(
-        speech_recognition_html,
-        height=400
-    )
-
-    return component_value
+    return components.html(speech_recognition_html, height=400)
 
 def integrate_speech_component():
-    """Integrate speech component with proper state management."""
+    """Integrate speech component with session state."""
     if "voice_transcript" not in st.session_state:
         st.session_state.voice_transcript = ""
 
-    # Display debug container
-    debug_container = st.empty()
-
-    try:
-        # Get value from component
-        value = create_speech_component()
-
-        # Update session state if we have valid data
-        if value and isinstance(value, str):
-            st.session_state.voice_transcript = value
-            debug_container.text(f"Debug: Received transcript of length {len(value)}")
-            return value
-
-    except Exception as e:
-        st.error(f"Component error: {str(e)}")
+    # Create a container for the transcript
+    transcript_container = st.empty()
 
+    # Create the component and get value from hidden input
+    component = create_speech_component()
+
+    # Get current transcript value
+    if component is not None:
+        st.session_state.voice_transcript = component
+        # Update display
+        transcript_container.text_area(
+            "Voice Transcript:",
+            value=st.session_state.voice_transcript,
+            height=100,
+            key="transcript_display"
+        )
+
     return st.session_state.voice_transcript
@@ -1016,53 +992,50 @@ def set_transcript(text):
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
 
-    # Main navigation
     tab_main = st.radio("Choose Action:",
-                        ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
-                        horizontal=True)
-
+                        ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+                        horizontal=True)
+
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
 
-        try:
-            # Get transcript from the speech component
-            current_transcript = integrate_speech_component()
-
-            # Display the transcript with live updates
-            transcript_placeholder = st.empty()
-            if current_transcript:
-                transcript_placeholder.text_area(
-                    "Voice Transcript (Live)",
-                    value=current_transcript,
-                    height=100
-                )
-
-            # Process buttons
-            col1, col2, col3 = st.columns(3)
-
-            with col1:
-                if st.button("Process with GPT"):
-                    if current_transcript:
-                        with st.spinner("Processing with GPT..."):
-                            gpt_response = process_with_gpt(current_transcript)
-                            st.markdown(gpt_response)
-
-            with col2:
-                if st.button("Process with Claude"):
-                    if current_transcript:
-                        with st.spinner("Processing with Claude..."):
-                            claude_response = process_with_claude(current_transcript)
-                            st.markdown(claude_response)
-
-            with col3:
-                if st.button("Search ArXiv"):
-                    if current_transcript:
-                        with st.spinner("Searching ArXiv..."):
-                            arxiv_results = perform_ai_lookup(current_transcript)
-                            st.markdown(arxiv_results)
-
-        except Exception as e:
-            st.error(f"Error in voice input tab: {str(e)}")
+        # Debug toggle
+        show_debug = st.checkbox("Show Debug Info")
+
+        try:
+            # Get transcript
+            current_transcript = integrate_speech_component()
+
+            if show_debug:
+                st.write("Session State:", st.session_state)
+                st.write("Current Transcript:", current_transcript)
+
+            # Process buttons
+            if current_transcript:
+                col1, col2, col3 = st.columns(3)
+
+                with col1:
+                    if st.button("Process with GPT"):
+                        with st.spinner("Processing with GPT..."):
+                            response = process_with_gpt(current_transcript)
+                            st.markdown(response)
+
+                with col2:
+                    if st.button("Process with Claude"):
+                        with st.spinner("Processing with Claude..."):
+                            response = process_with_claude(current_transcript)
+                            st.markdown(response)
+
+                with col3:
+                    if st.button("Search ArXiv"):
+                        with st.spinner("Searching ArXiv..."):
+                            results = perform_ai_lookup(current_transcript)
+                            st.markdown(results)
+
+        except Exception as e:
+            st.error(f"Error in voice input: {str(e)}")
+            if show_debug:
+                st.exception(e)
 
 
     # Always show file manager in sidebar
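A note on the communication pattern this commit relies on: `streamlit.components.v1.html` embeds static markup and returns `None` to Python, so neither the removed `window.Streamlit.setComponentValue(...)` call nor the new hidden `<input>` with a dispatched `change` event can, on its own, deliver the transcript back to the app. The supported route is a bidirectional custom component. A minimal sketch, assuming a hypothetical `speech_component/build` bundle whose JavaScript loads the Streamlit component library and calls `Streamlit.setComponentValue(text)` on each final result:

```python
import streamlit.components.v1 as components

# Hypothetical frontend bundle: an index.html that loads the Streamlit
# component library and calls Streamlit.setComponentValue(text) whenever
# the Web Speech API reports a final transcript.
speech_to_text = components.declare_component(
    "speech_to_text",
    path="./speech_component/build",
)

def get_transcript() -> str:
    """Return the latest transcript reported by the frontend ('' until one arrives)."""
    # Whatever the frontend passes to Streamlit.setComponentValue(...)
    # becomes this call's return value on the next rerun.
    return speech_to_text(default="") or ""
```

With a declared component, each `setComponentValue` call triggers a rerun and the Python side receives the new value as the function's return, which is the behavior `integrate_speech_component()` assumes of `create_speech_component()`.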
 
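Finally, the three action buttons in `main()` share one shape (button, spinner, render the result), so they can be driven from a small table instead of three near-identical blocks. A sketch, assuming `process_with_gpt`, `process_with_claude`, and `perform_ai_lookup` keep their signatures from elsewhere in app.py:

```python
import streamlit as st

def render_actions(transcript: str) -> None:
    """One column per backend; every action is guarded by the same transcript."""
    if not transcript:
        return
    actions = [
        ("Process with GPT", process_with_gpt),
        ("Process with Claude", process_with_claude),
        ("Search ArXiv", perform_ai_lookup),
    ]
    for col, (label, handler) in zip(st.columns(3), actions):
        with col:
            if st.button(label):
                with st.spinner(f"{label}..."):
                    st.markdown(handler(transcript))
```

Because Streamlit derives widget identity from the label when no `key` is given, the three labels must stay distinct, which they do here.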