awacke1 committed on
Commit b587eb2 • 1 Parent(s): ea60561

Update app.py

Files changed (1)
  1. app.py +23 -28
app.py CHANGED
@@ -619,7 +619,10 @@ speech_recognition_html = """
         </div>
         <div id="status">Ready</div>
         <div id="output"></div>
-
+
+        <!-- Add the hidden input here -->
+        <input type="hidden" id="streamlit-data" value="">
+
         <script>
             if (!('webkitSpeechRecognition' in window)) {
                 alert('Speech recognition not supported');
@@ -688,18 +691,18 @@ speech_recognition_html = """
                 if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                     if (finalTranscript) {
                         fullTranscript += finalTranscript;
-                        // Send to Streamlit
-                        window.parent.postMessage({
-                            type: 'final_transcript',
-                            text: finalTranscript
-                        }, '*');
+
+                        // Update the hidden input value
+                        document.getElementById('streamlit-data').value = fullTranscript;
                     }
                     lastUpdateTime = Date.now();
-                }
-
+                }
 
                 output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                 output.scrollTop = output.scrollHeight;
+
+                document.getElementById('streamlit-data').value = fullTranscript;
+
             };
 
             recognition.onend = () => {
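
For reference, the JavaScript above only mirrors the running transcript into the hidden #streamlit-data input inside the component's iframe; it does not by itself hand the value to Python. A minimal, self-contained sketch of the same hidden-input pattern (the HTML and the transcript text here are illustrative, not taken from app.py; the Python-side changes appear further below):

import streamlit.components.v1 as components

demo_html = '''
<div id="output"></div>
<!-- hidden input kept up to date by the page's own JavaScript -->
<input type="hidden" id="streamlit-data" value="">
<script>
    const fullTranscript = 'hello from the component';
    document.getElementById('output').textContent = fullTranscript;
    document.getElementById('streamlit-data').value = fullTranscript;
</script>
'''

# Renders the embedded page; reading the hidden input back on the Python side
# still needs a separate mechanism (see the Python-side changes below).
components.html(demo_html, height=120)
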
@@ -864,7 +867,6 @@ def set_transcript(text):
 
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
-
     # Main navigation
     tab_main = st.radio("Choose Action:",
                         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
@@ -873,38 +875,31 @@ def main():
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
 
-        # Display speech recognition component
-        speech_component = st.components.v1.html(speech_recognition_html, height=400)
-        #Experiment: Use `st.session_state` to store the transcript
+        # Initialize session state for the transcript
         if 'voice_transcript' not in st.session_state:
             st.session_state.voice_transcript = ""
-
-        # JavaScript and Streamlit integration to capture transcript
-        st.components.v1.html("""
-        <script>
-            window.addEventListener('message', (event) => {
-                if (event.data.type === 'final_transcript') {
-                    const transcript = event.data.text;
-                    // Update Streamlit session state with the transcript
-                    Streamlit.setComponentValue(transcript);
-                }
-            });
-        </script>
-        """, height=0)
+
+        # Display speech recognition component and capture returned value
+        transcript = st.components.v1.html(speech_recognition_html, height=400)
+
+        # Update session state if there's new data
+        if transcript is not None:
+            st.session_state.voice_transcript = transcript
+
         # Display the transcript in a Streamlit text area
         st.markdown("### Processed Voice Input:")
         st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
-
+
         # Add functionality to process the transcript
         if st.button("Process Transcript"):
             st.subheader("AI Response to Transcript")
             gpt_response = process_with_gpt(st.session_state.voice_transcript)
             st.markdown(gpt_response)
-
+
         # Option to clear the transcript
         if st.button("Clear Transcript"):
             st.session_state.voice_transcript = ""
-
+            st.rerun()
 
         #st.markdown("### Processed Voice Input:")
         #st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
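
On the Python side, the commit now relies on the return value of st.components.v1.html(). If that value ever comes back empty in practice, the usual route for passing data from embedded JavaScript back to Python is a bidirectional custom component. A minimal sketch under that assumption; the component name, the ./speech_component directory, and its frontend wiring are hypothetical and not part of this commit:

import streamlit as st
import streamlit.components.v1 as components

# Hypothetical custom component: ./speech_component/index.html would load the
# Streamlit component library and call Streamlit.setComponentValue(fullTranscript)
# whenever the recognized text changes.
speech_to_text = components.declare_component("speech_to_text", path="./speech_component")

if 'voice_transcript' not in st.session_state:
    st.session_state.voice_transcript = ""

# The call returns whatever the frontend last sent via setComponentValue();
# `default` is returned until the frontend reports a value.
transcript = speech_to_text(default="", key="speech_to_text")
if transcript:
    st.session_state.voice_transcript = transcript

st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
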
 