Update app.py
app.py CHANGED
@@ -619,7 +619,10 @@ speech_recognition_html = """
     </div>
     <div id="status">Ready</div>
     <div id="output"></div>
-
+
+    <!-- Add the hidden input here -->
+    <input type="hidden" id="streamlit-data" value="">
+
     <script>
         if (!('webkitSpeechRecognition' in window)) {
             alert('Speech recognition not supported');
@@ -688,18 +691,18 @@ speech_recognition_html = """
                 if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                     if (finalTranscript) {
                         fullTranscript += finalTranscript;
-
-                        window.parent.postMessage({
-                            type: 'transcript',
-                            text: finalTranscript
-                        }, '*');
+
+                        // Update the hidden input value
+                        document.getElementById('streamlit-data').value = fullTranscript;
                     }
                     lastUpdateTime = Date.now();
-                }
-
+                }
 
                 output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                 output.scrollTop = output.scrollHeight;
+
+                document.getElementById('streamlit-data').value = fullTranscript;
+
             };
 
             recognition.onend = () => {
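Taken together, these two hunks drop the iframe-to-parent postMessage bridge and instead mirror the running transcript into the hidden #streamlit-data input. A minimal, self-contained sketch of that hidden-input pattern (illustrative only; the element id is the only name shared with the app's code):

    import streamlit.components.v1 as components

    # Sketch of the hidden-input pattern used above: one script writes the value,
    # and anything else running in the same component document can read it back.
    # A static component like this never sends the value to the Streamlit server.
    components.html("""
        <input type="hidden" id="streamlit-data" value="">
        <button onclick="alert(document.getElementById('streamlit-data').value)">
            Show stored value
        </button>
        <script>
            document.getElementById('streamlit-data').value = 'transcript goes here';
        </script>
    """, height=60)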
@@ -864,7 +867,6 @@ def set_transcript(text):
 
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
-
     # Main navigation
     tab_main = st.radio("Choose Action:",
                         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
@@ -873,38 +875,31 @@ def main():
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
 
-        #
-        speech_component = st.components.v1.html(speech_recognition_html, height=400)
-        #Experiment: Use `st.session_state` to store the transcript
+        # Initialize session state for the transcript
         if 'voice_transcript' not in st.session_state:
             st.session_state.voice_transcript = ""
-
-        #
-        st.components.v1.html(
-            """
-            <script>
-                window.addEventListener('message', (event) => {
-                    if (event.data && event.data.text) {
-                        const transcript = event.data.text;
-                        Streamlit.setComponentValue(transcript);
-                    }
-                });
-            </script>
-            """, height=0)
+
+        # Display speech recognition component and capture returned value
+        transcript = st.components.v1.html(speech_recognition_html, height=400)
+
+        # Update session state if there's new data
+        if transcript is not None:
+            st.session_state.voice_transcript = transcript
+
         # Display the transcript in a Streamlit text area
         st.markdown("### Processed Voice Input:")
         st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
-
+
         # Add functionality to process the transcript
         if st.button("Process Transcript"):
             st.subheader("AI Response to Transcript")
             gpt_response = process_with_gpt(st.session_state.voice_transcript)
             st.markdown(gpt_response)
-
+
         # Option to clear the transcript
         if st.button("Clear Transcript"):
             st.session_state.voice_transcript = ""
-
+            st.rerun()
 
         #st.markdown("### Processed Voice Input:")
         #st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
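A caveat on the Python side of this change: st.components.v1.html renders a static component, so its return value is always None, and the new `transcript = st.components.v1.html(...)` assignment never receives the hidden input's contents. The deleted Streamlit.setComponentValue(transcript) call hints at what a working round trip requires: a bidirectional custom component. A minimal sketch, assuming a hypothetical frontend/ folder whose index.html loads streamlit-component-lib and reports the transcript with Streamlit.setComponentValue(...):

    import streamlit as st
    import streamlit.components.v1 as components

    # Hypothetical bidirectional component: "frontend/" is an assumed local folder
    # containing an index.html that loads streamlit-component-lib and calls
    # Streamlit.setComponentValue(...) with each new transcript.
    speech_to_text = components.declare_component("speech_to_text", path="frontend")

    transcript = speech_to_text(default="")  # "" until the JS side reports a value
    if transcript:
        st.session_state.voice_transcript = transcript

Unlike the static html() call, this component function returns whatever value the frontend last reported, refreshed on each rerun.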