Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -58,182 +58,146 @@ st.set_page_config(
|
|
58 |
def get_cookie_manager():
|
59 |
"""Create and return a cookie manager."""
|
60 |
return stx.CookieManager()
|
|
|
|
|
|
|
61 |
def create_speech_component():
|
62 |
-
"""Create speech recognition component with
|
63 |
|
64 |
speech_recognition_html = """
|
65 |
-
|
66 |
-
<html>
|
67 |
-
<head>
|
68 |
-
<title>Continuous Speech Demo</title>
|
69 |
-
<style>
|
70 |
-
body {
|
71 |
-
font-family: sans-serif;
|
72 |
-
padding: 20px;
|
73 |
-
max-width: 800px;
|
74 |
-
margin: 0 auto;
|
75 |
-
}
|
76 |
-
button {
|
77 |
-
padding: 10px 20px;
|
78 |
-
margin: 10px 5px;
|
79 |
-
font-size: 16px;
|
80 |
-
}
|
81 |
-
#status {
|
82 |
-
margin: 10px 0;
|
83 |
-
padding: 10px;
|
84 |
-
background: #e8f5e9;
|
85 |
-
border-radius: 4px;
|
86 |
-
}
|
87 |
-
#output {
|
88 |
-
white-space: pre-wrap;
|
89 |
-
padding: 15px;
|
90 |
-
background: #f5f5f5;
|
91 |
-
border-radius: 4px;
|
92 |
-
margin: 10px 0;
|
93 |
-
min-height: 100px;
|
94 |
-
max-height: 400px;
|
95 |
-
overflow-y: auto;
|
96 |
-
}
|
97 |
-
.listening {
|
98 |
-
color: green;
|
99 |
-
font-weight: bold;
|
100 |
-
}
|
101 |
-
</style>
|
102 |
-
</head>
|
103 |
-
<body>
|
104 |
<div class="controls">
|
105 |
<button id="start">Start Listening</button>
|
106 |
<button id="stop" disabled>Stop Listening</button>
|
107 |
<button id="clear">Clear Text</button>
|
108 |
</div>
|
109 |
-
<div id="status">Ready</div>
|
110 |
-
<div id="output"></div>
|
111 |
-
<div id="debug"></div>
|
112 |
|
113 |
<script>
|
114 |
-
|
115 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
}, '*');
|
125 |
-
|
126 |
-
// Debug output
|
127 |
-
document.getElementById('debug').textContent =
|
128 |
-
'Last update: ' + new Date().toLocaleTimeString();
|
129 |
}
|
130 |
}
|
131 |
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
142 |
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
147 |
recognition.start();
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
};
|
156 |
-
|
157 |
-
stopButton.onclick = () => {
|
158 |
-
recognition.stop();
|
159 |
-
status.textContent = 'Stopped';
|
160 |
-
status.className = '';
|
161 |
startButton.disabled = false;
|
162 |
stopButton.disabled = true;
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
output.textContent = '';
|
171 |
-
window.parent.postMessage({
|
172 |
-
type: 'streamlit:setComponentValue',
|
173 |
-
data: ''
|
174 |
-
}, '*');
|
175 |
-
};
|
176 |
-
|
177 |
-
recognition.onresult = (event) => {
|
178 |
-
let interimTranscript = '';
|
179 |
-
let finalTranscript = '';
|
180 |
-
|
181 |
-
for (let i = event.resultIndex; i < event.results.length; i++) {
|
182 |
-
const transcript = event.results[i][0].transcript;
|
183 |
-
if (event.results[i].isFinal) {
|
184 |
-
finalTranscript += transcript + '\\n';
|
185 |
-
fullTranscript += transcript + '\\n';
|
186 |
-
} else {
|
187 |
-
interimTranscript += transcript;
|
188 |
-
}
|
189 |
-
}
|
190 |
-
|
191 |
-
output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
|
192 |
-
output.scrollTop = output.scrollHeight;
|
193 |
-
};
|
194 |
-
|
195 |
-
recognition.onend = () => {
|
196 |
-
if (!stopButton.disabled) {
|
197 |
-
recognition.start();
|
198 |
-
}
|
199 |
-
};
|
200 |
-
|
201 |
-
// Auto-start on load
|
202 |
-
window.addEventListener('load', () => {
|
203 |
-
setTimeout(() => {
|
204 |
-
startButton.click();
|
205 |
-
}, 1000);
|
206 |
-
});
|
207 |
}
|
208 |
</script>
|
209 |
-
</
|
210 |
-
</html>
|
211 |
"""
|
212 |
|
213 |
-
#
|
214 |
-
|
215 |
speech_recognition_html,
|
216 |
-
height=400
|
217 |
)
|
|
|
|
|
218 |
|
219 |
def integrate_speech_component():
|
220 |
-
"""Integrate speech component with proper
|
221 |
if "voice_transcript" not in st.session_state:
|
222 |
st.session_state.voice_transcript = ""
|
|
|
|
|
|
|
223 |
|
224 |
try:
|
225 |
-
#
|
226 |
-
|
227 |
|
228 |
# Update session state if we have valid data
|
229 |
-
if isinstance(
|
230 |
-
st.session_state.voice_transcript =
|
231 |
-
|
232 |
-
|
|
|
233 |
except Exception as e:
|
234 |
-
st.error(f"
|
235 |
-
|
236 |
return st.session_state.voice_transcript
|
|
|
|
|
237 |
|
238 |
|
239 |
|
|
|
58 |
def get_cookie_manager():
    """Build and hand back a fresh ``stx.CookieManager`` instance."""
    manager = stx.CookieManager()
    return manager
|
61 |
+
|
62 |
+
|
63 |
+
|
64 |
def create_speech_component():
    """Render a browser speech-recognition widget and return its component value.

    NOTE(review): ``components.html`` embeds *static* HTML — it has no
    bidirectional channel, so ``window.Streamlit`` is never injected into the
    iframe and the return value is always ``None`` unless this is migrated to
    a real custom component (``streamlit.components.v1.declare_component``).
    The JS below therefore also posts a ``streamlit:setComponentValue``
    message to the parent frame as a best effort — confirm against the
    Streamlit custom-component protocol before relying on it.
    """
    speech_recognition_html = """
    <div style="padding: 20px;">
        <div class="controls">
            <button id="start">Start Listening</button>
            <button id="stop" disabled>Stop Listening</button>
            <button id="clear">Clear Text</button>
        </div>
        <div id="status" style="margin: 10px 0; padding: 10px; background: #e8f5e9;">Ready</div>
        <div id="output" style="white-space: pre-wrap; padding: 15px; background: #f5f5f5; min-height: 100px; max-height: 400px; overflow-y: auto;"></div>
        <div id="debug" style="margin-top: 10px; color: #666;"></div>

        <script>
        // Feature-detect the Web Speech API before constructing it: on
        // unsupported browsers `new undefined()` would throw and kill the widget.
        const SpeechRec = window.SpeechRecognition || window.webkitSpeechRecognition;
        const startButton = document.getElementById('start');
        const stopButton = document.getElementById('stop');
        const clearButton = document.getElementById('clear');
        const status = document.getElementById('status');
        const output = document.getElementById('output');
        const debug = document.getElementById('debug');
        let fullTranscript = '';

        if (!SpeechRec) {
            status.textContent = 'Speech recognition is not supported in this browser.';
            startButton.disabled = true;
            stopButton.disabled = true;
        } else {
            const recognition = new SpeechRec();
            recognition.continuous = true;
            recognition.interimResults = true;

            // Push the transcript back to Streamlit.  Uses the custom-component
            // API when present, otherwise falls back to the parent-frame
            // postMessage protocol (components.html never injects window.Streamlit).
            function updateStreamlit(text) {
                if (window.Streamlit) {
                    window.Streamlit.setComponentValue(text);
                } else {
                    window.parent.postMessage({
                        isStreamlitMessage: true,
                        type: 'streamlit:setComponentValue',
                        value: text
                    }, '*');
                }
                debug.textContent = `Last update: ${new Date().toLocaleTimeString()} - Length: ${text.length}`;
            }

            startButton.onclick = () => {
                recognition.start();
                status.textContent = '🎤 Listening...';
                startButton.disabled = true;
                stopButton.disabled = false;
                debug.textContent = 'Started listening...';
            };

            stopButton.onclick = () => {
                recognition.stop();
                status.textContent = 'Stopped';
                startButton.disabled = false;
                stopButton.disabled = true;
                updateStreamlit(fullTranscript);
                debug.textContent = 'Stopped listening...';
            };

            clearButton.onclick = () => {
                fullTranscript = '';
                output.textContent = '';
                updateStreamlit('');
                debug.textContent = 'Cleared transcript...';
            };

            recognition.onresult = (event) => {
                let interimTranscript = '';
                for (let i = event.resultIndex; i < event.results.length; i++) {
                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
                        fullTranscript += transcript + ' ';
                        updateStreamlit(fullTranscript);
                    } else {
                        interimTranscript += transcript;
                    }
                }
                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                output.scrollTop = output.scrollHeight;
            };

            // Continuous recognition is stopped by the browser periodically;
            // restart while the user has not pressed Stop.
            recognition.onend = () => {
                if (!stopButton.disabled) {
                    recognition.start();
                    debug.textContent = 'Restarted recognition...';
                }
            };

            recognition.onerror = (event) => {
                debug.textContent = `Error: ${event.error} at ${new Date().toLocaleTimeString()}`;
                if (event.error === 'not-allowed') {
                    // Permission denied: reset the buttons so the user can retry.
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            };

            if (window.Streamlit) {
                window.Streamlit.setComponentReady();
            }
            // Auto-start once the widget is rendered (unconditionally — the
            // previous window.Streamlit guard meant this never ran under
            // components.html, so listening never auto-started).
            setTimeout(() => startButton.click(), 1000);
        }
        </script>
    </div>
    """

    # Render the widget; for static HTML this returns None (see docstring).
    component_value = components.html(
        speech_recognition_html,
        height=400
    )

    return component_value
|
176 |
|
177 |
def integrate_speech_component():
    """Wire the speech widget into session state and return the current transcript."""
    state = st.session_state

    # Make sure the transcript key exists before the component reads/writes it.
    if "voice_transcript" not in state:
        state.voice_transcript = ""

    # Placeholder used for lightweight debug feedback in the page.
    debug_container = st.empty()

    try:
        component_value = create_speech_component()

        # Only accept non-empty string payloads from the component.
        if component_value and isinstance(component_value, str):
            state.voice_transcript = component_value
            debug_container.text(f"Debug: Received transcript of length {len(component_value)}")
            return component_value

    except Exception as e:
        st.error(f"Component error: {str(e)}")

    # Fall back to whatever transcript we already had.
    return state.voice_transcript
|
199 |
+
|
200 |
+
|
201 |
|
202 |
|
203 |
|