import { useEffect, useState, useRef } from 'react';
import { LocalDB, Group, TranscriptionSegment } from '../lib/localdb';
import { translateText, apiHealth, preloadModel, sttStatus, transcribeAudio } from '../lib/api';
import { useAuth } from '../contexts/useAuth';
import { useNavigate } from './NavigationHooks';
import { Mic, MicOff, StopCircle, Copy, Check, QrCode, Users, Trash2, Download } from 'lucide-react';
import { QRCodeDisplay } from './QRCodeDisplay';

interface HostViewProps {
  groupId: string;
}

export function HostView({ groupId }: HostViewProps) {
  const { user } = useAuth();
  const navigate = useNavigate();

  const [group, setGroup] = useState<Group | null>(null);
  const [isRecording, setIsRecording] = useState(false);
  // Segments are always kept sorted by sequence number
  const [segments, setSegments] = useState<TranscriptionSegment[]>([]);
  const [memberCount, setMemberCount] = useState(0);
  const [showQR, setShowQR] = useState(false);
  const [copied, setCopied] = useState(false);
  const [interimText, setInterimText] = useState('');
  const [apiOk, setApiOk] = useState<boolean | null>(null);

  // Show only originals toggle
  const [showOriginalsOnly, setShowOriginalsOnly] = useState(false);

  // Malay-English model test state
  const [msEnAvailable, setMsEnAvailable] = useState<boolean | null>(null);
  const [msEnChecking, setMsEnChecking] = useState(false);

  // Parallel translation: track IDs currently being translated
  const [translatingIds, setTranslatingIds] = useState<Set<string>>(new Set());

  // Advanced audio upload support (Whisper STT)
  const [sttAvailable, setSttAvailable] = useState(false);
  const [sttBusy, setSttBusy] = useState(false);

  const [retryingIds, setRetryingIds] = useState<Set<string>>(new Set());
  const [bulkRetrying, setBulkRetrying] = useState(false);

  // Sentence-level buffering for mic recognition. A ref mirrors the buffer so
  // the recognition callbacks (created once in startRecording) never read a
  // stale closure value; updateBuffer keeps state and ref in sync.
  const [bufferedText, setBufferedText] = useState('');
  const bufferedTextRef = useRef('');
  const updateBuffer = (v: string) => {
    bufferedTextRef.current = v;
    setBufferedText(v);
  };

  const flushTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null);
  const recognitionRef = useRef<any>(null);
  const isRecordingFlagRef = useRef(false);
  const sequenceNumberRef = useRef(0);
  const preloadAttemptedRef = useRef(false);
  const [modelReady, setModelReady] = useState<boolean | null>(null);
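  // The LocalDB calls in this component assume roughly the surface below.
  // This is a sketch inferred from usage in this file, not the actual
  // ../lib/localdb declarations:
  //
  //   LocalDB.getGroupById(groupId): Group | null
  //   LocalDB.getSegments(groupId): TranscriptionSegment[]
  //   LocalDB.getMemberCount(groupId): number
  //   LocalDB.addSegment(input): TranscriptionSegment
  //   LocalDB.updateSegmentTranslation(groupId, segmentId, text): void
  //   LocalDB.onGroupUpdated(groupId, cb): () => void    // returns unsubscribe
  //   LocalDB.onMembersChanged(groupId, cb): () => void  // returns unsubscribe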
  // Subscribe to group updates so HostView always reflects the latest
  // source/target language
  useEffect(() => {
    const unsubGroup = LocalDB.onGroupUpdated(groupId, (updated) => {
      setGroup(updated);
    });
    return () => unsubGroup();
  }, [groupId]);

  useEffect(() => {
    if (!user) return;
    loadGroup();
    loadSegments();
    loadMemberCount();

    // Preload the model for the selected target language when the group changes
    if (group && apiOk) {
      preloadModel({ source_language: group.source_language, target_language: group.target_language });
    }

    const unsubMembers = LocalDB.onMembersChanged(groupId, () => {
      loadMemberCount();
    });

    // Poll API health every 15 s
    let healthTimer: ReturnType<typeof setInterval> | undefined;
    (async () => {
      const ok = await apiHealth();
      setApiOk(ok);
      // Check whether Whisper STT is available (the backend should expose this)
      if (ok) {
        try {
          setSttAvailable(await sttStatus());
        } catch {
          setSttAvailable(true);
        }
      }
      // Auto-preload the model once the API is online and the group is known
      if (ok && group && !preloadAttemptedRef.current) {
        preloadAttemptedRef.current = true;
        setModelReady(null);
        setModelReady(await autoPreloadForGroup(group));
      }
      healthTimer = setInterval(async () => {
        const healthy = await apiHealth();
        setApiOk(healthy);
        if (healthy) {
          try {
            setSttAvailable(await sttStatus());
          } catch {
            setSttAvailable(true);
          }
        } else {
          setSttAvailable(false);
        }
        // Re-attempt preload if the API just came online
        if (healthy && group && !preloadAttemptedRef.current) {
          preloadAttemptedRef.current = true;
          setModelReady(null);
          setModelReady(await autoPreloadForGroup(group));
        }
      }, 15000);
    })();

    return () => {
      unsubMembers();
      stopRecording();
      if (healthTimer) clearInterval(healthTimer);
    };
  }, [groupId, user]);

  // When the group becomes available, kick off auto preload once
  useEffect(() => {
    if (group && apiOk && !preloadAttemptedRef.current) {
      preloadAttemptedRef.current = true;
      (async () => {
        setModelReady(null);
        setModelReady(await autoPreloadForGroup(group));
      })();
    }
  }, [group?.id, apiOk]);

  const autoPreloadForGroup = async (g: Group): Promise<boolean> => {
    let ok = await preloadModel({ source_language: g.source_language, target_language: g.target_language });
    // For auto-detected sources targeting English, also try the Malay and
    // Indonesian models before giving up
    if (!ok && g.target_language === 'en' && (g.source_language === 'auto' || !g.source_language)) {
      const okMs = await preloadModel({ source_language: 'ms', target_language: 'en' });
      const okId = await preloadModel({ source_language: 'id', target_language: 'en' });
      ok = okMs || okId;
    }
    return ok;
  };

  const loadGroup = async () => {
    const data = LocalDB.getGroupById(groupId);
    if (data) {
      setGroup(data);
    } else {
      navigate('/');
    }
  };

  const loadSegments = async () => {
    const data = LocalDB.getSegments(groupId);
    setSegments(data);
    sequenceNumberRef.current = data.length;
  };

  const loadMemberCount = async () => {
    setMemberCount(LocalDB.getMemberCount(groupId));
  };

  const setRecording = (v: boolean) => {
    setIsRecording(v);
    isRecordingFlagRef.current = v;
  };

  const startRecording = async () => {
    if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
      alert('Speech recognition is not supported in your browser. Please use Chrome or Edge.');
      return;
    }
    const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
    const recognition = new SpeechRecognition();
    recognition.continuous = true;
    recognition.interimResults = true;

    // Map the group's source language to a speech-recognition locale
    const srcLang = (group?.source_language === 'auto' ? 'en' : group?.source_language || 'en').toLowerCase();
    recognition.lang =
      srcLang === 'ur' ? 'ur-PK'
        : srcLang === 'ar' ? 'ar-SA'
        : srcLang === 'es' ? 'es-ES'
        : srcLang === 'fr' ? 'fr-FR'
        : srcLang === 'de' ? 'de-DE'
        : srcLang === 'hi' ? 'hi-IN'
        : srcLang === 'zh' ? 'zh-CN'
        : srcLang === 'ms' ? 'ms-MY'
        : srcLang === 'id' ? 'id-ID'
        : 'en-US';

    recognition.onresult = async (event: any) => {
      let interim = '';
      let finalChunk = '';
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = (event.results[i][0].transcript || '').trim();
        if (!transcript) continue;
        if (event.results[i].isFinal) {
          finalChunk += (finalChunk ? ' ' : '') + transcript;
        } else {
          interim += (interim ? ' ' : '') + transcript;
        }
      }
      setInterimText(interim);

      // Append the final chunk to the buffer and flush on sentence boundaries
      // or heuristics
      if (finalChunk) {
        const combined = (bufferedTextRef.current + ' ' + finalChunk).replace(/\s+/g, ' ').trim();
        updateBuffer(combined);

        // Heuristics for when to flush the buffered sentence
        const endsSentence = /[.!?…]\s*$/.test(combined);
        const wordCount = combined.split(/\s+/).filter(Boolean).length;
        const overChars = combined.length >= 180; // max chars per segment
        const minWordsReached = wordCount >= 10; // avoid word-by-word output during long speech

        // Clear any previous pending flush; a new one is scheduled below
        if (flushTimerRef.current) {
          clearTimeout(flushTimerRef.current);
          flushTimerRef.current = null;
        }

        const doFlush = async (forced = false) => {
          const txt = combined.trim();
          if (!txt) return;
          const wc = txt.split(/\s+/).filter(Boolean).length;
          // Immediate flushes (on punctuation/length) keep the stricter rule;
          // timer-based flushes can be lenient
          if (!forced) {
            // Skip ultra-short fillers unless the sentence ended
            if (!endsSentence && wc < 3) return;
          } else if (wc < 1) {
            return; // the timer fired: flush as long as there is at least one word
          }
          updateBuffer('');
          await saveSegment(txt);
        };

        if (endsSentence || overChars || (minWordsReached && !interim)) {
          await doFlush(false);
        } else {
          // Debounced flush after a short pause
          flushTimerRef.current = setTimeout(async () => {
            await doFlush(true);
          }, 1500);
        }
      }
    };

    recognition.onerror = (event: any) => {
      console.error('Speech recognition error:', event.error);
      if (event.error === 'no-speech') return;
      setRecording(false);
    };

    recognition.onend = () => {
      // Continuous recognition stops periodically; restart while still recording
      if (isRecordingFlagRef.current) {
        try {
          recognition.start();
        } catch {
          setTimeout(() => {
            if (isRecordingFlagRef.current) {
              try {
                recognition.start();
              } catch {
                /* ignore restart error */
              }
            }
          }, 250);
        }
      }
    };

    setRecording(true);
    recognition.start();
    recognitionRef.current = recognition;
  };

  const stopRecording = () => {
    if (recognitionRef.current) {
      setRecording(false);
      recognitionRef.current.stop();
      recognitionRef.current = null;
    }
    setInterimText('');
    updateBuffer('');
    if (flushTimerRef.current) {
      clearTimeout(flushTimerRef.current);
      flushTimerRef.current = null;
    }
  };
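  // How the flush heuristics above behave (illustrative inputs):
  //   final "so today"              -> 2 words, no punctuation: held; the 1.5 s
  //                                    timer flushes it if nothing else arrives
  //   final "let us begin."         -> ends a sentence: flushed immediately
  //   buffer reaches 180 chars      -> flushed regardless of punctuation
  //   10+ words, no interim pending -> flushed immediately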
  // Save a segment (always appended in order) and queue it for translation
  const saveSegment = async (text: string) => {
    if (!user || !group) return;
    // Add the segment immediately with translation: null (processing)
    const tempSegment = LocalDB.addSegment({
      groupId,
      originalText: text,
      translatedText: null,
      sequenceNumber: sequenceNumberRef.current,
      createdBy: user.id,
    });
    setSegments((prev: TranscriptionSegment[]) => {
      if (prev.some((s) => s.id === tempSegment.id)) return prev;
      return [...prev, tempSegment].sort((a, b) => a.sequence_number - b.sequence_number);
    });
    sequenceNumberRef.current++;
    // Start translating this segment in the background
    setTranslatingIds((prev) => new Set([...prev, tempSegment.id]));
    translateSegment(tempSegment);
  };

  // Translate a segment in the background
  const translateSegment = async (segment: TranscriptionSegment) => {
    if (!group) return;
    let translatedText: string | null = null;
    if (group.source_language !== group.target_language) {
      // Try the group's source first, then fall back to Malay and Indonesian
      const attempts: string[] = [group.source_language === 'auto' ? 'auto' : group.source_language];
      if (!attempts.includes('ms')) attempts.push('ms');
      if (!attempts.includes('id')) attempts.push('id');
      let foundTranslation: string | null = null;
      for (const src of attempts) {
        const res = await translateText({
          text: segment.original_text,
          source_language: src,
          target_language: group.target_language,
        });
        const isNoModel = typeof res === 'string' && /^\[No model for/i.test(res);
        const isEcho = !!res && res.trim() === segment.original_text.trim();
        if (res && !isNoModel && !isEcho) {
          foundTranslation = res;
          break;
        }
      }
      if (foundTranslation) translatedText = foundTranslation;
    } else {
      translatedText = segment.original_text;
    }
    setSegments((prev) =>
      prev
        .map((s) => (s.id === segment.id ? { ...s, translated_text: translatedText } : s))
        .sort((a, b) => a.sequence_number - b.sequence_number)
    );
    LocalDB.updateSegmentTranslation(groupId, segment.id, translatedText);
    setTranslatingIds((prev) => {
      const next = new Set(prev);
      next.delete(segment.id);
      return next;
    });
  };
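  // translateSegment assumes translateText resolves to the translated string,
  // echoes the input when the pair is unsupported, or returns a "[No model
  // for ...]" marker. Illustrative only, not the real lib/api contract:
  //
  //   await translateText({ text: 'Selamat pagi', source_language: 'ms', target_language: 'en' })
  //   // -> 'Good morning'           (success)
  //   // -> '[No model for ms->en]'  (missing model; treated as failure above)
  //   // -> 'Selamat pagi'           (echo; treated as failure above)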
  // Retry translation for a single segment
  const retryTranslateOne = async (seg: TranscriptionSegment) => {
    if (!group) return;
    setRetryingIds((prev) => new Set([...prev, seg.id]));
    setSegments((prev) => prev.map((s) => (s.id === seg.id ? { ...s, translated_text: null } : s)));
    setTranslatingIds((prev) => new Set([...prev, seg.id]));
    await translateSegment(seg);
    setRetryingIds((prev) => {
      const next = new Set(prev);
      next.delete(seg.id);
      return next;
    });
  };

  // Retry every segment currently missing a translation, in parallel
  const retryTranslateMissing = async () => {
    if (!group) return;
    setBulkRetrying(true);
    try {
      const missing = segments.filter(
        (seg) => !seg.translated_text || seg.translated_text === seg.original_text
      );
      await Promise.all(missing.map((seg) => retryTranslateOne(seg)));
    } finally {
      setBulkRetrying(false);
    }
  };

  const endSession = async () => {
    stopRecording();
    LocalDB.setGroupActive(groupId, false);
    navigate('/');
  };

  const deleteSession = async () => {
    stopRecording();
    if (confirm('Delete this session? This will remove all local data for it.')) {
      LocalDB.deleteGroup(groupId);
      navigate('/');
    }
  };

  const copyJoinCode = () => {
    if (group) {
      navigator.clipboard.writeText(group.join_code);
      setCopied(true);
      setTimeout(() => setCopied(false), 2000);
    }
  };

  // Manual preload removed; handled automatically via autoPreloadForGroup()
  const handleExport = async () => {
    if (!group) return;
    const data = LocalDB.exportGroup(groupId);
    const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `${group.name.replace(/[^a-z0-9-]+/gi, '_') || 'session'}_${group.join_code}.json`;
    document.body.appendChild(a);
    a.click();
    a.remove();
    URL.revokeObjectURL(url);
  };
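  // Example: a group named "Morning Service" with join code ABC123 exports as
  // "Morning_Service_ABC123.json" (runs of characters outside [a-z0-9-] become "_").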
  if (!group) {
    return <div>Loading session…</div>;
  }

  const speechSupported =
    typeof window !== 'undefined' &&
    ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window);

  return (
    <div>
      <header>
        <h1>{group.name}</h1>
        <span>
          <Users size={16} /> {memberCount}
        </span>
        <span>API {apiOk === null ? 'checking…' : apiOk ? 'online' : 'offline'}</span>
        <button onClick={handleExport} title="Export session as JSON">
          <Download size={16} /> Export
        </button>
        <button onClick={deleteSession} title="Delete session and all local data">
          <Trash2 size={16} /> Delete
        </button>
        <button onClick={endSession} title="End the live session">
          <StopCircle size={16} /> End Session
        </button>
      </header>

      {/* Toggle to show only original text */}
      <label>
        <input
          type="checkbox"
          checked={showOriginalsOnly}
          onChange={(e) => setShowOriginalsOnly(e.target.checked)}
        />
        Show originals only
      </label>

      {/* Malay-English model test button */}
      <button
        onClick={async () => {
          setMsEnChecking(true);
          try {
            setMsEnAvailable(await preloadModel({ source_language: 'ms', target_language: 'en' }));
          } finally {
            setMsEnChecking(false);
          }
        }}
        disabled={msEnChecking}
      >
        {msEnChecking ? 'Checking ms→en…' : 'Test ms→en model'}
      </button>
      {msEnAvailable !== null && <span>{msEnAvailable ? 'Available' : 'Not available'}</span>}

      <div>
        Join Code: <strong>{group.join_code}</strong>
        <button onClick={copyJoinCode} title="Copy join code">
          {copied ? <Check size={16} /> : <Copy size={16} />}
        </button>
        <button onClick={() => setShowQR(true)} title="Show QR code">
          <QrCode size={16} />
        </button>
      </div>

      {!isRecording ? (
        <button onClick={startRecording} disabled={!speechSupported}>
          <Mic size={16} /> Start Recording
        </button>
      ) : (
        <button onClick={stopRecording}>
          <MicOff size={16} /> Stop Recording
        </button>
      )}
      {/* Mic language selector removed for simplicity; we infer from the group source */}
      {/* Preload button removed; model warm-up runs automatically */}

      {/* Advanced audio upload for Whisper STT */}
      {apiOk && sttAvailable && (
        <div>
          <input
            type="file"
            accept="audio/*"
            onChange={async (e) => {
              const file = e.target.files?.[0];
              if (!file) return;
              setSttBusy(true);
              try {
                // Use transcribeAudio from lib/api for the Whisper backend
                const res = await transcribeAudio(file, group?.source_language || undefined);
                if (res && res.text && res.text.trim()) {
                  await saveSegment(res.text.trim());
                }
              } finally {
                setSttBusy(false);
                e.currentTarget.value = '';
              }
            }}
            disabled={sttBusy}
            className="border rounded-lg px-3 py-2 text-sm"
          />
          {sttBusy && <span>Transcribing…</span>}
          <p>Supported: common audio types; processed server-side via Whisper.</p>
        </div>
      )}

      {apiOk && (
        <div>
          Model: {modelReady === null ? 'warming…' : modelReady ? 'ready' : 'will load on first request'}
        </div>
      )}

      {isRecording && <div>Recording in progress</div>}

      {!isRecording && !speechSupported && (
        <div>
          Your browser doesn't support Speech Recognition. Use Chrome/Edge or the manual input below.
        </div>
      )}

      {/* Manual input fallback for quick testing or browsers without speech recognition */}
      <ManualInput onSubmit={saveSegment} />

      <h2>Live Transcription</h2>
      <button onClick={retryTranslateMissing} disabled={bulkRetrying}>
        {bulkRetrying ? 'Retrying…' : 'Retry missing translations'}
      </button>
      <div>Approx. storage used by this app: {Math.round(LocalDB.getStorageBytes() / 1024)} KB</div>

      {segments
        .slice()
        .sort((a, b) => a.sequence_number - b.sequence_number)
        .map((segment) => (
          <div key={segment.id}>
            <p>{segment.original_text}</p>
            {!showOriginalsOnly && (
              <>
                {translatingIds.has(segment.id) || segment.translated_text === null ? (
                  <span>Processing…</span>
                ) : segment.translated_text && segment.translated_text !== segment.original_text ? (
                  <p>{segment.translated_text}</p>
                ) : (
                  <span>
                    No English yet{' '}
                    <button
                      onClick={() => retryTranslateOne(segment)}
                      disabled={retryingIds.has(segment.id)}
                    >
                      {retryingIds.has(segment.id) ? 'Retrying…' : 'Retry'}
                    </button>
                  </span>
                )}
              </>
            )}
            <span>{new Date(segment.created_at).toLocaleTimeString()}</span>
          </div>
        ))}

      {interimText && (
        <div>
          <p>{interimText}</p>
          <span>Processing...</span>
        </div>
      )}

      {segments.length === 0 && !interimText && (
        <p>Start recording to see live transcriptions appear here</p>
      )}

      {/* joinCode is an assumed prop; only onClose is known */}
      {showQR && group && <QRCodeDisplay joinCode={group.join_code} onClose={() => setShowQR(false)} />}
    </div>
  );
}
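// Usage sketch (illustrative; the real route wiring lives outside this file,
// and useParams here is a hypothetical stand-in for the app's router hook):
//
//   function HostRoute() {
//     const { groupId } = useParams();
//     return groupId ? <HostView groupId={groupId} /> : null;
//   }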
function ManualInput({
  onSubmit,
  disabled,
}: {
  onSubmit: (text: string) => void | Promise<void>;
  disabled?: boolean;
}) {
  const [text, setText] = useState('');
  const [busy, setBusy] = useState(false);
  const canSubmit = text.trim().length > 0 && !busy && !disabled;

  const submit = async () => {
    if (!canSubmit) return;
    setBusy(true);
    await onSubmit(text.trim());
    setText('');
    setBusy(false);
  };

  return (
    <div>
      <input
        type="text"
        value={text}
        onChange={(e) => setText(e.target.value)}
        onKeyDown={(e) => {
          if (e.key === 'Enter') submit();
        }}
        className="flex-1 px-3 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-blue-500 focus:border-transparent"
        placeholder="Hello everyone..."
        disabled={disabled}
      />
      {/* Send button is an assumed reconstruction, driven by the same canSubmit guard */}
      <button onClick={submit} disabled={!canSubmit}>
        Send
      </button>
    </div>
  );
}