// src/app/py-detect/py-detect.component.ts
import { Component, OnDestroy, OnInit, ViewChild, ElementRef, ChangeDetectorRef } from '@angular/core';
import { CommonModule } from '@angular/common';
import { Router, NavigationStart } from '@angular/router';
import { Subscription } from 'rxjs';
import { FormsModule } from '@angular/forms';
import { PyDetectService } from '../services/pydetect.service';
declare global {
interface Window {
webkitSpeechRecognition?: any;
SpeechRecognition?: any;
}
}
type QAResult = {
question: string;
transcript: string;
language: string;
avgPitchHz: number | null;
avgVolume: number | null; // 0..1 (rough RMS)
audioUrl: string;
startedAt: number;
endedAt: number;
skipped?: boolean;
};
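// Illustrative only: what one completed QAResult entry might look like (placeholder
// values; nothing in this component produces this exact object).
export const SAMPLE_QA_RESULT: QAResult = {
question: 'Where were you on the evening of the incident?',
transcript: 'I was at home with my family.',
language: 'en-IN',
avgPitchHz: 182, // rough time-domain pitch estimate in Hz
avgVolume: 0.08, // RMS of the time-domain signal
audioUrl: 'blob:https://app.example/answer-1', // placeholder object URL
startedAt: 1700000000000, // epoch ms when recording began
endedAt: 1700000008000 // epoch ms when recording stopped
};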
@Component({
standalone: true,
selector: 'app-py-detect',
imports: [CommonModule, FormsModule],
templateUrl: './py-detect.component.html',
styleUrls: ['./py-detect.component.css']
})
export class PyDetectComponent implements OnInit, OnDestroy {
// Store body language explanation for UI
public bodyLanguageExplanation: string | null = null;
public bodyLanguageMeaning: string | null = null;
// Fetch explanation for a body language cue from backend
public fetchBodyLanguageExplanation(cue: string) {
this.bodyLanguageExplanation = null;
this.bodyLanguageMeaning = null;
this.pyDetectService.bodyLanguageExplain(cue).subscribe({
next: (resp) => {
if (resp?.explanation) {
this.bodyLanguageExplanation = resp.explanation;
}
if (resp?.meaning) {
this.bodyLanguageMeaning = resp.meaning;
}
console.log('[PyDetect] Body Language:', {
meaning: resp?.meaning,
explanation: resp?.explanation
});
},
error: () => {
this.bodyLanguageExplanation = 'No explanation available.';
this.bodyLanguageMeaning = null;
console.warn('[PyDetect] No body language explanation available.');
}
});
}
// FER emotion result for UI display
public ferEmotion: string | null = null;
// Face detection score for UI display
public faceDetectionScore: number | null = null;
// Properties used by the template and the question/answer flow
public currentQuestionIndex: number = -1;
public totalQuestions: number = 0;
public currentQuestionText: string = '';
public evidenceSummary: string = '';
// Store the truth score for the last submitted answer
public truthScore: number | null = null;
// Timing & frame streaming additions
public questionWindowStartAt: number | null = null;
public answerStartAt: number | null = null;
public answerEndAt: number | null = null;
public answerMode: 'voice' | 'text' | 'mixed' = 'text';
private frameIntervalId: any;
private frameStreamingActive: boolean = false;
public involvementScore: number | null = null;
public involvementCues: string[] = [];
public dominantInvestigativeExpression: string | null = null;
public behaviorTagDistribution: Record<string, number> | null = null;
public guidanceCommand: string | null = null;
// Methods bound from the template
public async speakQuestion(question: string) {
// Speak the question aloud via TTS (delegates to speak())
await this.speak(question);
}
public startRecognitionWithRecording(index: number) {
// Stub: left as a no-op for now. Voice capture is driven by toggleVoiceRecording()/
// startAudioRecording(); wire those up here if recognition should start automatically
// for the question at `index` once it has been spoken.
}
public async navigateToValidationPage() {
// Stop video recording and release camera
this.stopVideoRecording();
if (this.videoStream) {
this.videoStream.getTracks().forEach(t => t.stop());
this.videoStream = undefined;
}
this.isRecording = false;
// Wait for the video to finish processing if needed
await this.sleep(500); // Give time for onstop to fire and recordedVideoUrl to be set
// Automatically download the recorded video if available
if (this.recordedVideoUrl) {
const anchor = document.createElement('a');
anchor.href = this.recordedVideoUrl;
anchor.download = 'investigation-video.webm';
anchor.style.display = 'none';
document.body.appendChild(anchor);
anchor.click();
setTimeout(() => {
document.body.removeChild(anchor);
// Optionally revoke the object URL after download
// URL.revokeObjectURL(this.recordedVideoUrl);
}, 100);
}
// Then navigate to validation page
this.router.navigate(['/validationpage']);
}
public uploadDocument() {
// Stub: document upload is not wired up yet. Selected files are expected to arrive
// through onEvidenceFileSelect(); sending them to the backend still needs to be added.
}
public onEvidenceFileSelect(event: any, type: string) {
// Minimal sketch (assumption): record the selected file names in the matching evidence
// list (the 'photo' type value is a guess); upload/processing is not implemented here.
const files: FileList | undefined = event?.target?.files;
if (!files?.length) return;
const names = Array.from(files).map(f => f.name);
(type === 'photo' ? this.capturedPhotos : this.uploadedDocuments).push(...names);
}
// Manual answer submission for testing
public submitTextAnswer() {
if (!this.textAnswer || !this.sessionId || this.currentQuestionIndex < 0 || !this.questions[this.currentQuestionIndex]) {
this.infoText = 'Please enter an answer and ensure a question is active.';
return;
}
// Call backend to submit response
this.pyDetectService.submitResponse(
this.sessionId,
this.textAnswer,
this.questions[this.currentQuestionIndex]
).subscribe({
next: async (res) => {
// Extract truth score if present
const rawScore = res?.truth_score ?? res?.score;
this.truthScore = rawScore != null ? Number(rawScore) : null;
this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');
this.textAnswer = '';
// Fetch body language explanation for the first involvement cue
if (this.involvementCues.length) {
this.fetchBodyLanguageExplanation(this.involvementCues[0]);
}
const response = await this.pyDetectService.askQuestion(
this.sessionId,
this.crimeType,
this.briefDescription
).toPromise();
if (response && response.question) {
this.questions.push(response.question);
this.currentQuestionIndex++;
this.cdr.detectChanges();
await this.speakQuestion(response.question);
} else {
this.infoText = 'No more questions.';
}
},
error: (err) => {
this.infoText = 'Error submitting answer.';
}
});
}
public showDetailsPanel: boolean = false;
public metadata: any = null;
// Backend-driven session and investigation state
sessionId: string = '';
caseData: any = null;
briefDescription: string = '';
isSessionStarted: boolean = false;
isLoading: boolean = false;
currentQuestion: string = '';
textAnswer: string = '';
lastAnalysisResult: any = null;
questionCount: number = 0;
currentInvestigationStage: string = 'Initial Investigation';
questionNumber: number = 1;
cameraActive: boolean = false;
voiceRecordingActive: boolean = false;
investigationActive: boolean = false;
investigationStarted: boolean = false;
caseSummary: any = null;
processingResponse: boolean = false;
videoStatus: string = 'Camera Ready';
ttsEnabled: boolean = false;
isListening: boolean = false;
speechRecognition: any = null;
// Combined answer submission: prefer text box, fallback to transcript
public submitCombinedAnswer() {
// Accept the answer from the text box, falling back to the voice transcript
const answerText = this.textAnswer.trim() || this.transcriptSoFar.trim();
if (!answerText || !this.sessionId || !this.questions[this.currentQuestionIndex]) {
this.infoText = 'Please provide your answer before submitting.';
return;
}
this.stopAudioRecording();
this.infoText = 'Submitting answer...';
this.textAnswer = '';
this.transcriptSoFar = '';
const endTs = Date.now();
this.answerEndAt = endTs;
if (!this.answerStartAt) this.answerStartAt = this.questionWindowStartAt || endTs;
const durationMs = this.answerEndAt - this.answerStartAt;
this.stopFrameStreaming();
this.pyDetectService.submitResponse(
this.sessionId,
answerText,
this.questions[this.currentQuestionIndex],
{
answer_start_at: this.answerStartAt,
answer_end_at: this.answerEndAt,
duration_ms: durationMs,
mode: this.answerMode
}
).subscribe({
next: async (res) => {
// Extract truth score if present
const rawScore = res?.truth_score ?? res?.score;
this.truthScore = rawScore != null ? Number(rawScore) : null;
this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');
// Pull involvement metrics
this.fetchLatestInvolvement();
// Fetch next question from backend
const response = await this.pyDetectService.askQuestion(
this.sessionId,
this.crimeType,
this.briefDescription
).toPromise();
if (response && response.question) {
this.questions.push(response.question);
this.currentQuestionIndex++;
this.questionNumber = this.currentQuestionIndex + 1;
this.cdr.detectChanges();
// await this.startCamera();
await this.startVideoRecording();
await this.speakQuestion(response.question);
// Restart window for next question
this.startQuestionWindow();
// Reset answer timing
this.answerStartAt = null;
this.answerEndAt = null;
this.answerMode = 'text';
} else {
this.infoText = 'No more questions.';
this.showSummary = true;
}
},
error: () => {
this.infoText = 'Error submitting answer.';
}
});
}
public stopAudioRecording() {
if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') {
this.mediaRecorder.stop();
// Show the transcribed answer in the text box
this.textAnswer = this.transcriptSoFar;
this.infoText = 'Voice recording stopped.';
// Stop speech recognition when recording stops
if (this.recognition) {
try { this.recognition.stop(); } catch { }
}
}
}
speechSynthesis: any = null;
voiceSupported: boolean = false;
microphoneSupported: boolean = false;
microphonePermissionDenied: boolean = false;
permissionStatus: string = 'unknown';
// ---- TTS active flag ----
private isActive = false;
// ---- Q/A data ----
log: QAResult[] = [];
// ---- Constructor with Router Injection ----
private routerSubscription?: Subscription;
constructor(
private router: Router,
private cdr: ChangeDetectorRef,
private pyDetectService: PyDetectService
) {
// Cancel TTS on any navigation away
this.routerSubscription = this.router.events.subscribe(event => {
if (event instanceof NavigationStart) {
if (window.speechSynthesis) {
window.speechSynthesis.cancel();
}
}
});
}
// ---- Recording/analysis handles ----
private mediaStream?: MediaStream;
private mediaRecorder?: MediaRecorder;
private audioChunks: Blob[] = [];
private audioCtx?: AudioContext;
private analyser?: AnalyserNode;
private sourceNode?: MediaStreamAudioSourceNode;
private pitchSamples: number[] = [];
private volumeSamples: number[] = [];
private analyserBuffer: Float32Array = new Float32Array(2048);
private analyserTimer?: any;
// ---- Speech Recognition ----
private recognition?: any; // webkitSpeechRecognition
private transcriptSoFar = '';
private detectedLang = 'auto';
// ---- Settings ----
private maxAnswerMs = 10_000; // per answer recording window
private silenceTimeout?: any; // handle for the silence-detection timeout
private analyserWindowMs = 100; // analyser sampling interval in ms
// Questions are fetched from the backend (legacy seedQuestions and button-state signals removed)
public questions: string[] = [];
// Public properties for template binding (videoStatus is declared above for the backend workflow)
public videoStream?: MediaStream;
@ViewChild('videoElement', { static: false }) videoElement?: ElementRef<HTMLVideoElement>;
public videoChunks: Blob[] = [];
public videoAnswers: Blob[] = [];
public videoRecorder?: MediaRecorder;
public recordedVideoUrl: string = '';
// UI properties for template
caseId: string = '';
crimeType: string = '';
dateTime: string = '';
location: string = '';
suspectName: string = '';
statusText: string = '';
investigationOfficer: string = '';
progress: number = 0;
progressStage: string = '';
sessionTime: string = '';
isRecording: boolean = false;
isProcessing: boolean = false;
transcriptLines: string[] = [];
showSummary: boolean = false;
summaryData: { question: string; answer: string; duration: number }[] = [];
// Evidence panel state and placeholder data
public showEvidencePanel: boolean = false;
public showSummaryPanel: boolean = false;
public uploadedDocuments: string[] = ['Report.pdf', 'Statement.docx'];
public capturedPhotos: string[] = ['Photo1.jpg', 'Photo2.png'];
public previousRecordings: string[] = ['Recording1.webm', 'Recording2.mp3'];
// Info text for HUD below question
public floatingInfoText: string | null = null;
public infoText: string | null = null;
// Elapsed and remaining time for recording status
public elapsedTime: string = '00:00';
public remainingTime: string = '00:00';
private recordingTimerInterval: any;
// Repeat current question using TTS
public async repeatQuestion() {
await this.speakQuestion(this.currentQuestionText);
await this.sleep(1000);
this.infoText = 'Recording in progress – Listening to answer.';
}
// Navigate back to the homepage
navigateHome() {
if (window.speechSynthesis) {
window.speechSynthesis.cancel();
}
this.router.navigate(['/']);
}
goToInfoPage() {
if (window.speechSynthesis) {
window.speechSynthesis.cancel();
}
this.router.navigate(['/infopage']);
}
navigateBackToCaseDetails() {
if (window.speechSynthesis) {
window.speechSynthesis.cancel();
}
// Stop any <audio> elements
const audioElems = document.querySelectorAll('audio');
audioElems.forEach((audioElem) => {
(audioElem as HTMLAudioElement).pause();
(audioElem as HTMLAudioElement).currentTime = 0;
});
// Stop custom Audio object if present
if ((window as any).pyDetectAudioObj && typeof (window as any).pyDetectAudioObj.pause === 'function') {
(window as any).pyDetectAudioObj.pause();
(window as any).pyDetectAudioObj.currentTime = 0;
}
// Stop any media streams
if (this.mediaStream) {
this.mediaStream.getTracks().forEach(t => t.stop());
this.mediaStream = undefined;
}
this.router.navigate(['/case-details']);
}
// ======== Lifecycle ========
ngOnInit(): void {
this.isActive = true;
// Do NOT set isRecording = true here; only set when question starts
let metadata: any = null;
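// Note: getCurrentNavigation() only returns a value while a navigation is in flight,
// so by ngOnInit it is often null and the localStorage fallback below takes over.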
const nav = this.router.getCurrentNavigation();
if (nav?.extras?.state && nav.extras.state['metadata']) {
metadata = nav.extras.state['metadata'];
localStorage.setItem('pyDetectMetadata', JSON.stringify(metadata)); // Update localStorage on navigation
console.log('Received metadata from navigation:', metadata);
} else {
// Fallback: read from localStorage
const stored = localStorage.getItem('pyDetectMetadata');
if (stored) {
try {
metadata = JSON.parse(stored);
console.log('Loaded metadata from localStorage:', metadata);
} catch {
console.warn('Stored metadata is not valid JSON; ignoring it.');
}
} else {
console.warn('No metadata found in navigation state or localStorage');
}
}
if (metadata) {
this.caseId = metadata.caseId || '';
this.crimeType = metadata.crimeType || '';
this.dateTime = metadata.dateTime || '';
this.location = metadata.location || '';
this.suspectName = metadata.suspectName || '';
this.statusText = metadata.status || '';
this.investigationOfficer = metadata.investigationOfficer || '';
this.progress = metadata.progress || 0;
this.progressStage = metadata.progressStage || '';
this.sessionTime = metadata.sessionTime || '';
this.briefDescription = metadata.briefDescription || '';
}
}
ngOnDestroy(): void {
this.isActive = false;
if (this.routerSubscription) {
this.routerSubscription.unsubscribe();
}
this.cleanupAll();
this.stopVideoRecording();
this.videoStatus = '';
if (this.videoStream) {
this.videoStream.getTracks().forEach(t => t.stop());
this.videoStream = undefined;
}
this.videoRecorder = undefined;
if (window.speechSynthesis) {
window.speechSynthesis.cancel(); // Stop any TTS audio
}
}
// ======== Main flow ========
// Legacy start/stopAll/resume methods removed. Use the backend-driven workflow only.
// ======== Question source ========
private async fetchNextQuestion(): Promise<string> {
// Legacy local question source removed; questions now come from the backend via
// PyDetectService.askQuestion() in the workflow methods below.
return '';
}
// ======== TTS (question playback) ========
private speak(text: string): Promise<void> {
return new Promise<void>((resolve) => {
if (!this.isActive) return resolve(); // Only play TTS if component is active
const synth = window.speechSynthesis;
if (!synth) return resolve(); // gracefully continue without TTS
const utter = new SpeechSynthesisUtterance(text);
// Optional voice selection: prefer an Indian English voice, then any other English voice.
// Note: getVoices() can return an empty list until the browser fires 'voiceschanged'.
const prefer = ['en-IN', 'en-GB', 'en-US'];
const voices = synth.getVoices();
const v = prefer.map(lang => voices.find(voice => voice.lang === lang)).find(Boolean)
|| voices.find(voice => voice.lang.toLowerCase().startsWith('en'));
if (v) utter.voice = v;
utter.rate = 1.0;
utter.pitch = 1.0;
utter.onend = () => resolve();
utter.onerror = () => resolve(); // do not block flow
synth.cancel(); // ensure clean queue
synth.speak(utter);
});
}
// ======== Recording + Recognition + Analysis ========
private async captureAnswerWithAnalysis(ms: number): Promise<{
audioUrl: string; avgPitchHz: number | null; avgVolume: number | null; transcript: string; language: string;
}> {
// reset buffers
this.audioChunks = [];
this.pitchSamples = [];
this.volumeSamples = [];
this.transcriptSoFar = '';
this.detectedLang = 'auto';
// 1) mic stream
this.mediaStream = await navigator.mediaDevices.getUserMedia({
audio: { channelCount: 1, echoCancellation: true, noiseSuppression: true },
video: false
});
// 2) prepare MediaRecorder
const mime = this.chooseMimeType();
this.mediaRecorder = new MediaRecorder(this.mediaStream, { mimeType: mime });
this.mediaRecorder.ondataavailable = (e) => {
if (e.data && e.data.size > 0) this.audioChunks.push(e.data);
};
// 3) start recognition (if available)
this.startRecognition('en-IN');
// 4) start analysis
await this.startAnalyser(this.mediaStream);
// 5) record for fixed window
const recordPromise = new Promise<void>((resolve) => {
this.mediaRecorder!.onstop = () => resolve();
this.mediaRecorder!.start(200); // gather chunks
setTimeout(() => {
if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') {
this.mediaRecorder.stop();
}
}, ms);
});
await recordPromise;
// 6) stop analysis + recognition + mic
this.stopAnalyser();
this.stopRecognition();
this.cleanupMediaStream();
// 7) build audio URL
const blob = new Blob(this.audioChunks, { type: mime });
const audioUrl = URL.createObjectURL(blob);
// 8) aggregate metrics
const avgPitchHz = this.averageNonZero(this.pitchSamples) ?? null;
const avgVolume = this.averageNonZero(this.volumeSamples) ?? null;
const transcript = this.transcriptSoFar.trim();
const language = this.detectedLang;
return { audioUrl, avgPitchHz, avgVolume, transcript, language };
}
private waitForSilenceOrContinue() {
if (this.silenceTimeout) clearTimeout(this.silenceTimeout);
this.silenceTimeout = setTimeout(() => {
// Silence handling is not implemented yet; hook auto-stop / auto-submit logic in here.
}, 5000); // fire after 5 seconds of silence
}
private chooseMimeType(): string {
const candidates = [
'audio/webm;codecs=opus',
'audio/webm',
'audio/mp4',
'audio/mpeg'
];
for (const c of candidates) {
if (MediaRecorder.isTypeSupported(c)) return c;
}
return '';
}
// ======== Web Audio analysis (pitch + volume) ========
private async startAnalyser(stream: MediaStream) {
this.audioCtx = new (window.AudioContext || (window as any).webkitAudioContext)();
this.sourceNode = this.audioCtx.createMediaStreamSource(stream);
this.analyser = this.audioCtx.createAnalyser();
this.analyser.fftSize = 2048;
this.sourceNode.connect(this.analyser);
// Allocate a time-domain buffer matching the analyser's FFT size
this.analyserBuffer = new Float32Array(this.analyser.fftSize);
const tick = () => {
if (!this.analyser || !this.analyserBuffer) return;
this.analyser.getFloatTimeDomainData(this.analyserBuffer);
const pitch = this.estimatePitchFromAutocorrelation(
this.analyserBuffer, this.audioCtx!.sampleRate
);
const vol = this.rootMeanSquare(this.analyserBuffer);
if (pitch) this.pitchSamples.push(pitch);
this.volumeSamples.push(vol);
this.analyserTimer = setTimeout(tick, this.analyserWindowMs);
};
tick();
}
private stopAnalyser() {
if (this.analyserTimer) clearTimeout(this.analyserTimer);
this.analyserTimer = null;
if (this.sourceNode) { try { this.sourceNode.disconnect(); } catch { } }
if (this.analyser) { try { this.analyser.disconnect(); } catch { } }
if (this.audioCtx) { try { this.audioCtx.close(); } catch { } }
this.sourceNode = undefined;
this.analyser = undefined;
this.audioCtx = undefined;
}
// Simple time-domain pitch estimator (normalized average-magnitude difference, not true autocorrelation)
private estimatePitchFromAutocorrelation(buf: Float32Array, sampleRate: number): number | null {
// 1) normalize
let size = buf.length;
let rms = 0;
for (let i = 0; i < size; i++) rms += buf[i] * buf[i];
rms = Math.sqrt(rms / size);
if (rms < 0.01) return null; // too quiet
// 2) autocorrelation
const MAX_SAMPLES = Math.floor(size / 2);
let bestOffset = -1;
let bestCorr = 0;
let lastCorr = 1;
for (let offset = 1; offset < MAX_SAMPLES; offset++) {
let corr = 0;
for (let i = 0; i < MAX_SAMPLES; i++) {
corr += Math.abs(buf[i] - buf[i + offset]);
}
corr = 1 - (corr / MAX_SAMPLES);
if (corr > 0.9 && corr > lastCorr) {
bestCorr = corr;
bestOffset = offset;
}
lastCorr = corr;
}
if (bestOffset > 0) {
const freq = sampleRate / bestOffset;
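// e.g. at a 44100 Hz sample rate, a best offset of 276 samples gives 44100 / 276 ≈ 160 Hz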
if (freq >= 50 && freq <= 400) return Math.round(freq); // human speech band (rough)
}
return null;
}
private rootMeanSquare(buf: Float32Array): number {
let sum = 0;
for (let i = 0; i < buf.length; i++) sum += buf[i] * buf[i];
return Math.sqrt(sum / buf.length); // 0..~0.5 typical speech
}
private averageNonZero(arr: number[]): number | undefined {
const f = arr.filter(x => x && isFinite(x));
if (!f.length) return undefined;
return Math.round((f.reduce((a, b) => a + b, 0) / f.length) * 100) / 100;
}
// ======== Speech Recognition ========
private setupRecognition() {
const Ctor = window.webkitSpeechRecognition || window.SpeechRecognition;
if (!Ctor) return;
this.recognition = new Ctor();
this.recognition.continuous = true;
this.recognition.interimResults = false; // Disable interim results (only final results)
this.recognition.onresult = (event: any) => {
let finalText = '';
// Rebuild the transcript from every final result so far; event.results holds the whole
// session, so starting at index 0 avoids dropping earlier final segments.
for (let i = 0; i < event.results.length; i++) {
const result = event.results[i];
if (result.isFinal) {
finalText += result[0].transcript.trim() + ' '; // space-separate consecutive finals
}
}
// Clean up the result by removing filler words (optional)
this.transcriptSoFar = this.removeFillerWords(finalText.trim());
};
this.recognition.onerror = (error: any) => {
console.error('Speech recognition error', error);
};
this.recognition.onend = () => {
console.log('Speech recognition has ended');
};
}
private removeFillerWords(text: string): string {
const fillerWords = ['um', 'ah', 'like', 'you know', 'so', 'actually', 'basically'];
const regex = new RegExp(`\\b(${fillerWords.join('|')})\\b`, 'gi');
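// Example: 'um I was like at home actually' -> 'I was at home'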
return text.replace(regex, '').replace(/\s+/g, ' ').trim(); // Clean extra spaces after removal
}
private startRecognition(lang: string) {
if (!this.recognition) return;
try {
this.recognition.lang = lang; // set your target language; change to 'ta-IN' for Tamil, etc.
this.recognition.start();
} catch { /* ignore double-start */ }
}
private stopRecognition() {
if (!this.recognition) return;
try { this.recognition.stop(); } catch { /* ignore */ }
}
// ======== Clean-up ========
private cleanupMediaStream() {
if (this.mediaStream) {
this.mediaStream.getTracks().forEach(t => t.stop());
}
this.mediaStream = undefined;
}
private cleanupRecording() {
try { if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') this.mediaRecorder.stop(); } catch { }
this.mediaRecorder = undefined;
this.audioChunks = [];
this.stopAnalyser();
this.cleanupMediaStream();
}
private cleanupAll() {
this.stopRecognition();
this.cleanupRecording();
}
// ======== Helpers ========
private sleep(ms: number) { return new Promise(res => setTimeout(res, ms)); }
public async startCamera() {
if (!this.videoStream) {
this.videoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
if (this.videoElement?.nativeElement) {
this.videoElement.nativeElement.srcObject = this.videoStream;
}
}
}
// ===== Frame streaming for nonverbal analysis =====
private startQuestionWindow() {
this.questionWindowStartAt = Date.now();
this.answerStartAt = null;
this.answerEndAt = null;
this.answerMode = 'text'; // default until voice starts
this.startFrameStreaming();
}
private startFrameStreaming() {
if (this.frameStreamingActive) return;
if (!this.videoElement?.nativeElement) return;
const videoEl = this.videoElement.nativeElement;
const canvas = document.createElement('canvas');
canvas.width = 320;
canvas.height = 240;
const ctx = canvas.getContext('2d');
if (!ctx) return;
this.frameStreamingActive = true;
this.frameIntervalId = setInterval(() => {
if (!this.frameStreamingActive) return;
try {
ctx.drawImage(videoEl, 0, 0, canvas.width, canvas.height);
const dataUrl = canvas.toDataURL('image/jpeg', 0.6);
if (this.sessionId) {
this.pyDetectService.faceFrame(this.sessionId, dataUrl).subscribe({
next: (resp) => {
if (resp?.metrics?.emotion) this.ferEmotion = resp.metrics.emotion;
if (resp?.command) this.guidanceCommand = resp.command;
},
error: () => { /* ignore */ }
});
}
} catch { /* ignore */ }
}, 150);
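// Posting at a 150 ms interval ≈ 6–7 frames per second; JPEG quality 0.6 keeps each payload small.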
}
private stopFrameStreaming() {
if (this.frameIntervalId) clearInterval(this.frameIntervalId);
this.frameIntervalId = null;
this.frameStreamingActive = false;
}
// Capture text start when user focuses the answer textarea
public captureTextStart() {
if (!this.answerStartAt) {
this.answerStartAt = Date.now();
if (this.answerMode === 'voice') {
this.answerMode = 'mixed';
} else {
this.answerMode = 'text';
}
}
}
// Fetch latest involvement metrics (defined here to resolve reference)
private fetchLatestInvolvement() {
if (!this.sessionId) return;
this.pyDetectService.getReport(this.sessionId).subscribe({
next: (report) => {
const responses = report?.responses || [];
if (!responses.length) return;
const last = responses[responses.length - 1];
const assess = last?.investigative_assessment;
const fb = last?.face_body?.metrics;
if (assess) {
this.involvementScore = typeof assess.involvement_score === 'number' ? assess.involvement_score : null;
this.involvementCues = Array.isArray(assess.cues) ? assess.cues : [];
}
if (fb) {
this.dominantInvestigativeExpression = fb.dominant_investigative_expression || null;
this.behaviorTagDistribution = fb.behavior_tag_distribution || null;
}
this.cdr.detectChanges();
},
error: () => { /* silent fail */ }
});
}
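// For reference, the report fields read above (shape inferred from this component's usage;
// the actual backend schema may differ):
// responses[i].investigative_assessment -> { involvement_score: number, cues: string[] }
// responses[i].face_body.metrics -> { dominant_investigative_expression: string,
// behavior_tag_distribution: Record<string, number> }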
public async startVideoRecording() {
if (!this.videoStream) return;
// Prevent double-start
if (this.videoRecorder && this.videoRecorder.state === 'recording') {
console.warn('[PyDetect] Video recording already in progress.');
return;
}
this.videoChunks = [];
this.videoRecorder = new MediaRecorder(this.videoStream, { mimeType: 'video/webm' });
this.videoRecorder.ondataavailable = (e: BlobEvent) => {
if (e.data && e.data.size > 0) this.videoChunks.push(e.data);
};
this.videoRecorder.onstop = () => {
const videoBlob = new Blob(this.videoChunks, { type: 'video/webm' });
this.recordedVideoUrl = URL.createObjectURL(videoBlob);
console.log('[PyDetect] Video recording complete. Blob URL:', this.recordedVideoUrl);
if (this.videoStream) {
this.videoStream.getTracks().forEach(t => t.stop());
this.videoStream = undefined;
}
this.cdr.detectChanges();
};
this.videoRecorder.start();
console.log('[PyDetect] Video recording started.');
}
public stopVideoRecording() {
// Prevent double-stop
if (this.videoRecorder && this.videoRecorder.state === 'recording') {
this.videoRecorder.stop();
// The onstop handler will release the camera and update the UI
} else {
console.warn('[PyDetect] Video recording already stopped or not started.');
}
}
// Call startVideoRecording in onStartInvestigation
public async onStartInvestigation() {
this.isLoading = true;
this.infoText = 'Starting investigation...';
// await this.startCamera();
await this.startVideoRecording();
await this.startSession();
// Ensure questions are loaded and index is set
// Use fallback logic for brief description
let briefDescriptionToSend = this.briefDescription?.trim() || '';
if (!briefDescriptionToSend) {
briefDescriptionToSend =
sessionStorage.getItem('briefDescription')?.trim() ||
this.caseData?.briefDescription?.trim() ||
this.caseData?.police?.information?.trim() ||
this.caseData?.crime?.trim() ||
'';
}
const response = await this.pyDetectService.askQuestion(
this.sessionId,
this.crimeType,
briefDescriptionToSend
).toPromise();
if (response && response.question) {
this.questions = [response.question]; // Wrap single question in array
this.currentQuestionIndex = 0;
this.cdr.detectChanges(); // Force UI update after async
this.isRecording = true;
this.infoText = 'Recording in progress – Asking question.';
this.startRecognitionWithRecording(this.currentQuestionIndex);
// Start the question window BEFORE speaking so the question time is included
this.startQuestionWindow();
// Speak the first question using TTS (inside window)
await this.speakQuestion(this.questions[0]);
this.infoText = 'Recording in progress – Listening to answer.';
} else {
this.questions = [];
this.currentQuestionIndex = -1;
this.cdr.detectChanges();
}
this.infoText = 'Investigation started. Please answer the question.';
this.isLoading = false;
}
// Backend-driven session start and first question fetch
public async startSession(): Promise<void> {
try {
this.isLoading = true;
if (this.voiceSupported) {
this.ttsEnabled = true;
setTimeout(() => {
this.speakQuestion('Investigation starting. I will ask you questions and you can respond using voice or text.');
}, 1000);
}
let briefDescriptionToSend = this.briefDescription?.trim() || '';
if (!briefDescriptionToSend) {
briefDescriptionToSend =
sessionStorage.getItem('briefDescription')?.trim() ||
this.caseData?.briefDescription?.trim() ||
this.caseData?.police?.information?.trim() ||
this.caseData?.crime?.trim() ||
'';
}
const sessionResponse = await this.pyDetectService.startSession(briefDescriptionToSend).toPromise();
if (!sessionResponse?.session_id) {
throw new Error('startSession did not return a session_id');
}
this.sessionId = sessionResponse.session_id;
sessionStorage.setItem('sessionId', this.sessionId);
localStorage.setItem('sessionId', this.sessionId);
const caseData = this.caseData || {};
const caseDataToSend = {
...caseData,
brief_description: briefDescriptionToSend
};
await this.pyDetectService.submitCaseDetails(
this.sessionId,
caseDataToSend,
briefDescriptionToSend
).toPromise();
if (briefDescriptionToSend && briefDescriptionToSend.length > 0) {
const questionsResponse = await this.pyDetectService.askQuestion(
this.sessionId,
this.crimeType,
briefDescriptionToSend
).toPromise();
if (questionsResponse && questionsResponse.questions && questionsResponse.questions.length > 0) {
this.questions = questionsResponse.questions;
this.currentQuestion = this.questions[0];
this.currentQuestionIndex = 0;
this.questionCount = this.questions.length;
this.questionNumber = 1;
// Optionally, update UI to show the first question
} else {
this.questions = [];
this.currentQuestionIndex = -1;
}
}
this.isSessionStarted = true;
this.investigationStarted = true;
this.investigationActive = true;
this.isLoading = false;
} catch (error) {
alert('Failed to connect to backend. Please check if the Flask server is running on port 5002.');
this.isLoading = false;
}
}
public isVoiceRecording: boolean = false;
// Start audio recording and speech recognition
public async startAudioRecording() {
// Debounce: Prevent rapid start
if (this.isVoiceRecording) return;
// Clear previous answer before starting new recording
this.textAnswer = '';
console.log('[PyDetect] Voice recording started.');
try {
// Request microphone access
this.mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false });
// Setup MediaRecorder
const mimeType = this.chooseMimeType();
this.mediaRecorder = new MediaRecorder(this.mediaStream, { mimeType });
this.audioChunks = [];
this.mediaRecorder.ondataavailable = (e) => {
if (e.data && e.data.size > 0) this.audioChunks.push(e.data);
};
this.mediaRecorder.onstop = () => {
console.log('[PyDetect] Voice recording stopped.');
// Stop speech recognition when recording stops
if (this.recognition) {
try { this.recognition.stop(); } catch { }
}
// UI feedback if no transcript
if (!this.transcriptSoFar) {
this.infoText = 'No voice detected. Please try again or type your answer.';
} else {
this.infoText = 'Voice recording stopped.';
}
};
this.mediaRecorder.start();
// Setup speech recognition
const Ctor = window.webkitSpeechRecognition || window.SpeechRecognition;
if (Ctor) {
this.recognition = new Ctor();
this.recognition.lang = 'en-IN';
this.recognition.continuous = true;
this.recognition.interimResults = false;
this.transcriptSoFar = '';
this.recognition.onstart = () => {
this.infoText = 'Listening...';
};
this.recognition.onresult = (event: any) => {
let finalText = '';
// Rebuild from all final results so earlier segments are not lost between events
for (let i = 0; i < event.results.length; i++) {
const result = event.results[i];
if (result.isFinal) {
finalText += result[0].transcript.trim() + ' ';
}
}
this.transcriptSoFar = finalText.trim();
this.textAnswer = this.transcriptSoFar;
};
this.recognition.onerror = (error: any) => {
this.infoText = 'Speech recognition error: ' + error.error;
};
this.recognition.onend = () => {
if (!this.transcriptSoFar) {
this.infoText = 'No voice detected. Please try again or type your answer.';
} else {
this.infoText = 'Voice recording stopped.';
}
};
this.recognition.start();
} else {
this.infoText = 'Speech Recognition not supported.';
}
this.isVoiceRecording = true;
} catch (err) {
this.infoText = 'Could not start audio recording.';
}
}
public async toggleVoiceRecording() {
if (this.isVoiceRecording) {
this.stopAudioRecording();
this.isVoiceRecording = false;
this.infoText = 'Voice recording stopped.';
} else {
await this.startAudioRecording();
this.isVoiceRecording = true;
this.infoText = 'Voice recording started. Speak your answer.';
}
}
}