import { Component, OnDestroy, OnInit, ViewChild, ElementRef, ChangeDetectorRef } from '@angular/core';
import { CommonModule } from '@angular/common';
import { Router, NavigationStart } from '@angular/router';
import { Subscription } from 'rxjs';
import { FormsModule } from '@angular/forms';
import { PyDetectService } from '../services/pydetect.service';

declare global {
  interface Window {
    webkitSpeechRecognition?: any;
    SpeechRecognition?: any;
  }
}
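
// The Web Speech API constructors are still vendor-prefixed in Chromium-based
// browsers, so both the prefixed and unprefixed globals are declared above and
// the code falls back from one to the other at runtime.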

type QAResult = {
  question: string;
  transcript: string;
  language: string;
  avgPitchHz: number | null;
  avgVolume: number | null;
  audioUrl: string;
  startedAt: number;
  endedAt: number;
  skipped?: boolean;
};

@Component({
  standalone: true,
  selector: 'app-py-detect',
  imports: [CommonModule, FormsModule],
  templateUrl: './py-detect.component.html',
  styleUrls: ['./py-detect.component.css']
})
export class PyDetectComponent implements OnInit, OnDestroy {

  public bodyLanguageExplanation: string | null = null;
  public bodyLanguageMeaning: string | null = null;

  public fetchBodyLanguageExplanation(cue: string) {
    this.bodyLanguageExplanation = null;
    this.bodyLanguageMeaning = null;
    this.pyDetectService.bodyLanguageExplain(cue).subscribe({
      next: (resp) => {
        if (resp?.explanation) {
          this.bodyLanguageExplanation = resp.explanation;
        }
        if (resp?.meaning) {
          this.bodyLanguageMeaning = resp.meaning;
        }
        console.log('[PyDetect] Body Language:', {
          meaning: resp?.meaning,
          explanation: resp?.explanation
        });
      },
      error: () => {
        this.bodyLanguageExplanation = 'No explanation available.';
        this.bodyLanguageMeaning = null;
        console.warn('[PyDetect] No body language explanation available.');
      }
    });
  }

  public ferEmotion: string | null = null;
  public faceDetectionScore: number | null = null;

  public currentQuestionIndex: number = -1;
  public totalQuestions: number = 0;
  public currentQuestionText: string = '';
  public evidenceSummary: string = '';

  public truthScore: number | null = null;

  public questionWindowStartAt: number | null = null;
  public answerStartAt: number | null = null;
  public answerEndAt: number | null = null;
  public answerMode: 'voice' | 'text' | 'mixed' = 'text';
  private frameIntervalId: any;
  private frameStreamingActive: boolean = false;
  public involvementScore: number | null = null;
  public involvementCues: string[] = [];
  public dominantInvestigativeExpression: string | null = null;
  public behaviorTagDistribution: Record<string, number> | null = null;
  public guidanceCommand: string | null = null;

  public async speakQuestion(question: string) {
    await this.speak(question);
  }

  // Placeholder: per-question recognition-with-recording is not implemented yet;
  // voice capture is currently handled by startAudioRecording().
  public startRecognitionWithRecording(index: number) {
  }

  public async navigateToValidationPage() {
    this.stopVideoRecording();
    if (this.videoStream) {
      this.videoStream.getTracks().forEach(t => t.stop());
      this.videoStream = undefined;
    }
    this.isRecording = false;

    // Give the recorder's onstop handler a moment to assemble the blob URL.
    await this.sleep(500);

    if (this.recordedVideoUrl) {
      // Trigger a download of the recorded session video before navigating away.
      const anchor = document.createElement('a');
      anchor.href = this.recordedVideoUrl;
      anchor.download = 'investigation-video.webm';
      anchor.style.display = 'none';
      document.body.appendChild(anchor);
      anchor.click();
      setTimeout(() => {
        document.body.removeChild(anchor);
      }, 100);
    }

    this.router.navigate(['/validationpage']);
  }

  // Placeholder: document upload is not implemented yet.
  public uploadDocument() {
  }

  // Placeholder: evidence file selection is not implemented yet.
  public onEvidenceFileSelect(event: any, type: string) {
  }

  public submitTextAnswer() {
    if (!this.textAnswer || !this.sessionId || this.currentQuestionIndex < 0 || !this.questions[this.currentQuestionIndex]) {
      this.infoText = 'Please enter an answer and ensure a question is active.';
      return;
    }

    this.pyDetectService.submitResponse(
      this.sessionId,
      this.textAnswer,
      this.questions[this.currentQuestionIndex]
    ).subscribe({
      next: async (res) => {
        // Use a nullish check so a legitimate score of 0 is not dropped.
        const rawScore = res?.truth_score ?? res?.score;
        this.truthScore = rawScore != null ? Number(rawScore) : null;
        this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');
        this.textAnswer = '';

        if (this.involvementCues.length) {
          this.fetchBodyLanguageExplanation(this.involvementCues[0]);
        }
        const response = await this.pyDetectService.askQuestion(
          this.sessionId,
          this.crimeType,
          this.briefDescription
        ).toPromise();
        if (response && response.question) {
          this.questions.push(response.question);
          this.currentQuestionIndex++;
          this.cdr.detectChanges();
          await this.speakQuestion(response.question);
        } else {
          this.infoText = 'No more questions.';
        }
      },
      error: () => {
        this.infoText = 'Error submitting answer.';
      }
    });
  }

  public showDetailsPanel: boolean = false;
  public metadata: any = null;

  sessionId: string = '';
  caseData: any = null;
  briefDescription: string = '';
  isSessionStarted: boolean = false;
  isLoading: boolean = false;
  currentQuestion: string = '';
  textAnswer: string = '';
  lastAnalysisResult: any = null;
  questionCount: number = 0;
  currentInvestigationStage: string = 'Initial Investigation';
  questionNumber: number = 1;
  cameraActive: boolean = false;
  voiceRecordingActive: boolean = false;
  investigationActive: boolean = false;
  investigationStarted: boolean = false;
  caseSummary: any = null;
  processingResponse: boolean = false;
  videoStatus: string = 'Camera Ready';
  ttsEnabled: boolean = false;
  isListening: boolean = false;
  speechRecognition: any = null;

  public submitCombinedAnswer() {
    // Prefer the typed answer; fall back to whatever the recognizer transcribed.
    const answerText = this.textAnswer?.trim() || this.transcriptSoFar?.trim() || '';
    if (!answerText || !this.sessionId || !this.questions[this.currentQuestionIndex]) {
      this.infoText = 'Please provide your answer before submitting.';
      return;
    }
    this.stopAudioRecording();
    this.infoText = 'Submitting answer...';
    this.textAnswer = '';
    this.transcriptSoFar = '';
    const endTs = Date.now();
    this.answerEndAt = endTs;
    if (!this.answerStartAt) this.answerStartAt = this.questionWindowStartAt || endTs;
    const durationMs = this.answerEndAt - this.answerStartAt;
    this.stopFrameStreaming();
    this.pyDetectService.submitResponse(
      this.sessionId,
      answerText,
      this.questions[this.currentQuestionIndex],
      {
        answer_start_at: this.answerStartAt,
        answer_end_at: this.answerEndAt,
        duration_ms: durationMs,
        mode: this.answerMode
      }
    ).subscribe({
      next: async (res) => {
        // Use a nullish check so a legitimate score of 0 is not dropped.
        const rawScore = res?.truth_score ?? res?.score;
        this.truthScore = rawScore != null ? Number(rawScore) : null;
        this.infoText = 'Answer submitted.' + (this.truthScore !== null ? ` Truth Score: ${this.truthScore}` : '');

        this.fetchLatestInvolvement();

        const response = await this.pyDetectService.askQuestion(
          this.sessionId,
          this.crimeType,
          this.briefDescription
        ).toPromise();
        if (response && response.question) {
          this.questions.push(response.question);
          this.currentQuestionIndex++;
          this.questionNumber = this.currentQuestionIndex + 1;
          this.cdr.detectChanges();

          await this.startVideoRecording();
          await this.speakQuestion(response.question);

          this.startQuestionWindow();

          this.answerStartAt = null;
          this.answerEndAt = null;
          this.answerMode = 'text';
        } else {
          this.infoText = 'No more questions.';
          this.showSummary = true;
        }
      },
      error: () => {
        this.infoText = 'Error submitting answer.';
      }
    });
  }

  public stopAudioRecording() {
    if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') {
      this.mediaRecorder.stop();

      this.textAnswer = this.transcriptSoFar;
      this.infoText = 'Voice recording stopped.';

      if (this.recognition) {
        try { this.recognition.stop(); } catch { }
      }
    }
    // Keep the toggle state consistent even when stop is triggered
    // programmatically (e.g. from submitCombinedAnswer) rather than via the
    // toggle button.
    this.isVoiceRecording = false;
  }

  speechSynthesis: any = null;
  voiceSupported: boolean = false;
  microphoneSupported: boolean = false;
  microphonePermissionDenied: boolean = false;
  permissionStatus: string = 'unknown';

  private isActive = false;

  log: QAResult[] = [];

  private routerSubscription?: Subscription;

  constructor(
    private router: Router,
    private cdr: ChangeDetectorRef,
    private pyDetectService: PyDetectService
  ) {
    // Stop any in-progress speech synthesis as soon as navigation away begins.
    this.routerSubscription = this.router.events.subscribe(event => {
      if (event instanceof NavigationStart) {
        if (window.speechSynthesis) {
          window.speechSynthesis.cancel();
        }
      }
    });
  }

  private mediaStream?: MediaStream;
  private mediaRecorder?: MediaRecorder;
  private audioChunks: Blob[] = [];

  private audioCtx?: AudioContext;
  private analyser?: AnalyserNode;
  private sourceNode?: MediaStreamAudioSourceNode;

  private pitchSamples: number[] = [];
  private volumeSamples: number[] = [];
  private analyserBuffer: Float32Array = new Float32Array(2048);
  private analyserTimer?: any;

  private recognition?: any;
  private transcriptSoFar = '';
  private detectedLang = 'auto';

  private maxAnswerMs = 10_000;
  private silenceTimeout?: any;

  private analyserWindowMs = 100;

  public questions: string[] = [];

  public videoStream?: MediaStream;
  @ViewChild('videoElement', { static: false }) videoElement?: ElementRef<HTMLVideoElement>;
  public videoChunks: Blob[] = [];
  public videoAnswers: Blob[] = [];
  public videoRecorder?: MediaRecorder;
  public recordedVideoUrl: string = '';

  caseId: string = '';
  crimeType: string = '';
  dateTime: string = '';
  location: string = '';
  suspectName: string = '';
  statusText: string = '';
  investigationOfficer: string = '';
  progress: number = 0;
  progressStage: string = '';
  sessionTime: string = '';
  isRecording: boolean = false;
  isProcessing: boolean = false;
  transcriptLines: string[] = [];
  showSummary: boolean = false;
  summaryData: { question: string; answer: string; duration: number }[] = [];

  public showEvidencePanel: boolean = false;
  public showSummaryPanel: boolean = false;
  public uploadedDocuments: string[] = ['Report.pdf', 'Statement.docx'];
  public capturedPhotos: string[] = ['Photo1.jpg', 'Photo2.png'];
  public previousRecordings: string[] = ['Recording1.webm', 'Recording2.mp3'];

  public floatingInfoText: string | null = null;
  public infoText: string | null = null;

  public elapsedTime: string = '00:00';
  public remainingTime: string = '00:00';
  private recordingTimerInterval: any;

  public async repeatQuestion() {
    // Fall back to the question list in case currentQuestionText was never set.
    const question = this.currentQuestionText || this.questions[this.currentQuestionIndex] || '';
    if (question) {
      await this.speakQuestion(question);
    }
    await this.sleep(1000);
    this.infoText = 'Recording in progress – Listening to answer.';
  }

  navigateHome() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    this.router.navigate(['/']);
  }

  goToInfoPage() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    this.router.navigate(['/infopage']);
  }

  navigateBackToCaseDetails() {
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }

    // Pause and rewind any audio elements that may still be playing.
    const audioElems = document.querySelectorAll('audio');
    audioElems.forEach((audioElem) => {
      (audioElem as HTMLAudioElement).pause();
      (audioElem as HTMLAudioElement).currentTime = 0;
    });

    if ((window as any).pyDetectAudioObj && typeof (window as any).pyDetectAudioObj.pause === 'function') {
      (window as any).pyDetectAudioObj.pause();
      (window as any).pyDetectAudioObj.currentTime = 0;
    }

    if (this.mediaStream) {
      this.mediaStream.getTracks().forEach(t => t.stop());
      this.mediaStream = undefined;
    }
    this.router.navigate(['/case-details']);
  }

  ngOnInit(): void {
    this.isActive = true;

    // Note: getCurrentNavigation() is only non-null while a navigation is in
    // flight (i.e. during construction), so by ngOnInit the localStorage
    // fallback below is usually the path that runs.
    let metadata: any = null;
    const nav = this.router.getCurrentNavigation();
    if (nav?.extras?.state && nav.extras.state['metadata']) {
      metadata = nav.extras.state['metadata'];
      localStorage.setItem('pyDetectMetadata', JSON.stringify(metadata));
      console.log('Received metadata from navigation:', metadata);
    } else {
      const stored = localStorage.getItem('pyDetectMetadata');
      if (stored) {
        try {
          metadata = JSON.parse(stored);
          console.log('Loaded metadata from localStorage:', metadata);
        } catch {
          console.warn('Stored metadata is not valid JSON; ignoring it.');
        }
      } else {
        console.warn('No metadata found in navigation state or localStorage');
      }
    }
    if (metadata) {
      this.caseId = metadata.caseId || '';
      this.crimeType = metadata.crimeType || '';
      this.dateTime = metadata.dateTime || '';
      this.location = metadata.location || '';
      this.suspectName = metadata.suspectName || '';
      this.statusText = metadata.status || '';
      this.investigationOfficer = metadata.investigationOfficer || '';
      this.progress = metadata.progress || 0;
      this.progressStage = metadata.progressStage || '';
      this.sessionTime = metadata.sessionTime || '';
      this.briefDescription = metadata.briefDescription || '';
    }
  }

  ngOnDestroy(): void {
    this.isActive = false;
    if (this.routerSubscription) {
      this.routerSubscription.unsubscribe();
    }
    this.cleanupAll();
    this.stopVideoRecording();
    this.videoStatus = '';
    if (this.videoStream) {
      this.videoStream.getTracks().forEach(t => t.stop());
      this.videoStream = undefined;
    }
    this.videoRecorder = undefined;
    if (window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
  }

  // Placeholder: server-driven question fetching is handled inline in
  // onStartInvestigation/submitCombinedAnswer; this helper is not wired up yet.
  private async fetchNextQuestion(): Promise<string> {
    return '';
  }

  private speak(text: string): Promise<void> {
    return new Promise<void>((resolve) => {
      if (!this.isActive) return resolve();
      const synth = window.speechSynthesis;
      if (!synth) return resolve();

      const utter = new SpeechSynthesisUtterance(text);

      // Prefer an Indian/British/American English voice, else any English voice.
      const prefer = ['en-IN', 'en-GB', 'en-US'];
      const voices = synth.getVoices();
      const v = voices.find(v =>
        prefer.includes(v.lang) || v.lang.toLowerCase().startsWith('en'));
      if (v) utter.voice = v;

      utter.rate = 1.0;
      utter.pitch = 1.0;
      utter.onend = () => resolve();
      utter.onerror = () => resolve();
      // Cancel anything already queued so questions never overlap.
      synth.cancel();
      synth.speak(utter);
    });
  }
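
  // Note: getVoices() can legitimately return an empty array until the browser
  // fires its 'voiceschanged' event, in which case the utterance above simply
  // falls back to the platform default voice.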

  private async captureAnswerWithAnalysis(ms: number): Promise<{
    audioUrl: string; avgPitchHz: number | null; avgVolume: number | null; transcript: string; language: string;
  }> {
    // Reset per-answer state.
    this.audioChunks = [];
    this.pitchSamples = [];
    this.volumeSamples = [];
    this.transcriptSoFar = '';
    this.detectedLang = 'auto';

    this.mediaStream = await navigator.mediaDevices.getUserMedia({
      audio: { channelCount: 1, echoCancellation: true, noiseSuppression: true },
      video: false
    });

    // Only pass a mimeType when one is actually supported; an empty string
    // would make the MediaRecorder constructor throw in some browsers.
    const mime = this.chooseMimeType();
    this.mediaRecorder = mime
      ? new MediaRecorder(this.mediaStream, { mimeType: mime })
      : new MediaRecorder(this.mediaStream);
    this.mediaRecorder.ondataavailable = (e) => {
      if (e.data && e.data.size > 0) this.audioChunks.push(e.data);
    };

    this.startRecognition('en-IN');

    await this.startAnalyser(this.mediaStream);

    // Record for at most `ms`, collecting chunks every 200 ms.
    const recordPromise = new Promise<void>((resolve) => {
      this.mediaRecorder!.onstop = () => resolve();
      this.mediaRecorder!.start(200);
      setTimeout(() => {
        if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') {
          this.mediaRecorder.stop();
        }
      }, ms);
    });

    await recordPromise;

    this.stopAnalyser();
    this.stopRecognition();
    this.cleanupMediaStream();

    const blob = new Blob(this.audioChunks, { type: mime });
    const audioUrl = URL.createObjectURL(blob);

    const avgPitchHz = this.averageNonZero(this.pitchSamples) ?? null;
    const avgVolume = this.averageNonZero(this.volumeSamples) ?? null;

    const transcript = this.transcriptSoFar.trim();
    const language = this.detectedLang;

    return { audioUrl, avgPitchHz, avgVolume, transcript, language };
  }

  private waitForSilenceOrContinue() {
    if (this.silenceTimeout) clearTimeout(this.silenceTimeout);
    // Placeholder: the silence handler body is not implemented yet; the timer
    // (~5 s) is armed so a handler can be dropped in here later.
    this.silenceTimeout = setTimeout(() => {
    }, 5002);
  }

  private chooseMimeType(): string {
    const candidates = [
      'audio/webm;codecs=opus',
      'audio/webm',
      'audio/mp4',
      'audio/mpeg'
    ];
    for (const c of candidates) {
      if (MediaRecorder.isTypeSupported(c)) return c;
    }
    return '';
  }
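
  // For reference, a sketch of how the fallback order tends to play out
  // (exact support varies by browser version):
  //
  //   MediaRecorder.isTypeSupported('audio/webm;codecs=opus'); // true on Chrome/Firefox
  //   MediaRecorder.isTypeSupported('audio/mp4');              // true on Safari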

  private async startAnalyser(stream: MediaStream) {
    this.audioCtx = new (window.AudioContext || (window as any).webkitAudioContext)();
    this.sourceNode = this.audioCtx.createMediaStreamSource(stream);
    this.analyser = this.audioCtx.createAnalyser();
    this.analyser.fftSize = 2048;
    this.sourceNode.connect(this.analyser);

    // One time-domain buffer, reused for every analysis window.
    this.analyserBuffer = new Float32Array(this.analyser.fftSize);

    const tick = () => {
      if (!this.analyser || !this.analyserBuffer) return;
      this.analyser.getFloatTimeDomainData(this.analyserBuffer);

      const pitch = this.estimatePitchFromAutocorrelation(
        this.analyserBuffer, this.audioCtx!.sampleRate
      );
      const vol = this.rootMeanSquare(this.analyserBuffer);

      if (pitch) this.pitchSamples.push(pitch);
      this.volumeSamples.push(vol);

      // Sample pitch/volume every analyserWindowMs (100 ms by default).
      this.analyserTimer = setTimeout(tick, this.analyserWindowMs);
    };
    tick();
  }

  private stopAnalyser() {
    if (this.analyserTimer) clearTimeout(this.analyserTimer);
    this.analyserTimer = null;
    if (this.sourceNode) { try { this.sourceNode.disconnect(); } catch { } }
    if (this.analyser) { try { this.analyser.disconnect(); } catch { } }
    if (this.audioCtx) { try { this.audioCtx.close(); } catch { } }
    this.sourceNode = undefined;
    this.analyser = undefined;
    this.audioCtx = undefined;
  }

  // Rough pitch estimate using an average-magnitude-difference style
  // autocorrelation over the time-domain buffer.
  private estimatePitchFromAutocorrelation(buf: Float32Array, sampleRate: number): number | null {
    // Skip frames that are effectively silence.
    const size = buf.length;
    let rms = 0;
    for (let i = 0; i < size; i++) rms += buf[i] * buf[i];
    rms = Math.sqrt(rms / size);
    if (rms < 0.01) return null;

    const MAX_SAMPLES = Math.floor(size / 2);
    let bestOffset = -1;
    let bestCorr = 0;
    let lastCorr = 1;

    for (let offset = 1; offset < MAX_SAMPLES; offset++) {
      let corr = 0;
      for (let i = 0; i < MAX_SAMPLES; i++) {
        corr += Math.abs(buf[i] - buf[i + offset]);
      }
      // Normalise so that 1 means a perfect period match at this lag.
      corr = 1 - (corr / MAX_SAMPLES);
      if (corr > 0.9 && corr > lastCorr) {
        bestCorr = corr;
        bestOffset = offset;
      }
      lastCorr = corr;
    }
    if (bestOffset > 0) {
      const freq = sampleRate / bestOffset;
      // Only accept frequencies in the typical speaking range.
      if (freq >= 50 && freq <= 400) return Math.round(freq);
    }
    return null;
  }
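
  // Worked example: at a 48 kHz sample rate, a best-matching lag of 218
  // samples maps to 48000 / 218 ≈ 220 Hz, which falls inside the accepted
  // 50–400 Hz speaking band above and is returned rounded to 220.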

  private rootMeanSquare(buf: Float32Array): number {
    let sum = 0;
    for (let i = 0; i < buf.length; i++) sum += buf[i] * buf[i];
    return Math.sqrt(sum / buf.length);
  }

  private averageNonZero(arr: number[]): number | undefined {
    const f = arr.filter(x => x && isFinite(x));
    if (!f.length) return undefined;
    return Math.round((f.reduce((a, b) => a + b, 0) / f.length) * 100) / 100;
  }
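
  // e.g. averageNonZero([118.4, 0, 131.6]) ignores the zero sample and
  // returns (118.4 + 131.6) / 2 = 125, rounded to two decimal places.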

  private setupRecognition() {
    const Ctor = window.webkitSpeechRecognition || window.SpeechRecognition;
    if (!Ctor) return;

    this.recognition = new Ctor();
    this.recognition.continuous = true;
    this.recognition.interimResults = false;

    this.recognition.onresult = (event: any) => {
      let finalText = '';

      // Concatenate all finalised results, separated by spaces so words from
      // consecutive results don't run together.
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const result = event.results[i];
        if (result.isFinal) {
          finalText += result[0].transcript.trim() + ' ';
        }
      }

      this.transcriptSoFar = this.removeFillerWords(finalText.trim());
    };

    this.recognition.onerror = (error: any) => {
      console.error('Speech recognition error', error);
    };

    this.recognition.onend = () => {
      console.log('Speech recognition has ended');
    };
  }

  private removeFillerWords(text: string): string {
    const fillerWords = ['um', 'ah', 'like', 'you know', 'so', 'actually', 'basically'];
    const regex = new RegExp(`\\b(${fillerWords.join('|')})\\b`, 'gi');
    return text.replace(regex, '').replace(/\s+/g, ' ').trim();
  }
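
  // e.g. removeFillerWords('um I was actually at home') -> 'I was at home'
  // Note that common words on the filler list ('so', 'like') are stripped even
  // when used legitimately, which is an accepted trade-off here.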

  private startRecognition(lang: string) {
    if (!this.recognition) return;
    try {
      this.recognition.lang = lang;
      this.recognition.start();
    } catch { }
  }

  private stopRecognition() {
    if (!this.recognition) return;
    try { this.recognition.stop(); } catch { }
  }

  private cleanupMediaStream() {
    if (this.mediaStream) {
      this.mediaStream.getTracks().forEach(t => t.stop());
    }
    this.mediaStream = undefined;
  }

  private cleanupRecording() {
    try { if (this.mediaRecorder && this.mediaRecorder.state !== 'inactive') this.mediaRecorder.stop(); } catch { }
    this.mediaRecorder = undefined;
    this.audioChunks = [];
    this.stopAnalyser();
    this.cleanupMediaStream();
  }

  private cleanupAll() {
    this.stopRecognition();
    this.cleanupRecording();
  }

  private sleep(ms: number) { return new Promise(res => setTimeout(res, ms)); }

  public async startCamera() {
    if (!this.videoStream) {
      this.videoStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
      if (this.videoElement?.nativeElement) {
        this.videoElement.nativeElement.srcObject = this.videoStream;
      }
    }
  }

  private startQuestionWindow() {
    this.questionWindowStartAt = Date.now();
    this.answerStartAt = null;
    this.answerEndAt = null;
    this.answerMode = 'text';
    this.startFrameStreaming();
  }

  private startFrameStreaming() {
    if (this.frameStreamingActive) return;
    if (!this.videoElement?.nativeElement) return;
    const videoEl = this.videoElement.nativeElement;
    // Downscale frames to 320x240 JPEG before shipping them to the backend.
    const canvas = document.createElement('canvas');
    canvas.width = 320;
    canvas.height = 240;
    const ctx = canvas.getContext('2d');
    if (!ctx) return;
    this.frameStreamingActive = true;
    this.frameIntervalId = setInterval(() => {
      if (!this.frameStreamingActive) return;
      try {
        ctx.drawImage(videoEl, 0, 0, canvas.width, canvas.height);
        const dataUrl = canvas.toDataURL('image/jpeg', 0.6);
        if (this.sessionId) {
          this.pyDetectService.faceFrame(this.sessionId, dataUrl).subscribe({
            next: (resp) => {
              if (resp?.metrics?.emotion) this.ferEmotion = resp.metrics.emotion;
              if (resp?.command) this.guidanceCommand = resp.command;
            },
            error: () => { }
          });
        }
      } catch { }
    }, 150);
  }

  private stopFrameStreaming() {
    if (this.frameIntervalId) clearInterval(this.frameIntervalId);
    this.frameIntervalId = null;
    this.frameStreamingActive = false;
  }
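
  // The 150 ms interval above works out to roughly 6–7 frames per second,
  // which keeps the per-frame uploads small (quality-0.6 JPEG at 320x240)
  // while still giving the backend enough samples for expression tracking.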

  public captureTextStart() {
    if (!this.answerStartAt) {
      this.answerStartAt = Date.now();
      if (this.answerMode === 'voice') {
        this.answerMode = 'mixed';
      } else {
        this.answerMode = 'text';
      }
    }
  }

  private fetchLatestInvolvement() {
    if (!this.sessionId) return;
    this.pyDetectService.getReport(this.sessionId).subscribe({
      next: (report) => {
        const responses = report?.responses || [];
        if (!responses.length) return;
        const last = responses[responses.length - 1];
        const assess = last?.investigative_assessment;
        const fb = last?.face_body?.metrics;
        if (assess) {
          this.involvementScore = typeof assess.involvement_score === 'number' ? assess.involvement_score : null;
          this.involvementCues = Array.isArray(assess.cues) ? assess.cues : [];
        }
        if (fb) {
          this.dominantInvestigativeExpression = fb.dominant_investigative_expression || null;
          this.behaviorTagDistribution = fb.behavior_tag_distribution || null;
        }
        this.cdr.detectChanges();
      },
      error: () => { }
    });
  }

  public async startVideoRecording() {
    if (!this.videoStream) return;

    if (this.videoRecorder && this.videoRecorder.state === 'recording') {
      console.warn('[PyDetect] Video recording already in progress.');
      return;
    }
    this.videoChunks = [];
    // Guard the mimeType the same way as for audio: Safari, for example, does
    // not support video/webm and the constructor would throw otherwise.
    this.videoRecorder = MediaRecorder.isTypeSupported('video/webm')
      ? new MediaRecorder(this.videoStream, { mimeType: 'video/webm' })
      : new MediaRecorder(this.videoStream);
    this.videoRecorder.ondataavailable = (e: BlobEvent) => {
      if (e.data && e.data.size > 0) this.videoChunks.push(e.data);
    };
    this.videoRecorder.onstop = () => {
      const videoBlob = new Blob(this.videoChunks, { type: 'video/webm' });
      this.recordedVideoUrl = URL.createObjectURL(videoBlob);
      console.log('[PyDetect] Video recording complete. Blob URL:', this.recordedVideoUrl);
      if (this.videoStream) {
        this.videoStream.getTracks().forEach(t => t.stop());
        this.videoStream = undefined;
      }
      this.cdr.detectChanges();
    };
    this.videoRecorder.start();
    console.log('[PyDetect] Video recording started.');
  }

  public stopVideoRecording() {
    if (this.videoRecorder && this.videoRecorder.state === 'recording') {
      this.videoRecorder.stop();
    } else {
      console.warn('[PyDetect] Video recording already stopped or not started.');
    }
  }

  public async onStartInvestigation() {
    this.isLoading = true;
    this.infoText = 'Starting investigation...';

    // Note: startVideoRecording() is a no-op until the camera stream exists,
    // so startCamera() must have been called before this point.
    await this.startVideoRecording();
    await this.startSession();

    let briefDescriptionToSend = this.briefDescription?.trim() || '';
    if (!briefDescriptionToSend) {
      briefDescriptionToSend =
        sessionStorage.getItem('briefDescription')?.trim() ||
        this.caseData?.briefDescription?.trim() ||
        this.caseData?.police?.information?.trim() ||
        this.caseData?.crime?.trim() ||
        '';
    }
    const response = await this.pyDetectService.askQuestion(
      this.sessionId,
      this.crimeType,
      briefDescriptionToSend
    ).toPromise();
    if (response && response.question) {
      this.questions = [response.question];
      this.currentQuestionIndex = 0;
      this.cdr.detectChanges();
      this.isRecording = true;
      this.infoText = 'Recording in progress – Asking question.';
      this.startRecognitionWithRecording(this.currentQuestionIndex);

      this.startQuestionWindow();

      await this.speakQuestion(this.questions[0]);
      this.infoText = 'Recording in progress – Listening to answer.';
    } else {
      this.questions = [];
      this.currentQuestionIndex = -1;
      this.cdr.detectChanges();
    }
    this.infoText = 'Investigation started. Please answer the question.';
    this.isLoading = false;
  }

  public async startSession(): Promise<void> {
    try {
      this.isLoading = true;
      if (this.voiceSupported) {
        this.ttsEnabled = true;
        setTimeout(() => {
          this.speakQuestion('Investigation starting. I will ask you questions and you can respond using voice or text.');
        }, 1000);
      }
      let briefDescriptionToSend = this.briefDescription?.trim() || '';
      if (!briefDescriptionToSend) {
        briefDescriptionToSend =
          sessionStorage.getItem('briefDescription')?.trim() ||
          this.caseData?.briefDescription?.trim() ||
          this.caseData?.police?.information?.trim() ||
          this.caseData?.crime?.trim() ||
          '';
      }
      const sessionResponse = await this.pyDetectService.startSession(briefDescriptionToSend).toPromise();
      this.sessionId = sessionResponse.session_id;
      sessionStorage.setItem('sessionId', this.sessionId);
      localStorage.setItem('sessionId', this.sessionId);
      const caseData = this.caseData || {};
      const caseDataToSend = {
        ...caseData,
        brief_description: briefDescriptionToSend
      };
      await this.pyDetectService.submitCaseDetails(
        this.sessionId,
        caseDataToSend,
        briefDescriptionToSend
      ).toPromise();
      if (briefDescriptionToSend && briefDescriptionToSend.length > 0) {
        // Unlike the other call sites, this path expects a `questions` array
        // rather than a single `question` field in the response.
        const questionsResponse = await this.pyDetectService.askQuestion(
          this.sessionId,
          this.crimeType,
          briefDescriptionToSend
        ).toPromise();
        if (questionsResponse && questionsResponse.questions && questionsResponse.questions.length > 0) {
          this.questions = questionsResponse.questions;
          this.currentQuestion = this.questions[0];
          this.currentQuestionIndex = 0;
          this.questionCount = this.questions.length;
          this.questionNumber = 1;
        } else {
          this.questions = [];
          this.currentQuestionIndex = -1;
        }
      }
      this.isSessionStarted = true;
      this.investigationStarted = true;
      this.investigationActive = true;
      this.isLoading = false;
    } catch (error) {
      alert('Failed to connect to backend. Please check if the Flask server is running on port 5002.');
      this.isLoading = false;
    }
  }

  public isVoiceRecording: boolean = false;

  public async startAudioRecording() {
    if (this.isVoiceRecording) return;

    this.textAnswer = '';
    console.log('[PyDetect] Voice recording started.');
    try {
      this.mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false });

      // Only pass a mimeType when one is supported (see chooseMimeType).
      const mimeType = this.chooseMimeType();
      this.mediaRecorder = mimeType
        ? new MediaRecorder(this.mediaStream, { mimeType })
        : new MediaRecorder(this.mediaStream);
      this.audioChunks = [];
      this.mediaRecorder.ondataavailable = (e) => {
        if (e.data && e.data.size > 0) this.audioChunks.push(e.data);
      };
      this.mediaRecorder.onstop = () => {
        console.log('[PyDetect] Voice recording stopped.');

        if (this.recognition) {
          try { this.recognition.stop(); } catch { }
        }

        if (!this.transcriptSoFar) {
          this.infoText = 'No voice detected. Please try again or type your answer.';
        } else {
          this.infoText = 'Voice recording stopped.';
        }
      };
      this.mediaRecorder.start();

      const Ctor = window.webkitSpeechRecognition || window.SpeechRecognition;
      if (Ctor) {
        this.recognition = new Ctor();
        this.recognition.lang = 'en-IN';
        this.recognition.continuous = true;
        this.recognition.interimResults = false;
        this.transcriptSoFar = '';
        this.recognition.onstart = () => {
          this.infoText = 'Listening...';
        };
        this.recognition.onresult = (event: any) => {
          let finalText = '';
          for (let i = event.resultIndex; i < event.results.length; i++) {
            const result = event.results[i];
            if (result.isFinal) {
              // Space-separate consecutive final results so words don't run together.
              finalText += result[0].transcript.trim() + ' ';
            }
          }
          this.transcriptSoFar = finalText.trim();
          this.textAnswer = this.transcriptSoFar;
        };
        this.recognition.onerror = (error: any) => {
          this.infoText = 'Speech recognition error: ' + error.error;
        };
        this.recognition.onend = () => {
          if (!this.transcriptSoFar) {
            this.infoText = 'No voice detected. Please try again or type your answer.';
          } else {
            this.infoText = 'Voice recording stopped.';
          }
        };
        this.recognition.start();
      } else {
        this.infoText = 'Speech Recognition not supported.';
      }
      this.isVoiceRecording = true;
    } catch (err) {
      this.infoText = 'Could not start audio recording.';
    }
  }

  public async toggleVoiceRecording() {
    if (this.isVoiceRecording) {
      this.stopAudioRecording();
      this.infoText = 'Voice recording stopped.';
    } else {
      await this.startAudioRecording();
      // startAudioRecording sets isVoiceRecording itself on success, so only
      // report success if recording actually started.
      if (this.isVoiceRecording) {
        this.infoText = 'Voice recording started. Speak your answer.';
      }
    }
  }

}