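// Real-time facial-expression chat: face-api.js detects the user's expression
// from the webcam, an emoji is overlaid on the face, and a local Ollama model
// generates an empathetic reply that is spoken via the Web Speech API.
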
const video = document.getElementById('video');
const loadingMessage = document.getElementById('loading-message');
const errorMessage = document.getElementById('error-message');
const statusMessage = document.getElementById('status-message');

let canvas;
let lastExpression = '';
let isProcessing = false;
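
// Maps each face-api.js expression label to an emoji overlay and a pool of
// canned conversation starters; one is picked at random when the dominant
// expression changes.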
const expressionsToEmojiAndPrompt = {
  happy: {
    emoji: '😊',
    responses: [
      "Your smile is contagious! How can I make your day even better?",
      "It's wonderful to see you happy! What's bringing you joy today?",
      "That's a beautiful smile! Let's keep that positive energy going!"
    ]
  },
  sad: {
    emoji: '😢',
    responses: [
      "I notice you seem down. Would you like to talk about what's bothering you?",
      "Sometimes we all need a moment to feel our emotions. I'm here to listen.",
      "Remember that difficult moments are temporary. How can I support you?"
    ]
  },
  angry: {
    emoji: '😠',
    responses: [
      "I can see you're frustrated. Let's take a deep breath together.",
      "Sometimes anger tells us something important. Would you like to discuss it?",
      "I understand you're upset. How can we work through this together?"
    ]
  },
  neutral: {
    emoji: '😐',
    responses: [
      "How are you feeling today? I'm here to chat about anything.",
      "Sometimes a neutral moment is good for reflection. What's on your mind?",
      "Is there something specific you'd like to discuss?"
    ]
  },
  disgusted: {
    emoji: '🤢',
    responses: [
      "Something seems to be bothering you. Would you like to talk about it?",
      "Let's focus on making this situation better. What would help?",
      "I notice your discomfort. How can we improve things?"
    ]
  },
  surprised: {
    emoji: '😮',
    responses: [
      "Oh! What caught you by surprise? I'd love to hear about it!",
      "Unexpected moments can be exciting! Want to share what surprised you?",
      "That's quite a reaction! What happened?"
    ]
  },
  fearful: {
    emoji: '😨',
    responses: [
      "You're safe here. Would you like to talk about what's concerning you?",
      "I understand feeling scared. Let's work through this together.",
      "Sometimes sharing our fears makes them less overwhelming. I'm here to listen."
    ]
  }
};
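
// Load the face-api.js model weights from the app's /models directory, then
// start the webcam once they are ready.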
async function loadModels() {
  loadingMessage.style.display = 'block';
  try {
    const MODEL_URL = '/models';
    await Promise.all([
      faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
      faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
      faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
      faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL)
    ]);

    loadingMessage.style.display = 'none';
    updateStatus('Models loaded successfully!', 'success');
    await startVideo();
  } catch (err) {
    handleError('Error loading models: ' + err.message);
  }
}
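
// Request the user-facing camera and attach the stream to the <video> element.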
async function startVideo() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({
      video: {
        width: { ideal: 720 },
        height: { ideal: 560 },
        facingMode: 'user'
      }
    });
    video.srcObject = stream;
    updateStatus('Camera initialized successfully!', 'success');
  } catch (err) {
    handleError('Camera access denied: ' + err.message);
  }
}
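
// Stop all media tracks and release the camera.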
function stopVideo() {
  const stream = video.srcObject;
  if (stream) {
    stream.getTracks().forEach(track => track.stop());
    video.srcObject = null;
  }
}
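
// Create the overlay canvas once and position it on top of the video inside
// the .container element.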
function createCanvas() {
  if (!canvas) {
    canvas = faceapi.createCanvasFromMedia(video);
    canvas.style.position = 'absolute';
    canvas.style.top = '0';
    canvas.style.left = '0';
    document.querySelector('.container').append(canvas);
  }
}
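
// Detection loop: run the tiny face detector with landmarks and expressions,
// draw the results, and reschedule itself every `interval` ms. The
// `isProcessing` flag prevents overlapping runs.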
async function detectFaces(interval = 500) {
  if (!video || !canvas || isProcessing) return;
  isProcessing = true;

  // Prefer the element's width/height attributes, but fall back to the
  // stream's intrinsic dimensions: the attributes are 0 when not set in
  // the markup, which would give the overlay a zero-sized coordinate space.
  const displaySize = {
    width: video.width || video.videoWidth,
    height: video.height || video.videoHeight
  };
  faceapi.matchDimensions(canvas, displaySize);

  try {
    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions();

    if (detections.length === 0) {
      updateStatus('No face detected', 'warning');
      return;
    }

    const resizedDetections = faceapi.resizeResults(detections, displaySize);
    drawDetections(resizedDetections);
    processExpressions(resizedDetections);
  } catch (err) {
    handleError('Detection error: ' + err.message);
  } finally {
    // Runs even after the early return above, so the loop always reschedules.
    isProcessing = false;
    setTimeout(() => detectFaces(interval), interval);
  }
}
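
// Clear the overlay and draw the detection boxes, landmarks, and expression
// scores for the current frame.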
function drawDetections(resizedDetections) {
  const ctx = canvas.getContext('2d');
  ctx.clearRect(0, 0, canvas.width, canvas.height);

  // The face-api.js draw helpers take (canvas, results) and apply their own
  // default styling; the per-call color options previously passed here were
  // ignored by the library, so they have been dropped.
  faceapi.draw.drawDetections(canvas, resizedDetections);
  faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
  faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
}
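
// Pick the highest-scoring expression per face; when it changes, overlay the
// matching emoji, surface the emotion in the status bar, and ask Ollama for a
// spoken response seeded with one of the canned prompts.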
function processExpressions(detections) {
  detections.forEach(detection => {
    const expressions = detection.expressions;
    const topExpression = Object.keys(expressions)
      .reduce((a, b) => expressions[a] > expressions[b] ? a : b);

    if (topExpression !== lastExpression) {
      lastExpression = topExpression;
      const emojiData = expressionsToEmojiAndPrompt[topExpression];
      const randomResponse = emojiData.responses[
        Math.floor(Math.random() * emojiData.responses.length)
      ];

      sendPromptToOllama(randomResponse);
      drawEmoji(detection, emojiData.emoji);
      updateStatus(`Detected emotion: ${topExpression}`, 'info');
    }
  });
}
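
// Draw the emoji just above the face's bounding box, scaled to the box width.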
function drawEmoji(detection, emoji) {
  const ctx = canvas.getContext('2d');
  const { x, y, width } = detection.detection.box;
  ctx.font = `${width * 0.2}px Arial`;
  ctx.fillStyle = '#ffffff';
  ctx.strokeStyle = '#000000';
  ctx.lineWidth = 2;
  ctx.textAlign = 'center';
  ctx.strokeText(emoji, x + width / 2, y - 10);
  ctx.fillText(emoji, x + width / 2, y - 10);
}
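
// Log an error and show it in the error banner for five seconds.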
function handleError(message) {
  console.error(message);
  errorMessage.textContent = message;
  errorMessage.style.display = 'block';
  loadingMessage.style.display = 'none';

  setTimeout(() => {
    errorMessage.style.display = 'none';
  }, 5000);
}
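
// Show a transient status message (styled via `type`) for three seconds.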
function updateStatus(message, type) {
  statusMessage.textContent = message;
  statusMessage.className = `status-message ${type}`;
  statusMessage.style.display = 'block';
  setTimeout(() => {
    statusMessage.style.display = 'none';
  }, 3000);
}
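
// Send the prompt to a local Ollama server (llama2) and speak the reply.
// Falls back to the first canned response for the current expression if the
// request fails.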
async function sendPromptToOllama(prompt) {
  try {
    const fullPrompt = `You are an AI assistant responding to a user's emotional state.
The user appears to be ${lastExpression}.
Respond naturally and empathetically to this prompt: ${prompt}`;

    const response = await fetch('http://localhost:11434/api/generate', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: "llama2",
        prompt: fullPrompt,
        stream: false
      })
    });

    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }

    const data = await response.json();
    if (data && data.response) {
      playTextAsSpeech(data.response);
      updateStatus('Response received', 'success');
    }
  } catch (error) {
    handleError('Ollama API Error: ' + error.message);
    console.error('Full error:', error);

    const fallbackResponse = expressionsToEmojiAndPrompt[lastExpression].responses[0];
    playTextAsSpeech(fallbackResponse);
  }
}
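
// Speak the text with the Web Speech API, cancelling any utterance in flight
// so responses never overlap.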
function playTextAsSpeech(text) {
  try {
    const speech = new SpeechSynthesisUtterance(text);
    speech.rate = 1.0;
    speech.pitch = 1.0;
    speech.volume = 1.0;
    window.speechSynthesis.cancel();
    window.speechSynthesis.speak(speech);
  } catch (error) {
    handleError('Speech synthesis error: ' + error.message);
  }
}
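
// Wire-up: load models on DOM ready, start detecting once the video plays,
// and release the camera on page unload.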
document.addEventListener('DOMContentLoaded', loadModels);

video.addEventListener('play', () => {
  createCanvas();
  detectFaces(500);
});

window.addEventListener('beforeunload', () => {
  stopVideo();
});