<!-- mood / index.html — author: Bahrudin — "make more compact. Video in
     background, buttons overlaid. no emojies" — Initial Deployment
     (commit a2b8e1d, verified) -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Mood Detector</title>
<!-- defer: neither CDN script is needed before first paint — the inline app
     script only touches `faceapi` inside user-triggered handlers. -->
<script src="https://cdn.tailwindcss.com" defer></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/face-api.min.js" defer></script>
<style>
  /* Overlay helpers used by the detection canvas / labels. */
  .video-overlay {
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
  }
  .face-box {
    position: absolute;
    border: 2px solid #3B82F6;
    background-color: rgba(59, 130, 246, 0.2);
  }
  .mood-text {
    position: absolute;
    color: white;
    background-color: rgba(0, 0, 0, 0.7);
    padding: 2px 5px;
    border-radius: 4px;
    font-size: 12px;
  }
  /* Attention pulse (blue glow), currently unreferenced by the markup. */
  .pulse {
    animation: pulse 2s infinite;
  }
  @keyframes pulse {
    0% {
      transform: scale(1);
      box-shadow: 0 0 0 0 rgba(59, 130, 246, 0.7);
    }
    70% {
      transform: scale(1.05);
      box-shadow: 0 0 0 10px rgba(59, 130, 246, 0);
    }
    100% {
      transform: scale(1);
      box-shadow: 0 0 0 0 rgba(59, 130, 246, 0);
    }
  }
</style>
</head>
<body class="bg-gray-900 text-white min-h-screen flex flex-col">
<header class="bg-gray-800 py-6 shadow-lg">
  <div class="container mx-auto px-4">
    <h1 class="text-3xl md:text-4xl font-bold text-center text-blue-400">Mood Detector</h1>
    <p class="text-center text-gray-300 mt-2">Let your face reveal your emotions</p>
  </div>
</header>
<main class="relative h-screen w-full overflow-hidden">
  <div class="absolute inset-0">
    <!-- playsinline keeps iOS Safari from hijacking the stream into fullscreen -->
    <video id="video" autoplay muted playsinline class="w-full h-full object-cover bg-gray-800"></video>
    <canvas id="canvas" class="absolute inset-0 w-full h-full"></canvas>
  </div>
  <div class="absolute bottom-4 left-0 right-0 flex justify-center gap-4">
    <!-- type="button": these are in-page actions, never form submits -->
    <button id="start-btn" type="button" class="bg-blue-600 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-lg transition duration-300">
      Start
    </button>
    <button id="stop-btn" type="button" class="bg-gray-700 hover:bg-gray-600 text-white font-bold py-2 px-4 rounded-lg transition duration-300" disabled>
      Stop
    </button>
  </div>
  <!-- aria-live: the script rewrites these values continuously; announce politely -->
  <div class="absolute top-4 right-4 bg-gray-800 bg-opacity-80 rounded-lg p-4 shadow-lg" aria-live="polite">
    <h2 class="text-lg font-semibold mb-2">Mood Analysis</h2>
    <div id="mood-text" class="text-xl font-bold text-blue-400">Neutral</div>
    <div id="confidence" class="text-gray-300 text-sm">Confidence: 0%</div>
    <div class="mt-4 grid grid-cols-2 gap-2">
      <div>
        <div class="font-medium">Happy</div>
        <div id="happy-score" class="text-gray-300 text-sm">0%</div>
      </div>
      <div>
        <div class="font-medium">Sad</div>
        <div id="sad-score" class="text-gray-300 text-sm">0%</div>
      </div>
      <div>
        <div class="font-medium">Angry</div>
        <div id="angry-score" class="text-gray-300 text-sm">0%</div>
      </div>
      <div>
        <div class="font-medium">Fearful</div>
        <div id="fearful-score" class="text-gray-300 text-sm">0%</div>
      </div>
    </div>
  </div>
</main>
<footer class="bg-gray-800 py-4">
  <div class="container mx-auto px-4 text-center text-gray-400">
    <p>Mood Detector using Face API | © 2023 All Rights Reserved</p>
  </div>
</footer>
<script>
// DOM Elements
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const startBtn = document.getElementById('start-btn');
const stopBtn = document.getElementById('stop-btn');
// NOTE(review): removed `moodDisplay` — it looked up the id 'mood-display',
// which exists nowhere in this document (always null), and was never used.
const moodText = document.getElementById('mood-text');
const confidenceText = document.getElementById('confidence');
// Emotion score elements
const happyScore = document.getElementById('happy-score');
const sadScore = document.getElementById('sad-score');
const angryScore = document.getElementById('angry-score');
const fearfulScore = document.getElementById('fearful-score');
// Detection loop state
let detectionInterval;   // setInterval handle for the polling loop
let isDetecting = false; // true while frames should be analyzed
// Load the face-api.js network weights in parallel from the public model host.
// On failure, logs the error and alerts the user (does not rethrow).
async function loadModels() {
    // Single source of truth for the host (was repeated once per net).
    const MODEL_URL = 'https://justadudewhohacks.github.io/face-api.js/models';
    try {
        // NOTE(review): dropped faceRecognitionNet — the pipeline below
        // (tiny detector + landmarks + expressions) never uses it, and it
        // was by far the largest download.
        await Promise.all([
            faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
            faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
            faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL)
        ]);
        console.log('Models loaded successfully');
    } catch (err) {
        console.error('Failed to load models:', err);
        alert('Failed to load face detection models. Please try again later.');
    }
}
// Request camera access and attach the stream to the <video> element.
// Returns true on success, false if the user denied access or no camera exists.
async function startVideo() {
    try {
        video.srcObject = await navigator.mediaDevices.getUserMedia({ video: {} });
        return true;
    } catch (err) {
        console.error('Error accessing camera:', err);
        alert('Could not access the camera. Please ensure you have granted camera permissions.');
        return false;
    }
}
// Analyze the current video frame: detect faces, landmarks, and expressions,
// draw the overlays, and update the mood panel. Called on an interval.
async function detectFaces() {
    if (!isDetecting) return;
    // Skip frames until the camera has delivered real dimensions; face-api
    // cannot process a 0x0 input.
    if (!video.videoWidth || !video.videoHeight) return;
    const options = new faceapi.TinyFaceDetectorOptions();
    const result = await faceapi.detectAllFaces(video, options)
        .withFaceLandmarks()
        .withFaceExpressions();
    // Keep the canvas backing store in sync with the video resolution.
    // Assigning width/height implicitly clears the canvas, so the explicit
    // clear is only needed when the size is already correct. (The original
    // cleared first and then resized every frame, wasting the clear.)
    if (canvas.width !== video.videoWidth || canvas.height !== video.videoHeight) {
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
    } else {
        canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
    }
    // Draw detections, landmarks, and expression labels on the overlay.
    faceapi.draw.drawDetections(canvas, result);
    faceapi.draw.drawFaceLandmarks(canvas, result);
    faceapi.draw.drawFaceExpressions(canvas, result);
    // Update the mood panel from the first detected face, if any.
    if (result.length > 0) {
        updateMoodDisplay(result[0].expressions);
    } else {
        resetMoodDisplay();
    }
}
// Refresh the mood panel from a face-api expressions map
// (emotion name -> probability in [0, 1]).
function updateMoodDisplay(expressions) {
    const pct = (value) => `${Math.round(value * 100)}%`;
    // Scan for the highest-scoring expression (ties keep the earlier entry).
    const entries = Object.entries(expressions);
    let [emotion, confidence] = entries[0];
    for (const [name, score] of entries) {
        if (score > confidence) {
            emotion = name;
            confidence = score;
        }
    }
    // Per-emotion breakdown.
    happyScore.textContent = pct(expressions.happy);
    sadScore.textContent = pct(expressions.sad);
    angryScore.textContent = pct(expressions.angry);
    fearfulScore.textContent = pct(expressions.fearful);
    // Headline mood + confidence, then recolor the headline.
    const confidencePercent = Math.round(confidence * 100);
    moodText.textContent = capitalizeFirstLetter(emotion);
    confidenceText.textContent = `Confidence: ${confidencePercent}%`;
    updateMoodIndicator(emotion, confidencePercent);
}
// Reset the mood panel to its "no face detected" state.
function resetMoodDisplay() {
    moodText.textContent = "No face detected";
    // Also restore the neutral blue color: previously the stale emotion
    // color set by updateMoodIndicator stuck after the face left the frame.
    moodText.className = "text-xl font-bold text-blue-400";
    confidenceText.textContent = "Confidence: 0%";
    // Reset scores
    happyScore.textContent = "0%";
    sadScore.textContent = "0%";
    angryScore.textContent = "0%";
    fearfulScore.textContent = "0%";
}
// Recolor the headline mood text according to the dominant emotion.
// `confidence` is accepted for interface compatibility but unused here.
function updateMoodIndicator(emotion, confidence) {
    // Tailwind text-color class per emotion; unknown emotions fall back to blue.
    const EMOTION_COLORS = {
        happy: "text-green-400",
        sad: "text-blue-400",
        angry: "text-red-400",
        fearful: "text-purple-400",
        disgusted: "text-yellow-400",
        surprised: "text-orange-400",
        neutral: "text-gray-400"
    };
    const colorClass = EMOTION_COLORS[emotion] || "text-blue-400";
    moodText.className = `text-xl font-bold ${colorClass}`;
}
// Uppercase the first character of a string; empty strings pass through.
function capitalizeFirstLetter(string) {
    return string.replace(/^./, (first) => first.toUpperCase());
}
// Begin camera capture, load the models, and start the detection loop.
async function startDetection() {
    // Bail out early if the camera could not be opened.
    if (!(await startVideo())) return;
    await loadModels();
    isDetecting = true;
    // Swap the enabled button while detection runs.
    startBtn.disabled = true;
    stopBtn.disabled = false;
    // Poll for faces twice per second.
    detectionInterval = setInterval(detectFaces, 500);
}
// Halt the detection loop, release the camera, and restore the idle UI.
function stopDetection() {
    isDetecting = false;
    clearInterval(detectionInterval);
    // Stop every track so the camera indicator light turns off.
    if (video.srcObject) {
        for (const track of video.srcObject.getTracks()) {
            track.stop();
        }
        video.srcObject = null;
    }
    // Wipe any leftover overlay drawings.
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
    // Swap the enabled button back and clear the mood panel.
    startBtn.disabled = false;
    stopBtn.disabled = true;
    resetMoodDisplay();
}
// Wire the control buttons to their handlers.
for (const [btn, handler] of [[startBtn, startDetection], [stopBtn, stopDetection]]) {
    btn.addEventListener('click', handler);
}
// Give the canvas a provisional size until the camera reports its real
// dimensions (detectFaces resizes it to match the video).
document.addEventListener('DOMContentLoaded', () => {
    canvas.width = 640;
    canvas.height = 480;
});
</script>
<p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=Bahrudin/mood" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
</html>