Initial commit: MediaPipe landmarks demo
HTML demos for face, hand, gesture, and posture tracking using MediaPipe. Includes Python CLI tools for processing video files.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
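The Python CLI tools mentioned above are not part of this file's diff. For orientation only, here is a minimal sketch of what such a tool could look like with the MediaPipe Tasks Python API; the script name, flags, and model path are hypothetical, and it assumes the mediapipe and opencv-python packages plus a locally downloaded face_landmarker.task model.

# process_video.py -- hypothetical sketch, not part of this commit's diff.
# Runs MediaPipe Face Landmarker over a video file and prints per-frame face counts.
import argparse

import cv2
import mediapipe as mp
from mediapipe.tasks import python as mp_tasks
from mediapipe.tasks.python import vision


def main():
    parser = argparse.ArgumentParser(description="Face landmarks from a video file")
    parser.add_argument("video", help="path to the input video")
    parser.add_argument("--model", default="face_landmarker.task",
                        help="path to the downloaded face_landmarker.task model")
    args = parser.parse_args()

    options = vision.FaceLandmarkerOptions(
        base_options=mp_tasks.BaseOptions(model_asset_path=args.model),
        running_mode=vision.RunningMode.VIDEO,
        output_face_blendshapes=True,
        num_faces=1,
    )

    cap = cv2.VideoCapture(args.video)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0

    with vision.FaceLandmarker.create_from_options(options) as landmarker:
        frame_index = 0
        while True:
            ok, frame_bgr = cap.read()
            if not ok:
                break
            # MediaPipe expects RGB; OpenCV decodes frames as BGR.
            rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb)
            # Timestamps must increase monotonically in VIDEO mode.
            timestamp_ms = int(frame_index * 1000 / fps)
            result = landmarker.detect_for_video(mp_image, timestamp_ms)
            print(f"frame {frame_index}: {len(result.face_landmarks)} face(s)")
            frame_index += 1

    cap.release()


if __name__ == "__main__":
    main()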
435  face.html  Normal file
@@ -0,0 +1,435 @@
<!-- face.html • Single-file MediaPipe Face Landmarker demo -->
<!-- Copyright 2023 The MediaPipe Authors.
     Licensed under the Apache License, Version 2.0 -->
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <meta http-equiv="Cache-control" content="no-cache, no-store, must-revalidate" />
  <meta http-equiv="Pragma" content="no-cache" />
  <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
  <title>Face Landmarker</title>

  <!-- Material Components (styles only for the raised button) -->
  <link href="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css" rel="stylesheet" />
  <script src="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.js"></script>

  <style>
    /* Inlined CSS (with minor cleanups) */

    body {
      font-family: helvetica, arial, sans-serif;
      margin: 2em;
      color: #3d3d3d;
      --mdc-theme-primary: #007f8b;
      --mdc-theme-on-primary: #f1f3f4;
    }

    h1 {
      font-style: italic;
      color: #007f8b;
    }

    h2 {
      clear: both;
    }

    em { font-weight: bold; }

    video {
      clear: both;
      display: block;
      transform: rotateY(180deg);
      -webkit-transform: rotateY(180deg);
      -moz-transform: rotateY(180deg);
    }

    section {
      opacity: 1;
      transition: opacity 500ms ease-in-out;
    }

    .removed { display: none; }
    .invisible { opacity: 0.2; }

    .note {
      font-style: italic;
      font-size: 130%;
    }

    .videoView,
    .detectOnClick,
    .blend-shapes {
      position: relative;
      float: left;
      width: 48%;
      margin: 2% 1%;
      cursor: pointer;
    }

    .videoView p,
    .detectOnClick p {
      position: absolute;
      padding: 5px;
      background-color: #007f8b;
      color: #fff;
      border: 1px dashed rgba(255, 255, 255, 0.7);
      z-index: 2;
      font-size: 12px;
      margin: 0;
    }

    .highlighter {
      background: rgba(0, 255, 0, 0.25);
      border: 1px dashed #fff;
      z-index: 1;
      position: absolute;
    }

    .canvas {
      z-index: 1;
      position: absolute;
      pointer-events: none;
    }

    .output_canvas {
      transform: rotateY(180deg);
      -webkit-transform: rotateY(180deg);
      -moz-transform: rotateY(180deg);
    }

    .detectOnClick { z-index: 0; }
    .detectOnClick img { width: 100%; }

    .blend-shapes-item {
      display: flex;
      align-items: center;
      height: 20px;
    }

    .blend-shapes-label {
      display: flex;
      width: 120px;
      justify-content: flex-end;
      align-items: center;
      margin-right: 4px;
    }

    .blend-shapes-value {
      display: flex;
      height: 16px;
      align-items: center;
      background-color: #007f8b;
      color: #fff;
      padding: 0 6px;
      border-radius: 2px;
      white-space: nowrap;
      overflow: hidden;
    }

    /* Ensure video/canvas overlap correctly inside the container */
    #liveView > div {
      position: relative;
      display: inline-block;
    }
    #webcam {
      position: absolute; left: 0; top: 0;
    }
    #output_canvas {
      position: absolute; left: 0; top: 0;
    }
  </style>
</head>
<body>
  <h1>Face landmark detection using the MediaPipe FaceLandmarker task</h1>

  <section id="demos" class="invisible">
    <h2>Demo: Webcam continuous face landmarks detection</h2>
    <p>
      Hold your face in front of your webcam to get real-time face landmarker detection.<br />
      Click <b>enable webcam</b> below and grant access to the webcam if prompted.
    </p>

    <div id="liveView" class="videoView">
      <button id="webcamButton" class="mdc-button mdc-button--raised">
        <span class="mdc-button__ripple"></span>
        <span class="mdc-button__label">ENABLE WEBCAM</span>
      </button>
      <div>
        <video id="webcam" autoplay playsinline></video>
        <canvas class="output_canvas" id="output_canvas"></canvas>
      </div>
    </div>
    <div class="blend-shapes">
      <ul class="blend-shapes-list" id="video-blend-shapes"></ul>
    </div>
  </section>

  <script type="module">
    // Inlined JS (converted to plain JS; removed TS types)

    import vision from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3";
    const { FaceLandmarker, FilesetResolver, DrawingUtils } = vision;

    const demosSection = document.getElementById("demos");
    const imageBlendShapes = document.getElementById("image-blend-shapes");
    const videoBlendShapes = document.getElementById("video-blend-shapes");

    let faceLandmarker;
    let runningMode = "IMAGE"; // "IMAGE" | "VIDEO"
    let enableWebcamButton;
    let webcamRunning = false;
    const videoWidth = 480;

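    // Load the tasks-vision WASM runtime and the hosted face_landmarker model,
    // then reveal the demo section once the landmarker is ready.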
    async function createFaceLandmarker() {
      const filesetResolver = await FilesetResolver.forVisionTasks(
        "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3/wasm"
      );
      faceLandmarker = await FaceLandmarker.createFromOptions(filesetResolver, {
        baseOptions: {
          modelAssetPath:
            "https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task",
          delegate: "GPU",
        },
        outputFaceBlendshapes: true,
        runningMode,
        numFaces: 1,
      });
      demosSection.classList.remove("invisible");
    }
    createFaceLandmarker();

    /********************************************************************
    // Demo 1: Click image to detect landmarks
    ********************************************************************/
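    // Note: this page defines no .detectOnClick images, so the click-to-detect
    // path below is inert here.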
    const imageContainers = document.getElementsByClassName("detectOnClick");
    for (let imageContainer of imageContainers) {
      imageContainer.children[0].addEventListener("click", handleClick);
    }

    async function handleClick(event) {
      if (!faceLandmarker) {
        console.log("Wait for faceLandmarker to load before clicking!");
        return;
      }

      if (runningMode === "VIDEO") {
        runningMode = "IMAGE";
        await faceLandmarker.setOptions({ runningMode });
      }

      const parent = event.target.parentNode;
      const allCanvas = parent.getElementsByClassName("canvas");
      for (let i = allCanvas.length - 1; i >= 0; i--) {
        const n = allCanvas[i];
        n.parentNode.removeChild(n);
      }

      const faceLandmarkerResult = faceLandmarker.detect(event.target);

      const canvas = document.createElement("canvas");
      canvas.setAttribute("class", "canvas");
      canvas.setAttribute("width", event.target.naturalWidth + "px");
      canvas.setAttribute("height", event.target.naturalHeight + "px");
      canvas.style.left = "0px";
      canvas.style.top = "0px";
      canvas.style.width = `${event.target.width}px`;
      canvas.style.height = `${event.target.height}px`;

      parent.appendChild(canvas);
      const ctx = canvas.getContext("2d");
      const drawingUtils = new DrawingUtils(ctx);

      for (const landmarks of faceLandmarkerResult.faceLandmarks) {
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_TESSELATION,
          { color: "#C0C0C070", lineWidth: 1 }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
          { color: "#FF3030" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
          { color: "#FF3030" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
          { color: "#30FF30" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
          { color: "#30FF30" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
          { color: "#E0E0E0" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_LIPS,
          { color: "#E0E0E0" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
          { color: "#FF3030" }
        );
        drawingUtils.drawConnectors(
          landmarks,
          FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
          { color: "#30FF30" }
        );
      }
      drawBlendShapes(imageBlendShapes, faceLandmarkerResult.faceBlendshapes);
    }

    /********************************************************************
    // Demo 2: Webcam stream detection
    ********************************************************************/
    const video = document.getElementById("webcam");
    const canvasElement = document.getElementById("output_canvas");
    const canvasCtx = canvasElement.getContext("2d");

    function hasGetUserMedia() {
      return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
    }

    if (hasGetUserMedia()) {
      enableWebcamButton = document.getElementById("webcamButton");
      enableWebcamButton.addEventListener("click", enableCam);
    } else {
      console.warn("getUserMedia() is not supported by your browser");
    }

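    // Toggle the prediction loop: request a camera stream and start predictWebcam
    // once the first frame of video data has loaded.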
    function enableCam() {
      if (!faceLandmarker) {
        console.log("Wait! faceLandmarker not loaded yet.");
        return;
      }

      webcamRunning = !webcamRunning;
      enableWebcamButton.innerText = webcamRunning
        ? "DISABLE PREDICTIONS"
        : "ENABLE PREDICTIONS";

      const constraints = { video: true };

      navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
        video.srcObject = stream;
        video.addEventListener("loadeddata", predictWebcam);
      });
    }

    let lastVideoTime = -1;
    let results;
    const drawingUtils = new DrawingUtils(canvasCtx);

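    // Per-frame loop: size the canvas to the video, run detectForVideo only when
    // the video has advanced to a new frame (lastVideoTime guard), draw the mesh
    // connectors, and re-schedule via requestAnimationFrame while enabled.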
    async function predictWebcam() {
      const ratio = video.videoHeight / video.videoWidth;
      video.style.width = videoWidth + "px";
      video.style.height = videoWidth * ratio + "px";
      canvasElement.style.width = videoWidth + "px";
      canvasElement.style.height = videoWidth * ratio + "px";
      canvasElement.width = video.videoWidth;
      canvasElement.height = video.videoHeight;

      if (runningMode === "IMAGE") {
        runningMode = "VIDEO";
        await faceLandmarker.setOptions({ runningMode });
      }

      const startTimeMs = performance.now();
      if (lastVideoTime !== video.currentTime) {
        lastVideoTime = video.currentTime;
        results = faceLandmarker.detectForVideo(video, startTimeMs);
      }

      canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);

      if (results && results.faceLandmarks) {
        for (const landmarks of results.faceLandmarks) {
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_TESSELATION,
            { color: "#C0C0C070", lineWidth: 1 }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
            { color: "#FF3030" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
            { color: "#FF3030" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
            { color: "#30FF30" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
            { color: "#30FF30" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
            { color: "#E0E0E0" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_LIPS,
            { color: "#E0E0E0" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
            { color: "#FF3030" }
          );
          drawingUtils.drawConnectors(
            landmarks,
            FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
            { color: "#30FF30" }
          );
        }
      }

      drawBlendShapes(videoBlendShapes, (results && results.faceBlendshapes) || []);

      if (webcamRunning === true) {
        window.requestAnimationFrame(predictWebcam);
      }
    }

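    // Render each blend-shape score as a labelled bar; scores are clamped to
    // [0, 1] and mapped to a bar width.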
    function drawBlendShapes(el, blendShapes) {
      if (!blendShapes || !blendShapes.length) {
        el.innerHTML = "";
        return;
      }

      let htmlMaker = "";
      blendShapes[0].categories.forEach((shape) => {
        const label = shape.displayName || shape.categoryName;
        const pct = Math.max(0, Math.min(1, Number(shape.score) || 0));
        htmlMaker += `
          <li class="blend-shapes-item">
            <span class="blend-shapes-label">${label}</span>
            <span class="blend-shapes-value" style="width: calc(${pct * 100}% - 120px)">${pct.toFixed(4)}</span>
          </li>
        `;
      });

      el.innerHTML = htmlMaker;
    }
  </script>
</body>
</html>