slr_google_landmarks_demo/marker.html
jared 8bcc62b045 Initial commit: MediaPipe landmarks demo
HTML demos for face, hand, gesture, and posture tracking using MediaPipe.
Includes Python CLI tools for processing video files.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 22:38:40 -05:00


<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>MediaPipe Hand Landmarker — Single File Demo</title>
<!-- Material Components (for the button styling) -->
<link href="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css" rel="stylesheet">
<script src="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.js"></script>
<!-- Drawing utils (provides drawConnectors, drawLandmarks) -->
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<!-- Hands (provides HAND_CONNECTIONS constant) -->
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/hands/hands.js" crossorigin="anonymous"></script>
<style>
/* Inline CSS from the CodePen, cleaned for single-file use */
body {
font-family: Roboto, Arial, sans-serif;
margin: 2em;
color: #3d3d3d;
--mdc-theme-primary: #007f8b;
--mdc-theme-on-primary: #f1f3f4;
}
h1 { color: #007f8b; }
h2 { clear: both; }
em { font-weight: bold; }
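/* Mirror the live video (and the overlay canvas below) so the feed reads like a mirror */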
video {
clear: both;
display: block;
transform: rotateY(180deg);
-webkit-transform: rotateY(180deg);
-moz-transform: rotateY(180deg);
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
.removed { display: none; }
.invisible { opacity: 0.2; }
.note {
font-style: italic;
font-size: 130%;
}
.videoView, .detectOnClick {
position: relative;
float: left;
width: 48%;
margin: 2% 1%;
cursor: pointer;
}
.videoView p, .detectOnClick p {
position: absolute;
padding: 5px;
background-color: #007f8b;
color: #fff;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 2;
font-size: 12px;
margin: 0;
}
.highlighter {
background: rgba(0, 255, 0, 0.25);
border: 1px dashed #fff;
z-index: 1;
position: absolute;
}
.canvas, .output_canvas {
z-index: 1;
position: absolute;
pointer-events: none;
}
.output_canvas {
transform: rotateY(180deg);
-webkit-transform: rotateY(180deg);
-moz-transform: rotateY(180deg);
}
.detectOnClick { z-index: 0; }
.detectOnClick img { width: 100%; }
</style>
</head>
<body>
<section id="demos" class="invisible">
<h2>Demo: Continuous hand landmark detection from your webcam</h2>
<p>Hold your hand in front of your webcam to get real-time hand landmark detection.<br>Click <b>ENABLE WEBCAM</b> below and grant access to the webcam if prompted.</p>
<div id="liveView" class="videoView">
<button id="webcamButton" class="mdc-button mdc-button--raised">
<span class="mdc-button__ripple"></span>
<span class="mdc-button__label">ENABLE WEBCAM</span>
</button>
<div style="position: relative;">
<video id="webcam" style="position: absolute; left: 0; top: 0;" autoplay playsinline></video>
<canvas class="output_canvas" id="output_canvas" style="left: 0; top: 0;"></canvas>
</div>
</div>
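<!-- Optional markup for the click-to-detect demo handled by the script below.
This page ships without it; the src value here is a placeholder, not a file in
this repo. Uncomment and point it at a real image to try Demo 1:
<div class="detectOnClick">
<img src="hand_example.jpg" crossorigin="anonymous" title="Click to run hand landmark detection">
</div>
-->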
</section>
<script type="module">
// Import the Tasks Vision ESM build
import { HandLandmarker, FilesetResolver } from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0";
const demosSection = document.getElementById("demos");
let handLandmarker;
let runningMode = "IMAGE";
let enableWebcamButton;
let webcamRunning = false;
// Load the model and enable the demos section
const createHandLandmarker = async () => {
const vision = await FilesetResolver.forVisionTasks(
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"
);
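// delegate: "GPU" runs inference via WebGL where available; on machines
// without GPU/WebGL support, change it to "CPU".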
handLandmarker = await HandLandmarker.createFromOptions(vision, {
baseOptions: {
modelAssetPath: "https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task",
delegate: "GPU"
},
runningMode,
numHands: 2
});
demosSection.classList.remove("invisible");
};
createHandLandmarker();
/********************************************************************
// Demo 1: Click images to run landmark detection
********************************************************************/
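// Note: this page contains no .detectOnClick elements by default, so this
// loop is a no-op unless the commented-out markup in the body is enabled.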
const imageContainers = document.getElementsByClassName("detectOnClick");
for (let i = 0; i < imageContainers.length; i++) {
const img = imageContainers[i].children[0];
img.addEventListener("click", handleClick);
}
async function handleClick(event) {
if (!handLandmarker) {
console.log("Wait for handLandmarker to load before clicking!");
return;
}
if (runningMode === "VIDEO") {
runningMode = "IMAGE";
await handLandmarker.setOptions({ runningMode: "IMAGE" });
}
const container = event.target.parentNode;
// Remove old overlays
const old = container.getElementsByClassName("canvas");
for (let i = old.length - 1; i >= 0; i--) {
old[i].parentNode.removeChild(old[i]);
}
// Run detection
const result = handLandmarker.detect(event.target);
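// result.landmarks holds one 21-point list per detected hand, with
// coordinates normalized to [0, 1] relative to the input image.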
// Create overlay canvas aligned to the image element
const canvas = document.createElement("canvas");
canvas.className = "canvas";
canvas.width = event.target.naturalWidth;
canvas.height = event.target.naturalHeight;
canvas.style.left = "0px";
canvas.style.top = "0px";
canvas.style.width = event.target.width + "px";
canvas.style.height = event.target.height + "px";
container.appendChild(canvas);
const ctx = canvas.getContext("2d");
if (result && result.landmarks) {
for (const landmarks of result.landmarks) {
// drawConnectors and drawLandmarks are provided by drawing_utils.js
// HAND_CONNECTIONS is provided by hands.js
drawConnectors(ctx, landmarks, HAND_CONNECTIONS, {
color: "#00FF00",
lineWidth: 5
});
drawLandmarks(ctx, landmarks, { color: "#FF0000", lineWidth: 1 });
}
}
}
/********************************************************************
// Demo 2: Webcam stream detection
********************************************************************/
const video = document.getElementById("webcam");
const canvasElement = document.getElementById("output_canvas");
const canvasCtx = canvasElement.getContext("2d");
const hasGetUserMedia = () => !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
if (hasGetUserMedia()) {
enableWebcamButton = document.getElementById("webcamButton");
enableWebcamButton.addEventListener("click", enableCam);
} else {
console.warn("getUserMedia() is not supported by your browser");
}
function enableCam() {
if (!handLandmarker) {
console.log("Wait! HandLandmarker not loaded yet.");
return;
}
webcamRunning = !webcamRunning;
enableWebcamButton.innerText = webcamRunning ? "DISABLE PREDICTIONS" : "ENABLE PREDICTIONS";
if (!webcamRunning) {
// Stop the camera tracks so the webcam indicator light turns off.
if (video.srcObject) {
video.srcObject.getTracks().forEach((track) => track.stop());
video.srcObject = null;
}
return;
}
const constraints = { video: true };
navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
video.srcObject = stream;
// { once: true } avoids stacking duplicate listeners across enable/disable cycles
video.addEventListener("loadeddata", predictWebcam, { once: true });
}).catch((err) => {
// Surface permission denials instead of failing silently
console.error("Webcam access failed:", err);
webcamRunning = false;
enableWebcamButton.innerText = "ENABLE WEBCAM";
});
}
let lastVideoTime = -1;
let results;
async function predictWebcam() {
// Match canvas to the video size
canvasElement.style.width = video.videoWidth + "px";
canvasElement.style.height = video.videoHeight + "px";
canvasElement.width = video.videoWidth;
canvasElement.height = video.videoHeight;
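// The width/height attributes size the drawing buffer; the style rules size
// the element on the page, keeping the overlay aligned with the video.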
// Switch to VIDEO mode for streaming
if (runningMode === "IMAGE") {
runningMode = "VIDEO";
await handLandmarker.setOptions({ runningMode: "VIDEO" });
}
const startTimeMs = performance.now();
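// Only run inference when the video has advanced to a new frame;
// detectForVideo expects monotonically increasing timestamps, which
// performance.now() provides.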
if (lastVideoTime !== video.currentTime) {
lastVideoTime = video.currentTime;
results = handLandmarker.detectForVideo(video, startTimeMs);
}
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
if (results && results.landmarks) {
for (const landmarks of results.landmarks) {
drawConnectors(canvasCtx, landmarks, HAND_CONNECTIONS, {
color: "#00FF00",
lineWidth: 5
});
drawLandmarks(canvasCtx, landmarks, { color: "#FF0000", lineWidth: 2 });
}
}
canvasCtx.restore();
if (webcamRunning) {
window.requestAnimationFrame(predictWebcam);
}
}
</script>
</body>
</html>