sl-webui 57420807ca feat(webui): live camera viewer — multi-stream + detection overlays (Issue #177)
UI (src/hooks/useCamera.js, src/components/CameraViewer.jsx):
  - 7 camera sources: front/left/rear/right CSI, D435i RGB/depth, panoramic
  - Compressed image subscription via rosbridge (sensor_msgs/CompressedImage)
  - Client-side 15fps gate (drops excess frames, reduces JS pressure)
  - Per-camera FPS indicator with quality badge (FULL/GOOD/LOW/NO SIGNAL)
  - Detection overlays: face boxes + names (/social/faces/detections),
    gesture icons (/social/gestures), scene object labels + hazard colours
    (/social/scene/objects); overlay mode selector (off/faces/gestures/objects/all)
  - 360° panoramic equirect viewer with mouse/touch drag azimuth pan
  - Picture-in-picture: up to 3 pinned cameras via ⊕ button
  - One-click recording (MediaRecorder → MP4/WebM download)
  - Snapshot to PNG with detection overlay composite + timestamp watermark
  - Cameras tab added to TELEMETRY group in App.jsx

Jetson (rosbridge bringup):
  - rosbridge_params.yaml: whitelist + /camera/depth/image_rect_raw/compressed,
    /camera/panoramic/compressed, /social/faces/detections,
    /social/gestures, /social/scene/objects
  - rosbridge.launch.py: D435i colour republisher (JPEG 75%) +
    depth republisher (compressedDepth/PNG16 preserving uint16 values)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-02 10:47:01 -05:00

672 lines
25 KiB
JavaScript
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

/**
* CameraViewer.jsx — Live camera stream viewer (Issue #177).
*
* Features:
* - 7 cameras: front/left/rear/right (CSI), D435i RGB/depth, panoramic
* - Detection overlays: face boxes + names, gesture icons, scene object labels
* - 360° panoramic equirect viewer with mouse drag pan
* - One-click recording (MP4/WebM) + download
* - Snapshot to PNG with annotations + timestamp
* - Picture-in-picture (up to 3 pinned cameras)
* - Per-camera FPS indicator + adaptive quality badge
*
* Topics consumed:
* /camera/<name>/image_raw/compressed sensor_msgs/CompressedImage
* /camera/color/image_raw/compressed sensor_msgs/CompressedImage (D435i)
* /camera/depth/image_rect_raw/compressed sensor_msgs/CompressedImage (D435i)
* /camera/panoramic/compressed sensor_msgs/CompressedImage
* /social/faces/detections saltybot_social_msgs/FaceDetectionArray
* /social/gestures saltybot_social_msgs/GestureArray
* /social/scene/objects saltybot_scene_msgs/SceneObjectArray
*/
import { forwardRef, useCallback, useEffect, useRef, useState } from 'react';
import { useCamera, CAMERAS, CAMERA_BY_ID, CAMERA_BY_ROS_ID } from '../hooks/useCamera.js';
// ── Constants ─────────────────────────────────────────────────────────────────
// Emoji glyph for each recognised gesture type. Keys match the
// `gesture_type` field of saltybot_social_msgs/GestureArray entries
// (unknown types fall back to '?' at the draw site).
// Frozen: shared lookup table, must never be mutated at runtime.
const GESTURE_ICONS = Object.freeze({
  wave: '👋',
  point: '👆',
  stop_palm: '✋',
  thumbs_up: '👍',
  thumbs_down: '👎',
  come_here: '🤏',
  follow: '☞',
  arms_up: '🙌',
  crouch: '⬇',
  arms_spread: '↔',
});
// Stroke/label colour per hazard_type code from
// saltybot_scene_msgs/SceneObjectArray. hazard_type 0 (benign) has no
// entry and falls back to green (#22c55e) at the draw site.
// Frozen: shared lookup table, must never be mutated at runtime.
const HAZARD_COLORS = Object.freeze({
  1: '#f59e0b', // stairs — amber
  2: '#ef4444', // drop — red
  3: '#60a5fa', // wet floor — blue
  4: '#a855f7', // glass door — purple
  5: '#f97316', // pet — orange
});
// ── Detection overlay drawing helpers ─────────────────────────────────────────
/**
 * Draw face-detection boxes with corner accents and a name label.
 *
 * Known faces (person_name set and not 'unknown') render cyan with
 * "name NN%"; unknown faces render amber with "face #id". Detection
 * bboxes are in native camera pixels, so scaleX/scaleY map them onto
 * the overlay canvas.
 *
 * Fixes vs. previous revision:
 *  - the label rect was drawn at `y - 16`, which pushed it off-canvas
 *    (invisible) whenever the box touched the top edge — now clamped;
 *  - a missing/non-finite recognition_score rendered "NaN%" — now
 *    treated as 0.
 *
 * @param {CanvasRenderingContext2D} ctx overlay canvas context
 * @param {Array} faces FaceDetectionArray.faces entries
 * @param {number} scaleX canvasWidth / cameraWidth
 * @param {number} scaleY canvasHeight / cameraHeight
 */
function drawFaceBoxes(ctx, faces, scaleX, scaleY) {
  for (const face of faces) {
    const x = face.bbox_x * scaleX;
    const y = face.bbox_y * scaleY;
    const w = face.bbox_w * scaleX;
    const h = face.bbox_h * scaleY;
    const isKnown = face.person_name && face.person_name !== 'unknown';
    // Box with a soft glow in the identity colour.
    ctx.strokeStyle = isKnown ? '#06b6d4' : '#f59e0b';
    ctx.lineWidth = 2;
    ctx.shadowBlur = 6;
    ctx.shadowColor = ctx.strokeStyle;
    ctx.strokeRect(x, y, w, h);
    ctx.shadowBlur = 0;
    // Corner accent marks (L-shapes at each corner; dx/dy point inward).
    const cLen = 8;
    ctx.lineWidth = 3;
    for (const [cx, cy, dx, dy] of [[x, y, 1, 1], [x + w, y, -1, 1], [x, y + h, 1, -1], [x + w, y + h, -1, -1]]) {
      ctx.beginPath();
      ctx.moveTo(cx, cy + dy * cLen);
      ctx.lineTo(cx, cy);
      ctx.lineTo(cx + dx * cLen, cy);
      ctx.stroke();
    }
    // Label text: guard against a missing/NaN recognition_score.
    const score = Number.isFinite(face.recognition_score) ? face.recognition_score : 0;
    const label = isKnown
      ? `${face.person_name} ${(score * 100).toFixed(0)}%`
      : `face #${face.face_id}`;
    ctx.font = 'bold 11px monospace';
    const tw = ctx.measureText(label).width;
    // Clamp the 16px-tall label rect so it stays on-canvas when the
    // face box touches the top edge.
    const labelTop = Math.max(y - 16, 0);
    ctx.fillStyle = isKnown ? 'rgba(6,182,212,0.8)' : 'rgba(245,158,11,0.8)';
    ctx.fillRect(x, labelTop, tw + 6, 16);
    ctx.fillStyle = '#000';
    ctx.fillText(label, x + 3, labelTop + 12);
  }
}
/**
 * Draw a glowing gesture emoji plus a centred text caption at each
 * detected hand position.
 *
 * Only gestures reported by the camera currently on screen are drawn.
 * hand_x / hand_y are normalised [0,1] coordinates, so they are mapped
 * directly onto the canvas dimensions (scaleX/scaleY are kept in the
 * signature for call-site symmetry with the other draw helpers but are
 * not needed here).
 *
 * @param {CanvasRenderingContext2D} ctx overlay canvas context
 * @param {Array} gestures GestureArray.gestures entries
 * @param {*} activeCamId cameraId of the camera being viewed
 */
function drawGestureIcons(ctx, gestures, activeCamId, scaleX, scaleY) {
  const { width, height } = ctx.canvas;
  gestures.forEach(gesture => {
    // Skip gestures seen by a different camera than the one on screen.
    const sourceCam = CAMERA_BY_ROS_ID[gesture.camera_id];
    if (!sourceCam || sourceCam.cameraId !== activeCamId) return;
    const px = gesture.hand_x * width;
    const py = gesture.hand_y * height;
    // Emoji with an orange glow, roughly centred on the hand.
    ctx.font = '24px serif';
    ctx.shadowBlur = 8;
    ctx.shadowColor = '#f97316';
    ctx.fillText(GESTURE_ICONS[gesture.gesture_type] ?? '?', px - 12, py + 8);
    ctx.shadowBlur = 0;
    // Gesture-type caption, centred underneath the emoji.
    ctx.font = 'bold 10px monospace';
    ctx.fillStyle = '#f97316';
    const caption = gesture.gesture_type;
    ctx.fillText(caption, px - ctx.measureText(caption).width / 2, py + 22);
  });
}
/**
 * Draw dashed bounding boxes and class labels for scene objects.
 *
 * Accepts both bbox layouts seen in practice (nested `center.x` / flat
 * `center_x`); objects with no resolvable centre are skipped. Hazardous
 * objects (hazard_type > 0) use the HAZARD_COLORS palette; benign ones
 * are green. The label shows the class name plus distance when known.
 *
 * @param {CanvasRenderingContext2D} ctx overlay canvas context
 * @param {Array} objects SceneObjectArray.objects entries
 * @param {number} scaleX canvasWidth / cameraWidth
 * @param {number} scaleY canvasHeight / cameraHeight
 */
function drawSceneObjects(ctx, objects, scaleX, scaleY) {
  objects.forEach(obj => {
    const box = obj.bbox;
    const centerX = box?.center?.x ?? box?.center_x;
    const centerY = box?.center?.y ?? box?.center_y;
    if (centerX == null) return; // no usable bbox — skip
    const sw = box?.size_x ?? 0;
    const sh = box?.size_y ?? 0;
    const left = (centerX - sw / 2) * scaleX;
    const top = (centerY - sh / 2) * scaleY;
    const boxW = sw * scaleX;
    const boxH = sh * scaleY;
    // Dashed outline in the hazard colour (green when benign).
    const tint = HAZARD_COLORS[obj.hazard_type] ?? '#22c55e';
    ctx.strokeStyle = tint;
    ctx.lineWidth = 1.5;
    ctx.setLineDash([4, 3]);
    ctx.strokeRect(left, top, boxW, boxH);
    ctx.setLineDash([]);
    // "class 1.2m" label on a translucent chip below the box
    // (the 'cc' suffix appends ~80% alpha to the hex colour).
    const suffix = obj.distance_m > 0 ? ` ${obj.distance_m.toFixed(1)}m` : '';
    const caption = `${obj.class_name}${suffix}`;
    ctx.font = '10px monospace';
    const captionW = ctx.measureText(caption).width;
    ctx.fillStyle = `${tint}cc`;
    ctx.fillRect(left, top + boxH, captionW + 4, 14);
    ctx.fillStyle = '#000';
    ctx.fillText(caption, left + 2, top + boxH + 11);
  });
}
// ── Overlay canvas ─────────────────────────────────────────────────────────────
function OverlayCanvas({ faces, gestures, sceneObjects, activeCam, containerW, containerH }) {
const canvasRef = useRef(null);
useEffect(() => {
const canvas = canvasRef.current;
if (!canvas) return;
const ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, canvas.width, canvas.height);
if (!activeCam) return;
const scaleX = canvas.width / (activeCam.width || 640);
const scaleY = canvas.height / (activeCam.height || 480);
// Draw overlays: only for front camera (face + gesture source)
if (activeCam.id === 'front') {
drawFaceBoxes(ctx, faces, scaleX, scaleY);
}
if (!activeCam.isPanoramic) {
drawGestureIcons(ctx, gestures, activeCam.cameraId, scaleX, scaleY);
}
if (activeCam.id === 'color') {
drawSceneObjects(ctx, sceneObjects, scaleX, scaleY);
}
}, [faces, gestures, sceneObjects, activeCam]);
return (
<canvas
ref={canvasRef}
width={containerW || 640}
height={containerH || 480}
className="absolute inset-0 w-full h-full pointer-events-none"
/>
);
}
// ── Panoramic equirect viewer ──────────────────────────────────────────────────
function PanoViewer({ frameUrl }) {
const canvasRef = useRef(null);
const azRef = useRef(0); // 01920px offset
const dragRef = useRef(null);
const imgRef = useRef(null);
const draw = useCallback(() => {
const canvas = canvasRef.current;
const img = imgRef.current;
if (!canvas || !img || !img.complete) return;
const ctx = canvas.getContext('2d');
const W = canvas.width;
const H = canvas.height;
const iW = img.naturalWidth; // 1920
const iH = img.naturalHeight; // 960
const vW = iW / 2; // viewport = 50% of equirect width
const vH = Math.round((H / W) * vW);
const vY = Math.round((iH - vH) / 2);
const off = Math.round(azRef.current) % iW;
ctx.clearRect(0, 0, W, H);
// Draw left segment
const srcX1 = off;
const srcW1 = Math.min(vW, iW - off);
const dstW1 = Math.round((srcW1 / vW) * W);
if (dstW1 > 0) {
ctx.drawImage(img, srcX1, vY, srcW1, vH, 0, 0, dstW1, H);
}
// Draw wrapped right segment (if viewport crosses 0°)
if (srcW1 < vW) {
const srcX2 = 0;
const srcW2 = vW - srcW1;
const dstX2 = dstW1;
const dstW2 = W - dstW1;
ctx.drawImage(img, srcX2, vY, srcW2, vH, dstX2, 0, dstW2, H);
}
// Compass badge
const azDeg = Math.round((azRef.current / iW) * 360);
ctx.fillStyle = 'rgba(0,0,0,0.5)';
ctx.fillRect(W - 58, 6, 52, 18);
ctx.fillStyle = '#06b6d4';
ctx.font = 'bold 11px monospace';
ctx.fillText(`${azDeg}°`, W - 52, 19);
}, []);
// Load image when URL changes
useEffect(() => {
if (!frameUrl) return;
const img = new Image();
img.onload = draw;
img.src = frameUrl;
imgRef.current = img;
}, [frameUrl, draw]);
// Re-draw when azimuth changes
const onMouseDown = e => { dragRef.current = e.clientX; };
const onMouseMove = e => {
if (dragRef.current == null) return;
const dx = e.clientX - dragRef.current;
dragRef.current = e.clientX;
azRef.current = ((azRef.current - dx * 2) % 1920 + 1920) % 1920;
draw();
};
const onMouseUp = () => { dragRef.current = null; };
const onTouchStart = e => { dragRef.current = e.touches[0].clientX; };
const onTouchMove = e => {
if (dragRef.current == null) return;
const dx = e.touches[0].clientX - dragRef.current;
dragRef.current = e.touches[0].clientX;
azRef.current = ((azRef.current - dx * 2) % 1920 + 1920) % 1920;
draw();
};
return (
<canvas
ref={canvasRef}
width={960}
height={240}
className="w-full object-contain bg-black cursor-ew-resize rounded"
onMouseDown={onMouseDown}
onMouseMove={onMouseMove}
onMouseUp={onMouseUp}
onMouseLeave={onMouseUp}
onTouchStart={onTouchStart}
onTouchMove={onTouchMove}
onTouchEnd={() => { dragRef.current = null; }}
/>
);
}
// ── PiP mini window ────────────────────────────────────────────────────────────
function PiPWindow({ cam, frameUrl, fps, onClose, index }) {
const positions = [
'bottom-2 left-2',
'bottom-2 left-40',
'bottom-2 left-[18rem]',
];
return (
<div className={`absolute ${positions[index] ?? 'bottom-2 left-2'} w-36 rounded border border-cyan-900 overflow-hidden bg-black shadow-lg shadow-black z-10`}>
<div className="flex items-center justify-between px-1.5 py-0.5 bg-gray-950 text-xs">
<span className="text-cyan-700 font-bold">{cam.label}</span>
<div className="flex items-center gap-1">
<span className="text-gray-700">{fps}fps</span>
<button onClick={onClose} className="text-gray-600 hover:text-red-400"></button>
</div>
</div>
{frameUrl ? (
<img src={frameUrl} alt={cam.label} className="w-full aspect-video object-cover block" />
) : (
<div className="w-full aspect-video flex items-center justify-center text-gray-800 text-xs">
no signal
</div>
)}
</div>
);
}
// ── Camera selector strip ──────────────────────────────────────────────────────
/**
 * Horizontal strip of camera-select buttons, one per camera.
 *
 * Each button shows the camera label and its live fps (green at >=12fps,
 * amber when receiving slowly, grey "no signal" at 0). Non-active
 * cameras additionally get a small corner toggle to pin/unpin them as a
 * picture-in-picture window.
 *
 * @param {Array} cameras camera descriptors ({ id, label, ... }) from useCamera
 * @param {string} activeId id of the camera shown in the main viewer
 * @param {string[]} pipList ids currently pinned as PiP
 * @param {Object} frames latest frame URL per camera id (truthy = has signal)
 * @param {Object} fps measured fps per camera id
 * @param {function} onSelect called with a camera id to make it active
 * @param {function} onTogglePip called with a camera id to pin/unpin PiP
 */
function CameraStrip({ cameras, activeId, pipList, frames, fps, onSelect, onTogglePip }) {
return (
<div className="flex gap-1.5 flex-wrap">
{cameras.map(cam => {
// Per-camera derived state for styling below.
const hasFrame = !!frames[cam.id];
const camFps = fps[cam.id] ?? 0;
const isActive = activeId === cam.id;
const isPip = pipList.includes(cam.id);
return (
<div key={cam.id} className="relative">
<button
onClick={() => onSelect(cam.id)}
className={`flex flex-col items-start rounded border px-2.5 py-1.5 text-xs font-bold transition-colors ${
isActive
? 'border-cyan-500 bg-cyan-950 bg-opacity-50 text-cyan-300'
: hasFrame
? 'border-gray-700 bg-gray-900 text-gray-400 hover:border-cyan-800 hover:text-gray-200'
: 'border-gray-800 bg-gray-950 text-gray-700 hover:border-gray-700'
}`}
>
<span>{cam.label.toUpperCase()}</span>
<span className={`text-xs font-normal mt-0.5 ${
camFps >= 12 ? 'text-green-600' :
camFps > 0 ? 'text-amber-600' :
'text-gray-700'
}`}>
{camFps > 0 ? `${camFps}fps` : 'no signal'}
</span>
</button>
{/* PiP pin button — only when NOT the active camera */}
{!isActive && (
<button
onClick={() => onTogglePip(cam.id)}
title={isPip ? 'Unpin PiP' : 'Pin PiP'}
className={`absolute -top-1.5 -right-1.5 w-4 h-4 rounded-full text-[9px] flex items-center justify-center border transition-colors ${
isPip
? 'bg-cyan-600 border-cyan-400 text-white'
: 'bg-gray-800 border-gray-700 text-gray-600 hover:border-cyan-700 hover:text-cyan-500'
}`}
>
{isPip ? '×' : '⊕'}
</button>
)}
</div>
);
})}
</div>
);
}
// ── Recording bar ──────────────────────────────────────────────────────────────
/**
 * Recording / snapshot toolbar: REC toggle with elapsed time plus a
 * one-click PNG snapshot button.
 *
 * @param {boolean} recording whether a recording is in progress
 * @param {number} recSeconds elapsed recording time in whole seconds
 * @param {function} onStart start recording (from useCamera)
 * @param {function} onStop stop recording and trigger download
 * @param {function} onSnapshot takes the overlay <canvas> (or null) so the
 *   detection overlay can be composited into the snapshot PNG
 * @param {Object} overlayRef ref to the overlay canvas element
 */
function RecordingBar({ recording, recSeconds, onStart, onStop, onSnapshot, overlayRef }) {
// Format whole seconds as "mm:ss" for the elapsed-time readouts.
const fmtTime = s => `${String(Math.floor(s / 60)).padStart(2, '0')}:${String(s % 60).padStart(2, '0')}`;
return (
<div className="flex items-center gap-2 flex-wrap">
{!recording ? (
<button
onClick={onStart}
className="flex items-center gap-1.5 px-3 py-1.5 rounded border border-red-900 bg-red-950 text-red-400 hover:bg-red-900 text-xs font-bold transition-colors"
>
<span className="w-2 h-2 rounded-full bg-red-500" />
REC
</button>
) : (
<button
onClick={onStop}
className="flex items-center gap-1.5 px-3 py-1.5 rounded border border-red-600 bg-red-900 text-red-300 hover:bg-red-800 text-xs font-bold animate-pulse"
>
<span className="w-2 h-2 rounded bg-red-400" />
STOP {fmtTime(recSeconds)}
</button>
)}
<button
onClick={() => onSnapshot(overlayRef?.current)}
className="flex items-center gap-1 px-3 py-1.5 rounded border border-gray-700 bg-gray-900 text-gray-400 hover:border-cyan-700 hover:text-cyan-400 text-xs font-bold transition-colors"
>
📷 SNAP
</button>
{recording && (
<span className="text-xs text-red-500 animate-pulse font-mono">
RECORDING {fmtTime(recSeconds)}
</span>
)}
</div>
);
}
// ── Main component ─────────────────────────────────────────────────────────────
/**
 * CameraViewer — top-level camera tab (Issue #177).
 *
 * Composes: the camera-select strip, the main viewer (image or panorama
 * plus detection-overlay canvas and PiP windows), recording/snapshot
 * controls, a live detection status grid, and a legend. Stream state
 * (frames, fps, recording, PiP) lives in the useCamera hook; this
 * component adds the detection-topic subscriptions and the overlay-mode
 * UI on top.
 *
 * @param {function|null} subscribe rosbridge subscribe(topic, type, cb)
 *   returning an unsubscribe function; may be null while disconnected.
 */
export function CameraViewer({ subscribe }) {
const {
cameras, frames, fps,
activeId, setActiveId,
pipList, togglePip,
recording, recSeconds,
startRecording, stopRecording,
takeSnapshot,
} = useCamera({ subscribe });
// ── Detection state ─────────────────────────────────────────────────────────
// Latest detection arrays, replaced wholesale on every message.
const [faces, setFaces] = useState([]);
const [gestures, setGestures] = useState([]);
const [sceneObjects, setSceneObjects] = useState([]);
// NOTE(review): showOverlay/setShowOverlay are never read anywhere in
// this component — overlayMode ('off') already covers hiding. Candidate
// for removal.
const [showOverlay, setShowOverlay] = useState(true);
const [overlayMode, setOverlayMode] = useState('all'); // 'all' | 'faces' | 'gestures' | 'objects' | 'off'
const overlayCanvasRef = useRef(null);
// Subscribe to detection topics; each subscribe() returns an
// unsubscribe function, all torn down together on cleanup.
useEffect(() => {
if (!subscribe) return;
const u1 = subscribe('/social/faces/detections', 'saltybot_social_msgs/FaceDetectionArray', msg => {
setFaces(msg.faces ?? []);
});
const u2 = subscribe('/social/gestures', 'saltybot_social_msgs/GestureArray', msg => {
setGestures(msg.gestures ?? []);
});
const u3 = subscribe('/social/scene/objects', 'saltybot_scene_msgs/SceneObjectArray', msg => {
setSceneObjects(msg.objects ?? []);
});
return () => { u1?.(); u2?.(); u3?.(); };
}, [subscribe]);
const activeCam = CAMERA_BY_ID[activeId];
const activeFrame = frames[activeId];
// Filter overlay data based on mode
const visibleFaces = (overlayMode === 'all' || overlayMode === 'faces') ? faces : [];
const visibleGestures = (overlayMode === 'all' || overlayMode === 'gestures') ? gestures : [];
const visibleObjects = (overlayMode === 'all' || overlayMode === 'objects') ? sceneObjects : [];
// ── Container size tracking (for overlay canvas sizing) ────────────────────
// Keeps the overlay canvas backing-store size matched to the rendered
// image so detection boxes line up with pixels on screen.
const containerRef = useRef(null);
const [containerSize, setContainerSize] = useState({ w: 640, h: 480 });
useEffect(() => {
if (!containerRef.current) return;
const ro = new ResizeObserver(entries => {
const e = entries[0];
setContainerSize({ w: Math.round(e.contentRect.width), h: Math.round(e.contentRect.height) });
});
ro.observe(containerRef.current);
return () => ro.disconnect();
}, []);
// ── Quality badge ──────────────────────────────────────────────────────────
// Thresholds sit just under the client-side 15fps frame gate: >=13 is
// effectively full rate, 8–12 degraded, 1–7 poor, 0 no signal.
const camFps = fps[activeId] ?? 0;
const quality = camFps >= 13 ? 'FULL' : camFps >= 8 ? 'GOOD' : camFps > 0 ? 'LOW' : 'NO SIGNAL';
const qualColor = camFps >= 13 ? 'text-green-500' : camFps >= 8 ? 'text-amber-500' : camFps > 0 ? 'text-red-500' : 'text-gray-700';
return (
<div className="space-y-3">
{/* ── Camera strip ── */}
<div className="bg-gray-950 rounded-lg border border-cyan-950 p-3 space-y-2">
<div className="flex items-center justify-between">
<div className="text-cyan-700 text-xs font-bold tracking-widest">CAMERA SELECT</div>
<span className={`text-xs font-bold ${qualColor}`}>{quality} {camFps > 0 ? `${camFps}fps` : ''}</span>
</div>
<CameraStrip
cameras={cameras}
activeId={activeId}
pipList={pipList}
frames={frames}
fps={fps}
onSelect={setActiveId}
onTogglePip={togglePip}
/>
</div>
{/* ── Main viewer ── */}
<div className="bg-gray-950 rounded-lg border border-cyan-950 overflow-hidden">
{/* Viewer toolbar */}
<div className="flex items-center justify-between px-3 py-2 border-b border-cyan-950">
<div className="flex items-center gap-2">
<span className="text-cyan-400 text-xs font-bold">{activeCam?.label ?? '—'}</span>
{activeCam?.isDepth && (
<span className="text-xs text-gray-600 border border-gray-800 rounded px-1">DEPTH · greyscale</span>
)}
{activeCam?.isPanoramic && (
<span className="text-xs text-gray-600 border border-gray-800 rounded px-1">360° · drag to pan</span>
)}
</div>
{/* Overlay mode selector */}
<div className="flex items-center gap-1">
{['off','faces','gestures','objects','all'].map(mode => (
<button
key={mode}
onClick={() => setOverlayMode(mode)}
className={`px-2 py-0.5 rounded text-xs border transition-colors ${
overlayMode === mode
? 'border-cyan-600 bg-cyan-950 text-cyan-400'
: 'border-gray-800 text-gray-600 hover:border-gray-700 hover:text-gray-400'
}`}
>
{mode === 'all' ? 'ALL' : mode === 'off' ? 'OFF' : mode.slice(0,3).toUpperCase()}
</button>
))}
</div>
</div>
{/* Image + overlay */}
<div className="relative" ref={containerRef}>
{activeCam?.isPanoramic ? (
<PanoViewer frameUrl={activeFrame} />
) : activeFrame ? (
<img
src={activeFrame}
alt={activeCam?.label ?? 'camera'}
className="w-full object-contain block bg-black"
style={{ maxHeight: '480px' }}
/>
) : (
<div className="w-full bg-black flex items-center justify-center text-gray-800 text-sm font-mono"
style={{ height: '360px' }}>
<div className="text-center space-y-2">
<div className="text-2xl">📷</div>
<div>Waiting for {activeCam?.label ?? '—'}</div>
<div className="text-xs text-gray-700">{activeCam?.topic}</div>
</div>
</div>
)}
{/* Detection overlay canvas */}
{overlayMode !== 'off' && !activeCam?.isPanoramic && (
// NOTE(review): this ref only attaches if OverlayCanvas forwards it
// to its <canvas> (plain function components drop `ref`) — confirm,
// otherwise snapshot overlay compositing receives null.
<OverlayCanvas
ref={overlayCanvasRef}
faces={visibleFaces}
gestures={visibleGestures}
sceneObjects={visibleObjects}
activeCam={activeCam}
containerW={containerSize.w}
containerH={containerSize.h}
/>
)}
{/* PiP windows */}
{pipList.map((id, idx) => {
const cam = CAMERA_BY_ID[id];
if (!cam) return null;
return (
<PiPWindow
key={id}
cam={cam}
frameUrl={frames[id]}
fps={fps[id] ?? 0}
index={idx}
onClose={() => togglePip(id)}
/>
);
})}
</div>
</div>
{/* ── Recording controls ── */}
<div className="bg-gray-950 rounded-lg border border-cyan-950 p-3">
<div className="flex items-center justify-between mb-2">
<div className="text-cyan-700 text-xs font-bold tracking-widest">CAPTURE</div>
</div>
<RecordingBar
recording={recording}
recSeconds={recSeconds}
onStart={startRecording}
onStop={stopRecording}
onSnapshot={takeSnapshot}
overlayRef={overlayCanvasRef}
/>
<div className="mt-2 text-xs text-gray-700">
Recording saves as MP4/WebM to your Downloads.
Snapshot includes detection overlay + timestamp.
</div>
</div>
{/* ── Detection status ── */}
<div className="grid grid-cols-3 gap-2 text-xs">
<div className="bg-gray-950 rounded border border-gray-800 p-2">
<div className="text-gray-600 mb-1">FACES</div>
<div className={`font-bold ${faces.length > 0 ? 'text-cyan-400' : 'text-gray-700'}`}>
{faces.length > 0 ? `${faces.length} detected` : 'none'}
</div>
{faces.slice(0, 2).map((f, i) => (
<div key={i} className="text-gray-600 truncate">
{f.person_name && f.person_name !== 'unknown'
? `${f.person_name}`
: `↳ unknown #${f.face_id}`}
</div>
))}
</div>
<div className="bg-gray-950 rounded border border-gray-800 p-2">
<div className="text-gray-600 mb-1">GESTURES</div>
<div className={`font-bold ${gestures.length > 0 ? 'text-amber-400' : 'text-gray-700'}`}>
{gestures.length > 0 ? `${gestures.length} active` : 'none'}
</div>
{gestures.slice(0, 2).map((g, i) => {
const icon = GESTURE_ICONS[g.gesture_type] ?? '?';
return (
<div key={i} className="text-gray-600 truncate">
{icon} {g.gesture_type} cam{g.camera_id}
</div>
);
})}
</div>
<div className="bg-gray-950 rounded border border-gray-800 p-2">
<div className="text-gray-600 mb-1">OBJECTS</div>
<div className={`font-bold ${sceneObjects.length > 0 ? 'text-green-400' : 'text-gray-700'}`}>
{sceneObjects.length > 0 ? `${sceneObjects.length} objects` : 'none'}
</div>
{sceneObjects
// NOTE(review): the hazard row below begins with a bare space before
// class_name — possibly a stripped warning glyph (see the file's
// ambiguous-Unicode banner); confirm against the original commit.
.filter(o => o.hazard_type > 0)
.slice(0, 2)
.map((o, i) => (
<div key={i} className="text-amber-700 truncate"> {o.class_name}</div>
))
}
{sceneObjects.filter(o => o.hazard_type === 0).slice(0, 2).map((o, i) => (
<div key={`ok${i}`} className="text-gray-600 truncate">
{o.class_name} {o.distance_m > 0 ? `${o.distance_m.toFixed(1)}m` : ''}
</div>
))}
</div>
</div>
{/* ── Legend ── */}
<div className="flex gap-4 text-xs text-gray-700 flex-wrap">
<div className="flex items-center gap-1">
<div className="w-3 h-3 rounded-sm border border-cyan-600" />
Known face
</div>
<div className="flex items-center gap-1">
<div className="w-3 h-3 rounded-sm border border-amber-600" />
Unknown face
</div>
<div className="flex items-center gap-1">
<span>👆</span> Gesture
</div>
<div className="flex items-center gap-1">
<div className="w-3 h-3 rounded-sm border border-green-700 border-dashed" />
Object
</div>
<div className="flex items-center gap-1">
<div className="w-3 h-3 rounded-sm border border-amber-600 border-dashed" />
Hazard
</div>
<div className="ml-auto text-gray-800 italic">
pin = PiP · overlay: {overlayMode}
</div>
</div>
</div>
);
}