Compare commits

..

1 Commits

Author SHA1 Message Date
e1813f734f feat(social): trigger-based ROS2 bag recorder (Issue #332)
Some checks failed
social-bot integration tests / Lint (flake8 + pep257) (push) Failing after 9s
social-bot integration tests / Core integration tests (mock sensors, no GPU) (push) Has been skipped
social-bot integration tests / Lint (flake8 + pep257) (pull_request) Failing after 9s
social-bot integration tests / Core integration tests (mock sensors, no GPU) (pull_request) Has been skipped
social-bot integration tests / Latency profiling (GPU, Orin) (push) Has been cancelled
social-bot integration tests / Latency profiling (GPU, Orin) (pull_request) Has been cancelled
BagRecorderNode: subscribes /saltybot/record_trigger (Bool), spawns
ros2 bag record subprocess, drives idle/recording/stopping/error
state machine; auto-stop timeout, SIGINT graceful shutdown with
SIGKILL fallback. Publishes /saltybot/recording_status (String).
Configurable topics (csv), bag_dir, prefix, compression, size limit.
Subprocess injectable for offline testing. 101/101 tests pass.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-03 06:51:46 -05:00
37 changed files with 10 additions and 5013 deletions

View File

@ -1,375 +0,0 @@
"""
_obstacle_velocity.py Dynamic obstacle velocity estimation via Kalman
filtering of LIDAR cluster centroids (no ROS2 deps).
Algorithm
---------
Each detected cluster centroid is tracked by a constant-velocity 2-D Kalman
filter:
State : [x, y, vx, vy]^T
Obs : [x, y]^T (centroid position)
Predict : x_pred = F(dt) @ x; P_pred = F @ P @ F^T + Q(dt)
Update : S = H @ P_pred @ H^T + R
K = P_pred @ H^T @ inv(S)
x = x_pred + K @ (z - H @ x_pred)
P = (I - K @ H) @ P_pred
Process noise Q uses the white-noise-acceleration approximation:
Q = diag([q_pos·dt², q_pos·dt², q_vel·dt, q_vel·dt])
Data association uses a greedy nearest-centroid approach: scan all
(track, cluster) pairs in order of Euclidean distance and accept matches
below `max_association_dist_m`, one-to-one.
Track lifecycle
---------------
created : first appearance of a cluster with no nearby track
confident : after `n_init_frames` consecutive updates (confidence=1.0)
coasting : cluster not matched for up to `max_coasting_frames` calls
deleted : coasting count exceeds `max_coasting_frames`
Public API
----------
KalmanTrack one tracked obstacle
.predict(dt) advance state by dt seconds
.update(centroid, width, depth, n) correct with new measurement
.position np.ndarray (2,)
.velocity np.ndarray (2,)
.speed float (m/s)
.confidence float 01
.alive bool
associate(positions, centroids, max_dist)
(matches, unmatched_t, unmatched_c)
ObstacleTracker
.update(centroids, timestamp, widths, depths, point_counts)
List[KalmanTrack]
.tracks Dict[int, KalmanTrack]
"""
from __future__ import annotations
from typing import Dict, List, Optional, Tuple
import numpy as np
# ── Kalman filter constants ───────────────────────────────────────────────────
_H = np.array([[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0]], dtype=np.float64)
_I4 = np.eye(4, dtype=np.float64)
# ── KalmanTrack ───────────────────────────────────────────────────────────────
class KalmanTrack:
    """
    Constant-velocity 2-D Kalman filter for one LIDAR cluster centroid.

    Parameters
    ----------
    track_id      : unique integer ID
    centroid      : initial (x, y) position in metres
    q_pos         : position process-noise density
    q_vel         : velocity process-noise density
    r_pos         : measurement noise std-dev (metres)
    n_init_frames : consecutive updates needed for confidence=1
    max_coasting  : missed updates before track is deleted
    """

    # Observation matrix (position components only) and 4×4 identity.
    # Kept on the class so the filter is self-contained.
    _H = np.array([[1.0, 0.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0, 0.0]], dtype=np.float64)
    _I4 = np.eye(4, dtype=np.float64)

    def __init__(
        self,
        track_id: int,
        centroid: np.ndarray,
        q_pos: float = 0.05,
        q_vel: float = 0.50,
        r_pos: float = 0.10,
        n_init_frames: int = 3,
        max_coasting: int = 5,
    ) -> None:
        self._id = track_id
        self._n_init = max(n_init_frames, 1)  # guard div-by-zero in confidence
        self._max_coast = max_coasting
        self._q_pos = float(q_pos)
        self._q_vel = float(q_vel)
        self._R = np.diag([r_pos ** 2, r_pos ** 2])
        self._age = 0        # successful updates
        self._coasting = 0   # consecutive missed updates
        self._alive = True
        # State: [x, y, vx, vy]^T — initial velocity is zero
        self._x = np.array(
            [float(centroid[0]), float(centroid[1]), 0.0, 0.0],
            dtype=np.float64,
        )
        # High initial velocity uncertainty, low position uncertainty
        self._P = np.diag([r_pos ** 2, r_pos ** 2, 10.0, 10.0])
        # Last-known cluster metadata (stored, not filtered)
        self.last_width: float = 0.0
        self.last_depth: float = 0.0
        self.last_point_count: int = 0

    # ── Properties ────────────────────────────────────────────────────────────
    @property
    def track_id(self) -> int:
        return self._id

    @property
    def position(self) -> np.ndarray:
        """Filtered (x, y) position estimate, metres."""
        return self._x[:2].copy()

    @property
    def velocity(self) -> np.ndarray:
        """Filtered (vx, vy) velocity estimate, m/s."""
        return self._x[2:].copy()

    @property
    def speed(self) -> float:
        """Speed magnitude, m/s."""
        return float(np.linalg.norm(self._x[2:]))

    @property
    def confidence(self) -> float:
        """Ramps from 0 to 1 over the first `n_init_frames` updates."""
        return min(1.0, self._age / self._n_init)

    @property
    def alive(self) -> bool:
        return self._alive

    @property
    def age(self) -> int:
        return self._age

    @property
    def coasting(self) -> int:
        return self._coasting

    # ── Kalman steps ──────────────────────────────────────────────────────────
    def predict(self, dt: float) -> None:
        """
        Advance the filter state by `dt` seconds (constant-velocity model).

        dt must be >= 0; negative values are clamped to 0.

        Increments the coasting counter; marks the track dead when the
        counter exceeds max_coasting_frames.
        """
        dt = max(0.0, float(dt))
        F = np.array([
            [1.0, 0.0, dt, 0.0],
            [0.0, 1.0, 0.0, dt],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ], dtype=np.float64)
        # White-noise-acceleration approximation of the process noise.
        Q = np.diag([
            self._q_pos * dt * dt,
            self._q_pos * dt * dt,
            self._q_vel * dt,
            self._q_vel * dt,
        ])
        self._x = F @ self._x
        self._P = F @ self._P @ F.T + Q
        self._coasting += 1
        if self._coasting > self._max_coast:
            self._alive = False

    def update(
        self,
        centroid: np.ndarray,
        width: float = 0.0,
        depth: float = 0.0,
        point_count: int = 0,
    ) -> None:
        """
        Correct state with a new centroid measurement.

        Resets the coasting counter and increments the age counter.
        """
        z = np.asarray(centroid, dtype=np.float64)[:2]
        S = self._H @ self._P @ self._H.T + self._R   # innovation covariance
        # Kalman gain K = P Hᵀ S⁻¹, computed via a linear solve instead of an
        # explicit matrix inverse for numerical stability (S and P symmetric).
        K = np.linalg.solve(S, self._H @ self._P).T
        self._x = self._x + K @ (z - self._H @ self._x)
        self._P = (self._I4 - K @ self._H) @ self._P
        self._age += 1
        self._coasting = 0
        self.last_width = float(width)
        self.last_depth = float(depth)
        self.last_point_count = int(point_count)
# ── Data association ──────────────────────────────────────────────────────────
def associate(
    positions: np.ndarray,   # (N, 2) predicted track positions
    centroids: np.ndarray,   # (M, 2) new cluster centroids
    max_dist: float,
) -> Tuple[List[Tuple[int, int]], List[int], List[int]]:
    """
    Greedy nearest-centroid data association.

    Repeatedly picks the globally closest (track, cluster) pair whose
    distance is strictly below `max_dist`, assigning each track and each
    cluster at most once.

    Returns
    -------
    matches            : list of (track_idx, cluster_idx) pairs
    unmatched_tracks   : track indices with no assigned cluster
    unmatched_clusters : cluster indices with no assigned track
    """
    n_tracks, n_clusters = len(positions), len(centroids)
    if n_tracks == 0 or n_clusters == 0:
        return [], list(range(n_tracks)), list(range(n_clusters))

    # Pairwise Euclidean distances, shape (n_tracks, n_clusters).
    delta = positions[:, None, :] - centroids[None, :, :]
    dist = np.sqrt((delta * delta).sum(axis=2)).astype(np.float64)

    pairs: List[Tuple[int, int]] = []
    free_tracks = set(range(n_tracks))
    free_clusters = set(range(n_clusters))
    while free_tracks and free_clusters:
        flat = int(dist.argmin())
        t_idx, c_idx = divmod(flat, n_clusters)
        if dist[t_idx, c_idx] >= max_dist:
            break  # nothing close enough remains
        pairs.append((t_idx, c_idx))
        free_tracks.discard(t_idx)
        free_clusters.discard(c_idx)
        # Knock out the claimed row/column so neither is matched again.
        dist[t_idx, :] = np.inf
        dist[:, c_idx] = np.inf
    return pairs, sorted(free_tracks), sorted(free_clusters)
# ── ObstacleTracker ───────────────────────────────────────────────────────────
class ObstacleTracker:
    """
    Multi-obstacle Kalman tracker operating on LIDAR cluster centroids.

    Call `update()` once per LIDAR scan with the list of cluster centroids
    (and optional bbox / point_count metadata). Returns the current list
    of alive KalmanTrack objects.
    """

    def __init__(
        self,
        max_association_dist_m: float = 0.50,
        max_coasting_frames: int = 5,
        n_init_frames: int = 3,
        q_pos: float = 0.05,
        q_vel: float = 0.50,
        r_pos: float = 0.10,
    ) -> None:
        self._max_dist = float(max_association_dist_m)
        self._max_coast = int(max_coasting_frames)
        self._n_init = int(n_init_frames)
        self._q_pos = float(q_pos)
        self._q_vel = float(q_vel)
        self._r_pos = float(r_pos)
        self._tracks: Dict[int, KalmanTrack] = {}
        self._next_id: int = 1
        self._last_t: Optional[float] = None

    @property
    def tracks(self) -> Dict[int, KalmanTrack]:
        return self._tracks

    def update(
        self,
        centroids: List[np.ndarray],
        timestamp: float,
        widths: Optional[List[float]] = None,
        depths: Optional[List[float]] = None,
        point_counts: Optional[List[int]] = None,
    ) -> List[KalmanTrack]:
        """
        Process one frame of cluster centroids.

        Parameters
        ----------
        centroids    : list of (2,) arrays — cluster centroid positions
        timestamp    : wall-clock time in seconds (time.time())
        widths       : optional per-cluster bbox widths (metres)
        depths       : optional per-cluster bbox depths (metres)
        point_counts : optional per-cluster LIDAR point counts

        Returns
        -------
        List of all alive KalmanTrack objects after this update.
        """
        # Elapsed time since the previous scan (0 on the very first call,
        # and clamped so a backwards clock step cannot run the filter in reverse).
        if self._last_t is None:
            dt = 0.0
        else:
            dt = max(0.0, timestamp - self._last_t)
        self._last_t = timestamp

        # 1. Predict every existing track forward by dt.
        for trk in self._tracks.values():
            trk.predict(dt)

        # 2. Gather association inputs (alive tracks only).
        live = [trk for trk in self._tracks.values() if trk.alive]
        pred_pos = (
            np.array([trk.position for trk in live])
            if live else np.empty((0, 2), dtype=np.float64)
        )
        cent_arr = (
            np.array([c[:2] for c in centroids], dtype=np.float64)
            if centroids else np.empty((0, 2), dtype=np.float64)
        )

        # 3. Match live tracks against the new clusters.
        matches, _, new_clusters = associate(pred_pos, cent_arr, self._max_dist)

        def _meta(idx):
            # Per-cluster metadata, defaulting when the caller omitted a list.
            return (
                widths[idx] if widths else 0.0,
                depths[idx] if depths else 0.0,
                point_counts[idx] if point_counts else 0,
            )

        # 4. Correct matched tracks with their measurements.
        for t_idx, c_idx in matches:
            w, d, n = _meta(c_idx)
            live[t_idx].update(cent_arr[c_idx], width=w, depth=d, point_count=n)

        # 5. Spawn a fresh track for every unmatched cluster.
        for c_idx in new_clusters:
            w, d, n = _meta(c_idx)
            trk = KalmanTrack(
                track_id=self._next_id,
                centroid=cent_arr[c_idx],
                q_pos=self._q_pos,
                q_vel=self._q_vel,
                r_pos=self._r_pos,
                n_init_frames=self._n_init,
                max_coasting=self._max_coast,
            )
            trk.update(cent_arr[c_idx], width=w, depth=d, point_count=n)
            self._tracks[self._next_id] = trk
            self._next_id += 1

        # 6. Drop tracks that have coasted for too long.
        self._tracks = {tid: trk for tid, trk in self._tracks.items() if trk.alive}
        return list(self._tracks.values())

View File

@ -1,379 +0,0 @@
"""
_path_edges.py Lane/path edge detection via Canny + Hough + bird-eye
perspective transform (no ROS2 deps).
Algorithm
---------
1. Crop the bottom `roi_frac` of the BGR image to an ROI.
2. Convert ROI to grayscale Gaussian blur Canny edge map.
3. Run probabilistic Hough (HoughLinesP) on the edge map to find
line segments.
4. Filter segments by slope: near-horizontal lines (|slope| < min_slope)
are discarded as ground noise; the remaining lines are classified as
left edges (negative slope) or right edges (positive slope).
5. Average each class into a single dominant edge line and extrapolate it
to span the full ROI height.
6. Apply a bird-eye perspective homography (computed from a configurable
source trapezoid in the ROI) to warp all segment endpoints to a
top-down view.
7. Return a PathEdgesResult with all data.
Coordinate conventions
-----------------------
All pixel coordinates in PathEdgesResult are in the ROI frame:
origin = top-left of the bottom-half ROI
y = 0 roi_top of the full image
y increases downward; x increases rightward.
Bird-eye homography
-------------------
The homography H is computed via cv2.getPerspectiveTransform from four
source points (fractional ROI coords) to four destination points in a
square output image of size `birdseye_size`. Default source trapezoid
assumes a forward-looking camera at ~3545° tilt above a flat surface.
Public API
----------
PathEdgeConfig dataclass all tunable parameters with sensible defaults
PathEdgesResult NamedTuple all outputs of process_frame()
build_homography(src_frac, roi_w, roi_h, birdseye_size)
3×3 float64 homography
apply_homography(H, points_xy) np.ndarray (N, 2) warped points
canny_edges(bgr_roi, low, high, ksize) uint8 edge map
hough_lines(edge_map, threshold, min_len, max_gap) List[(x1,y1,x2,y2)]
classify_lines(lines, min_slope) (left, right)
average_line(lines, roi_height) Optional[(x1,y1,x2,y2)]
warp_segments(lines, H) List[(bx1,by1,bx2,by2)]
process_frame(bgr, cfg) PathEdgesResult
"""
from __future__ import annotations
import math
from dataclasses import dataclass, field
from typing import List, NamedTuple, Optional, Tuple
import cv2
import numpy as np
# ── Config ────────────────────────────────────────────────────────────────────
@dataclass
class PathEdgeConfig:
    """Tunable parameters for the path-edge pipeline; defaults are sensible."""

    # ROI
    roi_frac: float = 0.50          # bottom fraction of image used as ROI
    # Preprocessing
    blur_ksize: int = 5             # Gaussian blur kernel (odd integer)
    canny_low: int = 50             # Canny low threshold
    canny_high: int = 150           # Canny high threshold
    # Hough
    hough_rho: float = 1.0          # distance resolution (px)
    hough_theta: float = math.pi / 180.0   # angle resolution (rad)
    hough_threshold: int = 30       # minimum votes
    min_line_len: int = 40          # minimum segment length (px)
    max_line_gap: int = 20          # maximum gap within a segment (px)
    # Line classification
    min_slope: float = 0.3          # |slope| below this → discard (near-horizontal)
    # Bird-eye perspective — source trapezoid as fractions of (roi_w, roi_h).
    # Default: wide trapezoid for a forward-looking camera at ~40° tilt.
    birdseye_src: List[List[float]] = field(default_factory=lambda: [
        [0.40, 0.05],   # top-left (near horizon)
        [0.60, 0.05],   # top-right (near horizon)
        [0.95, 0.95],   # bottom-right (near robot)
        [0.05, 0.95],   # bottom-left (near robot)
    ])
    birdseye_size: int = 400        # square bird-eye output image side (px)
# ── Result ────────────────────────────────────────────────────────────────────
class PathEdgesResult(NamedTuple):
    """All outputs of process_frame(); pixel coordinates are in the ROI frame."""

    lines: List[Tuple[float, float, float, float]]            # all Hough segments (ROI coords)
    left_lines: List[Tuple[float, float, float, float]]       # negative-slope candidates
    right_lines: List[Tuple[float, float, float, float]]      # positive-slope candidates
    left_edge: Optional[Tuple[float, float, float, float]]    # averaged left line; None if absent
    right_edge: Optional[Tuple[float, float, float, float]]   # averaged right line; None if absent
    birdseye_lines: List[Tuple[float, float, float, float]]   # all segments, bird-eye frame
    birdseye_left: Optional[Tuple[float, float, float, float]]    # warped left edge, if any
    birdseye_right: Optional[Tuple[float, float, float, float]]   # warped right edge, if any
    H: np.ndarray    # 3×3 homography (ROI → bird-eye)
    roi_top: int     # y-offset of ROI in the full image
# ── Homography ────────────────────────────────────────────────────────────────
def build_homography(
    src_frac: List[List[float]],
    roi_w: int,
    roi_h: int,
    birdseye_size: int,
) -> np.ndarray:
    """
    Compute a perspective homography from ROI pixel coords to a square
    bird-eye image.

    Parameters
    ----------
    src_frac      : four [fx, fy] pairs as fractions of (roi_w, roi_h)
    roi_w, roi_h  : ROI dimensions in pixels
    birdseye_size : side length of the square output image

    Returns
    -------
    H : (3, 3) float64 homography matrix; maps ROI px → bird-eye px.
    """
    # Scale the fractional trapezoid corners up to ROI pixel coordinates.
    src_pts = np.array(
        [[fx * roi_w, fy * roi_h] for fx, fy in src_frac],
        dtype=np.float32,
    )
    side = float(birdseye_size)
    # Destination: a centred vertical band spanning the middle half in x.
    dst_pts = np.array([
        [side * 0.25, 0.0],
        [side * 0.75, 0.0],
        [side * 0.75, side],
        [side * 0.25, side],
    ], dtype=np.float32)
    return cv2.getPerspectiveTransform(src_pts, dst_pts).astype(np.float64)
def apply_homography(H: np.ndarray, points_xy: np.ndarray) -> np.ndarray:
    """
    Apply a 3×3 homography to an (N, 2) float array of 2-D points.

    Returns
    -------
    (N, 2) float32 array of transformed points.
    """
    n_pts = len(points_xy)
    if n_pts == 0:
        return np.empty((0, 2), dtype=np.float32)
    # Lift to homogeneous coordinates, transform, then divide by w.
    homog = np.column_stack([points_xy, np.ones(n_pts)])   # (N, 3)
    projected = (H @ homog.T).T                            # (N, 3)
    projected = projected / projected[:, 2:3]
    return projected[:, :2].astype(np.float32)
# ── Edge detection ────────────────────────────────────────────────────────────
def canny_edges(bgr_roi: np.ndarray, low: int, high: int, ksize: int) -> np.ndarray:
    """
    Convert a BGR ROI to a Canny edge map.

    Parameters
    ----------
    bgr_roi : uint8 BGR image (the bottom-half ROI)
    low     : Canny lower threshold
    high    : Canny upper threshold
    ksize   : Gaussian blur kernel size (must be odd and >= 3 to take effect)

    Returns
    -------
    uint8 binary edge map (0 or 255), same spatial size as bgr_roi.
    """
    img = cv2.cvtColor(bgr_roi, cv2.COLOR_BGR2GRAY)
    # Smooth only for a valid odd kernel size; otherwise skip the blur step.
    blur_ok = ksize >= 3 and ksize % 2 == 1
    if blur_ok:
        img = cv2.GaussianBlur(img, (ksize, ksize), 0)
    return cv2.Canny(img, low, high)
# ── Hough lines ───────────────────────────────────────────────────────────────
def hough_lines(
    edge_map: np.ndarray,
    threshold: int = 30,
    min_len: int = 40,
    max_gap: int = 20,
    rho: float = 1.0,
    theta: float = math.pi / 180.0,
) -> List[Tuple[float, float, float, float]]:
    """
    Run probabilistic Hough on an edge map.

    Returns
    -------
    List of (x1, y1, x2, y2) tuples in the edge_map pixel frame.
    Empty list when no lines are found.
    """
    detected = cv2.HoughLinesP(
        edge_map,
        rho=rho,
        theta=theta,
        threshold=threshold,
        minLineLength=min_len,
        maxLineGap=max_gap,
    )
    # HoughLinesP returns None (not an empty array) when nothing is found.
    if detected is None:
        return []
    segments: List[Tuple[float, float, float, float]] = []
    for ax, ay, bx, by in detected[:, 0]:
        segments.append((float(ax), float(ay), float(bx), float(by)))
    return segments
# ── Line classification ───────────────────────────────────────────────────────
def classify_lines(
    lines: List[Tuple[float, float, float, float]],
    min_slope: float = 0.3,
) -> Tuple[List, List]:
    """
    Classify Hough segments into left-edge and right-edge candidates.

    In image coordinates (y increases downward):
      - left lane lines have NEGATIVE slope (upper-right to lower-left)
      - right lane lines have POSITIVE slope (upper-left to lower-right)
      - near-horizontal segments (|slope| < min_slope) are discarded

    Returns
    -------
    (left_lines, right_lines)
    """
    left_candidates: List = []
    right_candidates: List = []
    for x1, y1, x2, y2 in lines:
        run = x2 - x1
        # Near-vertical segment: slope undefined, skip to avoid div-by-zero.
        if abs(run) < 1e-6:
            continue
        gradient = (y2 - y1) / run
        if gradient > min_slope:
            right_candidates.append((x1, y1, x2, y2))
        elif gradient < -min_slope:
            left_candidates.append((x1, y1, x2, y2))
    return left_candidates, right_candidates
# ── Line averaging ────────────────────────────────────────────────────────────
def average_line(
    lines: List[Tuple[float, float, float, float]],
    roi_height: int,
) -> Optional[Tuple[float, float, float, float]]:
    """
    Average a list of collinear Hough segments into one representative line,
    extrapolated to span the full ROI height (y=0 .. y=roi_height-1).

    Returns None if the list is empty or the averaged slope is near zero.
    """
    if not lines:
        return None
    # Collect (slope, intercept) for every segment with a finite slope.
    params: List[Tuple[float, float]] = []
    for x1, y1, x2, y2 in lines:
        run = x2 - x1
        if abs(run) < 1e-6:
            continue  # vertical segment: no finite slope/intercept
        slope = (y2 - y1) / run
        params.append((slope, y1 - slope * x1))
    if not params:
        return None
    m_avg = float(np.mean([p[0] for p in params]))
    b_avg = float(np.mean([p[1] for p in params]))
    if abs(m_avg) < 1e-6:
        return None  # horizontal average: cannot solve x for a given y
    bottom_y = float(roi_height - 1)
    top_y = 0.0
    return ((bottom_y - b_avg) / m_avg, bottom_y,
            (top_y - b_avg) / m_avg, top_y)
# ── Bird-eye segment warping ──────────────────────────────────────────────────
def warp_segments(
    lines: List[Tuple[float, float, float, float]],
    H: np.ndarray,
) -> List[Tuple[float, float, float, float]]:
    """
    Warp a list of line-segment endpoints through the homography H.

    Returns
    -------
    List of (bx1, by1, bx2, by2) in bird-eye pixel coordinates.
    """
    if not lines:
        return []
    count = len(lines)
    # Stack all start points, then all end points, into one (2*count, 2)
    # array so the homography is applied in a single vectorised call.
    start_pts = [[sx, sy] for sx, sy, _, _ in lines]
    end_pts = [[ex, ey] for _, _, ex, ey in lines]
    warped = apply_homography(H, np.array(start_pts + end_pts, dtype=np.float32))
    return [
        (float(warped[k, 0]), float(warped[k, 1]),
         float(warped[k + count, 0]), float(warped[k + count, 1]))
        for k in range(count)
    ]
# ── Main entry point ──────────────────────────────────────────────────────────
def process_frame(bgr: np.ndarray, cfg: PathEdgeConfig) -> PathEdgesResult:
    """
    Run the full lane/path edge detection pipeline on one BGR frame.

    Parameters
    ----------
    bgr : uint8 BGR image (H x W x 3)
    cfg : PathEdgeConfig

    Returns
    -------
    PathEdgesResult with all detected edges in ROI + bird-eye coordinates.
    """
    full_h, full_w = bgr.shape[:2]
    roi_top = int(full_h * (1.0 - cfg.roi_frac))
    roi = bgr[roi_top:, :]
    roi_h, roi_w = roi.shape[:2]

    # Homography from ROI pixels to the square bird-eye image.
    H = build_homography(cfg.birdseye_src, roi_w, roi_h, cfg.birdseye_size)

    # Canny edge map, then probabilistic Hough segments (ROI coords).
    edge_map = canny_edges(roi, cfg.canny_low, cfg.canny_high, cfg.blur_ksize)
    segments = hough_lines(
        edge_map,
        threshold=cfg.hough_threshold,
        min_len=cfg.min_line_len,
        max_gap=cfg.max_line_gap,
        rho=cfg.hough_rho,
        theta=cfg.hough_theta,
    )

    # Split into left/right candidates and average each side.
    left_lines, right_lines = classify_lines(segments, cfg.min_slope)
    left_edge = average_line(left_lines, roi_h)
    right_edge = average_line(right_lines, roi_h)

    # Project everything into the bird-eye view.
    birdseye_lines = warp_segments(segments, H)
    birdseye_left = warp_segments([left_edge], H)[0] if left_edge else None
    birdseye_right = warp_segments([right_edge], H)[0] if right_edge else None

    return PathEdgesResult(
        lines=segments,
        left_lines=left_lines,
        right_lines=right_lines,
        left_edge=left_edge,
        right_edge=right_edge,
        birdseye_lines=birdseye_lines,
        birdseye_left=birdseye_left,
        birdseye_right=birdseye_right,
        H=H,
        roi_top=roi_top,
    )

View File

@ -1,172 +0,0 @@
"""
obstacle_velocity_node.py Dynamic obstacle velocity estimator (Issue #326).
Subscribes to the 2-D LIDAR scan, clusters points using the existing
gap-segmentation helper, tracks cluster centroids with per-obstacle
constant-velocity Kalman filters, and publishes estimated velocity
vectors on /saltybot/obstacle_velocities.
Subscribes:
/scan sensor_msgs/LaserScan (BEST_EFFORT)
Publishes:
/saltybot/obstacle_velocities saltybot_scene_msgs/ObstacleVelocityArray
Parameters
----------
distance_threshold_m float 0.20 Max gap between consecutive scan points (m)
min_points int 3 Minimum LIDAR points per valid cluster
range_min_m float 0.05 Discard ranges below this (m)
range_max_m float 12.0 Discard ranges above this (m)
max_association_dist_m float 0.50 Max centroid distance for track-cluster match
max_coasting_frames int 5 Missed frames before a track is deleted
n_init_frames int 3 Updates required to reach confidence=1.0
q_pos float 0.05 Process noise position (/)
q_vel float 0.50 Process noise velocity (/)
r_pos float 0.10 Measurement noise std-dev (m)
static_speed_threshold float 0.10 Speed (m/s) below which obstacle is static
"""
from __future__ import annotations
import time
from typing import List
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy
from sensor_msgs.msg import LaserScan
from saltybot_scene_msgs.msg import ObstacleVelocity, ObstacleVelocityArray
from ._lidar_clustering import scan_to_cartesian, cluster_points
from ._obstacle_velocity import ObstacleTracker
_SENSOR_QOS = QoSProfile(
reliability=ReliabilityPolicy.BEST_EFFORT,
history=HistoryPolicy.KEEP_LAST,
depth=4,
)
class ObstacleVelocityNode(Node):
    """Clusters LIDAR scans, tracks centroids, publishes obstacle velocities."""

    # (name, default) for every declared ROS parameter, in declaration order.
    _PARAM_DEFAULTS = (
        ('distance_threshold_m', 0.20),
        ('min_points', 3),
        ('range_min_m', 0.05),
        ('range_max_m', 12.0),
        ('max_association_dist_m', 0.50),
        ('max_coasting_frames', 5),
        ('n_init_frames', 3),
        ('q_pos', 0.05),
        ('q_vel', 0.50),
        ('r_pos', 0.10),
        ('static_speed_threshold', 0.10),
    )

    def __init__(self) -> None:
        super().__init__('obstacle_velocity_node')
        for name, default in self._PARAM_DEFAULTS:
            self.declare_parameter(name, default)

        def _param(name):
            return self.get_parameter(name).value

        self._dist_thresh = float(_param('distance_threshold_m'))
        self._min_pts = int(_param('min_points'))
        self._range_min = float(_param('range_min_m'))
        self._range_max = float(_param('range_max_m'))
        self._static_thresh = float(_param('static_speed_threshold'))
        # Pure-Python multi-object Kalman tracker (no ROS deps).
        self._tracker = ObstacleTracker(
            max_association_dist_m=float(_param('max_association_dist_m')),
            max_coasting_frames=int(_param('max_coasting_frames')),
            n_init_frames=int(_param('n_init_frames')),
            q_pos=float(_param('q_pos')),
            q_vel=float(_param('q_vel')),
            r_pos=float(_param('r_pos')),
        )
        self._sub = self.create_subscription(
            LaserScan, '/scan', self._on_scan, _SENSOR_QOS)
        self._pub = self.create_publisher(
            ObstacleVelocityArray, '/saltybot/obstacle_velocities', 10)
        self.get_logger().info(
            f'obstacle_velocity_node ready — '
            f'dist_thresh={self._dist_thresh}m '
            f'static_thresh={self._static_thresh}m/s'
        )

    # ── Scan callback ─────────────────────────────────────────────────────────
    def _on_scan(self, msg: LaserScan) -> None:
        """Cluster one scan, advance the tracker, publish velocity estimates."""
        xy_points = scan_to_cartesian(
            list(msg.ranges),
            msg.angle_min,
            msg.angle_increment,
            range_min=self._range_min,
            range_max=self._range_max,
        )
        clusters = cluster_points(
            xy_points,
            distance_threshold_m=self._dist_thresh,
            min_points=self._min_pts,
        )
        # Prefer the scan's own timestamp; fall back to wall clock when unset.
        if msg.header.stamp.sec > 0:
            stamp_secs = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9
        else:
            stamp_secs = time.time()
        alive_tracks = self._tracker.update(
            [c.centroid for c in clusters],
            timestamp=stamp_secs,
            widths=[c.width_m for c in clusters],
            depths=[c.depth_m for c in clusters],
            point_counts=[len(c.points) for c in clusters],
        )
        out = ObstacleVelocityArray()
        out.header.stamp = msg.header.stamp
        out.header.frame_id = msg.header.frame_id or 'laser'
        for trk in alive_tracks:
            out.obstacles.append(self._track_to_msg(trk, out.header))
        self._pub.publish(out)

    def _track_to_msg(self, track, header) -> ObstacleVelocity:
        """Convert one alive KalmanTrack into an ObstacleVelocity message."""
        obs = ObstacleVelocity()
        obs.header = header
        obs.obstacle_id = track.track_id
        px, py = track.position
        vx, vy = track.velocity
        obs.centroid.x = float(px)
        obs.centroid.y = float(py)
        obs.centroid.z = 0.0
        obs.velocity.x = float(vx)
        obs.velocity.y = float(vy)
        obs.velocity.z = 0.0
        obs.speed_mps = float(track.speed)
        obs.width_m = float(track.last_width)
        obs.depth_m = float(track.last_depth)
        obs.point_count = int(track.last_point_count)
        obs.confidence = float(track.confidence)
        obs.is_static = track.speed < self._static_thresh
        return obs
def main(args=None) -> None:
    """Entry point: spin ObstacleVelocityNode until shutdown, then clean up.

    KeyboardInterrupt is caught so Ctrl-C exits quietly, and try_shutdown()
    tolerates an already-shut-down context — consistent with the sibling
    path_edges_node entry point.
    """
    rclpy.init(args=args)
    node = ObstacleVelocityNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node — no traceback.
        pass
    finally:
        node.destroy_node()
        rclpy.try_shutdown()


if __name__ == '__main__':
    main()

View File

@ -1,164 +0,0 @@
"""
path_edges_node.py ROS2 node for lane/path edge detection (Issue #339).
Subscribes
----------
/camera/color/image_raw (sensor_msgs/Image)
Publishes
---------
/saltybot/path_edges (saltybot_scene_msgs/PathEdges)
Parameters
----------
roi_frac (float, default 0.50) bottom fraction of image as ROI
blur_ksize (int, default 5) Gaussian blur kernel size (odd)
canny_low (int, default 50) Canny lower threshold
canny_high (int, default 150) Canny upper threshold
hough_threshold (int, default 30) minimum Hough votes
min_line_len (int, default 40) minimum Hough segment length (px)
max_line_gap (int, default 20) maximum Hough gap (px)
min_slope (float, default 0.3) |slope| below this discard
birdseye_size (int, default 400) bird-eye square image side (px)
"""
from __future__ import annotations
import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import Header
from saltybot_scene_msgs.msg import PathEdges
from ._path_edges import PathEdgeConfig, process_frame
class PathEdgesNode(Node):
    """ROS2 wrapper around the pure-Python path-edge detection pipeline."""

    # (name, default) for every declared ROS parameter; names match
    # PathEdgeConfig fields so the config can be built by keyword.
    _PARAMS = (
        ('roi_frac', 0.50),
        ('blur_ksize', 5),
        ('canny_low', 50),
        ('canny_high', 150),
        ('hough_threshold', 30),
        ('min_line_len', 40),
        ('max_line_gap', 20),
        ('min_slope', 0.3),
        ('birdseye_size', 400),
    )

    def __init__(self) -> None:
        super().__init__('path_edges_node')
        for name, default in self._PARAMS:
            self.declare_parameter(name, default)
        self._sub = self.create_subscription(
            Image,
            '/camera/color/image_raw',
            self._image_cb,
            10,
        )
        self._pub = self.create_publisher(PathEdges, '/saltybot/path_edges', 10)
        self.get_logger().info('PathEdgesNode ready — subscribing /camera/color/image_raw')

    # ------------------------------------------------------------------
    def _build_config(self) -> PathEdgeConfig:
        """Snapshot current parameter values into a PathEdgeConfig."""
        values = {name: self.get_parameter(name).value for name, _ in self._PARAMS}
        return PathEdgeConfig(**values)

    def _image_cb(self, msg: Image) -> None:
        """Run the edge pipeline on one frame and publish the result."""
        frame = self._ros_image_to_bgr(msg)
        if frame is None:
            return  # unsupported encoding or malformed buffer
        result = process_frame(frame, self._build_config())

        out = PathEdges()
        out.header = msg.header
        out.line_count = len(result.lines)
        out.roi_top = result.roi_top
        # Flat float32 arrays: [x1,y1,x2,y2, x1,y1,x2,y2, ...]
        out.segments_px = [coord for seg in result.lines for coord in seg]
        out.segments_birdseye_px = [coord for seg in result.birdseye_lines for coord in seg]

        # Left edge (ROI frame + bird-eye frame)
        out.left_detected = result.left_edge is not None
        if result.left_edge is not None:
            x1, y1, x2, y2 = result.left_edge
            out.left_x1, out.left_y1 = float(x1), float(y1)
            out.left_x2, out.left_y2 = float(x2), float(y2)
        if result.birdseye_left is not None:
            bx1, by1, bx2, by2 = result.birdseye_left
            out.left_birdseye_x1, out.left_birdseye_y1 = float(bx1), float(by1)
            out.left_birdseye_x2, out.left_birdseye_y2 = float(bx2), float(by2)

        # Right edge (ROI frame + bird-eye frame)
        out.right_detected = result.right_edge is not None
        if result.right_edge is not None:
            x1, y1, x2, y2 = result.right_edge
            out.right_x1, out.right_y1 = float(x1), float(y1)
            out.right_x2, out.right_y2 = float(x2), float(y2)
        if result.birdseye_right is not None:
            bx1, by1, bx2, by2 = result.birdseye_right
            out.right_birdseye_x1, out.right_birdseye_y1 = float(bx1), float(by1)
            out.right_birdseye_x2, out.right_birdseye_y2 = float(bx2), float(by2)

        self._pub.publish(out)

    @staticmethod
    def _ros_image_to_bgr(msg: Image) -> np.ndarray | None:
        """Convert a sensor_msgs/Image to a uint8 BGR numpy array.

        Supports rgb8 / bgr8 / mono8; returns None for any other encoding
        or when the buffer size does not match height × width × channels.
        """
        encoding = msg.encoding.lower()
        if encoding not in ('rgb8', 'bgr8', 'mono8'):
            return None  # unsupported encoding
        flat = np.frombuffer(msg.data, dtype=np.uint8)
        n_channels = 1 if encoding == 'mono8' else 3
        try:
            img = flat.reshape((msg.height, msg.width, n_channels))
        except ValueError:
            return None  # buffer size does not match the advertised shape
        if encoding == 'rgb8':
            return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        if encoding == 'mono8':
            return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return img
def main(args=None) -> None:
    """Entry point: spin PathEdgesNode until interrupted, then clean up."""
    rclpy.init(args=args)
    node = PathEdgesNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node — exit quietly.
        pass
    finally:
        node.destroy_node()
        # try_shutdown tolerates a context that is already shut down.
        rclpy.try_shutdown()


if __name__ == '__main__':
    main()

View File

@ -51,10 +51,6 @@ setup(
'wheel_odom = saltybot_bringup.wheel_odom_node:main',
# Appearance-based person re-identification (Issue #322)
'person_reid = saltybot_bringup.person_reid_node:main',
# Dynamic obstacle velocity estimator (Issue #326)
'obstacle_velocity = saltybot_bringup.obstacle_velocity_node:main',
# Lane/path edge detector (Issue #339)
'path_edges = saltybot_bringup.path_edges_node:main',
],
},
)

View File

@ -1,467 +0,0 @@
"""
test_obstacle_velocity.py Unit tests for obstacle velocity estimation (no ROS2).
Covers:
KalmanTrack construction:
- initial position matches centroid
- initial velocity is zero
- initial confidence is 0.0
- initial alive=True, coasting=0, age=0
KalmanTrack predict():
- predict(dt=0) leaves position unchanged
- predict(dt=1) with zero velocity leaves position unchanged
- predict() increments coasting by 1
- alive stays True until coasting > max_coasting
- alive becomes False exactly when coasting > max_coasting
KalmanTrack update():
- update() resets coasting to 0
- update() increments age
- update() shifts position toward measurement
- update() reduces position uncertainty (trace of P decreases)
- confidence increases with age, caps at 1.0
- confidence never exceeds 1.0
- metadata stored (width, depth, point_count)
KalmanTrack velocity convergence:
- constant-velocity target: vx converges toward true velocity after N steps
- stationary target: speed stays near zero
associate():
- empty tracks all clusters unmatched
- empty clusters all tracks unmatched
- both empty empty matches
- single perfect match below max_dist
- single pair above max_dist no match
- two tracks two clusters: diagonal nearest wins
- more tracks than clusters: extra tracks unmatched
- more clusters than tracks: extra clusters unmatched
ObstacleTracker:
- empty centroids no tracks created
- single cluster one track, confidence=0 initially
- same position twice track maintained, confidence increases
- cluster disappears: coasts then deleted after max_coasting+1 frames
- new cluster after deletion gets new track_id
- two clusters two tracks, correct IDs
- moving cluster: velocity direction correct after convergence
- track_id is monotonically increasing
- metadata (width, depth, point_count) stored on track
"""
import sys
import os
import math
import numpy as np
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from saltybot_bringup._obstacle_velocity import (
KalmanTrack,
ObstacleTracker,
associate,
)
# ── Helpers ───────────────────────────────────────────────────────────────────
def _c(x: float, y: float) -> np.ndarray:
    """Build a float64 (2,) centroid vector from *x* and *y*."""
    centroid = np.array((x, y), dtype=np.float64)
    return centroid
def _track(x: float = 0.0, y: float = 0.0, **kw) -> KalmanTrack:
    """Shorthand: KalmanTrack with id 1 at (*x*, *y*); extra kwargs forwarded."""
    centroid = _c(x, y)
    return KalmanTrack(1, centroid, **kw)
# ── KalmanTrack — construction ────────────────────────────────────────────────
class TestKalmanTrackInit:
    """A freshly constructed track mirrors its centroid and has no history."""

    def test_initial_position(self):
        trk = _track(3.0, -1.5)
        assert trk.position == pytest.approx([3.0, -1.5], abs=1e-9)

    def test_initial_velocity_zero(self):
        trk = _track(1.0, 2.0)
        assert trk.velocity == pytest.approx([0.0, 0.0], abs=1e-9)

    def test_initial_speed_zero(self):
        trk = _track(1.0, 0.0)
        assert trk.speed == pytest.approx(0.0, abs=1e-9)

    def test_initial_confidence_zero(self):
        trk = KalmanTrack(1, _c(0, 0), n_init_frames=3)
        assert trk.confidence == pytest.approx(0.0, abs=1e-9)

    def test_initial_alive(self):
        assert _track().alive is True

    def test_initial_coasting_zero(self):
        assert _track().coasting == 0

    def test_initial_age_zero(self):
        assert _track().age == 0
# ── KalmanTrack — predict ─────────────────────────────────────────────────────
class TestKalmanTrackPredict:
    """predict() advances the state, bumps coasting, and ages out tracks."""

    def test_predict_dt_zero_no_position_change(self):
        trk = _track(2.0, 3.0)
        trk.predict(0.0)
        assert trk.position == pytest.approx([2.0, 3.0], abs=1e-9)

    def test_predict_dt_positive_zero_velocity_no_position_change(self):
        trk = _track(1.0, 1.0)
        trk.predict(1.0)
        assert trk.position == pytest.approx([1.0, 1.0], abs=1e-6)

    def test_predict_negative_dt_clamped_to_zero(self):
        trk = _track(1.0, 0.0)
        trk.predict(-5.0)
        assert trk.position == pytest.approx([1.0, 0.0], abs=1e-9)

    def test_predict_increments_coasting(self):
        trk = _track()
        assert trk.coasting == 0
        for expected in (1, 2):
            trk.predict(0.1)
            assert trk.coasting == expected

    def test_alive_before_max_coasting(self):
        trk = KalmanTrack(1, _c(0, 0), max_coasting=3)
        for _ in range(3):
            trk.predict(0.1)
        # coast == 3 and "3 > 3" is False, so the track survives
        assert trk.alive is True

    def test_alive_false_after_exceeding_max_coasting(self):
        trk = KalmanTrack(1, _c(0, 0), max_coasting=3)
        for _ in range(4):
            trk.predict(0.1)
        # coast == 4 exceeds max_coasting=3 → track dies
        assert trk.alive is False

    def test_predict_advances_position_when_velocity_set(self):
        """After velocity is seeded via update(), predict() moves x forward."""
        trk = KalmanTrack(1, _c(0.0, 0.0), r_pos=0.001, n_init_frames=1)
        # Feed advancing centroids so the filter learns vx ≈ 1
        for step in range(1, 8):
            trk.predict(1.0)
            trk.update(_c(float(step), 0.0))
        x_before = trk.position[0]
        trk.predict(1.0)
        assert trk.position[0] > x_before
# ── KalmanTrack — update ──────────────────────────────────────────────────────
class TestKalmanTrackUpdate:
    """update() folds a measurement in: resets coasting, ages, gains confidence."""

    def test_update_resets_coasting(self):
        trk = _track()
        trk.predict(0.1)
        trk.predict(0.1)  # coasting now 2
        trk.update(_c(0, 0))
        assert trk.coasting == 0

    def test_update_increments_age(self):
        trk = _track()
        for expected in (1, 2):
            trk.update(_c(float(expected), 0))
            assert trk.age == expected

    def test_update_shifts_position_toward_measurement(self):
        trk = _track(0.0, 0.0)
        trk.update(_c(5.0, 0.0))
        # Pulled toward the measurement in +x
        assert trk.position[0] > 0.0

    def test_update_reduces_position_covariance(self):
        trk = KalmanTrack(1, _c(0, 0))
        var_before = float(trk._P[0, 0])
        trk.update(_c(0, 0))
        var_after = float(trk._P[0, 0])
        assert var_after < var_before

    def test_confidence_increases_with_age(self):
        trk = KalmanTrack(1, _c(0, 0), n_init_frames=4)
        assert trk.confidence == pytest.approx(0.0, abs=1e-9)
        for expected in (0.25, 0.50, 0.75, 1.0):
            trk.update(_c(0, 0))
            assert trk.confidence == pytest.approx(expected)

    def test_confidence_caps_at_one(self):
        trk = KalmanTrack(1, _c(0, 0), n_init_frames=2)
        for _ in range(10):
            trk.update(_c(0, 0))
        assert trk.confidence == pytest.approx(1.0)

    def test_update_stores_metadata(self):
        trk = _track()
        trk.update(_c(1, 1), width=0.3, depth=0.5, point_count=7)
        assert trk.last_width == pytest.approx(0.3)
        assert trk.last_depth == pytest.approx(0.5)
        assert trk.last_point_count == 7
# ── KalmanTrack — velocity convergence ───────────────────────────────────────
class TestKalmanTrackVelocityConvergence:
    """Velocity estimates converge to ground truth under low noise."""

    def test_constant_velocity_converges(self):
        """Observations at x = 0,1,2,… (vx = 1 m/s): estimate nears 1.0."""
        trk = KalmanTrack(1, _c(0.0, 0.0), r_pos=0.01, q_pos=0.001, q_vel=0.1)
        for step in range(1, 16):
            trk.predict(1.0)
            trk.update(_c(float(step), 0.0))
        assert trk.velocity[0] == pytest.approx(1.0, abs=0.15)
        assert trk.velocity[1] == pytest.approx(0.0, abs=0.10)

    def test_diagonal_velocity_converges(self):
        """Target moving at (1, 1) m/s along the diagonal."""
        trk = KalmanTrack(1, _c(0.0, 0.0), r_pos=0.01, q_pos=0.001, q_vel=0.1)
        for step in range(1, 16):
            trk.predict(1.0)
            trk.update(_c(float(step), float(step)))
        assert trk.velocity[0] == pytest.approx(1.0, abs=0.15)
        assert trk.velocity[1] == pytest.approx(1.0, abs=0.15)

    def test_stationary_target_speed_near_zero(self):
        """A fixed target keeps the estimated speed near zero."""
        trk = KalmanTrack(1, _c(2.0, 3.0), r_pos=0.001)
        for _ in range(15):
            trk.predict(0.1)
            trk.update(_c(2.0, 3.0))
        assert trk.speed < 0.05
# ── associate ─────────────────────────────────────────────────────────────────
class TestAssociate:
    """Greedy nearest-centroid association: empty cases, thresholds, pairing."""

    def test_empty_tracks(self):
        matches, lone_tracks, lone_clusters = associate(
            np.empty((0, 2)), np.array([[1.0, 0.0]]), 1.0)
        assert matches == []
        assert lone_tracks == []
        assert lone_clusters == [0]

    def test_empty_clusters(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[1.0, 0.0]]), np.empty((0, 2)), 1.0)
        assert matches == []
        assert lone_tracks == [0]
        assert lone_clusters == []

    def test_both_empty(self):
        matches, lone_tracks, lone_clusters = associate(
            np.empty((0, 2)), np.empty((0, 2)), 1.0)
        assert matches == []
        assert lone_tracks == []
        assert lone_clusters == []

    def test_single_match_below_threshold(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[0.0, 0.0]]), np.array([[0.1, 0.0]]), 0.5)
        assert matches == [(0, 0)]
        assert lone_tracks == []
        assert lone_clusters == []

    def test_single_pair_above_threshold_no_match(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[0.0, 0.0]]), np.array([[2.0, 0.0]]), 0.5)
        assert matches == []
        assert lone_tracks == [0]
        assert lone_clusters == [0]

    def test_two_tracks_two_clusters_diagonal(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[0.0, 0.0], [3.0, 0.0]]),
            np.array([[0.1, 0.0], [3.1, 0.0]]), 0.5)
        assert (0, 0) in matches
        assert (1, 1) in matches
        assert lone_tracks == []
        assert lone_clusters == []

    def test_more_tracks_than_clusters(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[0.0, 0.0], [5.0, 0.0]]),
            np.array([[0.1, 0.0]]), 0.5)
        assert len(matches) == 1
        assert len(lone_tracks) == 1  # the far track goes unmatched
        assert lone_clusters == []

    def test_more_clusters_than_tracks(self):
        matches, lone_tracks, lone_clusters = associate(
            np.array([[0.0, 0.0]]),
            np.array([[0.1, 0.0], [5.0, 0.0]]), 0.5)
        assert len(matches) == 1
        assert lone_tracks == []
        assert len(lone_clusters) == 1  # the far cluster goes unmatched

    def test_nearest_wins(self):
        """Each track pairs one-to-one with its nearest cluster."""
        matches, _, _ = associate(
            np.array([[0.0, 0.0], [10.0, 0.0]]),
            np.array([[0.05, 0.0], [10.2, 0.0]]), 1.0)
        assert (0, 0) in matches
        assert (1, 1) in matches

    def test_threshold_strictly_less_than(self):
        """A distance exactly equal to max_dist must NOT match."""
        matches, _, _ = associate(
            np.array([[0.0, 0.0]]), np.array([[0.5, 0.0]]), 0.5)
        assert matches == []
# ── ObstacleTracker ───────────────────────────────────────────────────────────
class TestObstacleTracker:
    """Tracker lifecycle: creation, association, coasting, deletion, IDs.

    Fix: `test_metadata_stored_on_track` used spaces around keyword-argument
    '=' signs (flake8 E251), which failed the project's lint CI job.
    """

    def test_empty_input_no_tracks(self):
        tracker = ObstacleTracker()
        result = tracker.update([], timestamp=0.0)
        assert result == []

    def test_single_cluster_creates_track(self):
        tracker = ObstacleTracker()
        tracks = tracker.update([_c(1, 0)], timestamp=0.0)
        assert len(tracks) == 1

    def test_track_id_starts_at_one(self):
        tracker = ObstacleTracker()
        tracks = tracker.update([_c(0, 0)], timestamp=0.0)
        assert tracks[0].track_id == 1

    def test_same_position_twice_single_track(self):
        tracker = ObstacleTracker()
        t1 = tracker.update([_c(1, 0)], timestamp=0.0)
        t2 = tracker.update([_c(1, 0)], timestamp=0.1)
        assert len(t2) == 1
        assert t2[0].track_id == t1[0].track_id

    def test_confidence_increases_with_updates(self):
        tracker = ObstacleTracker(n_init_frames=3)
        tracks = tracker.update([_c(0, 0)], timestamp=0.0)
        c0 = tracks[0].confidence
        tracks = tracker.update([_c(0, 0)], timestamp=0.1)
        assert tracks[0].confidence > c0

    def test_track_coasts_then_dies(self):
        tracker = ObstacleTracker(max_coasting_frames=2)
        tracker.update([_c(0, 0)], timestamp=0.0)  # create
        t1 = tracker.update([], timestamp=0.1)     # coast 1
        assert len(t1) == 1
        t2 = tracker.update([], timestamp=0.2)     # coast 2
        assert len(t2) == 1
        t3 = tracker.update([], timestamp=0.3)     # coast 3 > 2 → dead
        assert len(t3) == 0

    def test_new_cluster_after_deletion_new_id(self):
        tracker = ObstacleTracker(max_coasting_frames=1)
        t0 = tracker.update([_c(0, 0)], timestamp=0.0)
        old_id = t0[0].track_id
        tracker.update([], timestamp=0.1)  # coast 1
        tracker.update([], timestamp=0.2)  # coast 2 > 1 → dead
        t1 = tracker.update([_c(0, 0)], timestamp=0.3)
        assert len(t1) == 1
        assert t1[0].track_id != old_id

    def test_two_clusters_two_tracks(self):
        tracker = ObstacleTracker()
        tracks = tracker.update([_c(0, 0), _c(5, 0)], timestamp=0.0)
        assert len(tracks) == 2
        ids = {t.track_id for t in tracks}
        assert len(ids) == 2

    def test_track_ids_monotonically_increasing(self):
        tracker = ObstacleTracker()
        tracker.update([_c(0, 0)], timestamp=0.0)
        tracker.update([_c(10, 10)], timestamp=0.1)  # far → new track
        all_ids = [t.track_id for t in tracker.tracks.values()]
        assert all_ids == sorted(all_ids)

    def test_moving_cluster_velocity_direction(self):
        """
        Cluster moves in +x at 1 m/s; after convergence vx should be positive.
        Use low noise and many steps for reliable convergence.
        """
        tracker = ObstacleTracker(
            n_init_frames=1,
            r_pos=0.01,
            q_pos=0.001,
            q_vel=0.1,
        )
        for i in range(20):
            tracker.update([_c(float(i) * 0.1, 0.0)], timestamp=float(i) * 0.1)
        tracks = list(tracker.tracks.values())
        assert len(tracks) == 1
        assert tracks[0].velocity[0] > 0.05

    def test_metadata_stored_on_track(self):
        tracker = ObstacleTracker()
        tracker.update(
            [_c(1, 1)],
            timestamp=0.0,
            widths=[0.4],
            depths=[0.6],
            point_counts=[9],
        )
        t = list(tracker.tracks.values())[0]
        assert t.last_width == pytest.approx(0.4)
        assert t.last_depth == pytest.approx(0.6)
        assert t.last_point_count == 9

    def test_far_cluster_creates_new_track(self):
        """Cluster beyond max_association_dist creates a second track."""
        tracker = ObstacleTracker(max_association_dist_m=0.5)
        tracker.update([_c(0, 0)], timestamp=0.0)
        tracks = tracker.update([_c(10, 0)], timestamp=0.1)
        # Original track coasts, new track spawned for far cluster
        assert len(tracks) == 2

    def test_empty_to_single_and_back(self):
        tracker = ObstacleTracker(max_coasting_frames=0)
        t1 = tracker.update([_c(1, 0)], timestamp=0.0)
        assert len(t1) == 1
        t2 = tracker.update([], timestamp=0.1)  # coast=1 > 0 → dead
        assert len(t2) == 0
        t3 = tracker.update([_c(1, 0)], timestamp=0.2)
        assert len(t3) == 1

    def test_constant_velocity_estimate(self):
        """
        Target moves at vx=0.1 m/s (0.1 m per 1-second step).
        Each step is well within the default max_association_dist_m=0.5 m,
        so the track is continuously matched. After many updates, estimated
        speed should be close to 0.1 m/s.
        """
        tracker = ObstacleTracker(
            n_init_frames=1, r_pos=0.001, q_pos=0.0001, q_vel=0.05
        )
        for i in range(30):
            tracker.update([_c(float(i) * 0.1, 0.0)], timestamp=float(i))
        t = list(tracker.tracks.values())[0]
        assert t.speed == pytest.approx(0.1, abs=0.03)
# Allow running this test module directly, without the pytest CLI.
if __name__ == '__main__':
    pytest.main([__file__, '-v'])

View File

@ -1,364 +0,0 @@
"""
test_path_edges.py pytest tests for _path_edges.py (no ROS2 required).
Tests cover:
- build_homography: output shape, identity-like mapping
- apply_homography: empty input, single point, batch
- canny_edges: output shape, dtype, uniform image produces no edges
- hough_lines: empty edge map returns []
- classify_lines: slope filtering, left/right split
- average_line: empty None, single line, multi-line average
- warp_segments: empty list, segment endpoint ordering
- process_frame: smoke test on synthetic image
"""
from __future__ import annotations
import math
import cv2
import numpy as np
import pytest
from saltybot_bringup._path_edges import (
PathEdgeConfig,
PathEdgesResult,
apply_homography,
average_line,
build_homography,
canny_edges,
classify_lines,
hough_lines,
process_frame,
warp_segments,
)
# ── Helpers ───────────────────────────────────────────────────────────────────
def _solid_bgr(h: int = 100, w: int = 200, color=(128, 128, 128)) -> np.ndarray:
    """Return an h×w uint8 BGR image uniformly filled with *color*."""
    # np.full broadcasts the 3-tuple across the trailing channel axis.
    return np.full((h, w, 3), color, dtype=np.uint8)
def _default_H(roi_w: int = 200, roi_h: int = 100) -> np.ndarray:
    """Homography for the default PathEdgeConfig over a roi_w×roi_h ROI."""
    default_cfg = PathEdgeConfig()
    return build_homography(
        default_cfg.birdseye_src, roi_w, roi_h, default_cfg.birdseye_size)
# ── build_homography ──────────────────────────────────────────────────────────
class TestBuildHomography:
    """Shape/dtype of the homography and mapping of the source trapezoid."""

    def test_shape(self):
        assert _default_H().shape == (3, 3)

    def test_dtype(self):
        assert _default_H().dtype == np.float64

    def test_bottom_left_maps_near_left(self):
        """src[3] = (0.05, 0.95) should land near the bird-eye bottom-left."""
        cfg = PathEdgeConfig()
        homography = build_homography(cfg.birdseye_src, 200, 100, cfg.birdseye_size)
        src_pt = np.array([[0.05 * 200, 0.95 * 100]], dtype=np.float32)
        warped = apply_homography(homography, src_pt)
        assert warped[0, 0] == pytest.approx(cfg.birdseye_size * 0.25, abs=5)
        assert warped[0, 1] == pytest.approx(cfg.birdseye_size, abs=5)

    def test_bottom_right_maps_near_right(self):
        """src[2] = (0.95, 0.95) should land near the bird-eye bottom-right."""
        cfg = PathEdgeConfig()
        homography = build_homography(cfg.birdseye_src, 200, 100, cfg.birdseye_size)
        src_pt = np.array([[0.95 * 200, 0.95 * 100]], dtype=np.float32)
        warped = apply_homography(homography, src_pt)
        assert warped[0, 0] == pytest.approx(cfg.birdseye_size * 0.75, abs=5)
        assert warped[0, 1] == pytest.approx(cfg.birdseye_size, abs=5)
# ── apply_homography ──────────────────────────────────────────────────────────
class TestApplyHomography:
    """apply_homography handles empty input, round-trips, and batches."""

    def test_empty_input(self):
        warped = apply_homography(_default_H(), np.empty((0, 2), dtype=np.float32))
        assert warped.shape == (0, 2)

    def test_single_point_roundtrip(self):
        """Forward warp followed by inverse warp recovers the input point."""
        homography = _default_H(200, 100)
        pt = np.array([[50.0, 40.0]], dtype=np.float32)
        there = apply_homography(homography, pt)
        back = apply_homography(np.linalg.inv(homography), there)
        np.testing.assert_allclose(back, pt, atol=1e-3)

    def test_batch_output_shape(self):
        pts = np.random.rand(5, 2).astype(np.float32) * 100
        warped = apply_homography(_default_H(), pts)
        assert warped.shape == (5, 2)
        assert warped.dtype == np.float32
# ── canny_edges ───────────────────────────────────────────────────────────────
class TestCannyEdges:
    """Edge-map shape/dtype plus basic edge/no-edge sanity checks."""

    def test_output_shape(self):
        edge_map = canny_edges(_solid_bgr(80, 160), low=50, high=150, ksize=5)
        assert edge_map.shape == (80, 160)

    def test_output_dtype(self):
        edge_map = canny_edges(_solid_bgr(), low=50, high=150, ksize=5)
        assert edge_map.dtype == np.uint8

    def test_uniform_image_no_edges(self):
        """A flat grey frame has no gradient, hence no edges."""
        edge_map = canny_edges(_solid_bgr(80, 160, color=(100, 100, 100)),
                               low=50, high=150, ksize=5)
        assert edge_map.max() == 0

    def test_strong_edge_detected(self):
        """A hard black/white horizontal boundary must trigger edges."""
        frame = np.zeros((100, 200, 3), dtype=np.uint8)
        frame[50:, :] = 255  # sharp boundary at y=50
        edge_map = canny_edges(frame, low=20, high=60, ksize=3)
        assert edge_map.max() == 255

    def test_ksize_even_skips_blur(self):
        """An even blur kernel is skipped rather than crashing."""
        edge_map = canny_edges(_solid_bgr(), low=50, high=150, ksize=4)
        assert edge_map.shape == (100, 200)
# ── hough_lines ───────────────────────────────────────────────────────────────
class TestHoughLines:
    """hough_lines output format and basic detection behaviour."""

    def test_empty_edge_map(self):
        blank = np.zeros((100, 200), dtype=np.uint8)
        assert hough_lines(blank, threshold=30, min_len=40, max_gap=20) == []

    def test_returns_list_of_tuples(self):
        """Segments come back as tuples of exactly 4 floats."""
        edge_map = np.zeros((100, 200), dtype=np.uint8)
        cv2.line(edge_map, (0, 0), (199, 99), 255, 2)
        segments = hough_lines(edge_map, threshold=10, min_len=20, max_gap=5)
        assert isinstance(segments, list)
        if segments:
            first = segments[0]
            assert len(first) == 4
            assert all(isinstance(coord, float) for coord in first)

    def test_line_detected_on_drawn_segment(self):
        """A drawn line segment should be detected at least once."""
        edge_map = np.zeros((200, 400), dtype=np.uint8)
        cv2.line(edge_map, (50, 100), (350, 150), 255, 2)
        segments = hough_lines(edge_map, threshold=10, min_len=30, max_gap=10)
        assert len(segments) >= 1
# ── classify_lines ────────────────────────────────────────────────────────────
class TestClassifyLines:
    """Slope-based left/right classification of Hough segments."""

    def test_empty_returns_two_empty_lists(self):
        assert classify_lines([], min_slope=0.3) == ([], [])

    def test_negative_slope_goes_left(self):
        # slope = (50 - 0) / (0 - 100) = -0.5 → left bucket
        left, right = classify_lines([(100.0, 0.0, 0.0, 50.0)], min_slope=0.3)
        assert len(left) == 1
        assert right == []

    def test_positive_slope_goes_right(self):
        # slope = (50 - 0) / (100 - 0) = +0.5 → right bucket
        left, right = classify_lines([(0.0, 0.0, 100.0, 50.0)], min_slope=0.3)
        assert left == []
        assert len(right) == 1

    def test_near_horizontal_discarded(self):
        # |slope| = 0.1 < min_slope=0.3 → dropped entirely
        left, right = classify_lines([(0.0, 0.0, 100.0, 10.0)], min_slope=0.3)
        assert left == []
        assert right == []

    def test_vertical_line_skipped(self):
        # dx ≈ 0 → segment is skipped
        left, right = classify_lines([(50.0, 0.0, 50.0, 100.0)], min_slope=0.3)
        assert left == []
        assert right == []

    def test_mixed_lines(self):
        segments = [
            (100.0, 0.0, 0.0, 50.0),   # slope -0.5 → left
            (0.0, 0.0, 100.0, 50.0),   # slope +0.5 → right
            (0.0, 0.0, 100.0, 5.0),    # slope +0.05 → discarded
        ]
        left, right = classify_lines(segments, min_slope=0.3)
        assert len(left) == 1
        assert len(right) == 1
# ── average_line ──────────────────────────────────────────────────────────────
class TestAverageLine:
    """Averaging and extrapolation of classified segments to ROI extents."""

    def test_empty_returns_none(self):
        assert average_line([], roi_height=100) is None

    def test_single_line_extrapolated(self):
        # slope 0.5 through the origin: x = 2y, extrapolated to y=0 … y=99
        fitted = average_line([(0.0, 0.0, 200.0, 100.0)], roi_height=100)
        assert fitted is not None
        _, y_bot, _, y_top = fitted
        assert y_bot == pytest.approx(99.0, abs=1)
        assert y_top == pytest.approx(0.0, abs=1)

    def test_two_parallel_lines_averaged(self):
        # Two slope-1 lines with intercepts ±50 average to intercept 0 → x = y
        segments = [
            (50.0, 0.0, 100.0, 50.0),   # slope=1, intercept=-50
            (0.0, 50.0, 50.0, 100.0),   # slope=1, intercept=+50
        ]
        fitted = average_line(segments, roi_height=100)
        assert fitted is not None
        x_bot, y_bot, _, _ = fitted
        assert y_bot == pytest.approx(99.0, abs=1)
        # avg intercept = 0, m_avg = 1 → x_bot ≈ 99
        assert x_bot == pytest.approx(99.0, abs=2)

    def test_vertical_only_returns_none(self):
        # A pure vertical segment (dx == 0) yields no usable slope
        assert average_line([(50.0, 0.0, 50.0, 100.0)], roi_height=100) is None

    def test_returns_four_tuple(self):
        fitted = average_line([(0.0, 0.0, 100.0, 50.0)], roi_height=100)
        assert fitted is not None
        assert len(fitted) == 4
# ── warp_segments ─────────────────────────────────────────────────────────────
class TestWarpSegments:
    """Warping segment endpoints through the bird-eye homography."""

    def test_empty_returns_empty(self):
        assert warp_segments([], _default_H()) == []

    def test_single_segment_returns_one_tuple(self):
        warped = warp_segments([(10.0, 10.0, 90.0, 80.0)], _default_H(200, 100))
        assert len(warped) == 1
        assert len(warped[0]) == 4

    def test_start_and_end_distinct(self):
        """Distinct endpoints must stay distinct after warping."""
        warped = warp_segments([(10.0, 10.0, 190.0, 90.0)], _default_H(200, 100))
        wx1, wy1, wx2, wy2 = warped[0]
        assert abs(wx1 - wx2) + abs(wy1 - wy2) > 1.0

    def test_batch_preserves_count(self):
        segments = [(0.0, 0.0, 10.0, 10.0), (100.0, 0.0, 90.0, 50.0)]
        assert len(warp_segments(segments, _default_H(200, 100))) == 2
# ── process_frame ─────────────────────────────────────────────────────────────
class TestProcessFrame:
    """End-to-end smoke tests for process_frame on synthetic images.

    Fix: `test_segments_px_flat_array_length` previously asserted
    `len(lines) * 4 == sum(4 for _ in lines)`, a tautology that could never
    fail; it now flattens the segments and verifies each really has 4
    coordinates.
    """

    def _lane_image(self, h: int = 480, w: int = 640) -> np.ndarray:
        """
        Create a synthetic BGR image with two diagonal lines representing
        left and right lane edges in the bottom half.
        """
        img = np.zeros((h, w, 3), dtype=np.uint8)
        roi_top = h // 2
        # Left edge: from bottom-left area upward to the right (negative slope in image coords)
        cv2.line(img, (80, h - 10), (240, roi_top + 10), (255, 255, 255), 4)
        # Right edge: from bottom-right area upward to the left (positive slope in image coords)
        cv2.line(img, (w - 80, h - 10), (w - 240, roi_top + 10), (255, 255, 255), 4)
        return img

    def test_returns_named_tuple(self):
        bgr = _solid_bgr(120, 240)
        cfg = PathEdgeConfig()
        result = process_frame(bgr, cfg)
        assert isinstance(result, PathEdgesResult)

    def test_roi_top_correct(self):
        bgr = np.zeros((200, 400, 3), dtype=np.uint8)
        cfg = PathEdgeConfig(roi_frac=0.5)
        result = process_frame(bgr, cfg)
        assert result.roi_top == 100

    def test_uniform_image_no_lines(self):
        bgr = _solid_bgr(200, 400, color=(80, 80, 80))
        cfg = PathEdgeConfig()
        result = process_frame(bgr, cfg)
        assert result.lines == []
        assert result.left_edge is None
        assert result.right_edge is None
        assert result.left_lines == []
        assert result.right_lines == []

    def test_homography_matrix_shape(self):
        bgr = _solid_bgr(200, 400)
        result = process_frame(bgr, PathEdgeConfig())
        assert result.H.shape == (3, 3)

    def test_birdseye_segments_same_count(self):
        """birdseye_lines and lines must have the same number of segments."""
        bgr = self._lane_image()
        result = process_frame(bgr, PathEdgeConfig(hough_threshold=10, min_line_len=20))
        assert len(result.birdseye_lines) == len(result.lines)

    def test_lane_image_detects_edges(self):
        """Synthetic lane image should detect at least one of left/right edge."""
        bgr = self._lane_image()
        cfg = PathEdgeConfig(
            roi_frac=0.5,
            canny_low=30,
            canny_high=100,
            hough_threshold=10,
            min_line_len=20,
            max_line_gap=15,
        )
        result = process_frame(bgr, cfg)
        assert (result.left_edge is not None) or (result.right_edge is not None)

    def test_segments_px_flat_array_length(self):
        """Flattening all segments yields exactly 4 coordinates per line."""
        bgr = self._lane_image()
        cfg = PathEdgeConfig(hough_threshold=10, min_line_len=20)
        result = process_frame(bgr, cfg)
        flat = [coord for seg in result.lines for coord in seg]
        assert len(flat) == 4 * len(result.lines)

    def test_left_right_lines_subset_of_all_lines(self):
        """left_lines + right_lines must be a subset of all lines."""
        bgr = self._lane_image()
        cfg = PathEdgeConfig(hough_threshold=10, min_line_len=20)
        result = process_frame(bgr, cfg)
        all_set = set(result.lines)
        for seg in result.left_lines:
            assert seg in all_set
        for seg in result.right_lines:
            assert seg in all_set

View File

@ -1,29 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>saltybot_hand_tracking</name>
<version>0.1.0</version>
<description>MediaPipe-based hand tracking and robot-command gesture recognition (Issue #342).</description>
<maintainer email="robot@saltylab.local">SaltyLab</maintainer>
<license>MIT</license>
<buildtool_depend>ament_python</buildtool_depend>
<depend>rclpy</depend>
<depend>sensor_msgs</depend>
<depend>std_msgs</depend>
<depend>saltybot_hand_tracking_msgs</depend>
<exec_depend>python3-numpy</exec_depend>
<exec_depend>python3-opencv</exec_depend>
<!-- mediapipe installed via pip: pip install mediapipe -->
<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>python3-pytest</test_depend>
<export>
<build_type>ament_python</build_type>
</export>
</package>

View File

@ -1,332 +0,0 @@
"""
_hand_gestures.py Robot-command gesture classification from MediaPipe
hand landmarks. No ROS2 / MediaPipe / OpenCV dependencies.
Gesture vocabulary
------------------
"stop"       open palm (4+ fingers extended)        → pause/stop robot
"point"      index extended, others curled          → direction command
"disarm"     fist (all fingers + thumb curled)      → disarm/emergency-off
"confirm"    thumbs-up                              → confirm action
"follow_me"  victory/peace sign (index+middle up)   → follow mode
"greeting"   lateral wrist oscillation (wave)       → greeting response
"none"       no recognised gesture
Coordinate convention
---------------------
MediaPipe Hands landmark coordinates are image-normalised:
x: 0.0 = left edge, 1.0 = right edge
y: 0.0 = top edge, 1.0 = bottom edge (y increases downward)
z: depth relative to wrist; negative = toward camera
Landmark indices (MediaPipe Hands topology)
-------------------------------------------
0 WRIST
1 THUMB_CMC 2 THUMB_MCP 3 THUMB_IP 4 THUMB_TIP
5 INDEX_MCP 6 INDEX_PIP 7 INDEX_DIP 8 INDEX_TIP
9 MIDDLE_MCP 10 MIDDLE_PIP 11 MIDDLE_DIP 12 MIDDLE_TIP
13 RING_MCP 14 RING_PIP 15 RING_DIP 16 RING_TIP
17 PINKY_MCP 18 PINKY_PIP 19 PINKY_DIP 20 PINKY_TIP
Public API
----------
Landmark dataclass(x, y, z)
HandGestureResult NamedTuple
WaveDetector sliding-window wrist-oscillation detector
classify_hand(landmarks, is_right, wave_det) HandGestureResult
"""
from __future__ import annotations
import math
from collections import deque
from dataclasses import dataclass
from typing import Deque, List, NamedTuple, Optional, Tuple
# ── Landmark type ──────────────────────────────────────────────────────────────
@dataclass(frozen=True)
class Landmark:
    """One image-normalised MediaPipe hand landmark (see module docstring)."""

    # x: 0.0 = left edge, 1.0 = right edge
    x: float
    # y: 0.0 = top edge, 1.0 = bottom edge (y increases downward)
    y: float
    # z: depth relative to wrist; negative = toward camera
    z: float = 0.0
# ── Result type ────────────────────────────────────────────────────────────────
class HandGestureResult(NamedTuple):
    """Classification outcome for a single hand."""

    gesture: str      # "stop"|"point"|"disarm"|"confirm"|"follow_me"|"greeting"|"none"
    confidence: float  # 0.0–1.0
    direction: str    # non-empty only when gesture == "point"
    wrist_x: float    # normalised wrist position (image coords)
    wrist_y: float
# ── Landmark index constants ───────────────────────────────────────────────────
# MediaPipe Hands landmark indices (topology listed in the module docstring).
# One assignment per line — the original ';'-joined lines fail flake8 E702.
_WRIST = 0
_THUMB_CMC = 1
_THUMB_MCP = 2
_THUMB_IP = 3
_THUMB_TIP = 4
_INDEX_MCP = 5
_INDEX_PIP = 6
_INDEX_DIP = 7
_INDEX_TIP = 8
_MIDDLE_MCP = 9
_MIDDLE_PIP = 10
_MIDDLE_DIP = 11
_MIDDLE_TIP = 12
_RING_MCP = 13
_RING_PIP = 14
_RING_DIP = 15
_RING_TIP = 16
_PINKY_MCP = 17
_PINKY_PIP = 18
_PINKY_DIP = 19
_PINKY_TIP = 20
_NONE = HandGestureResult("none", 0.0, "", 0.0, 0.0)
# ── Low-level geometry helpers ─────────────────────────────────────────────────
def _finger_up(lm: List[Landmark], tip: int, pip: int) -> bool:
    """Return True when the fingertip sits above (smaller y than) its PIP joint."""
    tip_y = lm[tip].y
    pip_y = lm[pip].y
    return tip_y < pip_y
def _finger_ext_score(lm: List[Landmark], tip: int, pip: int, mcp: int) -> float:
    """Extension score in [0, 1]: tip height above the MCP knuckle, palm-normalised."""
    rise = lm[mcp].y - lm[tip].y  # positive when the tip is above the knuckle
    palm_height = abs(lm[_WRIST].y - lm[_MIDDLE_MCP].y) or 0.01  # avoid div-by-zero
    ratio = rise / palm_height
    if ratio < 0.0:
        return 0.0
    return min(1.0, ratio)
def _count_fingers_up(lm: List[Landmark]) -> int:
    """Number of extended fingers among index, middle, ring, and pinky."""
    tip_pip = (
        (_INDEX_TIP, _INDEX_PIP),
        (_MIDDLE_TIP, _MIDDLE_PIP),
        (_RING_TIP, _RING_PIP),
        (_PINKY_TIP, _PINKY_PIP),
    )
    total = 0
    for tip, pip in tip_pip:
        if _finger_up(lm, tip, pip):
            total += 1
    return total
def _four_fingers_curled(lm: List[Landmark]) -> bool:
    """True when none of index/middle/ring/pinky is extended."""
    tip_pip = (
        (_INDEX_TIP, _INDEX_PIP),
        (_MIDDLE_TIP, _MIDDLE_PIP),
        (_RING_TIP, _RING_PIP),
        (_PINKY_TIP, _PINKY_PIP),
    )
    return all(not _finger_up(lm, tip, pip) for tip, pip in tip_pip)
def _thumb_curled(lm: List[Landmark]) -> bool:
    """True when the thumb tip sits at or below (y >=) the thumb MCP joint."""
    tip_y = lm[_THUMB_TIP].y
    mcp_y = lm[_THUMB_MCP].y
    return tip_y >= mcp_y
def _thumb_extended_up(lm: List[Landmark]) -> bool:
    """True when the thumb tip is clearly (by 0.02) above the thumb CMC base."""
    threshold = lm[_THUMB_CMC].y - 0.02
    return lm[_THUMB_TIP].y < threshold
def _palm_center(lm: List[Landmark]) -> Tuple[float, float]:
    """Centroid (x, y) of the four MCP knuckles."""
    knuckles = (_INDEX_MCP, _MIDDLE_MCP, _RING_MCP, _PINKY_MCP)
    cx = sum(lm[i].x for i in knuckles) / 4
    cy = sum(lm[i].y for i in knuckles) / 4
    return cx, cy
# ── Pointing direction ─────────────────────────────────────────────────────────
def _point_direction(lm: List[Landmark]) -> str:
    """8-compass pointing direction of the index MCP → TIP vector.

    Image y grows downward, so dy is negated before atan2 to put "up" at
    +90° in conventional math coordinates.

    Fix: the original packed `if …: return …` onto single lines (flake8
    E701), which failed the project's lint CI job; logic is unchanged.
    """
    dx = lm[_INDEX_TIP].x - lm[_INDEX_MCP].x
    dy = lm[_INDEX_TIP].y - lm[_INDEX_MCP].y  # +y = downward in image
    angle = math.degrees(math.atan2(-dy, dx))  # flip y so up = +90°
    if -22.5 <= angle < 22.5:
        return "right"
    if 22.5 <= angle < 67.5:
        return "upper_right"
    if 67.5 <= angle < 112.5:
        return "up"
    if 112.5 <= angle < 157.5:
        return "upper_left"
    if angle >= 157.5 or angle < -157.5:
        return "left"
    if -157.5 <= angle < -112.5:
        return "lower_left"
    if -112.5 <= angle < -67.5:
        return "down"
    return "lower_right"
# ── Static gesture classifiers ─────────────────────────────────────────────────
def _classify_stop(lm: List[Landmark]) -> Optional[HandGestureResult]:
    """Open palm (4+ fingers extended upward) → "stop"; None otherwise."""
    if _count_fingers_up(lm) < 4:
        return None
    # Confidence grows with how fully the four fingers are extended.
    finger_joints = (
        (_INDEX_TIP, _INDEX_PIP, _INDEX_MCP),
        (_MIDDLE_TIP, _MIDDLE_PIP, _MIDDLE_MCP),
        (_RING_TIP, _RING_PIP, _RING_MCP),
        (_PINKY_TIP, _PINKY_PIP, _PINKY_MCP),
    )
    ext_scores = [_finger_ext_score(lm, t, p, m) for t, p, m in finger_joints]
    mean_ext = sum(ext_scores) / len(ext_scores)
    conf = round(0.60 + 0.35 * mean_ext, 3)
    return HandGestureResult("stop", conf, "", lm[_WRIST].x, lm[_WRIST].y)
def _classify_point(lm: List[Landmark]) -> Optional[HandGestureResult]:
    """Pointing: index finger up while middle, ring and pinky stay curled."""
    if not _finger_up(lm, _INDEX_TIP, _INDEX_PIP):
        return None
    must_be_curled = (
        (_MIDDLE_TIP, _MIDDLE_PIP),
        (_RING_TIP, _RING_PIP),
        (_PINKY_TIP, _PINKY_PIP),
    )
    if any(_finger_up(lm, tip, pip) for tip, pip in must_be_curled):
        return None
    # Confidence scales with how straight the index finger is.
    ext = _finger_ext_score(lm, _INDEX_TIP, _INDEX_PIP, _INDEX_MCP)
    conf = round(0.65 + 0.30 * ext, 3)
    wrist = lm[_WRIST]
    return HandGestureResult("point", conf, _point_direction(lm), wrist.x, wrist.y)
def _classify_disarm(lm: List[Landmark]) -> Optional[HandGestureResult]:
    """Fist ("disarm"): four fingers curled AND the thumb tucked in."""
    if not (_four_fingers_curled(lm) and _thumb_curled(lm)):
        return None
    # Deeper curls (tips well below their MCPs) earn extra confidence,
    # normalised against half the palm height.
    palm_h = abs(lm[_WRIST].y - lm[_MIDDLE_MCP].y) or 0.01
    tip_mcp_pairs = (
        (_INDEX_TIP, _INDEX_MCP), (_MIDDLE_TIP, _MIDDLE_MCP),
        (_RING_TIP, _RING_MCP), (_PINKY_TIP, _PINKY_MCP),
    )
    curl_depth = sum(
        max(0.0, lm[tip].y - lm[mcp].y) for tip, mcp in tip_mcp_pairs
    ) / 4
    depth_ratio = min(1.0, curl_depth / (palm_h * 0.5))
    conf = round(min(0.95, 0.60 + 0.35 * depth_ratio), 3)
    wrist = lm[_WRIST]
    return HandGestureResult("disarm", conf, "", wrist.x, wrist.y)
def _classify_confirm(lm: List[Landmark]) -> Optional[HandGestureResult]:
    """Thumbs-up ("confirm"): thumb extended upward, the four fingers curled."""
    if not _thumb_extended_up(lm) or not _four_fingers_curled(lm):
        return None
    # Confidence scales with how far the thumb tip rises above its CMC base,
    # normalised by palm height.
    palm_h = abs(lm[_WRIST].y - lm[_MIDDLE_MCP].y) or 0.01
    thumb_rise = lm[_THUMB_CMC].y - lm[_THUMB_TIP].y
    conf = round(min(0.95, 0.60 + 0.35 * min(1.0, thumb_rise / palm_h)), 3)
    wrist = lm[_WRIST]
    return HandGestureResult("confirm", conf, "", wrist.x, wrist.y)
def _classify_follow_me(lm: List[Landmark]) -> Optional[HandGestureResult]:
    """Peace/victory ("follow_me"): index + middle up, ring + pinky curled."""
    if not _finger_up(lm, _INDEX_TIP, _INDEX_PIP):
        return None
    if not _finger_up(lm, _MIDDLE_TIP, _MIDDLE_PIP):
        return None
    if _finger_up(lm, _RING_TIP, _RING_PIP) or _finger_up(lm, _PINKY_TIP, _PINKY_PIP):
        return None
    # Confidence from the mean extension of the two raised fingers.
    mean_ext = (
        _finger_ext_score(lm, _INDEX_TIP, _INDEX_PIP, _INDEX_MCP)
        + _finger_ext_score(lm, _MIDDLE_TIP, _MIDDLE_PIP, _MIDDLE_MCP)
    ) / 2
    conf = round(0.60 + 0.35 * mean_ext, 3)
    wrist = lm[_WRIST]
    return HandGestureResult("follow_me", conf, "", wrist.x, wrist.y)
# ── Priority order (first match wins) ─────────────────────────────────────────
# classify_hand() walks this list top-down and returns the first non-None
# result, so the more specific poses must precede the more permissive ones.
_CLASSIFIERS = [
    _classify_stop,
    _classify_confirm,    # before point — thumbs-up would also pass point partially
    _classify_follow_me,  # before point — index+middle would partially match
    _classify_point,
    _classify_disarm,
]
# ── Wave (temporal) detector ───────────────────────────────────────────────────
class WaveDetector:
    """Sliding-window wave gesture detector.

    Keeps a fixed-length history of wrist X samples (normalised image
    coords) and reports a wave once the mean-centred signal shows at least
    `min_reversals` sign flips with a peak-to-peak amplitude of at least
    `min_amplitude`.

    Args:
        history_len   : frames retained (default 24 ≈ 0.8 s at 30 fps)
        min_reversals : direction reversals required to trigger (default 2)
        min_amplitude : peak-to-peak x excursion threshold (default 0.08)
    """

    def __init__(
        self,
        history_len: int = 24,
        min_reversals: int = 2,
        min_amplitude: float = 0.08,
    ) -> None:
        self._history: Deque[float] = deque(maxlen=history_len)
        self._min_reversals = min_reversals
        self._min_amplitude = min_amplitude

    def push(self, wrist_x: float) -> Tuple[bool, float]:
        """Record one wrist-X sample and return (is_waving, confidence)."""
        self._history.append(wrist_x)
        # Fewer than 6 samples is never enough signal to judge a wave.
        return (False, 0.0) if len(self._history) < 6 else self._detect()

    def reset(self) -> None:
        """Drop all buffered samples (e.g. when the tracked hand is lost)."""
        self._history.clear()

    def _detect(self) -> Tuple[bool, float]:
        """Evaluate the buffered window; return (is_waving, confidence)."""
        xs = list(self._history)
        mean_x = sum(xs) / len(xs)
        centred = [v - mean_x for v in xs]
        swing = max(centred) - min(centred)
        if swing < self._min_amplitude:
            return False, 0.0
        # A reversal is a sign change between consecutive centred samples.
        flips = sum(1 for prev, cur in zip(centred, centred[1:]) if prev * cur < 0)
        if flips < self._min_reversals:
            return False, 0.0
        # Confidence: equal-weight blend of amplitude and reversal scores,
        # each saturating at 1.0.
        amp_score = min(1.0, swing / 0.30)
        rev_score = min(1.0, flips / 6.0)
        return True, round(0.5 * amp_score + 0.5 * rev_score, 3)
# ── Public API ─────────────────────────────────────────────────────────────────
def classify_hand(
    landmarks: List[Landmark],
    is_right: bool = True,
    wave_det: Optional[WaveDetector] = None,
) -> HandGestureResult:
    """Classify one hand's 21 MediaPipe landmarks into a robot-command gesture.

    Parameters
    ----------
    landmarks : the 21 MediaPipe-normalised Landmark points for one hand.
    is_right  : handedness flag; currently not consulted by any classifier
                in this module — kept for API stability.
    wave_det  : optional WaveDetector; when supplied, the temporal "greeting"
                wave is evaluated before any static pose.

    Returns
    -------
    HandGestureResult with gesture, confidence, direction, wrist_x, wrist_y.
    """
    if len(landmarks) < 21:
        return _NONE
    wrist = landmarks[_WRIST]
    # Temporal wave takes precedence over every static pose.
    if wave_det is not None:
        waving, wave_conf = wave_det.push(wrist.x)
        if waving:
            return HandGestureResult("greeting", wave_conf, "", wrist.x, wrist.y)
    # Static classifiers, first match wins (see _CLASSIFIERS ordering).
    for classifier in _CLASSIFIERS:
        hit = classifier(landmarks)
        if hit is not None:
            return hit
    return HandGestureResult("none", 0.0, "", wrist.x, wrist.y)

View File

@ -1,305 +0,0 @@
"""
hand_tracking_node.py MediaPipe Hands inference node (Issue #342).
Subscribes
----------
/camera/color/image_raw (sensor_msgs/Image)
Publishes
---------
/saltybot/hands (saltybot_hand_tracking_msgs/HandLandmarksArray)
/saltybot/hand_gesture (std_msgs/String)
Parameters
----------
max_hands int 2 Maximum hands detected per frame
model_complexity int 0 MediaPipe model: 0=lite, 1=full (0 for 20+ FPS)
min_detection_conf float 0.60 MediaPipe detection confidence threshold
min_tracking_conf float 0.50 MediaPipe tracking confidence threshold
gesture_min_conf float 0.60 Minimum gesture confidence to publish on hand_gesture
wave_history_len int 24 Frames kept in WaveDetector history
wave_min_reversals int 2 Oscillation reversals needed for wave
wave_min_amplitude float 0.08 Peak-to-peak wrist-x amplitude for wave
Performance note
----------------
model_complexity=0 (lite) on Orin Nano Super (1024-core Ampere, 67 TOPS)
targets >20 FPS at 640×480. Drop to 480×360 in the camera launch if needed.
"""
from __future__ import annotations
import threading
import time
from typing import Dict, List, Optional, Tuple
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, QoSReliabilityPolicy, QoSHistoryPolicy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from saltybot_hand_tracking_msgs.msg import HandLandmarks, HandLandmarksArray
from ._hand_gestures import Landmark, WaveDetector, classify_hand
# Optional runtime imports — guarded so unit tests don't need them
try:
import cv2
_HAS_CV = True
except ImportError:
_HAS_CV = False
try:
import mediapipe as mp
_HAS_MP = True
except ImportError:
_HAS_MP = False
# ── MediaPipe wrapper ──────────────────────────────────────────────────────────
class _MPHands:
    """Lazily-initialised wrapper around mediapipe.solutions.hands."""

    def __init__(self, max_hands: int, complexity: int,
                 det_conf: float, trk_conf: float) -> None:
        self._max_hands = max_hands
        self._complexity = complexity
        self._det_conf = det_conf
        self._trk_conf = trk_conf
        self._hands = None  # created in init(), stays None without mediapipe

    def init(self) -> None:
        """Build the MediaPipe Hands graph (no-op when mediapipe is absent)."""
        if _HAS_MP:
            self._hands = mp.solutions.hands.Hands(
                static_image_mode=False,
                max_num_hands=self._max_hands,
                min_detection_confidence=self._det_conf,
                min_tracking_confidence=self._trk_conf,
                model_complexity=self._complexity,
            )

    def process(self, bgr: np.ndarray):
        """Run inference on a BGR frame; None when unavailable or on error."""
        if not (_HAS_MP and _HAS_CV) or self._hands is None:
            return None
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        rgb.flags.writeable = False  # hand the frame to MediaPipe read-only
        try:
            return self._hands.process(rgb)
        except Exception:
            return None  # best-effort: a bad frame must not kill the node

    def close(self) -> None:
        """Release MediaPipe resources if they were created."""
        if self._hands is not None:
            self._hands.close()
# ── ROS2 Node ─────────────────────────────────────────────────────────────────
class HandTrackingNode(Node):
    """MediaPipe Hands inference node.

    Per frame: /camera/color/image_raw → BGR numpy → MediaPipe Hands →
    per-hand landmark packing + gesture classification → publish a
    HandLandmarksArray on /saltybot/hands, and a std_msgs/String on
    /saltybot/hand_gesture when the best gesture clears gesture_min_conf.
    """

    def __init__(self) -> None:
        super().__init__('hand_tracking_node')
        # ── Parameters ──────────────────────────────────────────────────────
        self.declare_parameter('max_hands', 2)
        self.declare_parameter('model_complexity', 0)
        self.declare_parameter('min_detection_conf', 0.60)
        self.declare_parameter('min_tracking_conf', 0.50)
        self.declare_parameter('gesture_min_conf', 0.60)
        self.declare_parameter('wave_history_len', 24)
        self.declare_parameter('wave_min_reversals', 2)
        self.declare_parameter('wave_min_amplitude', 0.08)
        p = self.get_parameter
        self._gesture_min_conf: float = p('gesture_min_conf').value
        # ── QoS ─────────────────────────────────────────────────────────────
        # BEST_EFFORT / KEEP_LAST depth 1: always work on the newest camera
        # frame and drop stale ones instead of queueing them.
        img_qos = QoSProfile(
            reliability=QoSReliabilityPolicy.BEST_EFFORT,
            history=QoSHistoryPolicy.KEEP_LAST,
            depth=1,
        )
        # ── Publishers ───────────────────────────────────────────────────────
        self._hands_pub = self.create_publisher(
            HandLandmarksArray, '/saltybot/hands', 10
        )
        self._gesture_pub = self.create_publisher(
            String, '/saltybot/hand_gesture', 10
        )
        # ── Subscriber ───────────────────────────────────────────────────────
        self._sub = self.create_subscription(
            Image, '/camera/color/image_raw',
            self._image_cb, img_qos,
        )
        # ── Per-hand WaveDetectors keyed by hand index ────────────────────
        # NOTE(review): keyed by MediaPipe's per-frame detection index; if the
        # detection order changes between frames, wave histories could mix
        # hands — confirm whether index stability is acceptable here.
        self._wave_dets: Dict[int, WaveDetector] = {}
        wave_hist = p('wave_history_len').value
        wave_rev = p('wave_min_reversals').value
        wave_amp = p('wave_min_amplitude').value
        self._wave_kwargs = dict(
            history_len=wave_hist,
            min_reversals=wave_rev,
            min_amplitude=wave_amp,
        )
        # ── MediaPipe (background init) ───────────────────────────────────
        # Graph construction is slow, so it runs on a daemon thread; the
        # image callback no-ops until _mp_ready is set.
        self._mp = _MPHands(
            max_hands = p('max_hands').value,
            complexity = p('model_complexity').value,
            det_conf = p('min_detection_conf').value,
            trk_conf = p('min_tracking_conf').value,
        )
        self._mp_ready = threading.Event()
        threading.Thread(target=self._init_mp, daemon=True).start()
        self.get_logger().info(
            f'hand_tracking_node ready — '
            f'max_hands={p("max_hands").value}, '
            f'complexity={p("model_complexity").value}'
        )

    # ── Init ──────────────────────────────────────────────────────────────────
    def _init_mp(self) -> None:
        """Background thread: build the MediaPipe graph, then unblock _image_cb."""
        if not _HAS_MP:
            self.get_logger().warn(
                'mediapipe not installed — no hand tracking. '
                'Install: pip install mediapipe'
            )
            return
        t0 = time.time()
        self._mp.init()
        self._mp_ready.set()
        self.get_logger().info(
            f'MediaPipe Hands ready ({time.time() - t0:.1f}s)'
        )

    # ── Image callback ────────────────────────────────────────────────────────
    def _image_cb(self, msg: Image) -> None:
        """Per-frame pipeline: decode → infer → classify → publish."""
        if not self._mp_ready.is_set():
            return  # MediaPipe still initialising — drop the frame
        bgr = self._ros_to_bgr(msg)
        if bgr is None:
            return  # unsupported encoding or malformed buffer
        results = self._mp.process(bgr)
        arr = HandLandmarksArray()
        arr.header = msg.header
        arr.hand_count = 0
        # Track the single most confident gesture across all hands for the
        # lightweight /saltybot/hand_gesture topic.
        best_gesture = ""
        best_conf = 0.0
        if results and results.multi_hand_landmarks:
            for hand_idx, (hand_lm, hand_info) in enumerate(
                zip(results.multi_hand_landmarks, results.multi_handedness)
            ):
                cls = hand_info.classification[0]
                is_right = cls.label == "Right"
                hs_score = float(cls.score)
                lm = [Landmark(p.x, p.y, p.z) for p in hand_lm.landmark]
                # Lazily create one WaveDetector per hand slot so wrist-x
                # history survives across frames.
                wave_det = self._wave_dets.get(hand_idx)
                if wave_det is None:
                    wave_det = WaveDetector(**self._wave_kwargs)
                    self._wave_dets[hand_idx] = wave_det
                gesture_result = classify_hand(lm, is_right=is_right, wave_det=wave_det)
                # Build HandLandmarks message
                hl = HandLandmarks()
                hl.header = msg.header
                hl.is_right_hand = is_right
                hl.handedness_score = hs_score
                hl.landmark_xyz = self._pack_landmarks(lm)
                hl.gesture = gesture_result.gesture
                hl.point_direction = gesture_result.direction
                hl.gesture_confidence = float(gesture_result.confidence)
                hl.wrist_x = float(lm[0].x)
                hl.wrist_y = float(lm[0].y)
                arr.hands.append(hl)
                arr.hand_count += 1
                if gesture_result.confidence > best_conf:
                    best_conf = gesture_result.confidence
                    best_gesture = gesture_result.gesture
        self._hands_pub.publish(arr)
        # Publish hand_gesture String only when confident enough
        if best_gesture and best_gesture != "none" \
                and best_conf >= self._gesture_min_conf:
            gs = String()
            gs.data = best_gesture
            self._gesture_pub.publish(gs)

    # ── Helpers ───────────────────────────────────────────────────────────────
    @staticmethod
    def _pack_landmarks(lm: List[Landmark]) -> List[float]:
        """Pack 21 Landmark objects into a flat [x0,y0,z0, ..., x20,y20,z20] list."""
        out: List[float] = []
        for l in lm:
            out.extend([l.x, l.y, l.z])
        return out

    @staticmethod
    def _ros_to_bgr(msg: Image) -> Optional[np.ndarray]:
        """Convert sensor_msgs/Image to uint8 BGR numpy array.

        Supports bgr8 / rgb8 / mono8; returns None for any other encoding or
        when the buffer does not reshape cleanly.

        NOTE(review): assumes tightly packed rows (msg.step == width ×
        channels). A padded step would make reshape raise ValueError and the
        frame would be silently dropped — confirm against the camera driver.
        """
        enc = msg.encoding.lower()
        data = np.frombuffer(msg.data, dtype=np.uint8)
        if enc == 'bgr8':
            try:
                return data.reshape((msg.height, msg.width, 3))
            except ValueError:
                return None
        if enc == 'rgb8':
            try:
                img = data.reshape((msg.height, msg.width, 3))
                return cv2.cvtColor(img, cv2.COLOR_RGB2BGR) if _HAS_CV else None
            except ValueError:
                return None
        if enc == 'mono8':
            try:
                img = data.reshape((msg.height, msg.width))
                return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) if _HAS_CV else None
            except ValueError:
                return None
        return None

    # ── Cleanup ───────────────────────────────────────────────────────────────
    def destroy_node(self) -> None:
        """Release the MediaPipe graph before normal Node teardown."""
        self._mp.close()
        super().destroy_node()
# ── Entry point ───────────────────────────────────────────────────────────────
def main(args=None) -> None:
    """CLI entry point: initialise rclpy and spin the node until interrupted."""
    rclpy.init(args=args)
    node = HandTrackingNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node
    finally:
        # Always tear down the node (and its MediaPipe graph) cleanly.
        node.destroy_node()
        rclpy.try_shutdown()


if __name__ == '__main__':
    main()

View File

@ -1,4 +0,0 @@
[develop]
script_dir=$base/lib/saltybot_hand_tracking
[install]
install_scripts=$base/lib/saltybot_hand_tracking

View File

@ -1,33 +0,0 @@
from setuptools import setup
import os
from glob import glob
# Single source of truth for the package/module name used throughout setup().
package_name = 'saltybot_hand_tracking'
setup(
    name=package_name,
    version='0.1.0',
    packages=[package_name],
    data_files=[
        # Ament resource index marker so ROS2 tooling can discover the package.
        ('share/ament_index/resource_index/packages',
         ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        # Launch and parameter files installed into the package share dir.
        (os.path.join('share', package_name, 'launch'),
         glob('launch/*.launch.py')),
        (os.path.join('share', package_name, 'config'),
         glob('config/*.yaml')),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='sl-perception',
    maintainer_email='sl-perception@saltylab.local',
    description='MediaPipe hand tracking node for SaltyBot (Issue #342)',
    license='MIT',
    # NOTE(review): tests_require is deprecated in recent setuptools —
    # confirm the targeted setuptools version still honours it.
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            # MediaPipe Hands inference + gesture classification (Issue #342)
            'hand_tracking = saltybot_hand_tracking.hand_tracking_node:main',
        ],
    },
)

View File

@ -1,407 +0,0 @@
"""
test_hand_gestures.py pytest tests for _hand_gestures.py (no ROS2 required).
Tests cover:
- Landmark dataclass
- HandGestureResult NamedTuple fields
- WaveDetector no wave / wave trigger / reset
- _finger_up helpers
- classify_hand:
stop (open palm)
point (index up, others curled) + direction
disarm (fist)
confirm (thumbs-up)
follow_me (peace/victory)
greeting (wave via WaveDetector)
none (neutral / ambiguous pose)
- classify_hand priority ordering
    - classify_hand: fewer than 21 landmarks → "none"
"""
from __future__ import annotations
import math
from typing import List
import pytest
from saltybot_hand_tracking._hand_gestures import (
Landmark,
HandGestureResult,
WaveDetector,
classify_hand,
_finger_up,
_count_fingers_up,
_four_fingers_curled,
_thumb_curled,
_thumb_extended_up,
_point_direction,
)
# ── Landmark factory helpers ──────────────────────────────────────────────────
def _lm(x: float, y: float, z: float = 0.0) -> Landmark:
    """Shorthand Landmark constructor; z defaults to the image plane (0.0)."""
    return Landmark(x, y, z)
def _flat_hand(n: int = 21) -> List[Landmark]:
    """n landmarks stacked at the image centre (0.5, 0.5) — degenerate pose."""
    return [Landmark(0.5, 0.5) for _ in range(n)]
def _make_hand(
    wrist_y: float = 0.8,
    # Index finger
    idx_mcp_y: float = 0.6, idx_pip_y: float = 0.5,
    idx_tip_y: float = 0.3, idx_mcp_x: float = 0.4, idx_tip_x: float = 0.4,
    # Middle finger
    mid_mcp_y: float = 0.6, mid_pip_y: float = 0.5, mid_tip_y: float = 0.4,
    # Ring finger
    rng_mcp_y: float = 0.6, rng_pip_y: float = 0.5, rng_tip_y: float = 0.55,
    # Pinky
    pnk_mcp_y: float = 0.6, pnk_pip_y: float = 0.5, pnk_tip_y: float = 0.55,
    # Thumb
    thm_cmc_y: float = 0.7, thm_mcp_y: float = 0.65, thm_tip_y: float = 0.55,
) -> List[Landmark]:
    """
    Build a 21-landmark array for testing.

    Each finger has a fixed x column; callers shape a pose purely through
    the y parameters (image y grows downward, so smaller y = higher).
    DIP/IP joints are synthesised as the midpoint of the PIP/MCP and TIP.

    Layout (MediaPipe Hands indices):
        0 WRIST
        1 THUMB_CMC 2 THUMB_MCP 3 THUMB_IP 4 THUMB_TIP
        5 INDEX_MCP 6 INDEX_PIP 7 INDEX_DIP 8 INDEX_TIP
        9 MIDDLE_MCP 10 MIDDLE_PIP 11 MIDDLE_DIP 12 MIDDLE_TIP
        13 RING_MCP 14 RING_PIP 15 RING_DIP 16 RING_TIP
        17 PINKY_MCP 18 PINKY_PIP 19 PINKY_DIP 20 PINKY_TIP
    """
    lm = [_lm(0.5, 0.5)] * 21  # placeholder; every slot is overwritten below
    # WRIST
    lm[0] = _lm(0.5, wrist_y)
    # THUMB
    lm[1] = _lm(0.35, thm_cmc_y)  # CMC
    lm[2] = _lm(0.33, thm_mcp_y)  # MCP
    lm[3] = _lm(0.31, (thm_mcp_y + thm_tip_y) / 2)  # IP
    lm[4] = _lm(0.30, thm_tip_y)  # TIP
    # INDEX
    lm[5] = _lm(idx_mcp_x, idx_mcp_y)  # MCP
    lm[6] = _lm(idx_mcp_x, idx_pip_y)  # PIP
    lm[7] = _lm(idx_mcp_x, (idx_pip_y + idx_tip_y) / 2)  # DIP
    lm[8] = _lm(idx_tip_x, idx_tip_y)  # TIP
    # MIDDLE
    lm[9] = _lm(0.5, mid_mcp_y)  # MCP
    lm[10] = _lm(0.5, mid_pip_y)  # PIP
    lm[11] = _lm(0.5, (mid_pip_y + mid_tip_y) / 2)
    lm[12] = _lm(0.5, mid_tip_y)  # TIP
    # RING
    lm[13] = _lm(0.6, rng_mcp_y)  # MCP
    lm[14] = _lm(0.6, rng_pip_y)  # PIP
    lm[15] = _lm(0.6, (rng_pip_y + rng_tip_y) / 2)
    lm[16] = _lm(0.6, rng_tip_y)  # TIP
    # PINKY
    lm[17] = _lm(0.65, pnk_mcp_y)  # MCP
    lm[18] = _lm(0.65, pnk_pip_y)  # PIP
    lm[19] = _lm(0.65, (pnk_pip_y + pnk_tip_y) / 2)
    lm[20] = _lm(0.65, pnk_tip_y)  # TIP
    return lm
# ── Prebuilt canonical poses ──────────────────────────────────────────────────
def _open_palm() -> List[Landmark]:
    """All 4 fingers extended (tips clearly above PIPs), thumb neutral.

    Tip y 0.25 < PIP y 0.50 on every finger, so each counts as "up".
    """
    return _make_hand(
        idx_mcp_y=0.60, idx_pip_y=0.50, idx_tip_y=0.25,
        mid_mcp_y=0.60, mid_pip_y=0.50, mid_tip_y=0.25,
        rng_mcp_y=0.60, rng_pip_y=0.50, rng_tip_y=0.25,
        pnk_mcp_y=0.60, pnk_pip_y=0.50, pnk_tip_y=0.25,
    )
def _point_up() -> List[Landmark]:
    """Index extended, middle/ring/pinky curled (their tips below their PIPs)."""
    return _make_hand(
        idx_mcp_y=0.60, idx_pip_y=0.50, idx_tip_y=0.25,
        mid_mcp_y=0.60, mid_pip_y=0.55, mid_tip_y=0.62,  # curled
        rng_mcp_y=0.60, rng_pip_y=0.55, rng_tip_y=0.62,
        pnk_mcp_y=0.60, pnk_pip_y=0.55, pnk_tip_y=0.62,
    )
def _fist() -> List[Landmark]:
    """All fingers curled, thumb tip at/below thumb MCP — the "disarm" pose."""
    return _make_hand(
        idx_mcp_y=0.60, idx_pip_y=0.58, idx_tip_y=0.65,  # tip below pip
        mid_mcp_y=0.60, mid_pip_y=0.58, mid_tip_y=0.65,
        rng_mcp_y=0.60, rng_pip_y=0.58, rng_tip_y=0.65,
        pnk_mcp_y=0.60, pnk_pip_y=0.58, pnk_tip_y=0.65,
        thm_cmc_y=0.70, thm_mcp_y=0.65, thm_tip_y=0.68,  # tip >= mcp → curled
    )
def _thumbs_up() -> List[Landmark]:
    """Thumb tip clearly above CMC, four fingers curled — the "confirm" pose."""
    return _make_hand(
        thm_cmc_y=0.70, thm_mcp_y=0.65, thm_tip_y=0.30,  # tip well above CMC
        idx_mcp_y=0.60, idx_pip_y=0.58, idx_tip_y=0.65,
        mid_mcp_y=0.60, mid_pip_y=0.58, mid_tip_y=0.65,
        rng_mcp_y=0.60, rng_pip_y=0.58, rng_tip_y=0.65,
        pnk_mcp_y=0.60, pnk_pip_y=0.58, pnk_tip_y=0.65,
    )
def _peace() -> List[Landmark]:
    """Index + middle extended, ring + pinky curled — the "follow_me" pose."""
    return _make_hand(
        idx_mcp_y=0.60, idx_pip_y=0.50, idx_tip_y=0.25,
        mid_mcp_y=0.60, mid_pip_y=0.50, mid_tip_y=0.25,
        rng_mcp_y=0.60, rng_pip_y=0.58, rng_tip_y=0.65,  # curled
        pnk_mcp_y=0.60, pnk_pip_y=0.58, pnk_tip_y=0.65,
    )
# ── Landmark dataclass ────────────────────────────────────────────────────────
class TestLandmark:
    """Landmark is a frozen value object with x, y and an optional z."""

    def test_fields(self):
        pt = Landmark(0.1, 0.2, 0.3)
        assert pt.x == pytest.approx(0.1)
        assert pt.y == pytest.approx(0.2)
        assert pt.z == pytest.approx(0.3)

    def test_default_z(self):
        assert Landmark(0.5, 0.5).z == 0.0

    def test_frozen(self):
        pt = Landmark(0.5, 0.5)
        with pytest.raises(Exception):
            pt.x = 0.9  # type: ignore[misc]
# ── HandGestureResult ─────────────────────────────────────────────────────────
class TestHandGestureResult:
    """HandGestureResult carries gesture, confidence, direction and wrist x/y."""

    def test_fields(self):
        res = HandGestureResult("stop", 0.85, "", 0.5, 0.6)
        assert res.gesture == "stop"
        assert res.confidence == pytest.approx(0.85)
        assert res.direction == ""
        assert res.wrist_x == pytest.approx(0.5)
        assert res.wrist_y == pytest.approx(0.6)
# ── Low-level helpers ─────────────────────────────────────────────────────────
class TestFingerHelpers:
    """Unit tests for the low-level finger/thumb geometry predicates."""

    def _two_lm(self, tip_y: float, pip_y: float) -> List[Landmark]:
        """Minimal 21-landmark list whose slots 0 and 1 act as tip and pip."""
        base = [_lm(0.5, 0.5)] * 21
        base[0] = _lm(0.5, tip_y)
        base[1] = _lm(0.5, pip_y)
        return base

    def test_finger_up_true(self):
        """Tip above PIP (smaller y) counts as up."""
        assert _finger_up(self._two_lm(tip_y=0.3, pip_y=0.6), 0, 1) is True

    def test_finger_up_false(self):
        """Tip below PIP counts as down."""
        assert _finger_up(self._two_lm(tip_y=0.7, pip_y=0.4), 0, 1) is False

    def test_count_fingers_up_open_palm(self):
        assert _count_fingers_up(_open_palm()) == 4

    def test_count_fingers_up_fist(self):
        assert _count_fingers_up(_fist()) == 0

    def test_four_fingers_curled_fist(self):
        assert _four_fingers_curled(_fist()) is True

    def test_four_fingers_curled_open_palm_false(self):
        assert _four_fingers_curled(_open_palm()) is False

    def test_thumb_curled_fist(self):
        assert _thumb_curled(_fist()) is True

    def test_thumb_extended_up_thumbs_up(self):
        assert _thumb_extended_up(_thumbs_up()) is True

    def test_thumb_not_extended_fist(self):
        assert _thumb_extended_up(_fist()) is False
# ── Point direction ───────────────────────────────────────────────────────────
class TestPointDirection:
    """_point_direction maps the index MCP→TIP vector onto compass names."""

    def _hand_with_index_vec(self, dx: float, dy: float) -> List[Landmark]:
        """Hand whose index MCP→TIP vector equals (dx, dy)."""
        hand = _make_hand()
        hand[5] = _lm(0.5, 0.6)            # INDEX_MCP
        hand[8] = _lm(0.5 + dx, 0.6 + dy)  # INDEX_TIP
        return hand

    def test_pointing_up(self):
        # Negative dy = tip above MCP in image coords → "up".
        assert _point_direction(self._hand_with_index_vec(0.0, -0.2)) == "up"

    def test_pointing_right(self):
        assert _point_direction(self._hand_with_index_vec(0.2, 0.0)) == "right"

    def test_pointing_left(self):
        assert _point_direction(self._hand_with_index_vec(-0.2, 0.0)) == "left"

    def test_pointing_upper_right(self):
        assert _point_direction(self._hand_with_index_vec(0.15, -0.15)) == "upper_right"
# ── WaveDetector ──────────────────────────────────────────────────────────────
class TestWaveDetector:
    """Behavioural tests for the sliding-window wave detector."""

    def test_too_few_samples_no_wave(self):
        det = WaveDetector()
        for sample in (0.3, 0.7, 0.3):
            waving, conf = det.push(sample)
            assert waving is False
            assert conf == 0.0

    def test_wave_detected_after_oscillation(self):
        """A full-amplitude sinusoidal wrist_x must trigger the detector."""
        det = WaveDetector(history_len=20, min_reversals=2, min_amplitude=0.08)
        waving, conf = False, 0.0
        for i in range(20):
            waving, conf = det.push(0.5 + 0.20 * math.sin(i * math.pi / 3))
        assert waving is True
        assert conf > 0.0

    def test_no_wave_small_amplitude(self):
        """A 0.01-amplitude wiggle stays below the 0.10 threshold."""
        det = WaveDetector(min_amplitude=0.10)
        for i in range(24):
            det.push(0.5 + 0.01 * math.sin(i * math.pi / 3))
        waving, _ = det.push(0.5)
        assert waving is False

    def test_reset_clears_history(self):
        det = WaveDetector()
        for i in range(24):
            det.push(0.5 + 0.2 * math.sin(i * math.pi / 3))
        det.reset()
        waving, _ = det.push(0.5)
        assert waving is False
# ── classify_hand ─────────────────────────────────────────────────────────────
class TestClassifyHand:
    """End-to-end checks for classify_hand across the canonical poses."""

    def test_too_few_landmarks(self):
        assert classify_hand([_lm(0.5, 0.5)] * 10).gesture == "none"

    def test_stop_open_palm(self):
        res = classify_hand(_open_palm())
        assert res.gesture == "stop"
        assert res.confidence >= 0.60

    def test_point_up_gesture(self):
        res = classify_hand(_point_up())
        assert res.gesture == "point"
        assert res.confidence >= 0.60

    def test_point_direction_populated(self):
        assert classify_hand(_point_up()).direction != ""

    def test_disarm_fist(self):
        res = classify_hand(_fist())
        assert res.gesture == "disarm"
        assert res.confidence >= 0.60

    def test_confirm_thumbs_up(self):
        res = classify_hand(_thumbs_up())
        assert res.gesture == "confirm"
        assert res.confidence >= 0.60

    def test_follow_me_peace(self):
        res = classify_hand(_peace())
        assert res.gesture == "follow_me"
        assert res.confidence >= 0.60

    def test_greeting_wave(self):
        """Oscillating wrist x across frames yields the temporal wave gesture."""
        det = WaveDetector(history_len=20, min_reversals=2, min_amplitude=0.08)
        base = _open_palm()
        res = HandGestureResult("none", 0.0, "", 0.5, 0.5)
        for frame in range(20):
            # Rebuild the landmark set with a moving wrist (index 0).
            moving = list(base)
            wx = 0.5 + 0.20 * math.sin(frame * math.pi / 3)
            moving[0] = Landmark(wx, moving[0].y)
            res = classify_hand(moving, is_right=True, wave_det=det)
        assert res.gesture == "greeting"

    def test_flat_hand_no_crash(self):
        """Degenerate geometry must still yield one of the known labels."""
        labels = {"stop", "point", "disarm", "confirm", "follow_me", "greeting", "none"}
        assert classify_hand(_flat_hand()).gesture in labels

    def test_wrist_position_in_result(self):
        hand = _open_palm()
        hand[0] = Landmark(0.3, 0.7)
        res = classify_hand(hand)
        assert res.wrist_x == pytest.approx(0.3)
        assert res.wrist_y == pytest.approx(0.7)

    def test_confirm_before_stop(self):
        """Priority: thumbs-up must resolve to 'confirm', never 'stop'."""
        assert classify_hand(_thumbs_up()).gesture == "confirm"

    def test_follow_me_before_point(self):
        """Priority: a peace sign (2 fingers) must not degrade to 'point'."""
        assert classify_hand(_peace()).gesture == "follow_me"

    def test_wave_beats_static_gesture(self):
        """A detected wave overrides the static open-palm ('stop') reading."""
        det = WaveDetector(history_len=20, min_reversals=2, min_amplitude=0.08)
        res = None
        for frame in range(20):
            hand = _open_palm()
            hand[0] = Landmark(0.5 + 0.25 * math.sin(frame * math.pi / 3), hand[0].y)
            res = classify_hand(hand, wave_det=det)
        assert res.gesture == "greeting"

    def test_result_confidence_bounded(self):
        for make in (_open_palm, _point_up, _fist, _thumbs_up, _peace):
            assert 0.0 <= classify_hand(make()).confidence <= 1.0

View File

@ -1,16 +0,0 @@
# Interface-only package: generates ROS2 bindings for the hand tracking
# message types used by the MediaPipe pipeline.
cmake_minimum_required(VERSION 3.8)
project(saltybot_hand_tracking_msgs)
find_package(ament_cmake REQUIRED)
find_package(rosidl_default_generators REQUIRED)
find_package(std_msgs REQUIRED)  # std_msgs/Header is embedded in both messages
rosidl_generate_interfaces(${PROJECT_NAME}
    # Issue #342 hand tracking / MediaPipe pivot
    "msg/HandLandmarks.msg"
    "msg/HandLandmarksArray.msg"
    DEPENDENCIES std_msgs
)
ament_export_dependencies(rosidl_default_runtime)
ament_package()

View File

@ -1,29 +0,0 @@
# HandLandmarks.msg — MediaPipe Hands result for one detected hand (Issue #342)
#
# Landmark coordinates are MediaPipe-normalised:
# x, y ∈ [0.0, 1.0] — fraction of image width/height
# z — depth relative to wrist (negative = towards camera)
#
# landmark_xyz layout: [x0, y0, z0, x1, y1, z1, ..., x20, y20, z20]
# Index order follows MediaPipe Hands topology:
# 0=WRIST 1-4=THUMB(CMC,MCP,IP,TIP) 5-8=INDEX 9-12=MIDDLE
# 13-16=RING 17-20=PINKY
std_msgs/Header header
# Handedness
bool is_right_hand
float32 handedness_score # MediaPipe confidence for Left/Right label
# 21 landmarks × 3 (x, y, z) = 63 values
float32[63] landmark_xyz
# Classified robot-command gesture
# Values: "stop" | "point" | "disarm" | "confirm" | "follow_me" | "greeting" | "none"
string gesture
string point_direction # "up"|"right"|"left"|"upper_right"|"upper_left"|"lower_right"|"lower_left"|"down"
float32 gesture_confidence
# Wrist position in normalised image coords (convenience shortcut)
float32 wrist_x
float32 wrist_y

View File

@ -1,5 +0,0 @@
# HandLandmarksArray.msg — All detected hands in one camera frame (Issue #342)
std_msgs/Header header
HandLandmarks[] hands
uint32 hand_count

View File

@ -1,21 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>saltybot_hand_tracking_msgs</name>
<version>0.1.0</version>
<description>Message types for MediaPipe hand tracking (Issue #342).</description>
<maintainer email="robot@saltylab.local">SaltyLab</maintainer>
<license>MIT</license>
<buildtool_depend>ament_cmake</buildtool_depend>
<buildtool_depend>rosidl_default_generators</buildtool_depend>
<depend>std_msgs</depend>
<exec_depend>rosidl_default_runtime</exec_depend>
<member_of_group>rosidl_interface_packages</member_of_group>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>

View File

@ -1,22 +0,0 @@
# Pure Pursuit Path Follower Configuration
pure_pursuit:
ros__parameters:
# Path following parameters
lookahead_distance: 0.5 # Distance to look ahead on the path (meters)
goal_tolerance: 0.1 # Distance tolerance to goal (meters)
heading_tolerance: 0.1 # Heading tolerance in radians (rad)
# Speed parameters
max_linear_velocity: 1.0 # Maximum linear velocity (m/s)
max_angular_velocity: 1.57 # Maximum angular velocity (rad/s)
# Control parameters
linear_velocity_scale: 1.0 # Scale factor for linear velocity
angular_velocity_scale: 1.0 # Scale factor for angular velocity
use_heading_correction: true # Apply heading error correction
# Publishing frequency (Hz)
publish_frequency: 10
# Enable/disable path follower
enable_path_following: true

View File

@ -1,29 +0,0 @@
import os
from launch import LaunchDescription
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
from launch.substitutions import PathJoinSubstitution
def generate_launch_description():
    """Assemble the launch description for the pure pursuit follower node."""
    share_config = PathJoinSubstitution(
        [FindPackageShare('saltybot_pure_pursuit'), 'config']
    )
    params_yaml = PathJoinSubstitution([share_config, 'pure_pursuit_config.yaml'])
    follower = Node(
        package='saltybot_pure_pursuit',
        executable='pure_pursuit_node',
        name='pure_pursuit',
        output='screen',
        parameters=[params_yaml],
        remappings=[
            # NOTE(review): these are identity remappings (no-ops) — confirm
            # whether they are placeholders for future topic renames.
            ('/odom', '/odom'),
            ('/path', '/path'),
            ('/cmd_vel_tracked', '/cmd_vel_tracked'),
        ],
    )
    return LaunchDescription([follower])

View File

@ -1,29 +0,0 @@
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>saltybot_pure_pursuit</name>
<version>0.1.0</version>
<description>Pure pursuit path follower for Nav2 autonomous navigation</description>
<maintainer email="sl-controls@saltybot.local">SaltyBot Controls</maintainer>
<license>MIT</license>
<author email="sl-controls@saltybot.local">SaltyBot Controls Team</author>
<buildtool_depend>ament_cmake</buildtool_depend>
<buildtool_depend>ament_cmake_python</buildtool_depend>
<depend>rclpy</depend>
<depend>nav_msgs</depend>
<depend>geometry_msgs</depend>
<depend>std_msgs</depend>
<test_depend>ament_copyright</test_depend>
<test_depend>ament_flake8</test_depend>
<test_depend>ament_pep257</test_depend>
<test_depend>pytest</test_depend>
<export>
<build_type>ament_python</build_type>
</export>
</package>

View File

@ -1,269 +0,0 @@
#!/usr/bin/env python3
"""Pure pursuit path follower for Nav2 autonomous navigation.
Implements the pure pursuit algorithm for following a path or trajectory.
The algorithm computes steering commands to make the robot follow a path
by targeting a lookahead point on the path.
The pure pursuit algorithm:
1. Finds the closest point on the path to the robot
2. Looks ahead a specified distance along the path
3. Computes a circular arc passing through robot position to lookahead point
4. Publishes velocity commands to follow this arc
"""
import math
import rclpy
from rclpy.node import Node
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import Twist, PoseStamped
from std_msgs.msg import Float32
class PurePursuitNode(Node):
    """ROS2 node implementing the pure pursuit path follower.

    Subscribes to odometry (/odom) and a path (/path) and, at a fixed
    rate, publishes Twist commands on /cmd_vel_tracked that steer the
    robot along a circular arc towards a lookahead point on the path.
    The distance to the closest waypoint is published on /tracking_error.
    """

    def __init__(self):
        super().__init__('pure_pursuit')
        # --- Parameters -------------------------------------------------
        self.declare_parameter('lookahead_distance', 0.5)
        self.declare_parameter('goal_tolerance', 0.1)
        self.declare_parameter('heading_tolerance', 0.1)
        self.declare_parameter('max_linear_velocity', 1.0)
        self.declare_parameter('max_angular_velocity', 1.57)
        self.declare_parameter('linear_velocity_scale', 1.0)
        self.declare_parameter('angular_velocity_scale', 1.0)
        self.declare_parameter('use_heading_correction', True)
        self.declare_parameter('publish_frequency', 10)
        self.declare_parameter('enable_path_following', True)

        self.lookahead_distance = self.get_parameter('lookahead_distance').value
        self.goal_tolerance = self.get_parameter('goal_tolerance').value
        self.heading_tolerance = self.get_parameter('heading_tolerance').value
        self.max_linear_velocity = self.get_parameter('max_linear_velocity').value
        self.max_angular_velocity = self.get_parameter('max_angular_velocity').value
        self.linear_velocity_scale = self.get_parameter('linear_velocity_scale').value
        self.angular_velocity_scale = self.get_parameter('angular_velocity_scale').value
        self.use_heading_correction = self.get_parameter('use_heading_correction').value
        publish_frequency = self.get_parameter('publish_frequency').value
        self.enable_path_following = self.get_parameter('enable_path_following').value

        # --- State ------------------------------------------------------
        self.current_pose = None   # geometry_msgs/Pose from the latest odometry
        self.current_path = None   # nav_msgs/Path from the latest path message
        self.goal_reached = False  # latched once within goal_tolerance

        # --- ROS interfaces ---------------------------------------------
        self.sub_odom = self.create_subscription(
            Odometry, '/odom', self._on_odometry, 10
        )
        self.sub_path = self.create_subscription(
            Path, '/path', self._on_path, 10
        )
        self.pub_cmd_vel = self.create_publisher(Twist, '/cmd_vel_tracked', 10)
        self.pub_tracking_error = self.create_publisher(Float32, '/tracking_error', 10)

        # Control loop timer.  Guard against a non-positive frequency
        # parameter, which would otherwise raise ZeroDivisionError.
        period = 1.0 / publish_frequency if publish_frequency > 0 else 0.1
        self.timer = self.create_timer(period, self._control_loop)

        self.get_logger().info(
            f"Pure pursuit initialized. "
            f"Lookahead: {self.lookahead_distance}m, "
            f"Goal tolerance: {self.goal_tolerance}m"
        )

    def _on_odometry(self, msg: Odometry) -> None:
        """Cache the latest robot pose from odometry."""
        self.current_pose = msg.pose.pose

    def _on_path(self, msg: Path) -> None:
        """Cache the latest path to follow."""
        self.current_path = msg

    def _quaternion_to_yaw(self, quat):
        """Return the yaw (rotation about z, radians) of a quaternion."""
        siny_cosp = 2 * (quat.w * quat.z + quat.x * quat.y)
        cosy_cosp = 1 - 2 * (quat.y * quat.y + quat.z * quat.z)
        return math.atan2(siny_cosp, cosy_cosp)

    def _yaw_to_quaternion(self, yaw):
        """Return a geometry_msgs Quaternion representing a pure yaw rotation."""
        cy = math.cos(yaw * 0.5)
        sy = math.sin(yaw * 0.5)
        return Quaternion(x=0.0, y=0.0, z=sy, w=cy)

    def _distance(self, p1, p2):
        """Return the 2-D Euclidean distance between two points (z ignored)."""
        return math.hypot(p1.x - p2.x, p1.y - p2.y)

    def _find_closest_point_on_path(self):
        """Find the path waypoint closest to the current robot position.

        Returns:
            Tuple ``(closest_idx, closest_dist)``.  ``(None, inf)`` when
            there is no usable path (fewer than two poses).
        """
        if self.current_path is None or len(self.current_path.poses) < 2:
            return None, float('inf')
        closest_idx = 0
        closest_dist = float('inf')
        for i, pose in enumerate(self.current_path.poses):
            dist = self._distance(self.current_pose.position, pose.pose.position)
            if dist < closest_dist:
                closest_dist = dist
                closest_idx = i
        return closest_idx, closest_dist

    def _find_lookahead_point(self):
        """Return the point on the path one lookahead distance ahead.

        Walks the path forward from the closest waypoint, accumulating
        segment lengths, and linearly interpolates inside the segment
        where the lookahead distance is crossed.  Falls back to the final
        waypoint when the remaining path is shorter than the lookahead.
        """
        if self.current_path is None or len(self.current_path.poses) < 2:
            return None
        closest_idx, _ = self._find_closest_point_on_path()
        if closest_idx is None:
            return None
        current_dist = 0.0
        for i in range(closest_idx, len(self.current_path.poses) - 1):
            p1 = self.current_path.poses[i].pose.position
            p2 = self.current_path.poses[i + 1].pose.position
            segment_dist = self._distance(p1, p2)
            current_dist += segment_dist
            if current_dist >= self.lookahead_distance:
                # Interpolate between p1 and p2 (skip zero-length segments).
                overshoot = current_dist - self.lookahead_distance
                if segment_dist > 0:
                    alpha = 1.0 - (overshoot / segment_dist)
                    lookahead = Point()
                    lookahead.x = p1.x + alpha * (p2.x - p1.x)
                    lookahead.y = p1.y + alpha * (p2.y - p1.y)
                    lookahead.z = 0.0
                    return lookahead
        # Remaining path shorter than the lookahead: target the last waypoint.
        return self.current_path.poses[-1].pose.position

    def _calculate_steering_command(self, lookahead_point):
        """Compute velocities steering towards a lookahead point.

        Args:
            lookahead_point: Target lookahead point on the path.

        Returns:
            Tuple of (linear_velocity, angular_velocity) in m/s and rad/s.
        """
        if lookahead_point is None or self.current_pose is None:
            return 0.0, 0.0

        # Vector from robot to lookahead point.
        dx = lookahead_point.x - self.current_pose.position.x
        dy = lookahead_point.y - self.current_pose.position.y
        distance_to_lookahead = math.hypot(dx, dy)

        # Goal check: stop once within tolerance of the target point.
        if distance_to_lookahead < self.goal_tolerance:
            self.goal_reached = True
            return 0.0, 0.0

        robot_yaw = self._quaternion_to_yaw(self.current_pose.orientation)
        angle_to_lookahead = math.atan2(dy, dx)

        # Heading error, normalized to [-pi, pi].
        heading_error = angle_to_lookahead - robot_yaw
        while heading_error > math.pi:
            heading_error -= 2 * math.pi
        while heading_error < -math.pi:
            heading_error += 2 * math.pi

        # Pure pursuit curvature: k = 2 * sin(alpha) / Ld
        # where alpha is the heading error and Ld is the lookahead distance.
        if self.lookahead_distance > 0:
            curvature = (2.0 * math.sin(heading_error)) / self.lookahead_distance
        else:
            curvature = 0.0

        # Linear velocity: optionally slow down as the heading error grows;
        # clamp at zero so we never command reverse motion.
        if self.use_heading_correction:
            linear_velocity = max(0.0, self.max_linear_velocity * math.cos(heading_error))
        else:
            linear_velocity = self.max_linear_velocity

        # Angular velocity from curvature: omega = v * k
        angular_velocity = linear_velocity * curvature

        # Apply user scaling FIRST, then clamp, so the max_* parameters are
        # hard caps even when a scale factor greater than 1 is configured.
        linear_velocity *= self.linear_velocity_scale
        angular_velocity *= self.angular_velocity_scale
        linear_velocity = min(linear_velocity, self.max_linear_velocity)
        angular_velocity = max(-self.max_angular_velocity,
                               min(angular_velocity, self.max_angular_velocity))
        return linear_velocity, angular_velocity

    def _control_loop(self) -> None:
        """Periodic control step: compute and publish the next Twist."""
        if not self.enable_path_following or self.current_pose is None:
            # Disabled or no odometry yet: command a full stop.
            self.pub_cmd_vel.publish(Twist())
            return

        lookahead_point = self._find_lookahead_point()
        if lookahead_point is None:
            # No usable path: command a full stop.
            self.pub_cmd_vel.publish(Twist())
            return

        linear_vel, angular_vel = self._calculate_steering_command(lookahead_point)

        cmd = Twist()
        cmd.linear.x = linear_vel
        cmd.angular.z = angular_vel
        self.pub_cmd_vel.publish(cmd)

        # Publish the cross-track error (distance to the closest waypoint).
        # Only when a valid closest point exists; float() guards against
        # Float32 rejecting non-float data.
        if self.current_path and len(self.current_path.poses) > 0:
            closest_idx, tracking_error = self._find_closest_point_on_path()
            if closest_idx is not None:
                self.pub_tracking_error.publish(Float32(data=float(tracking_error)))
def main(args=None):
    """Entry point: initialise rclpy, spin the node, then clean up."""
    rclpy.init(args=args)
    pure_pursuit = PurePursuitNode()
    try:
        rclpy.spin(pure_pursuit)
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal way to stop the node.
    finally:
        pure_pursuit.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()

View File

@ -1,5 +0,0 @@
[develop]
script_dir=$base/lib/saltybot_pure_pursuit
[egg_info]
tag_build =
tag_date = 0

View File

@ -1,34 +0,0 @@
from setuptools import setup, find_packages

# ament_python package manifest for the pure pursuit path follower.
package_name = 'saltybot_pure_pursuit'

setup(
    name=package_name,
    version='0.1.0',
    packages=find_packages(exclude=['test']),
    data_files=[
        # Marker file so ament can index the package.
        ('share/ament_index/resource_index/packages',
         ['resource/saltybot_pure_pursuit']),
        ('share/' + package_name, ['package.xml']),
        # Config + launch installed to share/ so `ros2 launch` can find them.
        ('share/' + package_name + '/config', ['config/pure_pursuit_config.yaml']),
        ('share/' + package_name + '/launch', ['launch/pure_pursuit.launch.py']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    author='SaltyBot Controls',
    author_email='sl-controls@saltybot.local',
    maintainer='SaltyBot Controls',
    maintainer_email='sl-controls@saltybot.local',
    keywords=['ROS2', 'pure_pursuit', 'path_following', 'nav2'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development',
    ],
    # Console script: `ros2 run saltybot_pure_pursuit pure_pursuit_node`.
    entry_points={
        'console_scripts': [
            'pure_pursuit_node=saltybot_pure_pursuit.pure_pursuit_node:main',
        ],
    },
)

View File

@ -1,397 +0,0 @@
"""Unit tests for pure pursuit path follower node."""
import pytest
import math
from nav_msgs.msg import Path, Odometry
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from std_msgs.msg import Header
import rclpy
from saltybot_pure_pursuit.pure_pursuit_node import PurePursuitNode
@pytest.fixture
def rclpy_fixture():
    """Initialize and cleanup rclpy."""
    # Fresh rclpy context per test; shutting down in teardown prevents a
    # failing test from leaking global ROS state into the next one.
    rclpy.init()
    yield
    rclpy.shutdown()


@pytest.fixture
def node(rclpy_fixture):
    """Create a pure pursuit node instance."""
    # Depends on rclpy_fixture so init/shutdown brackets the node lifetime.
    node = PurePursuitNode()
    yield node
    node.destroy_node()
class TestPurePursuitGeometry:
    """Test suite for pure pursuit geometric calculations.

    These tests call the node's private helpers directly; no executor is
    spun, so no timers or callbacks fire.
    """

    def test_node_initialization(self, node):
        """Test that node initializes with correct defaults."""
        assert node.lookahead_distance == 0.5
        assert node.goal_tolerance == 0.1
        assert node.max_linear_velocity == 1.0
        assert node.enable_path_following is True

    def test_distance_calculation(self, node):
        """Test Euclidean distance calculation."""
        # 3-4-5 right triangle.
        p1 = Point(x=0.0, y=0.0, z=0.0)
        p2 = Point(x=3.0, y=4.0, z=0.0)
        dist = node._distance(p1, p2)
        assert abs(dist - 5.0) < 0.01

    def test_distance_same_point(self, node):
        """Test distance between same point is zero."""
        p = Point(x=1.0, y=2.0, z=0.0)
        dist = node._distance(p, p)
        assert abs(dist) < 0.001

    def test_quaternion_to_yaw_north(self, node):
        """Test quaternion to yaw conversion for north heading."""
        # Identity quaternion -> zero yaw.
        quat = Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        yaw = node._quaternion_to_yaw(quat)
        assert abs(yaw) < 0.01

    def test_quaternion_to_yaw_east(self, node):
        """Test quaternion to yaw conversion for east heading."""
        # 90 degree rotation around z-axis
        quat = Quaternion(x=0.0, y=0.0, z=0.7071, w=0.7071)
        yaw = node._quaternion_to_yaw(quat)
        assert abs(yaw - math.pi / 2) < 0.01

    def test_yaw_to_quaternion_identity(self, node):
        """Test yaw to quaternion conversion."""
        quat = node._yaw_to_quaternion(0.0)
        assert abs(quat.w - 1.0) < 0.01
        assert abs(quat.z) < 0.01
class TestPathFollowing:
    """Test suite for path following logic (lookahead point selection)."""

    def test_empty_path(self, node):
        """Test handling of empty path."""
        node.current_pose = Pose(position=Point(x=0.0, y=0.0, z=0.0))
        node.current_path = Path()
        lookahead = node._find_lookahead_point()
        assert lookahead is None

    def test_single_point_path(self, node):
        """Test handling of single-point path."""
        node.current_pose = Pose(position=Point(x=0.0, y=0.0, z=0.0))
        path = Path()
        path.poses = [PoseStamped(pose=Pose(position=Point(x=1.0, y=0.0, z=0.0)))]
        node.current_path = path
        lookahead = node._find_lookahead_point()
        # A one-pose path has no segment to interpolate along; either "no
        # lookahead" or the sole waypoint itself is acceptable behavior.
        assert lookahead is None or lookahead.x == 1.0

    def test_straight_line_path(self, node):
        """Test path following on straight line."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Create straight path along x-axis
        path = Path()
        for i in range(5):
            pose = PoseStamped(pose=Pose(
                position=Point(x=float(i), y=0.0, z=0.0)
            ))
            path.poses.append(pose)
        node.current_path = path
        # Robot heading east towards path
        lin_vel, ang_vel = node._calculate_steering_command(path.poses[1].pose.position)
        assert lin_vel >= 0
        assert abs(ang_vel) < 0.5  # Should have small steering error

    def test_curved_path(self, node):
        """Test path following on curved path."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Create circular arc path (quarter circle of radius 1).
        path = Path()
        for i in range(9):
            angle = (i / 8.0) * (math.pi / 2)
            x = math.sin(angle)
            y = 1.0 - math.cos(angle)
            pose = PoseStamped(pose=Pose(position=Point(x=x, y=y, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lookahead = node._find_lookahead_point()
        assert lookahead is not None

    def test_goal_reached(self, node):
        """Test goal reached detection."""
        goal_pos = Point(x=1.0, y=0.0, z=0.0)
        # Robot placed within goal_tolerance (0.1 m) of the goal.
        node.current_pose = Pose(
            position=Point(x=1.05, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        lin_vel, ang_vel = node._calculate_steering_command(goal_pos)
        # With goal_tolerance=0.1, we should be at goal
        assert abs(lin_vel) < 0.01
class TestSteeringCalculation:
    """Test suite for steering command calculations."""

    def test_zero_heading_error(self, node):
        """Test steering when robot already aligned with target."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)  # Facing east
        )
        # Target directly ahead
        target = Point(x=1.0, y=0.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        assert lin_vel > 0
        assert abs(ang_vel) < 0.1  # Minimal steering

    def test_90_degree_heading_error(self, node):
        """Test steering with 90 degree heading error."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)  # Facing east
        )
        # Target north
        target = Point(x=0.0, y=1.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        assert lin_vel >= 0
        assert ang_vel != 0  # Should have significant steering

    def test_velocity_limits(self, node):
        """Test that velocities are within limits."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Far-away target exercises the clamping branches.
        target = Point(x=100.0, y=100.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        assert lin_vel <= node.max_linear_velocity
        assert abs(ang_vel) <= node.max_angular_velocity

    def test_heading_correction_enabled(self, node):
        """Test heading correction when enabled."""
        node.use_heading_correction = True
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Large heading error
        target = Point(x=0.0, y=10.0, z=0.0)  # Due north
        lin_vel, ang_vel = node._calculate_steering_command(target)
        # Linear velocity should be reduced due to heading error
        assert lin_vel < node.max_linear_velocity

    def test_heading_correction_disabled(self, node):
        """Test that heading correction can be disabled."""
        node.use_heading_correction = False
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        target = Point(x=0.0, y=10.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        # Linear velocity should be full
        assert abs(lin_vel - node.max_linear_velocity) < 0.01

    def test_negative_lookahead_distance(self, node):
        """Test behavior with invalid lookahead distance."""
        # Zero lookahead would divide by zero in the curvature formula;
        # the node must guard against it.
        node.lookahead_distance = 0.0
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        target = Point(x=1.0, y=1.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        # Should not crash, handle gracefully
        assert isinstance(lin_vel, float)
        assert isinstance(ang_vel, float)
class TestPurePursuitScenarios:
    """Integration-style tests for realistic scenarios.

    Paths are built in-memory and the control loop / steering helpers are
    invoked directly; publications go to real publishers with no subscribers.
    """

    def test_scenario_follow_straight_path(self, node):
        """Scenario: Follow a straight horizontal path."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Horizontal path
        path = Path()
        for i in range(10):
            pose = PoseStamped(pose=Pose(position=Point(x=float(i), y=0.0, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lin_vel, ang_vel = node._calculate_steering_command(path.poses[1].pose.position)
        assert lin_vel > 0
        assert abs(ang_vel) < 0.5

    def test_scenario_s_shaped_path(self, node):
        """Scenario: Follow an S-shaped path."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # S-shaped path (sinusoid along x).
        path = Path()
        for i in range(20):
            x = float(i) * 0.5
            y = 2.0 * math.sin(x)
            pose = PoseStamped(pose=Pose(position=Point(x=x, y=y, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lookahead = node._find_lookahead_point()
        assert lookahead is not None

    def test_scenario_spiral_path(self, node):
        """Scenario: Follow a spiral path."""
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Spiral path: two turns of increasing radius.
        path = Path()
        for i in range(30):
            angle = (i / 30.0) * (4 * math.pi)
            radius = 0.5 + (i / 30.0) * 2.0
            x = radius * math.cos(angle)
            y = radius * math.sin(angle)
            pose = PoseStamped(pose=Pose(position=Point(x=x, y=y, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lin_vel, ang_vel = node._calculate_steering_command(path.poses[5].pose.position)
        assert isinstance(lin_vel, float)
        assert isinstance(ang_vel, float)

    def test_scenario_control_loop(self, node):
        """Scenario: Control loop with valid path and odometry."""
        node.enable_path_following = True
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Create a simple path
        path = Path()
        for i in range(5):
            pose = PoseStamped(pose=Pose(position=Point(x=float(i), y=0.0, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        # Run control loop
        node._control_loop()
        # Should complete without error

    def test_scenario_disabled_path_following(self, node):
        """Scenario: Path following disabled."""
        node.enable_path_following = False
        node.current_pose = Pose(position=Point(x=0.0, y=0.0, z=0.0))
        # Create path
        path = Path()
        path.poses = [PoseStamped(pose=Pose(position=Point(x=1.0, y=0.0, z=0.0)))]
        node.current_path = path
        # Run control loop
        node._control_loop()
        # Should publish zero velocity

    def test_scenario_no_odometry(self, node):
        """Scenario: Control loop when odometry not received."""
        node.current_pose = None
        path = Path()
        path.poses = [PoseStamped(pose=Pose(position=Point(x=1.0, y=0.0, z=0.0)))]
        node.current_path = path
        # Run control loop
        node._control_loop()
        # Should handle gracefully

    def test_scenario_velocity_scaling(self, node):
        """Scenario: Velocity scaling parameters."""
        node.linear_velocity_scale = 0.5
        node.angular_velocity_scale = 0.5
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        target = Point(x=1.0, y=0.0, z=0.0)
        lin_vel, ang_vel = node._calculate_steering_command(target)
        # Scaled velocities should be smaller
        assert lin_vel <= node.max_linear_velocity * 0.5 + 0.01

    def test_scenario_large_lookahead_distance(self, node):
        """Scenario: Large lookahead distance."""
        # Lookahead longer than the whole path: expect the final waypoint.
        node.lookahead_distance = 5.0
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Path
        path = Path()
        for i in range(10):
            pose = PoseStamped(pose=Pose(position=Point(x=float(i), y=0.0, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lookahead = node._find_lookahead_point()
        assert lookahead is not None

    def test_scenario_small_lookahead_distance(self, node):
        """Scenario: Small lookahead distance."""
        # Lookahead shorter than one segment: forces interpolation.
        node.lookahead_distance = 0.05
        node.current_pose = Pose(
            position=Point(x=0.0, y=0.0, z=0.0),
            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)
        )
        # Path
        path = Path()
        for i in range(10):
            pose = PoseStamped(pose=Pose(position=Point(x=float(i), y=0.0, z=0.0)))
            path.poses.append(pose)
        node.current_path = path
        lookahead = node._find_lookahead_point()
        assert lookahead is not None

View File

@ -22,11 +22,6 @@ rosidl_generate_interfaces(${PROJECT_NAME}
# Issue #322 cross-camera person re-identification
"msg/PersonTrack.msg"
"msg/PersonTrackArray.msg"
# Issue #326 dynamic obstacle velocity estimator
"msg/ObstacleVelocity.msg"
"msg/ObstacleVelocityArray.msg"
# Issue #339 lane/path edge detector
"msg/PathEdges.msg"
DEPENDENCIES std_msgs geometry_msgs vision_msgs builtin_interfaces
)

View File

@ -1,22 +0,0 @@
# ObstacleVelocity.msg — tracked obstacle with estimated velocity (Issue #326)
#
# obstacle_id : stable track ID, monotonically increasing, 1-based
# centroid : estimated position in the LIDAR sensor frame (z=0)
# velocity : estimated velocity vector, m/s, in the LIDAR sensor frame
# speed_mps : |velocity| magnitude (m/s)
# width_m : cluster bounding-box width (metres)
# depth_m : cluster bounding-box depth (metres)
# point_count : number of LIDAR returns in the cluster this frame
# confidence : Kalman track age confidence, 0–1 (1.0 after n_init frames)
# is_static : true when speed_mps < static_speed_threshold parameter
#
std_msgs/Header header
uint32 obstacle_id
geometry_msgs/Point centroid
geometry_msgs/Vector3 velocity
float32 speed_mps
float32 width_m
float32 depth_m
uint32 point_count
float32 confidence
bool is_static

View File

@ -1,4 +0,0 @@
# ObstacleVelocityArray.msg — all tracked obstacles with velocities (Issue #326)
#
std_msgs/Header header
ObstacleVelocity[] obstacles

View File

@ -1,48 +0,0 @@
# PathEdges.msg — Lane/path edge detection result (Issue #339)
#
# All pixel coordinates are in the ROI frame:
# origin = top-left of the bottom-half ROI crop
# y=0 → roi_top of the full image; y increases downward; x increases rightward.
#
# Bird-eye coordinates are in the warped top-down perspective image.
std_msgs/Header header
# --- Raw Hough segments (ROI frame) ---
# Flat array of (x1, y1, x2, y2) tuples; length = 4 * line_count
float32[] segments_px
# Same segments warped to bird-eye view; length = 4 * line_count
float32[] segments_birdseye_px
# Number of Hough line segments detected
uint32 line_count
# --- Dominant left edge (ROI frame) ---
float32 left_x1
float32 left_y1
float32 left_x2
float32 left_y2
bool left_detected
# --- Dominant left edge (bird-eye frame) ---
float32 left_birdseye_x1
float32 left_birdseye_y1
float32 left_birdseye_x2
float32 left_birdseye_y2
# --- Dominant right edge (ROI frame) ---
float32 right_x1
float32 right_y1
float32 right_x2
float32 right_y2
bool right_detected
# --- Dominant right edge (bird-eye frame) ---
float32 right_birdseye_x1
float32 right_birdseye_y1
float32 right_birdseye_x2
float32 right_birdseye_y2
# y-offset of ROI in the full image (pixels from top)
uint32 roi_top

View File

@ -22,7 +22,6 @@ import { useRosbridge } from './hooks/useRosbridge.js';
// Social panels
import { StatusPanel } from './components/StatusPanel.jsx';
import { StatusHeader } from './components/StatusHeader.jsx';
import { FaceGallery } from './components/FaceGallery.jsx';
import { ConversationLog } from './components/ConversationLog.jsx';
import { ConversationHistory } from './components/ConversationHistory.jsx';
@ -35,7 +34,6 @@ import PoseViewer from './components/PoseViewer.jsx';
import { BatteryPanel } from './components/BatteryPanel.jsx';
import { BatteryChart } from './components/BatteryChart.jsx';
import { MotorPanel } from './components/MotorPanel.jsx';
import { MotorCurrentGraph } from './components/MotorCurrentGraph.jsx';
import { MapViewer } from './components/MapViewer.jsx';
import { ControlMode } from './components/ControlMode.jsx';
import { SystemHealth } from './components/SystemHealth.jsx';
@ -55,9 +53,6 @@ import { CameraViewer } from './components/CameraViewer.jsx';
// Event log (issue #192)
import { EventLog } from './components/EventLog.jsx';
// Log viewer (issue #275)
import { LogViewer } from './components/LogViewer.jsx';
// Joystick teleop (issue #212)
import JoystickTeleop from './components/JoystickTeleop.jsx';
@ -76,15 +71,6 @@ import { TempGauge } from './components/TempGauge.jsx';
// Node list viewer
import { NodeList } from './components/NodeList.jsx';
// Gamepad teleoperation (issue #319)
import { Teleop } from './components/Teleop.jsx';
// System diagnostics (issue #340)
import { Diagnostics } from './components/Diagnostics.jsx';
// Hand tracking visualization (issue #344)
import { HandTracker } from './components/HandTracker.jsx';
const TAB_GROUPS = [
{
label: 'SOCIAL',
@ -92,7 +78,6 @@ const TAB_GROUPS = [
tabs: [
{ id: 'status', label: 'Status', },
{ id: 'faces', label: 'Faces', },
{ id: 'hands', label: 'Hands', },
{ id: 'conversation', label: 'Convo', },
{ id: 'history', label: 'History', },
{ id: 'personality', label: 'Personality', },
@ -134,7 +119,6 @@ const TAB_GROUPS = [
label: 'MONITORING',
color: 'text-yellow-600',
tabs: [
{ id: 'diagnostics', label: 'Diagnostics' },
{ id: 'eventlog', label: 'Events' },
{ id: 'bandwidth', label: 'Bandwidth' },
{ id: 'nodes', label: 'Nodes' },
@ -267,7 +251,6 @@ export default function App() {
<main className={`flex-1 ${['eventlog', 'control', 'imu'].includes(activeTab) ? 'flex flex-col' : 'overflow-y-auto'} p-4`}>
{activeTab === 'status' && <StatusPanel subscribe={subscribe} />}
{activeTab === 'faces' && <FaceGallery subscribe={subscribe} callService={callService} />}
{activeTab === 'hands' && <HandTracker subscribe={subscribe} />}
{activeTab === 'conversation' && <ConversationLog subscribe={subscribe} />}
{activeTab === 'history' && <ConversationHistory subscribe={subscribe} />}
{activeTab === 'personality' && <PersonalityTuner subscribe={subscribe} setParam={setParam} />}
@ -281,7 +264,16 @@ export default function App() {
{activeTab === 'motor-current-graph' && <MotorCurrentGraph subscribe={subscribe} />}
{activeTab === 'thermal' && <TempGauge subscribe={subscribe} />}
{activeTab === 'map' && <MapViewer subscribe={subscribe} />}
{activeTab === 'control' && <Teleop publish={publishFn} />}
{activeTab === 'control' && (
<div className="flex flex-col h-full gap-4">
<div className="flex-1 overflow-y-auto">
<ControlMode subscribe={subscribe} />
</div>
<div className="flex-1 overflow-y-auto">
<JoystickTeleop publish={publishFn} />
</div>
</div>
)}
{activeTab === 'health' && <SystemHealth subscribe={subscribe} />}
{activeTab === 'cameras' && <CameraViewer subscribe={subscribe} />}
@ -290,8 +282,6 @@ export default function App() {
{activeTab === 'fleet' && <FleetPanel />}
{activeTab === 'missions' && <MissionPlanner />}
{activeTab === 'diagnostics' && <Diagnostics subscribe={subscribe} />}
{activeTab === 'eventlog' && <EventLog subscribe={subscribe} />}
{activeTab === 'bandwidth' && <BandwidthMonitor />}

View File

@ -1,308 +0,0 @@
/**
* Diagnostics.jsx System diagnostics panel with hardware status monitoring
*
* Features:
* - Subscribes to /diagnostics (diagnostic_msgs/DiagnosticArray)
* - Hardware status cards per subsystem (color-coded health)
* - Real-time error and warning counts
* - Diagnostic status timeline
* - Error/warning history with timestamps
* - Aggregated system health summary
* - Status indicators: OK (green), WARNING (yellow), ERROR (red), STALE (gray)
*/
import { useEffect, useRef, useState } from 'react';
const MAX_HISTORY = 100; // Keep last 100 diagnostic messages

// Styling per diagnostic_msgs/DiagnosticStatus level (0=OK, 1=WARN, 2=ERROR);
// level 3 is used locally for STALE and doubles as the fallback entry.
const STATUS_COLORS = {
  0: { bg: 'bg-green-950', border: 'border-green-800', text: 'text-green-400', label: 'OK' },
  1: { bg: 'bg-yellow-950', border: 'border-yellow-800', text: 'text-yellow-400', label: 'WARN' },
  2: { bg: 'bg-red-950', border: 'border-red-800', text: 'text-red-400', label: 'ERROR' },
  3: { bg: 'bg-gray-900', border: 'border-gray-700', text: 'text-gray-400', label: 'STALE' },
};
// Map a diagnostic level to its color entry; unknown levels render as STALE.
function getStatusColor(level) {
  const entry = STATUS_COLORS[level];
  return entry !== undefined ? entry : STATUS_COLORS[3];
}
function formatTimestamp(timestamp) {
const date = new Date(timestamp);
return date.toLocaleTimeString('en-US', {
hour: '2-digit',
minute: '2-digit',
second: '2-digit',
hour12: false,
});
}
/**
 * Card for a single diagnostic status entry.
 *
 * Props:
 *   diagnostic - { name, level, message, hardware_id, values[] } as parsed
 *                from a diagnostic_msgs/DiagnosticStatus
 *   expanded   - whether the key/value detail section is visible
 *   onToggle   - click handler; the whole card toggles expansion
 */
function DiagnosticCard({ diagnostic, expanded, onToggle }) {
  const color = getStatusColor(diagnostic.level);
  return (
    <div
      className={`rounded-lg border p-3 space-y-2 cursor-pointer transition-all ${
        color.bg
      } ${color.border} ${expanded ? 'ring-2 ring-offset-2 ring-cyan-500' : ''}`}
      onClick={onToggle}
    >
      {/* Header: name + message on the left, level badge on the right */}
      <div className="flex items-start justify-between gap-2">
        <div className="flex-1 min-w-0">
          <div className="text-xs font-bold text-gray-400 truncate">{diagnostic.name}</div>
          <div className={`text-sm font-mono font-bold ${color.text}`}>
            {diagnostic.message}
          </div>
        </div>
        <div
          className={`px-2 py-1 rounded text-xs font-bold whitespace-nowrap flex-shrink-0 ${
            color.bg
          } ${color.border} border ${color.text}`}
        >
          {color.label}
        </div>
      </div>
      {/* Expanded details */}
      {expanded && (
        <div className="border-t border-gray-700 pt-2 space-y-2 text-xs">
          {diagnostic.values && diagnostic.values.length > 0 && (
            <div className="space-y-1">
              <div className="text-gray-500 font-bold">KEY VALUES:</div>
              {/* Cap at five key/value pairs to keep the card compact */}
              {diagnostic.values.slice(0, 5).map((val, i) => (
                <div key={i} className="flex justify-between gap-2 text-gray-400">
                  <span className="font-mono truncate">{val.key}:</span>
                  <span className="font-mono text-gray-300 truncate">{val.value}</span>
                </div>
              ))}
              {diagnostic.values.length > 5 && (
                <div className="text-gray-500">+{diagnostic.values.length - 5} more values</div>
              )}
            </div>
          )}
          {diagnostic.hardware_id && (
            <div className="text-gray-500">
              <span className="font-bold">Hardware:</span> {diagnostic.hardware_id}
            </div>
          )}
        </div>
      )}
    </div>
  );
}
/**
 * Compact chronological list of recent diagnostic status entries.
 *
 * Props:
 *   statusHistory - array of { name, level, message, timestamp } entries
 *   maxEntries    - how many of the most recent entries to render
 */
function HealthTimeline({ statusHistory, maxEntries = 20 }) {
  // Only render the tail of the history; older entries remain in state.
  const recentHistory = statusHistory.slice(-maxEntries);
  return (
    <div className="space-y-2">
      <div className="text-xs font-bold text-gray-400 tracking-widest">TIMELINE</div>
      <div className="space-y-1">
        {recentHistory.map((entry, i) => {
          const color = getStatusColor(entry.level);
          return (
            <div key={i} className="flex items-center gap-2">
              <div className="text-xs text-gray-500 font-mono whitespace-nowrap">
                {formatTimestamp(entry.timestamp)}
              </div>
              {/* Colored dot encodes the status level */}
              <div className={`w-2 h-2 rounded-full flex-shrink-0 ${color.text}`} />
              <div className="text-xs text-gray-400 truncate">
                {entry.name}: {entry.message}
              </div>
            </div>
          );
        })}
      </div>
    </div>
  );
}
/**
 * Diagnostics — live system-health dashboard fed by the ROS /diagnostics topic.
 *
 * Keeps the most recent DiagnosticStatus per component name, a rolling event
 * history (capped at MAX_HISTORY, defined elsewhere in this file), and renders
 * a summary banner, per-component cards, a timeline, and a status legend.
 *
 * Props:
 *   subscribe(topic, msgType, callback) -> unsubscribe
 *     Bridge helper used to attach/detach the topic listener.
 *
 * Status levels follow diagnostic_msgs convention: 0=OK, 1=WARN, 2=ERROR, 3=STALE.
 */
export function Diagnostics({ subscribe }) {
  // Latest status keyed by component name (rebuilt from each DiagnosticArray).
  const [diagnostics, setDiagnostics] = useState({});
  // Rolling event log, newest last, capped at MAX_HISTORY.
  const [statusHistory, setStatusHistory] = useState([]);
  // Names of cards the user has expanded to show key/value details.
  const [expandedDiags, setExpandedDiags] = useState(new Set());
  // Non-reactive mirror of `diagnostics` for readers outside the render cycle.
  const diagRef = useRef({});
  // Subscribe to diagnostics
  useEffect(() => {
    const unsubscribe = subscribe(
      '/diagnostics',
      'diagnostic_msgs/DiagnosticArray',
      (msg) => {
        try {
          const diags = {};
          const now = Date.now();
          // Process each diagnostic status
          (msg.status || []).forEach((status) => {
            const name = status.name || 'unknown';
            diags[name] = {
              name,
              level: status.level,
              message: status.message || '',
              hardware_id: status.hardware_id || '',
              values: status.values || [],
              timestamp: now,
            };
            // Add to history — functional update keeps the cap correct even
            // when several statuses arrive in one message.
            setStatusHistory((prev) => [
              ...prev,
              {
                name,
                level: status.level,
                message: status.message || '',
                timestamp: now,
              },
            ].slice(-MAX_HISTORY));
          });
          setDiagnostics(diags);
          diagRef.current = diags;
        } catch (e) {
          console.error('Error parsing diagnostics:', e);
        }
      }
    );
    return unsubscribe;
  }, [subscribe]);
  // Calculate statistics — one count per diagnostic level.
  const stats = {
    total: Object.keys(diagnostics).length,
    ok: Object.values(diagnostics).filter((d) => d.level === 0).length,
    warning: Object.values(diagnostics).filter((d) => d.level === 1).length,
    error: Object.values(diagnostics).filter((d) => d.level === 2).length,
    stale: Object.values(diagnostics).filter((d) => d.level === 3).length,
  };
  // Overall banner: any ERROR dominates, then WARN, then STALE, else OK.
  const overallHealth =
    stats.error > 0 ? 2 : stats.warning > 0 ? 1 : stats.stale > 0 ? 3 : 0;
  const overallColor = getStatusColor(overallHealth);
  const sortedDiags = Object.values(diagnostics).sort((a, b) => {
    // Sort by level (errors first), then by name
    if (a.level !== b.level) return b.level - a.level;
    return a.name.localeCompare(b.name);
  });
  // Toggle a card's expanded state; copies the Set so React sees a new value.
  const toggleExpanded = (name) => {
    const updated = new Set(expandedDiags);
    if (updated.has(name)) {
      updated.delete(name);
    } else {
      updated.add(name);
    }
    setExpandedDiags(updated);
  };
  return (
    <div className="flex flex-col h-full space-y-3">
      {/* Health Summary */}
      <div className={`rounded-lg border p-3 space-y-3 ${overallColor.bg} ${overallColor.border}`}>
        <div className="flex justify-between items-center">
          <div className={`text-xs font-bold tracking-widest ${overallColor.text}`}>
            SYSTEM HEALTH
          </div>
          <div className={`text-xs font-bold px-3 py-1 rounded ${overallColor.text}`}>
            {overallColor.label}
          </div>
        </div>
        {/* Stats Grid */}
        <div className="grid grid-cols-5 gap-2">
          <div className="bg-gray-900 rounded p-2">
            <div className="text-gray-600 text-xs">TOTAL</div>
            <div className="text-lg font-mono text-cyan-300 font-bold">{stats.total}</div>
          </div>
          <div className="bg-green-950 rounded p-2">
            <div className="text-gray-600 text-xs">OK</div>
            <div className="text-lg font-mono text-green-400 font-bold">{stats.ok}</div>
          </div>
          <div className="bg-yellow-950 rounded p-2">
            <div className="text-gray-600 text-xs">WARN</div>
            <div className="text-lg font-mono text-yellow-400 font-bold">{stats.warning}</div>
          </div>
          <div className="bg-red-950 rounded p-2">
            <div className="text-gray-600 text-xs">ERROR</div>
            <div className="text-lg font-mono text-red-400 font-bold">{stats.error}</div>
          </div>
          <div className="bg-gray-900 rounded p-2">
            <div className="text-gray-600 text-xs">STALE</div>
            <div className="text-lg font-mono text-gray-400 font-bold">{stats.stale}</div>
          </div>
        </div>
      </div>
      {/* Diagnostics Grid */}
      <div className="flex-1 overflow-y-auto space-y-2">
        {sortedDiags.length === 0 ? (
          <div className="flex items-center justify-center h-32 text-gray-600">
            <div className="text-center">
              <div className="text-sm mb-2">Waiting for diagnostics</div>
              <div className="text-xs text-gray-700">
                Messages from /diagnostics will appear here
              </div>
            </div>
          </div>
        ) : (
          sortedDiags.map((diag) => (
            <DiagnosticCard
              key={diag.name}
              diagnostic={diag}
              expanded={expandedDiags.has(diag.name)}
              onToggle={() => toggleExpanded(diag.name)}
            />
          ))
        )}
      </div>
      {/* Timeline and Info */}
      <div className="grid grid-cols-2 gap-2">
        {/* Timeline */}
        <div className="bg-gray-950 rounded-lg border border-cyan-950 p-3">
          <HealthTimeline statusHistory={statusHistory} maxEntries={10} />
        </div>
        {/* Legend */}
        <div className="bg-gray-950 rounded-lg border border-cyan-950 p-3 space-y-2">
          <div className="text-xs font-bold text-gray-400 tracking-widest mb-2">STATUS LEGEND</div>
          <div className="space-y-1 text-xs">
            <div className="flex items-center gap-2">
              <div className="w-2 h-2 rounded-full bg-green-500" />
              <span className="text-gray-400">OK System nominal</span>
            </div>
            <div className="flex items-center gap-2">
              <div className="w-2 h-2 rounded-full bg-yellow-500" />
              <span className="text-gray-400">WARN Check required</span>
            </div>
            <div className="flex items-center gap-2">
              <div className="w-2 h-2 rounded-full bg-red-500" />
              <span className="text-gray-400">ERROR Immediate action</span>
            </div>
            <div className="flex items-center gap-2">
              <div className="w-2 h-2 rounded-full bg-gray-500" />
              <span className="text-gray-400">STALE No recent data</span>
            </div>
          </div>
        </div>
      </div>
      {/* Topic Info */}
      <div className="bg-gray-950 rounded border border-gray-800 p-2 text-xs text-gray-600 space-y-1">
        <div className="flex justify-between">
          <span>Topic:</span>
          <span className="text-gray-500">/diagnostics (diagnostic_msgs/DiagnosticArray)</span>
        </div>
        <div className="flex justify-between">
          <span>Status Levels:</span>
          <span className="text-gray-500">0=OK, 1=WARN, 2=ERROR, 3=STALE</span>
        </div>
        <div className="flex justify-between">
          <span>History Limit:</span>
          <span className="text-gray-500">{MAX_HISTORY} events</span>
        </div>
      </div>
    </div>
  );
}

View File

@ -1,331 +0,0 @@
/**
* HandTracker.jsx Hand pose and gesture visualization
*
* Features:
* - Subscribes to /saltybot/hands (21 landmarks per hand)
* - Subscribes to /saltybot/hand_gesture (String gesture label)
* - Canvas-based hand skeleton rendering
* - Bone connections between landmarks
* - Support for dual hands (left and right)
* - Handedness indicator
* - Real-time gesture display
* - Confidence-based landmark rendering
*/
import { useEffect, useRef, useState } from 'react';
// MediaPipe hand landmark connections (bones): each [start, end] pair is a
// bone drawn between two of the 21 landmark indices (0 = wrist; each finger
// contributes 4 joints).
const HAND_CONNECTIONS = [
  // Thumb
  [0, 1], [1, 2], [2, 3], [3, 4],
  // Index finger
  [0, 5], [5, 6], [6, 7], [7, 8],
  // Middle finger
  [0, 9], [9, 10], [10, 11], [11, 12],
  // Ring finger
  [0, 13], [13, 14], [14, 15], [15, 16],
  // Pinky finger
  [0, 17], [17, 18], [18, 19], [19, 20],
  // Palm connections
  [5, 9], [9, 13], [13, 17],
];
// Human-readable names for the 21 MediaPipe landmarks, indexed by landmark id.
// NOTE(review): not referenced anywhere in this chunk — presumably used for
// tooltips or debugging elsewhere; confirm before removing.
const LANDMARK_NAMES = [
  'Wrist',
  'Thumb CMC', 'Thumb MCP', 'Thumb IP', 'Thumb Tip',
  'Index MCP', 'Index PIP', 'Index DIP', 'Index Tip',
  'Middle MCP', 'Middle PIP', 'Middle DIP', 'Middle Tip',
  'Ring MCP', 'Ring PIP', 'Ring DIP', 'Ring Tip',
  'Pinky MCP', 'Pinky PIP', 'Pinky DIP', 'Pinky Tip',
];
/**
 * HandCanvas — renders one hand's 21-landmark skeleton on a fixed-size canvas.
 *
 * The landmarks are auto-fit: the bounding box of the hand is scaled
 * (aspect-preserving, via min of the two axis scales) into the canvas with a
 * 20px padding. Bones and points are alpha-blended by landmark confidence;
 * anything at confidence <= 0.1 is skipped.
 *
 * Props:
 *   hand  — { landmarks: [{x, y, confidence}, ...] } (MediaPipe order).
 *   color — CSS color used for bones, points, and the label.
 *   label — text drawn in the bottom-left corner of the canvas.
 */
function HandCanvas({ hand, color, label }) {
  const canvasRef = useRef(null);
  // NOTE(review): `flipped` is declared but never read or set beyond its
  // initial value in this chunk — possibly a stub for mirroring; confirm.
  const [flipped, setFlipped] = useState(false);
  useEffect(() => {
    const canvas = canvasRef.current;
    if (!canvas || !hand || !hand.landmarks || hand.landmarks.length === 0) return;
    const ctx = canvas.getContext('2d');
    const width = canvas.width;
    const height = canvas.height;
    // Clear canvas
    ctx.fillStyle = '#1f2937';
    ctx.fillRect(0, 0, width, height);
    // Find min/max coordinates for scaling
    let minX = Infinity, maxX = -Infinity;
    let minY = Infinity, maxY = -Infinity;
    hand.landmarks.forEach((lm) => {
      minX = Math.min(minX, lm.x);
      maxX = Math.max(maxX, lm.x);
      minY = Math.min(minY, lm.y);
      maxY = Math.max(maxY, lm.y);
    });
    const padding = 20;
    // `|| 1` guards against a zero-area bounding box (all landmarks collinear).
    const rangeX = maxX - minX || 1;
    const rangeY = maxY - minY || 1;
    const scaleX = (width - padding * 2) / rangeX;
    const scaleY = (height - padding * 2) / rangeY;
    const scale = Math.min(scaleX, scaleY);
    // Convert landmark coordinates to canvas positions
    const getCanvasPos = (lm) => {
      const x = padding + (lm.x - minX) * scale;
      const y = padding + (lm.y - minY) * scale;
      return { x, y };
    };
    // Draw bones — alpha is the weaker of the two endpoint confidences.
    ctx.strokeStyle = color;
    ctx.lineWidth = 2;
    ctx.lineCap = 'round';
    ctx.lineJoin = 'round';
    HAND_CONNECTIONS.forEach(([start, end]) => {
      if (start < hand.landmarks.length && end < hand.landmarks.length) {
        const startLm = hand.landmarks[start];
        const endLm = hand.landmarks[end];
        if (startLm.confidence > 0.1 && endLm.confidence > 0.1) {
          const startPos = getCanvasPos(startLm);
          const endPos = getCanvasPos(endLm);
          ctx.globalAlpha = Math.min(startLm.confidence, endLm.confidence);
          ctx.beginPath();
          ctx.moveTo(startPos.x, startPos.y);
          ctx.lineTo(endPos.x, endPos.y);
          ctx.stroke();
          ctx.globalAlpha = 1.0;
        }
      }
    });
    // Draw landmarks: a soft glow halo, then the solid point.
    hand.landmarks.forEach((lm, i) => {
      if (lm.confidence > 0.1) {
        const pos = getCanvasPos(lm);
        const radius = 4 + lm.confidence * 2;
        // Landmark glow
        ctx.fillStyle = color;
        ctx.globalAlpha = lm.confidence * 0.3;
        ctx.beginPath();
        ctx.arc(pos.x, pos.y, radius * 2, 0, Math.PI * 2);
        ctx.fill();
        // Landmark point
        ctx.fillStyle = color;
        ctx.globalAlpha = lm.confidence;
        ctx.beginPath();
        ctx.arc(pos.x, pos.y, radius, 0, Math.PI * 2);
        ctx.fill();
        // Joint type marker — indices 4, 8, 12, 16, 20 are fingertips in the
        // MediaPipe ordering; ring them in white.
        const isJoint = i > 0 && i % 4 === 0; // Tip joints
        if (isJoint) {
          ctx.strokeStyle = '#fff';
          ctx.lineWidth = 1;
          ctx.globalAlpha = lm.confidence;
          ctx.beginPath();
          ctx.arc(pos.x, pos.y, radius + 2, 0, Math.PI * 2);
          ctx.stroke();
        }
      }
    });
    ctx.globalAlpha = 1.0;
    // Draw handedness label on canvas
    ctx.fillStyle = color;
    ctx.font = 'bold 12px monospace';
    ctx.textAlign = 'left';
    ctx.fillText(label, 10, height - 10);
  }, [hand, color, label]);
  return (
    <div className="flex flex-col items-center gap-2">
      <div className="text-xs font-bold text-gray-400">{label}</div>
      <canvas
        ref={canvasRef}
        width={280}
        height={320}
        className="border-2 border-gray-800 rounded-lg bg-gray-900"
      />
    </div>
  );
}
export function HandTracker({ subscribe }) {
const [leftHand, setLeftHand] = useState(null);
const [rightHand, setRightHand] = useState(null);
const [gesture, setGesture] = useState('');
const [confidence, setConfidence] = useState(0);
const handsRef = useRef({});
// Subscribe to hand poses
useEffect(() => {
const unsubscribe = subscribe(
'/saltybot/hands',
'saltybot_msgs/HandPose',
(msg) => {
try {
if (!msg) return;
// Handle both single hand and multi-hand formats
if (Array.isArray(msg)) {
// Multi-hand format: array of hands
const left = msg.find((h) => h.handedness === 'Left' || h.handedness === 0);
const right = msg.find((h) => h.handedness === 'Right' || h.handedness === 1);
if (left) setLeftHand(left);
if (right) setRightHand(right);
} else if (msg.handedness) {
// Single hand format
if (msg.handedness === 'Left' || msg.handedness === 0) {
setLeftHand(msg);
} else {
setRightHand(msg);
}
}
handsRef.current = { left: leftHand, right: rightHand };
} catch (e) {
console.error('Error parsing hand pose:', e);
}
}
);
return unsubscribe;
}, [subscribe, leftHand, rightHand]);
// Subscribe to gesture
useEffect(() => {
const unsubscribe = subscribe(
'/saltybot/hand_gesture',
'std_msgs/String',
(msg) => {
try {
if (msg.data) {
// Parse gesture data (format: "gesture_name confidence")
const parts = msg.data.split(' ');
setGesture(parts[0] || '');
if (parts[1]) {
setConfidence(parseFloat(parts[1]) || 0);
}
}
} catch (e) {
console.error('Error parsing gesture:', e);
}
}
);
return unsubscribe;
}, [subscribe]);
const hasLeftHand = leftHand && leftHand.landmarks && leftHand.landmarks.length > 0;
const hasRightHand = rightHand && rightHand.landmarks && rightHand.landmarks.length > 0;
const gestureConfidentColor =
confidence > 0.8 ? 'text-green-400' : confidence > 0.5 ? 'text-yellow-400' : 'text-gray-400';
return (
<div className="flex flex-col h-full space-y-3">
{/* Gesture Display */}
<div className="bg-gray-950 rounded-lg border border-cyan-950 p-3 space-y-2">
<div className="flex justify-between items-center">
<div className="text-cyan-700 text-xs font-bold tracking-widest">GESTURE</div>
<div className={`text-sm font-mono font-bold ${gestureConfidentColor}`}>
{gesture || 'Detecting...'}
</div>
</div>
{/* Confidence bar */}
{gesture && (
<div className="space-y-1">
<div className="text-xs text-gray-600">Confidence</div>
<div className="w-full bg-gray-900 rounded-full h-2 overflow-hidden">
<div
className={`h-full transition-all ${
confidence > 0.8
? 'bg-green-500'
: confidence > 0.5
? 'bg-yellow-500'
: 'bg-blue-500'
}`}
style={{ width: `${Math.round(confidence * 100)}%` }}
/>
</div>
<div className="text-right text-xs text-gray-500">
{Math.round(confidence * 100)}%
</div>
</div>
)}
</div>
{/* Hand Renders */}
<div className="flex-1 overflow-y-auto">
{hasLeftHand || hasRightHand ? (
<div className="flex gap-3 justify-center flex-wrap">
{hasLeftHand && (
<HandCanvas hand={leftHand} color="#10b981" label="LEFT HAND" />
)}
{hasRightHand && (
<HandCanvas hand={rightHand} color="#f59e0b" label="RIGHT HAND" />
)}
</div>
) : (
<div className="flex items-center justify-center h-full text-gray-600">
<div className="text-center">
<div className="text-sm mb-2">Waiting for hand detection</div>
<div className="text-xs text-gray-700">
Hands will appear when detected on /saltybot/hands
</div>
</div>
</div>
)}
</div>
{/* Hand Info */}
<div className="bg-gray-950 rounded-lg border border-cyan-950 p-3 space-y-2">
<div className="text-xs font-bold text-gray-400 tracking-widest mb-2">HAND STATUS</div>
<div className="grid grid-cols-2 gap-2">
<div className="bg-green-950 rounded p-2">
<div className="text-xs text-gray-600">LEFT</div>
<div className="text-lg font-mono text-green-400">
{hasLeftHand ? '✓' : '◯'}
</div>
</div>
<div className="bg-yellow-950 rounded p-2">
<div className="text-xs text-gray-600">RIGHT</div>
<div className="text-lg font-mono text-yellow-400">
{hasRightHand ? '✓' : '◯'}
</div>
</div>
</div>
</div>
{/* Landmark Info */}
<div className="bg-gray-950 rounded border border-gray-800 p-2 text-xs text-gray-600 space-y-1">
<div className="flex justify-between">
<span>Hand Format:</span>
<span className="text-gray-500">21 landmarks per hand (MediaPipe)</span>
</div>
<div className="flex justify-between">
<span>Topics:</span>
<span className="text-gray-500">/saltybot/hands, /saltybot/hand_gesture</span>
</div>
<div className="flex justify-between">
<span>Landmarks:</span>
<span className="text-gray-500">Wrist + fingers (5×4 joints)</span>
</div>
<div className="flex justify-between">
<span>Color Code:</span>
<span className="text-gray-500">🟢 Left | 🟠 Right</span>
</div>
</div>
</div>
);
}

View File

@ -1,384 +0,0 @@
/**
* Teleop.jsx Gamepad and keyboard teleoperation controller
*
* Features:
* - Virtual dual-stick gamepad (left=linear, right=angular velocity)
* - WASD keyboard fallback for manual driving
* - Speed limiter slider for safe operation
* - E-stop button for emergency stop
* - Real-time velocity display (m/s and rad/s)
* - Publishes geometry_msgs/Twist to /cmd_vel
* - Visual feedback with stick position and velocity vectors
* - 10% deadzone on both axes
*/
import { useEffect, useRef, useState } from 'react';
// Teleop tuning constants: stick/keyboard input in [-1, 1] is scaled by the
// MAX_* limits (and further by the user's speed-limiter slider) before being
// published as a Twist.
const MAX_LINEAR_VELOCITY = 0.5; // m/s at full stick deflection
const MAX_ANGULAR_VELOCITY = 1.0; // rad/s at full stick deflection
const DEADZONE = 0.1; // 10% deadzone — stick magnitudes below this read as zero
const STICK_UPDATE_RATE = 50; // ms between /cmd_vel publishes (~20 Hz)
/**
 * VirtualStick — one draggable on-screen joystick rendered on a canvas.
 *
 * Pointer drags are converted to a normalized {x, y} in [-1, 1] (y up),
 * clamped to the base circle and zeroed inside the DEADZONE, then reported
 * via onMove. Releasing or leaving the canvas snaps the stick back to
 * {x: 0, y: 0}.
 *
 * Props:
 *   position — current {x, y} (controlled by the parent).
 *   onMove   — callback({x, y}) with the new normalized position.
 *   label    — caption above the canvas.
 *   color    — knob/vector color.
 *   maxValue — normalization divisor for drawing the knob (default 1.0).
 */
function VirtualStick({
  position,
  onMove,
  label,
  color,
  maxValue = 1.0,
}) {
  const canvasRef = useRef(null);
  const containerRef = useRef(null);
  // True while a pointer drag is in progress (ref: no re-render needed).
  const isDraggingRef = useRef(false);
  // Draw stick — redraws whenever the controlled position changes.
  useEffect(() => {
    const canvas = canvasRef.current;
    if (!canvas) return;
    const ctx = canvas.getContext('2d');
    const width = canvas.width;
    const height = canvas.height;
    const centerX = width / 2;
    const centerY = height / 2;
    const baseRadius = Math.min(width, height) * 0.35;
    const knobRadius = Math.min(width, height) * 0.15;
    // Clear canvas
    ctx.fillStyle = '#1f2937';
    ctx.fillRect(0, 0, width, height);
    // Draw base circle
    ctx.strokeStyle = '#374151';
    ctx.lineWidth = 2;
    ctx.beginPath();
    ctx.arc(centerX, centerY, baseRadius, 0, Math.PI * 2);
    ctx.stroke();
    // Draw center crosshair
    ctx.strokeStyle = '#4b5563';
    ctx.lineWidth = 1;
    ctx.beginPath();
    ctx.moveTo(centerX - baseRadius * 0.3, centerY);
    ctx.lineTo(centerX + baseRadius * 0.3, centerY);
    ctx.moveTo(centerX, centerY - baseRadius * 0.3);
    ctx.lineTo(centerX, centerY + baseRadius * 0.3);
    ctx.stroke();
    // Draw deadzone circle (faded) so the user can see the dead band.
    ctx.strokeStyle = '#4b5563';
    ctx.lineWidth = 1;
    ctx.globalAlpha = 0.5;
    ctx.beginPath();
    ctx.arc(centerX, centerY, baseRadius * DEADZONE, 0, Math.PI * 2);
    ctx.stroke();
    ctx.globalAlpha = 1.0;
    // Draw knob at current position (canvas y grows downward, hence the minus).
    const knobX = centerX + (position.x / maxValue) * baseRadius;
    const knobY = centerY - (position.y / maxValue) * baseRadius;
    // Knob shadow
    ctx.fillStyle = 'rgba(0, 0, 0, 0.3)';
    ctx.beginPath();
    ctx.arc(knobX + 2, knobY + 2, knobRadius, 0, Math.PI * 2);
    ctx.fill();
    // Knob
    ctx.fillStyle = color;
    ctx.beginPath();
    ctx.arc(knobX, knobY, knobRadius, 0, Math.PI * 2);
    ctx.fill();
    // Knob border
    ctx.strokeStyle = '#fff';
    ctx.lineWidth = 2;
    ctx.beginPath();
    ctx.arc(knobX, knobY, knobRadius, 0, Math.PI * 2);
    ctx.stroke();
    // Draw velocity vector from center to knob while outside the deadzone.
    if (Math.abs(position.x) > DEADZONE || Math.abs(position.y) > DEADZONE) {
      ctx.strokeStyle = color;
      ctx.lineWidth = 2;
      ctx.globalAlpha = 0.7;
      ctx.beginPath();
      ctx.moveTo(centerX, centerY);
      ctx.lineTo(knobX, knobY);
      ctx.stroke();
      ctx.globalAlpha = 1.0;
    }
  }, [position, color, maxValue]);
  const handlePointerDown = (e) => {
    isDraggingRef.current = true;
    updateStickPosition(e);
  };
  const handlePointerMove = (e) => {
    if (!isDraggingRef.current) return;
    updateStickPosition(e);
  };
  // Release (or leaving the canvas) recenters the stick.
  const handlePointerUp = () => {
    isDraggingRef.current = false;
    onMove({ x: 0, y: 0 });
  };
  // Map a pointer event to a normalized stick position:
  // polar decompose, clamp magnitude to the base radius, apply deadzone.
  const updateStickPosition = (e) => {
    const canvas = canvasRef.current;
    const rect = canvas.getBoundingClientRect();
    const centerX = rect.width / 2;
    const centerY = rect.height / 2;
    const baseRadius = Math.min(rect.width, rect.height) * 0.35;
    const x = e.clientX - rect.left - centerX;
    const y = -(e.clientY - rect.top - centerY); // flip: screen y down -> stick y up
    const magnitude = Math.sqrt(x * x + y * y);
    const angle = Math.atan2(y, x);
    let clampedMagnitude = Math.min(magnitude, baseRadius) / baseRadius;
    // Apply deadzone
    if (clampedMagnitude < DEADZONE) {
      clampedMagnitude = 0;
    }
    onMove({
      x: Math.cos(angle) * clampedMagnitude,
      y: Math.sin(angle) * clampedMagnitude,
    });
  };
  return (
    <div ref={containerRef} className="flex flex-col items-center gap-2">
      <div className="text-xs font-bold text-gray-400 tracking-widest">{label}</div>
      <canvas
        ref={canvasRef}
        width={160}
        height={160}
        className="border-2 border-gray-800 rounded-lg bg-gray-900 cursor-grab active:cursor-grabbing"
        onPointerDown={handlePointerDown}
        onPointerMove={handlePointerMove}
        onPointerUp={handlePointerUp}
        onPointerLeave={handlePointerUp}
        style={{ touchAction: 'none' }}
      />
      <div className="text-xs text-gray-500 font-mono">
        X: {position.x.toFixed(2)} Y: {position.y.toFixed(2)}
      </div>
    </div>
  );
}
/**
 * Teleop — dual-stick / WASD teleoperation panel publishing geometry_msgs/Twist.
 *
 * Every STICK_UPDATE_RATE ms it combines stick and keyboard input, clamps to
 * [-1, 1], scales by MAX_LINEAR_VELOCITY / MAX_ANGULAR_VELOCITY and the user's
 * speed-limit slider (forced to 0 while E-stopped), and publishes to /cmd_vel.
 *
 * Props:
 *   publish(topic, msgType, msg) — ROS bridge publish helper (may be falsy).
 */
export function Teleop({ publish }) {
  // Normalized stick positions in [-1, 1]; y = forward, x = turn.
  const [leftStick, setLeftStick] = useState({ x: 0, y: 0 });
  const [rightStick, setRightStick] = useState({ x: 0, y: 0 });
  // Speed limiter fraction [0, 1] applied on top of the MAX_* constants.
  const [speedLimit, setSpeedLimit] = useState(1.0);
  const [isEstopped, setIsEstopped] = useState(false);
  // Last published velocities, for display only.
  const [linearVel, setLinearVel] = useState(0);
  const [angularVel, setAngularVel] = useState(0);
  // Currently held keys (ref: read inside the publish interval without re-render).
  const keysPressed = useRef({});
  // NOTE(review): declared but never used in this chunk — confirm before removing.
  const publishIntervalRef = useRef(null);
  // Keyboard handling — tracks held keys; preventDefault stops page scroll.
  // NOTE(review): the space key (' ') is captured here but never consumed
  // below — possibly intended as a keyboard E-stop; confirm.
  useEffect(() => {
    const handleKeyDown = (e) => {
      const key = e.key.toLowerCase();
      if (['w', 'a', 's', 'd', ' '].includes(key)) {
        keysPressed.current[key] = true;
        e.preventDefault();
      }
    };
    const handleKeyUp = (e) => {
      const key = e.key.toLowerCase();
      if (['w', 'a', 's', 'd', ' '].includes(key)) {
        keysPressed.current[key] = false;
        e.preventDefault();
      }
    };
    window.addEventListener('keydown', handleKeyDown);
    window.addEventListener('keyup', handleKeyUp);
    return () => {
      window.removeEventListener('keydown', handleKeyDown);
      window.removeEventListener('keyup', handleKeyUp);
    };
  }, []);
  // Calculate velocities from input and publish at STICK_UPDATE_RATE.
  // NOTE(review): the interval is torn down and recreated whenever a stick
  // moves (deps include leftStick/rightStick) — functionally fine at 50 ms,
  // but the cadence resets on every stick update.
  useEffect(() => {
    const interval = setInterval(() => {
      let linear = leftStick.y;
      let angular = rightStick.x;
      // WASD fallback — keyboard adds ±0.5 on top of stick input.
      if (keysPressed.current['w']) linear = Math.min(1, linear + 0.5);
      if (keysPressed.current['s']) linear = Math.max(-1, linear - 0.5);
      if (keysPressed.current['d']) angular = Math.min(1, angular + 0.5);
      if (keysPressed.current['a']) angular = Math.max(-1, angular - 0.5);
      // Clamp to [-1, 1]
      linear = Math.max(-1, Math.min(1, linear));
      angular = Math.max(-1, Math.min(1, angular));
      // Apply speed limit — E-stop forces the factor to zero, so zero Twists
      // keep being published while stopped.
      const speedFactor = isEstopped ? 0 : speedLimit;
      const finalLinear = linear * MAX_LINEAR_VELOCITY * speedFactor;
      const finalAngular = angular * MAX_ANGULAR_VELOCITY * speedFactor;
      setLinearVel(finalLinear);
      setAngularVel(finalAngular);
      // Publish Twist
      if (publish) {
        publish('/cmd_vel', 'geometry_msgs/Twist', {
          linear: { x: finalLinear, y: 0, z: 0 },
          angular: { x: 0, y: 0, z: finalAngular },
        });
      }
    }, STICK_UPDATE_RATE);
    return () => clearInterval(interval);
  }, [leftStick, rightStick, speedLimit, isEstopped, publish]);
  // Toggle E-stop; publishes an immediate zero Twist rather than waiting
  // for the next interval tick.
  const handleEstop = () => {
    setIsEstopped(!isEstopped);
    // Publish stop immediately
    if (publish) {
      publish('/cmd_vel', 'geometry_msgs/Twist', {
        linear: { x: 0, y: 0, z: 0 },
        angular: { x: 0, y: 0, z: 0 },
      });
    }
  };
  return (
    <div className="flex flex-col h-full space-y-3">
      {/* Status bar */}
      <div className={`rounded-lg border p-3 space-y-2 ${
        isEstopped
          ? 'bg-red-950 border-red-900'
          : 'bg-gray-950 border-cyan-950'
      }`}>
        <div className="flex justify-between items-center">
          <div className={`text-xs font-bold tracking-widest ${
            isEstopped ? 'text-red-700' : 'text-cyan-700'
          }`}>
            {isEstopped ? '🛑 E-STOP ACTIVE' : '⚡ TELEOP READY'}
          </div>
          <button
            onClick={handleEstop}
            className={`px-3 py-1 text-xs font-bold rounded border transition-colors ${
              isEstopped
                ? 'bg-green-950 border-green-800 text-green-400 hover:bg-green-900'
                : 'bg-red-950 border-red-800 text-red-400 hover:bg-red-900'
            }`}
          >
            {isEstopped ? 'RESUME' : 'E-STOP'}
          </button>
        </div>
        {/* Velocity display */}
        <div className="grid grid-cols-2 gap-2">
          <div className="bg-gray-900 rounded p-2">
            <div className="text-gray-600 text-xs">LINEAR</div>
            <div className="text-lg font-mono text-cyan-300">
              {linearVel.toFixed(2)} m/s
            </div>
          </div>
          <div className="bg-gray-900 rounded p-2">
            <div className="text-gray-600 text-xs">ANGULAR</div>
            <div className="text-lg font-mono text-amber-300">
              {angularVel.toFixed(2)} rad/s
            </div>
          </div>
        </div>
      </div>
      {/* Gamepad area */}
      <div className="flex-1 bg-gray-950 rounded-lg border border-cyan-950 p-4 flex justify-center items-center gap-8">
        <VirtualStick
          position={leftStick}
          onMove={setLeftStick}
          label="LEFT — LINEAR"
          color="#10b981"
          maxValue={1.0}
        />
        <VirtualStick
          position={rightStick}
          onMove={setRightStick}
          label="RIGHT — ANGULAR"
          color="#f59e0b"
          maxValue={1.0}
        />
      </div>
      {/* Speed limiter */}
      <div className="bg-gray-950 rounded-lg border border-cyan-950 p-3 space-y-2">
        <div className="flex justify-between items-center">
          <div className="text-cyan-700 text-xs font-bold tracking-widest">
            SPEED LIMITER
          </div>
          <div className="text-gray-400 text-xs font-mono">
            {(speedLimit * 100).toFixed(0)}%
          </div>
        </div>
        <input
          type="range"
          min="0"
          max="1"
          step="0.05"
          value={speedLimit}
          onChange={(e) => setSpeedLimit(parseFloat(e.target.value))}
          disabled={isEstopped}
          className="w-full cursor-pointer"
          style={{
            accentColor: isEstopped ? '#6b7280' : '#06b6d4',
          }}
        />
        <div className="grid grid-cols-3 gap-2 text-xs text-gray-600">
          <div className="text-center">0%</div>
          <div className="text-center">50%</div>
          <div className="text-center">100%</div>
        </div>
      </div>
      {/* Control info */}
      <div className="bg-gray-950 rounded border border-gray-800 p-2 text-xs text-gray-600 space-y-1">
        <div className="font-bold text-cyan-700 mb-2">KEYBOARD FALLBACK</div>
        <div className="grid grid-cols-2 gap-2">
          <div>
            <span className="text-gray-500">W/S:</span> <span className="text-gray-400 font-mono">Forward/Back</span>
          </div>
          <div>
            <span className="text-gray-500">A/D:</span> <span className="text-gray-400 font-mono">Turn Left/Right</span>
          </div>
        </div>
      </div>
      {/* Topic info */}
      <div className="bg-gray-950 rounded border border-gray-800 p-2 text-xs text-gray-600 space-y-1">
        <div className="flex justify-between">
          <span>Topic:</span>
          <span className="text-gray-500">/cmd_vel (geometry_msgs/Twist)</span>
        </div>
        <div className="flex justify-between">
          <span>Max Linear:</span>
          <span className="text-gray-500">{MAX_LINEAR_VELOCITY} m/s</span>
        </div>
        <div className="flex justify-between">
          <span>Max Angular:</span>
          <span className="text-gray-500">{MAX_ANGULAR_VELOCITY} rad/s</span>
        </div>
        <div className="flex justify-between">
          <span>Deadzone:</span>
          <span className="text-gray-500">{(DEADZONE * 100).toFixed(0)}%</span>
        </div>
      </div>
    </div>
  );
}